[official-gcc.git] / gcc / simplify-rtx.c
1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987-2014 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "tm.h"
25 #include "rtl.h"
26 #include "tree.h"
27 #include "varasm.h"
28 #include "tm_p.h"
29 #include "regs.h"
30 #include "hard-reg-set.h"
31 #include "flags.h"
32 #include "insn-config.h"
33 #include "recog.h"
34 #include "function.h"
35 #include "expr.h"
36 #include "diagnostic-core.h"
37 #include "ggc.h"
38 #include "target.h"
40 /* Simplification and canonicalization of RTL. */
42 /* Much code operates on (low, high) pairs; the low value is an
43 unsigned wide int, the high value a signed wide int. We
44 occasionally need to sign extend from low to high as if low were a
45 signed wide int. */
46 #define HWI_SIGN_EXTEND(low) \
47 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
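/* For example, assuming a 64-bit HOST_WIDE_INT, a LOW value whose top bit
   is set (such as 0x8000000000000000) yields a HIGH of -1, while any value
   with the top bit clear yields a HIGH of 0 -- exactly how the (low, high)
   pair would look if LOW were sign extended to twice its width.  */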
49 static rtx neg_const_int (enum machine_mode, const_rtx);
50 static bool plus_minus_operand_p (const_rtx);
51 static bool simplify_plus_minus_op_data_cmp (rtx, rtx);
52 static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx, rtx);
53 static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
54 unsigned int);
55 static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
56 rtx, rtx);
57 static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
58 enum machine_mode, rtx, rtx);
59 static rtx simplify_unary_operation_1 (enum rtx_code, enum machine_mode, rtx);
60 static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode,
61 rtx, rtx, rtx, rtx);
63 /* Negate a CONST_INT rtx, truncating (because a conversion from a
64 maximally negative number can overflow). */
65 static rtx
66 neg_const_int (enum machine_mode mode, const_rtx i)
68 return gen_int_mode (-(unsigned HOST_WIDE_INT) INTVAL (i), mode);
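/* For instance, in QImode the most negative value is -128; negating it
   overflows, and gen_int_mode truncates the result back into the mode, so
   neg_const_int (QImode, constm1_rtx) is (const_int 1) while
   neg_const_int (QImode, GEN_INT (-128)) is again (const_int -128).  */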
71 /* Test whether expression, X, is an immediate constant that represents
72 the most significant bit of machine mode MODE. */
74 bool
75 mode_signbit_p (enum machine_mode mode, const_rtx x)
77 unsigned HOST_WIDE_INT val;
78 unsigned int width;
80 if (GET_MODE_CLASS (mode) != MODE_INT)
81 return false;
83 width = GET_MODE_PRECISION (mode);
84 if (width == 0)
85 return false;
87 if (width <= HOST_BITS_PER_WIDE_INT
88 && CONST_INT_P (x))
89 val = INTVAL (x);
90 #if TARGET_SUPPORTS_WIDE_INT
91 else if (CONST_WIDE_INT_P (x))
93 unsigned int i;
94 unsigned int elts = CONST_WIDE_INT_NUNITS (x);
95 if (elts != (width + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT)
96 return false;
97 for (i = 0; i < elts - 1; i++)
98 if (CONST_WIDE_INT_ELT (x, i) != 0)
99 return false;
100 val = CONST_WIDE_INT_ELT (x, elts - 1);
101 width %= HOST_BITS_PER_WIDE_INT;
102 if (width == 0)
103 width = HOST_BITS_PER_WIDE_INT;
105 #else
106 else if (width <= HOST_BITS_PER_DOUBLE_INT
107 && CONST_DOUBLE_AS_INT_P (x)
108 && CONST_DOUBLE_LOW (x) == 0)
110 val = CONST_DOUBLE_HIGH (x);
111 width -= HOST_BITS_PER_WIDE_INT;
113 #endif
114 else
115 /* X is not an integer constant. */
116 return false;
118 if (width < HOST_BITS_PER_WIDE_INT)
119 val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
120 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
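/* As a concrete case: for SImode (precision 32) the only constants accepted
   are those whose low 32 bits are 0x80000000, i.e. the canonical
   (const_int -2147483648) produced by gen_int_mode; any other constant,
   and any non-integer rtx, makes the function return false.  */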
123 /* Test whether VAL is equal to the most significant bit of mode MODE
124 (after masking with the mode mask of MODE). Returns false if the
125 precision of MODE is too large to handle. */
127 bool
128 val_signbit_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
130 unsigned int width;
132 if (GET_MODE_CLASS (mode) != MODE_INT)
133 return false;
135 width = GET_MODE_PRECISION (mode);
136 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
137 return false;
139 val &= GET_MODE_MASK (mode);
140 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
143 /* Test whether the most significant bit of mode MODE is set in VAL.
144 Returns false if the precision of MODE is too large to handle. */
145 bool
146 val_signbit_known_set_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
148 unsigned int width;
150 if (GET_MODE_CLASS (mode) != MODE_INT)
151 return false;
153 width = GET_MODE_PRECISION (mode);
154 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
155 return false;
157 val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
158 return val != 0;
161 /* Test whether the most significant bit of mode MODE is clear in VAL.
162 Returns false if the precision of MODE is too large to handle. */
163 bool
164 val_signbit_known_clear_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
166 unsigned int width;
168 if (GET_MODE_CLASS (mode) != MODE_INT)
169 return false;
171 width = GET_MODE_PRECISION (mode);
172 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
173 return false;
175 val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
176 return val == 0;
179 /* Make a binary operation by properly ordering the operands and
180 seeing if the expression folds. */
183 simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
184 rtx op1)
186 rtx tem;
188 /* If this simplifies, do it. */
189 tem = simplify_binary_operation (code, mode, op0, op1);
190 if (tem)
191 return tem;
193 /* Put complex operands first and constants second if commutative. */
194 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
195 && swap_commutative_operands_p (op0, op1))
196 tem = op0, op0 = op1, op1 = tem;
198 return gen_rtx_fmt_ee (code, mode, op0, op1);
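/* A hypothetical caller, with r0 and r1 holding SImode pseudo-registers
   (illustrative names):

     simplify_gen_binary (PLUS, SImode, r0, const0_rtx)

   folds immediately to r0, because (plus X 0) simplifies, whereas

     simplify_gen_binary (PLUS, SImode, const1_rtx, r1)

   cannot be folded, so the commutative operands are reordered and a fresh
   (plus:SI (reg:SI r1) (const_int 1)) is built with gen_rtx_fmt_ee.  */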
201 /* If X is a MEM referencing the constant pool, return the real value.
202 Otherwise return X. */
204 avoid_constant_pool_reference (rtx x)
206 rtx c, tmp, addr;
207 enum machine_mode cmode;
208 HOST_WIDE_INT offset = 0;
210 switch (GET_CODE (x))
212 case MEM:
213 break;
215 case FLOAT_EXTEND:
216 /* Handle float extensions of constant pool references. */
217 tmp = XEXP (x, 0);
218 c = avoid_constant_pool_reference (tmp);
219 if (c != tmp && CONST_DOUBLE_AS_FLOAT_P (c))
221 REAL_VALUE_TYPE d;
223 REAL_VALUE_FROM_CONST_DOUBLE (d, c);
224 return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
226 return x;
228 default:
229 return x;
232 if (GET_MODE (x) == BLKmode)
233 return x;
235 addr = XEXP (x, 0);
237 /* Call target hook to avoid the effects of -fpic etc.... */
238 addr = targetm.delegitimize_address (addr);
240 /* Split the address into a base and integer offset. */
241 if (GET_CODE (addr) == CONST
242 && GET_CODE (XEXP (addr, 0)) == PLUS
243 && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
245 offset = INTVAL (XEXP (XEXP (addr, 0), 1));
246 addr = XEXP (XEXP (addr, 0), 0);
249 if (GET_CODE (addr) == LO_SUM)
250 addr = XEXP (addr, 1);
252 /* If this is a constant pool reference, we can turn it into its
253 constant and hope that simplifications happen. */
254 if (GET_CODE (addr) == SYMBOL_REF
255 && CONSTANT_POOL_ADDRESS_P (addr))
257 c = get_pool_constant (addr);
258 cmode = get_pool_mode (addr);
260 /* If we're accessing the constant in a different mode than it was
261 originally stored, attempt to fix that up via subreg simplifications.
262 If that fails we have no choice but to return the original memory. */
263 if ((offset != 0 || cmode != GET_MODE (x))
264 && offset >= 0 && offset < GET_MODE_SIZE (cmode))
266 rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
267 if (tem && CONSTANT_P (tem))
268 return tem;
270 else
271 return c;
274 return x;
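/* For instance, if X is (mem:SF (symbol_ref)) and the symbol addresses a
   constant-pool entry holding the SFmode value 1.0, the call returns the
   CONST_DOUBLE for 1.0 so that later simplifications can fold with it;
   a MEM that is not a constant-pool reference comes back unchanged.  */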
277 /* Simplify a MEM based on its attributes. This is the default
278 delegitimize_address target hook, and it's recommended that every
279 overrider call it. */
282 delegitimize_mem_from_attrs (rtx x)
284 /* MEMs without MEM_OFFSETs may have been offset, so we can't just
285 use their base addresses as equivalent. */
286 if (MEM_P (x)
287 && MEM_EXPR (x)
288 && MEM_OFFSET_KNOWN_P (x))
290 tree decl = MEM_EXPR (x);
291 enum machine_mode mode = GET_MODE (x);
292 HOST_WIDE_INT offset = 0;
294 switch (TREE_CODE (decl))
296 default:
297 decl = NULL;
298 break;
300 case VAR_DECL:
301 break;
303 case ARRAY_REF:
304 case ARRAY_RANGE_REF:
305 case COMPONENT_REF:
306 case BIT_FIELD_REF:
307 case REALPART_EXPR:
308 case IMAGPART_EXPR:
309 case VIEW_CONVERT_EXPR:
311 HOST_WIDE_INT bitsize, bitpos;
312 tree toffset;
313 int unsignedp, volatilep = 0;
315 decl = get_inner_reference (decl, &bitsize, &bitpos, &toffset,
316 &mode, &unsignedp, &volatilep, false);
317 if (bitsize != GET_MODE_BITSIZE (mode)
318 || (bitpos % BITS_PER_UNIT)
319 || (toffset && !tree_fits_shwi_p (toffset)))
320 decl = NULL;
321 else
323 offset += bitpos / BITS_PER_UNIT;
324 if (toffset)
325 offset += tree_to_shwi (toffset);
327 break;
331 if (decl
332 && mode == GET_MODE (x)
333 && TREE_CODE (decl) == VAR_DECL
334 && (TREE_STATIC (decl)
335 || DECL_THREAD_LOCAL_P (decl))
336 && DECL_RTL_SET_P (decl)
337 && MEM_P (DECL_RTL (decl)))
339 rtx newx;
341 offset += MEM_OFFSET (x);
343 newx = DECL_RTL (decl);
345 if (MEM_P (newx))
347 rtx n = XEXP (newx, 0), o = XEXP (x, 0);
349 /* Avoid creating a new MEM needlessly if we already had
350 the same address. We do this if there's no OFFSET and the
351 old address X is identical to NEWX, or if X is of the
352 form (plus NEWX OFFSET), or the NEWX is of the form
353 (plus Y (const_int Z)) and X is that with the offset
354 added: (plus Y (const_int Z+OFFSET)). */
355 if (!((offset == 0
356 || (GET_CODE (o) == PLUS
357 && GET_CODE (XEXP (o, 1)) == CONST_INT
358 && (offset == INTVAL (XEXP (o, 1))
359 || (GET_CODE (n) == PLUS
360 && GET_CODE (XEXP (n, 1)) == CONST_INT
361 && (INTVAL (XEXP (n, 1)) + offset
362 == INTVAL (XEXP (o, 1)))
363 && (n = XEXP (n, 0))))
364 && (o = XEXP (o, 0))))
365 && rtx_equal_p (o, n)))
366 x = adjust_address_nv (newx, mode, offset);
368 else if (GET_MODE (x) == GET_MODE (newx)
369 && offset == 0)
370 x = newx;
374 return x;
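/* As an illustration, suppose X is (mem:SI (reg:SI R)) whose MEM_EXPR is a
   static VAR_DECL with DECL_RTL (mem:SI (symbol_ref "v")) and whose
   MEM_OFFSET is 0: the function rewrites X to address "v" directly, which
   undoes the effect of the address having been legitimized into a register.
   R and "v" are purely illustrative names.  */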
377 /* Make a unary operation by first seeing if it folds and otherwise making
378 the specified operation. */
381 simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
382 enum machine_mode op_mode)
384 rtx tem;
386 /* If this simplifies, use it. */
387 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
388 return tem;
390 return gen_rtx_fmt_e (code, mode, op);
393 /* Likewise for ternary operations. */
396 simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
397 enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
399 rtx tem;
401 /* If this simplifies, use it. */
402 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
403 op0, op1, op2)))
404 return tem;
406 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
409 /* Likewise, for relational operations.
410 CMP_MODE specifies mode comparison is done in. */
413 simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
414 enum machine_mode cmp_mode, rtx op0, rtx op1)
416 rtx tem;
418 if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
419 op0, op1)))
420 return tem;
422 return gen_rtx_fmt_ee (code, mode, op0, op1);
425 /* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
426 and simplify the result. If FN is non-NULL, call this callback on each
427 X, if it returns non-NULL, replace X with its return value and simplify the
428 result. */
431 simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
432 rtx (*fn) (rtx, const_rtx, void *), void *data)
434 enum rtx_code code = GET_CODE (x);
435 enum machine_mode mode = GET_MODE (x);
436 enum machine_mode op_mode;
437 const char *fmt;
438 rtx op0, op1, op2, newx, op;
439 rtvec vec, newvec;
440 int i, j;
442 if (__builtin_expect (fn != NULL, 0))
444 newx = fn (x, old_rtx, data);
445 if (newx)
446 return newx;
448 else if (rtx_equal_p (x, old_rtx))
449 return copy_rtx ((rtx) data);
451 switch (GET_RTX_CLASS (code))
453 case RTX_UNARY:
454 op0 = XEXP (x, 0);
455 op_mode = GET_MODE (op0);
456 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
457 if (op0 == XEXP (x, 0))
458 return x;
459 return simplify_gen_unary (code, mode, op0, op_mode);
461 case RTX_BIN_ARITH:
462 case RTX_COMM_ARITH:
463 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
464 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
465 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
466 return x;
467 return simplify_gen_binary (code, mode, op0, op1);
469 case RTX_COMPARE:
470 case RTX_COMM_COMPARE:
471 op0 = XEXP (x, 0);
472 op1 = XEXP (x, 1);
473 op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
474 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
475 op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
476 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
477 return x;
478 return simplify_gen_relational (code, mode, op_mode, op0, op1);
480 case RTX_TERNARY:
481 case RTX_BITFIELD_OPS:
482 op0 = XEXP (x, 0);
483 op_mode = GET_MODE (op0);
484 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
485 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
486 op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
487 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
488 return x;
489 if (op_mode == VOIDmode)
490 op_mode = GET_MODE (op0);
491 return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
493 case RTX_EXTRA:
494 if (code == SUBREG)
496 op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
497 if (op0 == SUBREG_REG (x))
498 return x;
499 op0 = simplify_gen_subreg (GET_MODE (x), op0,
500 GET_MODE (SUBREG_REG (x)),
501 SUBREG_BYTE (x));
502 return op0 ? op0 : x;
504 break;
506 case RTX_OBJ:
507 if (code == MEM)
509 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
510 if (op0 == XEXP (x, 0))
511 return x;
512 return replace_equiv_address_nv (x, op0);
514 else if (code == LO_SUM)
516 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
517 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
519 /* (lo_sum (high x) x) -> x */
520 if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
521 return op1;
523 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
524 return x;
525 return gen_rtx_LO_SUM (mode, op0, op1);
527 break;
529 default:
530 break;
533 newx = x;
534 fmt = GET_RTX_FORMAT (code);
535 for (i = 0; fmt[i]; i++)
536 switch (fmt[i])
538 case 'E':
539 vec = XVEC (x, i);
540 newvec = XVEC (newx, i);
541 for (j = 0; j < GET_NUM_ELEM (vec); j++)
543 op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
544 old_rtx, fn, data);
545 if (op != RTVEC_ELT (vec, j))
547 if (newvec == vec)
549 newvec = shallow_copy_rtvec (vec);
550 if (x == newx)
551 newx = shallow_copy_rtx (x);
552 XVEC (newx, i) = newvec;
554 RTVEC_ELT (newvec, j) = op;
557 break;
559 case 'e':
560 if (XEXP (x, i))
562 op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
563 if (op != XEXP (x, i))
565 if (x == newx)
566 newx = shallow_copy_rtx (x);
567 XEXP (newx, i) = op;
570 break;
572 return newx;
575 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
576 resulting RTX. Return a new RTX which is as simplified as possible. */
579 simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
581 return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
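/* For example, simplify_replace_rtx applied to (plus:SI (reg:SI 100)
   (const_int 3)) with OLD_RTX = (reg:SI 100) and NEW_RTX = (const_int 4)
   substitutes the operand and lets simplify_gen_binary fold the result,
   yielding (const_int 7).  Pseudo 100 is just an illustrative register.  */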
584 /* Try to simplify a MODE truncation of OP, which has OP_MODE.
585 Only handle cases where the truncated value is inherently an rvalue.
587 RTL provides two ways of truncating a value:
589 1. a lowpart subreg. This form is only a truncation when both
590 the outer and inner modes (here MODE and OP_MODE respectively)
591 are scalar integers, and only then when the subreg is used as
592 an rvalue.
594 It is only valid to form such truncating subregs if the
595 truncation requires no action by the target. The onus for
596 proving this is on the creator of the subreg -- e.g. the
597 caller to simplify_subreg or simplify_gen_subreg -- and typically
598 involves either TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode.
600 2. a TRUNCATE. This form handles both scalar and compound integers.
602 The first form is preferred where valid. However, the TRUNCATE
603 handling in simplify_unary_operation turns the second form into the
604 first form when TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode allow,
605 so it is generally safe to form rvalue truncations using:
607 simplify_gen_unary (TRUNCATE, ...)
609 and leave simplify_unary_operation to work out which representation
610 should be used.
612 Because of the proof requirements on (1), simplify_truncation must
613 also use simplify_gen_unary (TRUNCATE, ...) to truncate parts of OP,
614 regardless of whether the outer truncation came from a SUBREG or a
615 TRUNCATE. For example, if the caller has proven that an SImode
616 truncation of:
618 (and:DI X Y)
620 is a no-op and can be represented as a subreg, it does not follow
621 that SImode truncations of X and Y are also no-ops. On a target
622 like 64-bit MIPS that requires SImode values to be stored in
623 sign-extended form, an SImode truncation of:
625 (and:DI (reg:DI X) (const_int 63))
627 is trivially a no-op because only the lower 6 bits can be set.
628 However, X is still an arbitrary 64-bit number and so we cannot
629 assume that truncating it too is a no-op. */
631 static rtx
632 simplify_truncation (enum machine_mode mode, rtx op,
633 enum machine_mode op_mode)
635 unsigned int precision = GET_MODE_UNIT_PRECISION (mode);
636 unsigned int op_precision = GET_MODE_UNIT_PRECISION (op_mode);
637 gcc_assert (precision <= op_precision);
639 /* Optimize truncations of zero and sign extended values. */
640 if (GET_CODE (op) == ZERO_EXTEND
641 || GET_CODE (op) == SIGN_EXTEND)
643 /* There are three possibilities. If MODE is the same as the
644 origmode, we can omit both the extension and the subreg.
645 If MODE is not larger than the origmode, we can apply the
646 truncation without the extension. Finally, if the outermode
647 is larger than the origmode, we can just extend to the appropriate
648 mode. */
649 enum machine_mode origmode = GET_MODE (XEXP (op, 0));
650 if (mode == origmode)
651 return XEXP (op, 0);
652 else if (precision <= GET_MODE_UNIT_PRECISION (origmode))
653 return simplify_gen_unary (TRUNCATE, mode,
654 XEXP (op, 0), origmode);
655 else
656 return simplify_gen_unary (GET_CODE (op), mode,
657 XEXP (op, 0), origmode);
660 /* If the machine can perform operations in the truncated mode, distribute
661 the truncation, i.e. simplify (truncate:QI (op:SI (x:SI) (y:SI))) into
662 (op:QI (truncate:QI (x:SI)) (truncate:QI (y:SI))). */
663 if (1
664 #ifdef WORD_REGISTER_OPERATIONS
665 && precision >= BITS_PER_WORD
666 #endif
667 && (GET_CODE (op) == PLUS
668 || GET_CODE (op) == MINUS
669 || GET_CODE (op) == MULT))
671 rtx op0 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0), op_mode);
672 if (op0)
674 rtx op1 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 1), op_mode);
675 if (op1)
676 return simplify_gen_binary (GET_CODE (op), mode, op0, op1);
680 /* Simplify (truncate:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C)) into
681 to (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
682 the outer subreg is effectively a truncation to the original mode. */
683 if ((GET_CODE (op) == LSHIFTRT
684 || GET_CODE (op) == ASHIFTRT)
685 /* Ensure that OP_MODE is at least twice as wide as MODE
686 to avoid the possibility that an outer LSHIFTRT shifts by more
687 than the sign extension's sign_bit_copies and introduces zeros
688 into the high bits of the result. */
689 && 2 * precision <= op_precision
690 && CONST_INT_P (XEXP (op, 1))
691 && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
692 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
693 && UINTVAL (XEXP (op, 1)) < precision)
694 return simplify_gen_binary (ASHIFTRT, mode,
695 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
697 /* Likewise (truncate:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C)) into
698 to (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
699 the outer subreg is effectively a truncation to the original mode. */
700 if ((GET_CODE (op) == LSHIFTRT
701 || GET_CODE (op) == ASHIFTRT)
702 && CONST_INT_P (XEXP (op, 1))
703 && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
704 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
705 && UINTVAL (XEXP (op, 1)) < precision)
706 return simplify_gen_binary (LSHIFTRT, mode,
707 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
709 /* Likewise (truncate:QI (ashift:SI (zero_extend:SI (x:QI)) C)) into
710 to (ashift:QI (x:QI) C), where C is a suitable small constant and
711 the outer subreg is effectively a truncation to the original mode. */
712 if (GET_CODE (op) == ASHIFT
713 && CONST_INT_P (XEXP (op, 1))
714 && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
715 || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
716 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
717 && UINTVAL (XEXP (op, 1)) < precision)
718 return simplify_gen_binary (ASHIFT, mode,
719 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
721 /* Recognize a word extraction from a multi-word subreg. */
722 if ((GET_CODE (op) == LSHIFTRT
723 || GET_CODE (op) == ASHIFTRT)
724 && SCALAR_INT_MODE_P (mode)
725 && SCALAR_INT_MODE_P (op_mode)
726 && precision >= BITS_PER_WORD
727 && 2 * precision <= op_precision
728 && CONST_INT_P (XEXP (op, 1))
729 && (INTVAL (XEXP (op, 1)) & (precision - 1)) == 0
730 && UINTVAL (XEXP (op, 1)) < op_precision)
732 int byte = subreg_lowpart_offset (mode, op_mode);
733 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
734 return simplify_gen_subreg (mode, XEXP (op, 0), op_mode,
735 (WORDS_BIG_ENDIAN
736 ? byte - shifted_bytes
737 : byte + shifted_bytes));
740 /* If we have a TRUNCATE of a right shift of MEM, make a new MEM
741 and try replacing the TRUNCATE and shift with it. Don't do this
742 if the MEM has a mode-dependent address. */
743 if ((GET_CODE (op) == LSHIFTRT
744 || GET_CODE (op) == ASHIFTRT)
745 && SCALAR_INT_MODE_P (op_mode)
746 && MEM_P (XEXP (op, 0))
747 && CONST_INT_P (XEXP (op, 1))
748 && (INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (mode)) == 0
749 && INTVAL (XEXP (op, 1)) > 0
750 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (op_mode)
751 && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0),
752 MEM_ADDR_SPACE (XEXP (op, 0)))
753 && ! MEM_VOLATILE_P (XEXP (op, 0))
754 && (GET_MODE_SIZE (mode) >= UNITS_PER_WORD
755 || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
757 int byte = subreg_lowpart_offset (mode, op_mode);
758 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
759 return adjust_address_nv (XEXP (op, 0), mode,
760 (WORDS_BIG_ENDIAN
761 ? byte - shifted_bytes
762 : byte + shifted_bytes));
765 /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
766 (OP:SI foo:SI) if OP is NEG or ABS. */
767 if ((GET_CODE (op) == ABS
768 || GET_CODE (op) == NEG)
769 && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
770 || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
771 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
772 return simplify_gen_unary (GET_CODE (op), mode,
773 XEXP (XEXP (op, 0), 0), mode);
775 /* (truncate:A (subreg:B (truncate:C X) 0)) is
776 (truncate:A X). */
777 if (GET_CODE (op) == SUBREG
778 && SCALAR_INT_MODE_P (mode)
779 && SCALAR_INT_MODE_P (op_mode)
780 && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (op)))
781 && GET_CODE (SUBREG_REG (op)) == TRUNCATE
782 && subreg_lowpart_p (op))
784 rtx inner = XEXP (SUBREG_REG (op), 0);
785 if (GET_MODE_PRECISION (mode)
786 <= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op))))
787 return simplify_gen_unary (TRUNCATE, mode, inner, GET_MODE (inner));
788 else
789 /* If subreg above is paradoxical and C is narrower
790 than A, return (subreg:A (truncate:C X) 0). */
791 return simplify_gen_subreg (mode, SUBREG_REG (op),
792 GET_MODE (SUBREG_REG (op)), 0);
795 /* (truncate:A (truncate:B X)) is (truncate:A X). */
796 if (GET_CODE (op) == TRUNCATE)
797 return simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0),
798 GET_MODE (XEXP (op, 0)));
800 return NULL_RTX;
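/* A worked instance of one of the shift rules above: truncating
   (lshiftrt:SI (zero_extend:SI (reg:QI X)) (const_int 3)) to QImode gives
   (lshiftrt:QI (reg:QI X) (const_int 3)), since the shift count 3 is below
   the QImode precision of 8 and the zero extension contributes nothing to
   the low byte.  X stands for any QImode register here.  */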
803 /* Try to simplify a unary operation CODE whose output mode is to be
804 MODE with input operand OP whose mode was originally OP_MODE.
805 Return zero if no simplification can be made. */
807 simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
808 rtx op, enum machine_mode op_mode)
810 rtx trueop, tem;
812 trueop = avoid_constant_pool_reference (op);
814 tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
815 if (tem)
816 return tem;
818 return simplify_unary_operation_1 (code, mode, op);
821 /* Perform some simplifications we can do even if the operands
822 aren't constant. */
823 static rtx
824 simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
826 enum rtx_code reversed;
827 rtx temp;
829 switch (code)
831 case NOT:
832 /* (not (not X)) == X. */
833 if (GET_CODE (op) == NOT)
834 return XEXP (op, 0);
836 /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
837 comparison is all ones. */
838 if (COMPARISON_P (op)
839 && (mode == BImode || STORE_FLAG_VALUE == -1)
840 && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
841 return simplify_gen_relational (reversed, mode, VOIDmode,
842 XEXP (op, 0), XEXP (op, 1));
844 /* (not (plus X -1)) can become (neg X). */
845 if (GET_CODE (op) == PLUS
846 && XEXP (op, 1) == constm1_rtx)
847 return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
849 /* Similarly, (not (neg X)) is (plus X -1). */
850 if (GET_CODE (op) == NEG)
851 return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
852 CONSTM1_RTX (mode));
854 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
855 if (GET_CODE (op) == XOR
856 && CONST_INT_P (XEXP (op, 1))
857 && (temp = simplify_unary_operation (NOT, mode,
858 XEXP (op, 1), mode)) != 0)
859 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
861 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
862 if (GET_CODE (op) == PLUS
863 && CONST_INT_P (XEXP (op, 1))
864 && mode_signbit_p (mode, XEXP (op, 1))
865 && (temp = simplify_unary_operation (NOT, mode,
866 XEXP (op, 1), mode)) != 0)
867 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
870 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
871 operands other than 1, but that is not valid. We could do a
872 similar simplification for (not (lshiftrt C X)) where C is
873 just the sign bit, but this doesn't seem common enough to
874 bother with. */
875 if (GET_CODE (op) == ASHIFT
876 && XEXP (op, 0) == const1_rtx)
878 temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
879 return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
882 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
883 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
884 so we can perform the above simplification. */
885 if (STORE_FLAG_VALUE == -1
886 && GET_CODE (op) == ASHIFTRT
887 && CONST_INT_P (XEXP (op, 1))
888 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
889 return simplify_gen_relational (GE, mode, VOIDmode,
890 XEXP (op, 0), const0_rtx);
893 if (GET_CODE (op) == SUBREG
894 && subreg_lowpart_p (op)
895 && (GET_MODE_SIZE (GET_MODE (op))
896 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
897 && GET_CODE (SUBREG_REG (op)) == ASHIFT
898 && XEXP (SUBREG_REG (op), 0) == const1_rtx)
900 enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
901 rtx x;
903 x = gen_rtx_ROTATE (inner_mode,
904 simplify_gen_unary (NOT, inner_mode, const1_rtx,
905 inner_mode),
906 XEXP (SUBREG_REG (op), 1));
907 temp = rtl_hooks.gen_lowpart_no_emit (mode, x);
908 if (temp)
909 return temp;
912 /* Apply De Morgan's laws to reduce number of patterns for machines
913 with negating logical insns (and-not, nand, etc.). If result has
914 only one NOT, put it first, since that is how the patterns are
915 coded. */
916 if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
918 rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
919 enum machine_mode op_mode;
921 op_mode = GET_MODE (in1);
922 in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);
924 op_mode = GET_MODE (in2);
925 if (op_mode == VOIDmode)
926 op_mode = mode;
927 in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);
929 if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
931 rtx tem = in2;
932 in2 = in1; in1 = tem;
935 return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
936 mode, in1, in2);
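/* Concretely, (not:SI (and:SI (reg:SI A) (const_int 15))) becomes
   (ior:SI (not:SI (reg:SI A)) (const_int -16)): the NOT of the constant
   folds to ~15, and the remaining NOT ends up as the first operand, which
   matches how and-not/nand style patterns are written.  A is illustrative.  */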
939 /* (not (bswap x)) -> (bswap (not x)). */
940 if (GET_CODE (op) == BSWAP)
942 rtx x = simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
943 return simplify_gen_unary (BSWAP, mode, x, mode);
945 break;
947 case NEG:
948 /* (neg (neg X)) == X. */
949 if (GET_CODE (op) == NEG)
950 return XEXP (op, 0);
952 /* (neg (plus X 1)) can become (not X). */
953 if (GET_CODE (op) == PLUS
954 && XEXP (op, 1) == const1_rtx)
955 return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
957 /* Similarly, (neg (not X)) is (plus X 1). */
958 if (GET_CODE (op) == NOT)
959 return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
960 CONST1_RTX (mode));
962 /* (neg (minus X Y)) can become (minus Y X). This transformation
963 isn't safe for modes with signed zeros, since if X and Y are
964 both +0, (minus Y X) is the same as (minus X Y). If the
965 rounding mode is towards +infinity (or -infinity) then the two
966 expressions will be rounded differently. */
967 if (GET_CODE (op) == MINUS
968 && !HONOR_SIGNED_ZEROS (mode)
969 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
970 return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
972 if (GET_CODE (op) == PLUS
973 && !HONOR_SIGNED_ZEROS (mode)
974 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
976 /* (neg (plus A C)) is simplified to (minus -C A). */
977 if (CONST_SCALAR_INT_P (XEXP (op, 1))
978 || CONST_DOUBLE_AS_FLOAT_P (XEXP (op, 1)))
980 temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
981 if (temp)
982 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
985 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
986 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
987 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
990 /* (neg (mult A B)) becomes (mult A (neg B)).
991 This works even for floating-point values. */
992 if (GET_CODE (op) == MULT
993 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
995 temp = simplify_gen_unary (NEG, mode, XEXP (op, 1), mode);
996 return simplify_gen_binary (MULT, mode, XEXP (op, 0), temp);
999 /* NEG commutes with ASHIFT since it is multiplication. Only do
1000 this if we can then eliminate the NEG (e.g., if the operand
1001 is a constant). */
1002 if (GET_CODE (op) == ASHIFT)
1004 temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
1005 if (temp)
1006 return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
1009 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
1010 C is equal to the width of MODE minus 1. */
1011 if (GET_CODE (op) == ASHIFTRT
1012 && CONST_INT_P (XEXP (op, 1))
1013 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
1014 return simplify_gen_binary (LSHIFTRT, mode,
1015 XEXP (op, 0), XEXP (op, 1));
1017 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
1018 C is equal to the width of MODE minus 1. */
1019 if (GET_CODE (op) == LSHIFTRT
1020 && CONST_INT_P (XEXP (op, 1))
1021 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
1022 return simplify_gen_binary (ASHIFTRT, mode,
1023 XEXP (op, 0), XEXP (op, 1));
1025 /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
1026 if (GET_CODE (op) == XOR
1027 && XEXP (op, 1) == const1_rtx
1028 && nonzero_bits (XEXP (op, 0), mode) == 1)
1029 return plus_constant (mode, XEXP (op, 0), -1);
1031 /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1. */
1032 /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1. */
1033 if (GET_CODE (op) == LT
1034 && XEXP (op, 1) == const0_rtx
1035 && SCALAR_INT_MODE_P (GET_MODE (XEXP (op, 0))))
1037 enum machine_mode inner = GET_MODE (XEXP (op, 0));
1038 int isize = GET_MODE_PRECISION (inner);
1039 if (STORE_FLAG_VALUE == 1)
1041 temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
1042 GEN_INT (isize - 1));
1043 if (mode == inner)
1044 return temp;
1045 if (GET_MODE_PRECISION (mode) > isize)
1046 return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
1047 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
1049 else if (STORE_FLAG_VALUE == -1)
1051 temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
1052 GEN_INT (isize - 1));
1053 if (mode == inner)
1054 return temp;
1055 if (GET_MODE_PRECISION (mode) > isize)
1056 return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
1057 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
1060 break;
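/* For instance, with STORE_FLAG_VALUE == 1 the rule above turns
   (neg:SI (lt:SI (reg:SI X) (const_int 0))) into
   (ashiftrt:SI (reg:SI X) (const_int 31)): both are 0 for non-negative X
   and -1 for negative X.  X is illustrative.  */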
1062 case TRUNCATE:
1063 /* Don't optimize (lshiftrt (mult ...)) as it would interfere
1064 with the umulXi3_highpart patterns. */
1065 if (GET_CODE (op) == LSHIFTRT
1066 && GET_CODE (XEXP (op, 0)) == MULT)
1067 break;
1069 if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
1071 if (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op)))
1073 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1074 if (temp)
1075 return temp;
1077 /* We can't handle truncation to a partial integer mode here
1078 because we don't know the real bitsize of the partial
1079 integer mode. */
1080 break;
1083 if (GET_MODE (op) != VOIDmode)
1085 temp = simplify_truncation (mode, op, GET_MODE (op));
1086 if (temp)
1087 return temp;
1090 /* If we know that the value is already truncated, we can
1091 replace the TRUNCATE with a SUBREG. */
1092 if (GET_MODE_NUNITS (mode) == 1
1093 && (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op))
1094 || truncated_to_mode (mode, op)))
1096 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1097 if (temp)
1098 return temp;
1101 /* A truncate of a comparison can be replaced with a subreg if
1102 STORE_FLAG_VALUE permits. This is like the previous test,
1103 but it works even if the comparison is done in a mode larger
1104 than HOST_BITS_PER_WIDE_INT. */
1105 if (HWI_COMPUTABLE_MODE_P (mode)
1106 && COMPARISON_P (op)
1107 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
1109 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1110 if (temp)
1111 return temp;
1114 /* A truncate of a memory is just loading the low part of the memory
1115 if we are not changing the meaning of the address. */
1116 if (GET_CODE (op) == MEM
1117 && !VECTOR_MODE_P (mode)
1118 && !MEM_VOLATILE_P (op)
1119 && !mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op)))
1121 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1122 if (temp)
1123 return temp;
1126 break;
1128 case FLOAT_TRUNCATE:
1129 if (DECIMAL_FLOAT_MODE_P (mode))
1130 break;
1132 /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */
1133 if (GET_CODE (op) == FLOAT_EXTEND
1134 && GET_MODE (XEXP (op, 0)) == mode)
1135 return XEXP (op, 0);
1137 /* (float_truncate:SF (float_truncate:DF foo:XF))
1138 = (float_truncate:SF foo:XF).
1139 This may eliminate double rounding, so it is unsafe.
1141 (float_truncate:SF (float_extend:XF foo:DF))
1142 = (float_truncate:SF foo:DF).
1144 (float_truncate:DF (float_extend:XF foo:SF))
1145 = (float_extend:DF foo:SF). */
1146 if ((GET_CODE (op) == FLOAT_TRUNCATE
1147 && flag_unsafe_math_optimizations)
1148 || GET_CODE (op) == FLOAT_EXTEND)
1149 return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op,
1150 0)))
1151 > GET_MODE_SIZE (mode)
1152 ? FLOAT_TRUNCATE : FLOAT_EXTEND,
1153 mode,
1154 XEXP (op, 0), mode);
1156 /* (float_truncate (float x)) is (float x) */
1157 if (GET_CODE (op) == FLOAT
1158 && (flag_unsafe_math_optimizations
1159 || (SCALAR_FLOAT_MODE_P (GET_MODE (op))
1160 && ((unsigned)significand_size (GET_MODE (op))
1161 >= (GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))
1162 - num_sign_bit_copies (XEXP (op, 0),
1163 GET_MODE (XEXP (op, 0))))))))
1164 return simplify_gen_unary (FLOAT, mode,
1165 XEXP (op, 0),
1166 GET_MODE (XEXP (op, 0)));
1168 /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
1169 (OP:SF foo:SF) if OP is NEG or ABS. */
1170 if ((GET_CODE (op) == ABS
1171 || GET_CODE (op) == NEG)
1172 && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
1173 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
1174 return simplify_gen_unary (GET_CODE (op), mode,
1175 XEXP (XEXP (op, 0), 0), mode);
1177 /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
1178 is (float_truncate:SF x). */
1179 if (GET_CODE (op) == SUBREG
1180 && subreg_lowpart_p (op)
1181 && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
1182 return SUBREG_REG (op);
1183 break;
1185 case FLOAT_EXTEND:
1186 if (DECIMAL_FLOAT_MODE_P (mode))
1187 break;
1189 /* (float_extend (float_extend x)) is (float_extend x)
1191 (float_extend (float x)) is (float x) assuming that double
1192 rounding can't happen. */
1194 if (GET_CODE (op) == FLOAT_EXTEND
1195 || (GET_CODE (op) == FLOAT
1196 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1197 && ((unsigned)significand_size (GET_MODE (op))
1198 >= (GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))
1199 - num_sign_bit_copies (XEXP (op, 0),
1200 GET_MODE (XEXP (op, 0)))))))
1201 return simplify_gen_unary (GET_CODE (op), mode,
1202 XEXP (op, 0),
1203 GET_MODE (XEXP (op, 0)));
1205 break;
1207 case ABS:
1208 /* (abs (neg <foo>)) -> (abs <foo>) */
1209 if (GET_CODE (op) == NEG)
1210 return simplify_gen_unary (ABS, mode, XEXP (op, 0),
1211 GET_MODE (XEXP (op, 0)));
1213 /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
1214 do nothing. */
1215 if (GET_MODE (op) == VOIDmode)
1216 break;
1218 /* If operand is something known to be positive, ignore the ABS. */
1219 if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
1220 || val_signbit_known_clear_p (GET_MODE (op),
1221 nonzero_bits (op, GET_MODE (op))))
1222 return op;
1224 /* If operand is known to be only -1 or 0, convert ABS to NEG. */
1225 if (num_sign_bit_copies (op, mode) == GET_MODE_PRECISION (mode))
1226 return gen_rtx_NEG (mode, op);
1228 break;
1230 case FFS:
1231 /* (ffs (*_extend <X>)) = (ffs <X>) */
1232 if (GET_CODE (op) == SIGN_EXTEND
1233 || GET_CODE (op) == ZERO_EXTEND)
1234 return simplify_gen_unary (FFS, mode, XEXP (op, 0),
1235 GET_MODE (XEXP (op, 0)));
1236 break;
1238 case POPCOUNT:
1239 switch (GET_CODE (op))
1241 case BSWAP:
1242 case ZERO_EXTEND:
1243 /* (popcount (zero_extend <X>)) = (popcount <X>) */
1244 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1245 GET_MODE (XEXP (op, 0)));
1247 case ROTATE:
1248 case ROTATERT:
1249 /* Rotations don't affect popcount. */
1250 if (!side_effects_p (XEXP (op, 1)))
1251 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1252 GET_MODE (XEXP (op, 0)));
1253 break;
1255 default:
1256 break;
1258 break;
1260 case PARITY:
1261 switch (GET_CODE (op))
1263 case NOT:
1264 case BSWAP:
1265 case ZERO_EXTEND:
1266 case SIGN_EXTEND:
1267 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1268 GET_MODE (XEXP (op, 0)));
1270 case ROTATE:
1271 case ROTATERT:
1272 /* Rotations don't affect parity. */
1273 if (!side_effects_p (XEXP (op, 1)))
1274 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1275 GET_MODE (XEXP (op, 0)));
1276 break;
1278 default:
1279 break;
1281 break;
1283 case BSWAP:
1284 /* (bswap (bswap x)) -> x. */
1285 if (GET_CODE (op) == BSWAP)
1286 return XEXP (op, 0);
1287 break;
1289 case FLOAT:
1290 /* (float (sign_extend <X>)) = (float <X>). */
1291 if (GET_CODE (op) == SIGN_EXTEND)
1292 return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
1293 GET_MODE (XEXP (op, 0)));
1294 break;
1296 case SIGN_EXTEND:
1297 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
1298 becomes just the MINUS if its mode is MODE. This allows
1299 folding switch statements on machines using casesi (such as
1300 the VAX). */
1301 if (GET_CODE (op) == TRUNCATE
1302 && GET_MODE (XEXP (op, 0)) == mode
1303 && GET_CODE (XEXP (op, 0)) == MINUS
1304 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
1305 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
1306 return XEXP (op, 0);
1308 /* Extending a widening multiplication should be canonicalized to
1309 a wider widening multiplication. */
1310 if (GET_CODE (op) == MULT)
1312 rtx lhs = XEXP (op, 0);
1313 rtx rhs = XEXP (op, 1);
1314 enum rtx_code lcode = GET_CODE (lhs);
1315 enum rtx_code rcode = GET_CODE (rhs);
1317 /* Widening multiplies usually extend both operands, but sometimes
1318 they use a shift to extract a portion of a register. */
1319 if ((lcode == SIGN_EXTEND
1320 || (lcode == ASHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1321 && (rcode == SIGN_EXTEND
1322 || (rcode == ASHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1324 enum machine_mode lmode = GET_MODE (lhs);
1325 enum machine_mode rmode = GET_MODE (rhs);
1326 int bits;
1328 if (lcode == ASHIFTRT)
1329 /* Number of bits not shifted off the end. */
1330 bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
1331 else /* lcode == SIGN_EXTEND */
1332 /* Size of inner mode. */
1333 bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
1335 if (rcode == ASHIFTRT)
1336 bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
1337 else /* rcode == SIGN_EXTEND */
1338 bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
1340 /* We can only widen multiplies if the result is mathematically
1341 equivalent, i.e. if overflow was impossible. */
1342 if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
1343 return simplify_gen_binary
1344 (MULT, mode,
1345 simplify_gen_unary (SIGN_EXTEND, mode, lhs, lmode),
1346 simplify_gen_unary (SIGN_EXTEND, mode, rhs, rmode));
1350 /* Check for a sign extension of a subreg of a promoted
1351 variable, where the promotion is sign-extended, and the
1352 target mode is the same as the variable's promotion. */
1353 if (GET_CODE (op) == SUBREG
1354 && SUBREG_PROMOTED_VAR_P (op)
1355 && ! SUBREG_PROMOTED_UNSIGNED_P (op)
1356 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
1358 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1359 if (temp)
1360 return temp;
1363 /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
1364 (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1365 if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
1367 gcc_assert (GET_MODE_BITSIZE (mode)
1368 > GET_MODE_BITSIZE (GET_MODE (op)));
1369 return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
1370 GET_MODE (XEXP (op, 0)));
1373 /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
1374 is (sign_extend:M (subreg:O <X>)) if there is mode with
1375 GET_MODE_BITSIZE (N) - I bits.
1376 (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1377 is similarly (zero_extend:M (subreg:O <X>)). */
1378 if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT)
1379 && GET_CODE (XEXP (op, 0)) == ASHIFT
1380 && CONST_INT_P (XEXP (op, 1))
1381 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1382 && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
1384 enum machine_mode tmode
1385 = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
1386 - INTVAL (XEXP (op, 1)), MODE_INT, 1);
1387 gcc_assert (GET_MODE_BITSIZE (mode)
1388 > GET_MODE_BITSIZE (GET_MODE (op)));
1389 if (tmode != BLKmode)
1391 rtx inner =
1392 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1393 if (inner)
1394 return simplify_gen_unary (GET_CODE (op) == ASHIFTRT
1395 ? SIGN_EXTEND : ZERO_EXTEND,
1396 mode, inner, tmode);
1400 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1401 /* As we do not know which address space the pointer is referring to,
1402 we can do this only if the target does not support different pointer
1403 or address modes depending on the address space. */
1404 if (target_default_pointer_address_modes_p ()
1405 && ! POINTERS_EXTEND_UNSIGNED
1406 && mode == Pmode && GET_MODE (op) == ptr_mode
1407 && (CONSTANT_P (op)
1408 || (GET_CODE (op) == SUBREG
1409 && REG_P (SUBREG_REG (op))
1410 && REG_POINTER (SUBREG_REG (op))
1411 && GET_MODE (SUBREG_REG (op)) == Pmode)))
1412 return convert_memory_address (Pmode, op);
1413 #endif
1414 break;
1416 case ZERO_EXTEND:
1417 /* Check for a zero extension of a subreg of a promoted
1418 variable, where the promotion is zero-extended, and the
1419 target mode is the same as the variable's promotion. */
1420 if (GET_CODE (op) == SUBREG
1421 && SUBREG_PROMOTED_VAR_P (op)
1422 && SUBREG_PROMOTED_UNSIGNED_P (op) > 0
1423 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
1425 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1426 if (temp)
1427 return temp;
1430 /* Extending a widening multiplication should be canonicalized to
1431 a wider widening multiplication. */
1432 if (GET_CODE (op) == MULT)
1434 rtx lhs = XEXP (op, 0);
1435 rtx rhs = XEXP (op, 1);
1436 enum rtx_code lcode = GET_CODE (lhs);
1437 enum rtx_code rcode = GET_CODE (rhs);
1439 /* Widening multiplies usually extend both operands, but sometimes
1440 they use a shift to extract a portion of a register. */
1441 if ((lcode == ZERO_EXTEND
1442 || (lcode == LSHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1443 && (rcode == ZERO_EXTEND
1444 || (rcode == LSHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1446 enum machine_mode lmode = GET_MODE (lhs);
1447 enum machine_mode rmode = GET_MODE (rhs);
1448 int bits;
1450 if (lcode == LSHIFTRT)
1451 /* Number of bits not shifted off the end. */
1452 bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
1453 else /* lcode == ZERO_EXTEND */
1454 /* Size of inner mode. */
1455 bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
1457 if (rcode == LSHIFTRT)
1458 bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
1459 else /* rcode == ZERO_EXTEND */
1460 bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
1462 /* We can only widen multiplies if the result is mathematically
1463 equivalent, i.e. if overflow was impossible. */
1464 if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
1465 return simplify_gen_binary
1466 (MULT, mode,
1467 simplify_gen_unary (ZERO_EXTEND, mode, lhs, lmode),
1468 simplify_gen_unary (ZERO_EXTEND, mode, rhs, rmode));
1472 /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1473 if (GET_CODE (op) == ZERO_EXTEND)
1474 return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0),
1475 GET_MODE (XEXP (op, 0)));
1477 /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1478 is (zero_extend:M (subreg:O <X>)) if there is mode with
1479 GET_MODE_BITSIZE (N) - I bits. */
1480 if (GET_CODE (op) == LSHIFTRT
1481 && GET_CODE (XEXP (op, 0)) == ASHIFT
1482 && CONST_INT_P (XEXP (op, 1))
1483 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1484 && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
1486 enum machine_mode tmode
1487 = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
1488 - INTVAL (XEXP (op, 1)), MODE_INT, 1);
1489 if (tmode != BLKmode)
1491 rtx inner =
1492 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1493 if (inner)
1494 return simplify_gen_unary (ZERO_EXTEND, mode, inner, tmode);
1498 /* (zero_extend:M (subreg:N <X:O>)) is <X:O> (for M == O) or
1499 (zero_extend:M <X:O>), if X doesn't have any non-zero bits outside
1500 of mode N. E.g.
1501 (zero_extend:SI (subreg:QI (and:SI (reg:SI) (const_int 63)) 0)) is
1502 (and:SI (reg:SI) (const_int 63)). */
1503 if (GET_CODE (op) == SUBREG
1504 && GET_MODE_PRECISION (GET_MODE (op))
1505 < GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
1506 && GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
1507 <= HOST_BITS_PER_WIDE_INT
1508 && GET_MODE_PRECISION (mode)
1509 >= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
1510 && subreg_lowpart_p (op)
1511 && (nonzero_bits (SUBREG_REG (op), GET_MODE (SUBREG_REG (op)))
1512 & ~GET_MODE_MASK (GET_MODE (op))) == 0)
1514 if (GET_MODE_PRECISION (mode)
1515 == GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op))))
1516 return SUBREG_REG (op);
1517 return simplify_gen_unary (ZERO_EXTEND, mode, SUBREG_REG (op),
1518 GET_MODE (SUBREG_REG (op)));
1521 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1522 /* As we do not know which address space the pointer is referring to,
1523 we can do this only if the target does not support different pointer
1524 or address modes depending on the address space. */
1525 if (target_default_pointer_address_modes_p ()
1526 && POINTERS_EXTEND_UNSIGNED > 0
1527 && mode == Pmode && GET_MODE (op) == ptr_mode
1528 && (CONSTANT_P (op)
1529 || (GET_CODE (op) == SUBREG
1530 && REG_P (SUBREG_REG (op))
1531 && REG_POINTER (SUBREG_REG (op))
1532 && GET_MODE (SUBREG_REG (op)) == Pmode)))
1533 return convert_memory_address (Pmode, op);
1534 #endif
1535 break;
1537 default:
1538 break;
1541 return 0;
1544 /* Try to compute the value of a unary operation CODE whose output mode is to
1545 be MODE with input operand OP whose mode was originally OP_MODE.
1546 Return zero if the value cannot be computed. */
1548 simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
1549 rtx op, enum machine_mode op_mode)
1551 unsigned int width = GET_MODE_PRECISION (mode);
1553 if (code == VEC_DUPLICATE)
1555 gcc_assert (VECTOR_MODE_P (mode));
1556 if (GET_MODE (op) != VOIDmode)
1558 if (!VECTOR_MODE_P (GET_MODE (op)))
1559 gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
1560 else
1561 gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
1562 (GET_MODE (op)));
1564 if (CONST_SCALAR_INT_P (op) || CONST_DOUBLE_AS_FLOAT_P (op)
1565 || GET_CODE (op) == CONST_VECTOR)
1567 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1568 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1569 rtvec v = rtvec_alloc (n_elts);
1570 unsigned int i;
1572 if (GET_CODE (op) != CONST_VECTOR)
1573 for (i = 0; i < n_elts; i++)
1574 RTVEC_ELT (v, i) = op;
1575 else
1577 enum machine_mode inmode = GET_MODE (op);
1578 int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
1579 unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
1581 gcc_assert (in_n_elts < n_elts);
1582 gcc_assert ((n_elts % in_n_elts) == 0);
1583 for (i = 0; i < n_elts; i++)
1584 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
1586 return gen_rtx_CONST_VECTOR (mode, v);
1590 if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
1592 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1593 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1594 enum machine_mode opmode = GET_MODE (op);
1595 int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
1596 unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
1597 rtvec v = rtvec_alloc (n_elts);
1598 unsigned int i;
1600 gcc_assert (op_n_elts == n_elts);
1601 for (i = 0; i < n_elts; i++)
1603 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
1604 CONST_VECTOR_ELT (op, i),
1605 GET_MODE_INNER (opmode));
1606 if (!x)
1607 return 0;
1608 RTVEC_ELT (v, i) = x;
1610 return gen_rtx_CONST_VECTOR (mode, v);
1613 /* The order of these tests is critical so that, for example, we don't
1614 check the wrong mode (input vs. output) for a conversion operation,
1615 such as FIX. At some point, this should be simplified. */
1617 if (code == FLOAT && CONST_SCALAR_INT_P (op))
1619 REAL_VALUE_TYPE d;
1621 if (op_mode == VOIDmode)
1623 /* CONST_INTs have VOIDmode as their mode. We assume that all
1624 the bits of the constant are significant, though this is
1625 a dangerous assumption, as CONST_INTs are often
1626 created and used with garbage in the bits outside of the
1627 precision of the implied mode of the const_int. */
1628 op_mode = MAX_MODE_INT;
1631 real_from_integer (&d, mode, std::make_pair (op, op_mode), SIGNED);
1632 d = real_value_truncate (mode, d);
1633 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1635 else if (code == UNSIGNED_FLOAT && CONST_SCALAR_INT_P (op))
1637 REAL_VALUE_TYPE d;
1639 if (op_mode == VOIDmode)
1641 /* CONST_INTs have VOIDmode as their mode. We assume that all
1642 the bits of the constant are significant, though this is
1643 a dangerous assumption, as CONST_INTs are often
1644 created and used with garbage in the bits outside of the
1645 precision of the implied mode of the const_int. */
1646 op_mode = MAX_MODE_INT;
1649 real_from_integer (&d, mode, std::make_pair (op, op_mode), UNSIGNED);
1650 d = real_value_truncate (mode, d);
1651 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1654 if (CONST_SCALAR_INT_P (op) && width > 0)
1656 wide_int result;
1657 enum machine_mode imode = op_mode == VOIDmode ? mode : op_mode;
1658 rtx_mode_t op0 = std::make_pair (op, imode);
1659 int int_value;
1661 #if TARGET_SUPPORTS_WIDE_INT == 0
1662 /* This assert keeps the simplification from producing a result
1663 that cannot be represented in a CONST_DOUBLE, but a lot of
1664 upstream callers expect that this function never fails to
1665 simplify something, so if this check were added to the test
1666 above, the code would die later anyway. If this assert
1667 fires, you just need to make the port support wide int. */
1668 gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT);
1669 #endif
1671 switch (code)
1673 case NOT:
1674 result = wi::bit_not (op0);
1675 break;
1677 case NEG:
1678 result = wi::neg (op0);
1679 break;
1681 case ABS:
1682 result = wi::abs (op0);
1683 break;
1685 case FFS:
1686 result = wi::shwi (wi::ffs (op0), mode);
1687 break;
1689 case CLZ:
1690 if (wi::ne_p (op0, 0))
1691 int_value = wi::clz (op0);
1692 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, int_value))
1693 int_value = GET_MODE_PRECISION (mode);
1694 result = wi::shwi (int_value, mode);
1695 break;
1697 case CLRSB:
1698 result = wi::shwi (wi::clrsb (op0), mode);
1699 break;
1701 case CTZ:
1702 if (wi::ne_p (op0, 0))
1703 int_value = wi::ctz (op0);
1704 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, int_value))
1705 int_value = GET_MODE_PRECISION (mode);
1706 result = wi::shwi (int_value, mode);
1707 break;
1709 case POPCOUNT:
1710 result = wi::shwi (wi::popcount (op0), mode);
1711 break;
1713 case PARITY:
1714 result = wi::shwi (wi::parity (op0), mode);
1715 break;
1717 case BSWAP:
1718 result = wide_int (op0).bswap ();
1719 break;
1721 case TRUNCATE:
1722 case ZERO_EXTEND:
1723 result = wide_int::from (op0, width, UNSIGNED);
1724 break;
1726 case SIGN_EXTEND:
1727 result = wide_int::from (op0, width, SIGNED);
1728 break;
1730 case SQRT:
1731 default:
1732 return 0;
1735 return immed_wide_int_const (result, mode);
1738 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1739 && SCALAR_FLOAT_MODE_P (mode)
1740 && SCALAR_FLOAT_MODE_P (GET_MODE (op)))
1742 REAL_VALUE_TYPE d;
1743 REAL_VALUE_FROM_CONST_DOUBLE (d, op);
1745 switch (code)
1747 case SQRT:
1748 return 0;
1749 case ABS:
1750 d = real_value_abs (&d);
1751 break;
1752 case NEG:
1753 d = real_value_negate (&d);
1754 break;
1755 case FLOAT_TRUNCATE:
1756 d = real_value_truncate (mode, d);
1757 break;
1758 case FLOAT_EXTEND:
1759 /* All this does is change the mode, unless changing
1760 the mode class. */
1761 if (GET_MODE_CLASS (mode) != GET_MODE_CLASS (GET_MODE (op)))
1762 real_convert (&d, mode, &d);
1763 break;
1764 case FIX:
1765 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
1766 break;
1767 case NOT:
1769 long tmp[4];
1770 int i;
1772 real_to_target (tmp, &d, GET_MODE (op));
1773 for (i = 0; i < 4; i++)
1774 tmp[i] = ~tmp[i];
1775 real_from_target (&d, tmp, mode);
1776 break;
1778 default:
1779 gcc_unreachable ();
1781 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1783 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1784 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1785 && GET_MODE_CLASS (mode) == MODE_INT
1786 && width > 0)
1788 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1789 operators are intentionally left unspecified (to ease implementation
1790 by target backends), for consistency, this routine implements the
1791 same semantics for constant folding as used by the middle-end. */
1793 /* This was formerly used only for non-IEEE float.
1794 eggert@twinsun.com says it is safe for IEEE also. */
1795 REAL_VALUE_TYPE x, t;
1796 REAL_VALUE_FROM_CONST_DOUBLE (x, op);
1797 wide_int wmax, wmin;
1798 /* This is part of the abi to real_to_integer, but we check
1799 things before making this call. */
1800 bool fail;
1802 switch (code)
1804 case FIX:
1805 if (REAL_VALUE_ISNAN (x))
1806 return const0_rtx;
1808 /* Test against the signed upper bound. */
1809 wmax = wi::max_value (width, SIGNED);
1810 real_from_integer (&t, VOIDmode, wmax, SIGNED);
1811 if (REAL_VALUES_LESS (t, x))
1812 return immed_wide_int_const (wmax, mode);
1814 /* Test against the signed lower bound. */
1815 wmin = wi::min_value (width, SIGNED);
1816 real_from_integer (&t, VOIDmode, wmin, SIGNED);
1817 if (REAL_VALUES_LESS (x, t))
1818 return immed_wide_int_const (wmin, mode);
1820 return immed_wide_int_const (real_to_integer (&x, &fail, width), mode);
1821 break;
1823 case UNSIGNED_FIX:
1824 if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
1825 return const0_rtx;
1827 /* Test against the unsigned upper bound. */
1828 wmax = wi::max_value (width, UNSIGNED);
1829 real_from_integer (&t, VOIDmode, wmax, UNSIGNED);
1830 if (REAL_VALUES_LESS (t, x))
1831 return immed_wide_int_const (wmax, mode);
1833 return immed_wide_int_const (real_to_integer (&x, &fail, width),
1834 mode);
1835 break;
1837 default:
1838 gcc_unreachable ();
1842 return NULL_RTX;
1845 /* Subroutine of simplify_binary_operation to simplify a binary operation
1846 CODE that can commute with byte swapping, with result mode MODE and
1847 operating on OP0 and OP1. CODE is currently one of AND, IOR or XOR.
1848 Return zero if no simplification or canonicalization is possible. */
1850 static rtx
1851 simplify_byte_swapping_operation (enum rtx_code code, enum machine_mode mode,
1852 rtx op0, rtx op1)
1854 rtx tem;
1856 /* (op (bswap x) C1) -> (bswap (op x C2)) with C2 swapped. */
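/* For example, in SImode (and (bswap x) (const_int 0xff)) becomes
   (bswap (and x (const_int 0xff000000))). */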
1857 if (GET_CODE (op0) == BSWAP && CONST_SCALAR_INT_P (op1))
1859 tem = simplify_gen_binary (code, mode, XEXP (op0, 0),
1860 simplify_gen_unary (BSWAP, mode, op1, mode));
1861 return simplify_gen_unary (BSWAP, mode, tem, mode);
1864 /* (op (bswap x) (bswap y)) -> (bswap (op x y)). */
1865 if (GET_CODE (op0) == BSWAP && GET_CODE (op1) == BSWAP)
1867 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), XEXP (op1, 0));
1868 return simplify_gen_unary (BSWAP, mode, tem, mode);
1871 return NULL_RTX;
1874 /* Subroutine of simplify_binary_operation to simplify a commutative,
1875 associative binary operation CODE with result mode MODE, operating
1876 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
1877 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
1878 canonicalization is possible. */
1880 static rtx
1881 simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
1882 rtx op0, rtx op1)
1884 rtx tem;
1886 /* Linearize the operator to the left. */
1887 if (GET_CODE (op1) == code)
1889 /* "(a op b) op (c op d)" becomes "(((a op b) op c) op d)". */
1890 if (GET_CODE (op0) == code)
1892 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
1893 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
1896 /* "a op (b op c)" becomes "(b op c) op a". */
1897 if (! swap_commutative_operands_p (op1, op0))
1898 return simplify_gen_binary (code, mode, op1, op0);
1900 tem = op0;
1901 op0 = op1;
1902 op1 = tem;
1905 if (GET_CODE (op0) == code)
1907 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
1908 if (swap_commutative_operands_p (XEXP (op0, 1), op1))
1910 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
1911 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1914 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
1915 tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
1916 if (tem != 0)
1917 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
1919 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
1920 tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
1921 if (tem != 0)
1922 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1925 return 0;
1929 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
1930 and OP1. Return 0 if no simplification is possible.
1932 Don't use this for relational operations such as EQ or LT.
1933 Use simplify_relational_operation instead. */
1934 rtx
1935 simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
1936 rtx op0, rtx op1)
1938 rtx trueop0, trueop1;
1939 rtx tem;
1941 /* Relational operations don't work here. We must know the mode
1942 of the operands in order to do the comparison correctly.
1943 Assuming a full word can give incorrect results.
1944 Consider comparing 128 with -128 in QImode. */
1945 gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
1946 gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
1948 /* Make sure the constant is second. */
1949 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
1950 && swap_commutative_operands_p (op0, op1))
1952 tem = op0, op0 = op1, op1 = tem;
1955 trueop0 = avoid_constant_pool_reference (op0);
1956 trueop1 = avoid_constant_pool_reference (op1);
1958 tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
1959 if (tem)
1960 return tem;
1961 return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
1964 /* Subroutine of simplify_binary_operation. Simplify a binary operation
1965 CODE with result mode MODE, operating on OP0 and OP1. If OP0 and/or
1966 OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
1967 actual constants. */
1969 static rtx
1970 simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
1971 rtx op0, rtx op1, rtx trueop0, rtx trueop1)
1973 rtx tem, reversed, opleft, opright;
1974 HOST_WIDE_INT val;
1975 unsigned int width = GET_MODE_PRECISION (mode);
1977 /* Even if we can't compute a constant result,
1978 there are some cases worth simplifying. */
1980 switch (code)
1982 case PLUS:
1983 /* Maybe simplify x + 0 to x. The two expressions are equivalent
1984 when x is NaN, infinite, or finite and nonzero. They aren't
1985 when x is -0 and the rounding mode is not towards -infinity,
1986 since (-0) + 0 is then 0. */
1987 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
1988 return op0;
1990 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
1991 transformations are safe even for IEEE. */
1992 if (GET_CODE (op0) == NEG)
1993 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
1994 else if (GET_CODE (op1) == NEG)
1995 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
1997 /* (~a) + 1 -> -a */
1998 if (INTEGRAL_MODE_P (mode)
1999 && GET_CODE (op0) == NOT
2000 && trueop1 == const1_rtx)
2001 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
2003 /* Handle both-operands-constant cases. We can only add
2004 CONST_INTs to constants since the sum of relocatable symbols
2005 can't be handled by most assemblers. Don't add CONST_INT
2006 to CONST_INT since overflow won't be computed properly if wider
2007 than HOST_BITS_PER_WIDE_INT. */
2009 if ((GET_CODE (op0) == CONST
2010 || GET_CODE (op0) == SYMBOL_REF
2011 || GET_CODE (op0) == LABEL_REF)
2012 && CONST_INT_P (op1))
2013 return plus_constant (mode, op0, INTVAL (op1));
2014 else if ((GET_CODE (op1) == CONST
2015 || GET_CODE (op1) == SYMBOL_REF
2016 || GET_CODE (op1) == LABEL_REF)
2017 && CONST_INT_P (op0))
2018 return plus_constant (mode, op1, INTVAL (op0));
2020 /* See if this is something like X * C - X or vice versa or
2021 if the multiplication is written as a shift. If so, we can
2022 distribute and make a new multiply, shift, or maybe just
2023 have X (if C is 2 in the example above). But don't make
2024 something more expensive than we had before. */
2026 if (SCALAR_INT_MODE_P (mode))
2028 rtx lhs = op0, rhs = op1;
2030 wide_int coeff0 = wi::one (GET_MODE_PRECISION (mode));
2031 wide_int coeff1 = wi::one (GET_MODE_PRECISION (mode));
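/* COEFF0 and COEFF1 collect the multiplicative constant applied to
   each operand; when the stripped operands are equal, the whole PLUS
   folds to a single MULT by (COEFF0 + COEFF1). */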
2033 if (GET_CODE (lhs) == NEG)
2035 coeff0 = wi::minus_one (GET_MODE_PRECISION (mode));
2036 lhs = XEXP (lhs, 0);
2038 else if (GET_CODE (lhs) == MULT
2039 && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
2041 coeff0 = std::make_pair (XEXP (lhs, 1), mode);
2042 lhs = XEXP (lhs, 0);
2044 else if (GET_CODE (lhs) == ASHIFT
2045 && CONST_INT_P (XEXP (lhs, 1))
2046 && INTVAL (XEXP (lhs, 1)) >= 0
2047 && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (mode))
2049 coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
2050 GET_MODE_PRECISION (mode));
2051 lhs = XEXP (lhs, 0);
2054 if (GET_CODE (rhs) == NEG)
2056 coeff1 = wi::minus_one (GET_MODE_PRECISION (mode));
2057 rhs = XEXP (rhs, 0);
2059 else if (GET_CODE (rhs) == MULT
2060 && CONST_INT_P (XEXP (rhs, 1)))
2062 coeff1 = std::make_pair (XEXP (rhs, 1), mode);
2063 rhs = XEXP (rhs, 0);
2065 else if (GET_CODE (rhs) == ASHIFT
2066 && CONST_INT_P (XEXP (rhs, 1))
2067 && INTVAL (XEXP (rhs, 1)) >= 0
2068 && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (mode))
2070 coeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
2071 GET_MODE_PRECISION (mode));
2072 rhs = XEXP (rhs, 0);
2075 if (rtx_equal_p (lhs, rhs))
2077 rtx orig = gen_rtx_PLUS (mode, op0, op1);
2078 rtx coeff;
2079 bool speed = optimize_function_for_speed_p (cfun);
2081 coeff = immed_wide_int_const (coeff0 + coeff1, mode);
2083 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2084 return set_src_cost (tem, speed) <= set_src_cost (orig, speed)
2085 ? tem : 0;
2089 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
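/* Adding the sign bit (mod the mode width) is the same as XORing it
   in, so the two constants can be combined into a single XOR. */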
2090 if (CONST_SCALAR_INT_P (op1)
2091 && GET_CODE (op0) == XOR
2092 && CONST_SCALAR_INT_P (XEXP (op0, 1))
2093 && mode_signbit_p (mode, op1))
2094 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2095 simplify_gen_binary (XOR, mode, op1,
2096 XEXP (op0, 1)));
2098 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
2099 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2100 && GET_CODE (op0) == MULT
2101 && GET_CODE (XEXP (op0, 0)) == NEG)
2103 rtx in1, in2;
2105 in1 = XEXP (XEXP (op0, 0), 0);
2106 in2 = XEXP (op0, 1);
2107 return simplify_gen_binary (MINUS, mode, op1,
2108 simplify_gen_binary (MULT, mode,
2109 in1, in2));
2112 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
2113 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
2114 is 1. */
2115 if (COMPARISON_P (op0)
2116 && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
2117 || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
2118 && (reversed = reversed_comparison (op0, mode)))
2119 return
2120 simplify_gen_unary (NEG, mode, reversed, mode);
2122 /* If one of the operands is a PLUS or a MINUS, see if we can
2123 simplify this by the associative law.
2124 Don't use the associative law for floating point.
2125 The inaccuracy makes it nonassociative,
2126 and subtle programs can break if operations are associated. */
2128 if (INTEGRAL_MODE_P (mode)
2129 && (plus_minus_operand_p (op0)
2130 || plus_minus_operand_p (op1))
2131 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2132 return tem;
2134 /* Reassociate floating point addition only when the user
2135 specifies associative math operations. */
2136 if (FLOAT_MODE_P (mode)
2137 && flag_associative_math)
2139 tem = simplify_associative_operation (code, mode, op0, op1);
2140 if (tem)
2141 return tem;
2143 break;
2145 case COMPARE:
2146 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
2147 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
2148 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
2149 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
2151 rtx xop00 = XEXP (op0, 0);
2152 rtx xop10 = XEXP (op1, 0);
2154 #ifdef HAVE_cc0
2155 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
2156 #else
2157 if (REG_P (xop00) && REG_P (xop10)
2158 && GET_MODE (xop00) == GET_MODE (xop10)
2159 && REGNO (xop00) == REGNO (xop10)
2160 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
2161 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
2162 #endif
2163 return xop00;
2165 break;
2167 case MINUS:
2168 /* We can't assume x-x is 0 even with non-IEEE floating point,
2169 but since it is zero except in very strange circumstances, we
2170 will treat it as zero with -ffinite-math-only. */
2171 if (rtx_equal_p (trueop0, trueop1)
2172 && ! side_effects_p (op0)
2173 && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
2174 return CONST0_RTX (mode);
2176 /* Change subtraction from zero into negation. (0 - x) is the
2177 same as -x when x is NaN, infinite, or finite and nonzero.
2178 But if the mode has signed zeros, and does not round towards
2179 -infinity, then 0 - 0 is 0, not -0. */
2180 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
2181 return simplify_gen_unary (NEG, mode, op1, mode);
2183 /* (-1 - a) is ~a. */
2184 if (trueop0 == constm1_rtx)
2185 return simplify_gen_unary (NOT, mode, op1, mode);
2187 /* Subtracting 0 has no effect unless the mode has signed zeros
2188 and supports rounding towards -infinity. In such a case,
2189 0 - 0 is -0. */
2190 if (!(HONOR_SIGNED_ZEROS (mode)
2191 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
2192 && trueop1 == CONST0_RTX (mode))
2193 return op0;
2195 /* See if this is something like X * C - X or vice versa or
2196 if the multiplication is written as a shift. If so, we can
2197 distribute and make a new multiply, shift, or maybe just
2198 have X (if C is 2 in the example above). But don't make
2199 something more expensive than we had before. */
2201 if (SCALAR_INT_MODE_P (mode))
2203 rtx lhs = op0, rhs = op1;
2205 wide_int coeff0 = wi::one (GET_MODE_PRECISION (mode));
2206 wide_int negcoeff1 = wi::minus_one (GET_MODE_PRECISION (mode));
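/* As in the PLUS case, COEFF0 and NEGCOEFF1 collect the constants
   applied to each operand, with the right-hand one already negated,
   so a matching pair folds to a single MULT by (COEFF0 + NEGCOEFF1). */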
2208 if (GET_CODE (lhs) == NEG)
2210 coeff0 = wi::minus_one (GET_MODE_PRECISION (mode));
2211 lhs = XEXP (lhs, 0);
2213 else if (GET_CODE (lhs) == MULT
2214 && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
2216 coeff0 = std::make_pair (XEXP (lhs, 1), mode);
2217 lhs = XEXP (lhs, 0);
2219 else if (GET_CODE (lhs) == ASHIFT
2220 && CONST_INT_P (XEXP (lhs, 1))
2221 && INTVAL (XEXP (lhs, 1)) >= 0
2222 && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (mode))
2224 coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
2225 GET_MODE_PRECISION (mode));
2226 lhs = XEXP (lhs, 0);
2229 if (GET_CODE (rhs) == NEG)
2231 negcoeff1 = wi::one (GET_MODE_PRECISION (mode));
2232 rhs = XEXP (rhs, 0);
2234 else if (GET_CODE (rhs) == MULT
2235 && CONST_INT_P (XEXP (rhs, 1)))
2237 negcoeff1 = wi::neg (std::make_pair (XEXP (rhs, 1), mode));
2238 rhs = XEXP (rhs, 0);
2240 else if (GET_CODE (rhs) == ASHIFT
2241 && CONST_INT_P (XEXP (rhs, 1))
2242 && INTVAL (XEXP (rhs, 1)) >= 0
2243 && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (mode))
2245 negcoeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
2246 GET_MODE_PRECISION (mode));
2247 negcoeff1 = -negcoeff1;
2248 rhs = XEXP (rhs, 0);
2251 if (rtx_equal_p (lhs, rhs))
2253 rtx orig = gen_rtx_MINUS (mode, op0, op1);
2254 rtx coeff;
2255 bool speed = optimize_function_for_speed_p (cfun);
2257 coeff = immed_wide_int_const (coeff0 + negcoeff1, mode);
2259 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2260 return set_src_cost (tem, speed) <= set_src_cost (orig, speed)
2261 ? tem : 0;
2265 /* (a - (-b)) -> (a + b). True even for IEEE. */
2266 if (GET_CODE (op1) == NEG)
2267 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
2269 /* (-x - c) may be simplified as (-c - x). */
2270 if (GET_CODE (op0) == NEG
2271 && (CONST_SCALAR_INT_P (op1) || CONST_DOUBLE_AS_FLOAT_P (op1)))
2273 tem = simplify_unary_operation (NEG, mode, op1, mode);
2274 if (tem)
2275 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
2278 /* Don't let a relocatable value get a negative coeff. */
2279 if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode)
2280 return simplify_gen_binary (PLUS, mode,
2281 op0,
2282 neg_const_int (mode, op1));
2284 /* (x - (x & y)) -> (x & ~y) */
2285 if (INTEGRAL_MODE_P (mode) && GET_CODE (op1) == AND)
2287 if (rtx_equal_p (op0, XEXP (op1, 0)))
2289 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
2290 GET_MODE (XEXP (op1, 1)));
2291 return simplify_gen_binary (AND, mode, op0, tem);
2293 if (rtx_equal_p (op0, XEXP (op1, 1)))
2295 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
2296 GET_MODE (XEXP (op1, 0)));
2297 return simplify_gen_binary (AND, mode, op0, tem);
2301 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
2302 by reversing the comparison code if valid. */
2303 if (STORE_FLAG_VALUE == 1
2304 && trueop0 == const1_rtx
2305 && COMPARISON_P (op1)
2306 && (reversed = reversed_comparison (op1, mode)))
2307 return reversed;
2309 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
2310 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2311 && GET_CODE (op1) == MULT
2312 && GET_CODE (XEXP (op1, 0)) == NEG)
2314 rtx in1, in2;
2316 in1 = XEXP (XEXP (op1, 0), 0);
2317 in2 = XEXP (op1, 1);
2318 return simplify_gen_binary (PLUS, mode,
2319 simplify_gen_binary (MULT, mode,
2320 in1, in2),
2321 op0);
2324 /* Canonicalize (minus (neg A) (mult B C)) to
2325 (minus (mult (neg B) C) A). */
2326 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2327 && GET_CODE (op1) == MULT
2328 && GET_CODE (op0) == NEG)
2330 rtx in1, in2;
2332 in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
2333 in2 = XEXP (op1, 1);
2334 return simplify_gen_binary (MINUS, mode,
2335 simplify_gen_binary (MULT, mode,
2336 in1, in2),
2337 XEXP (op0, 0));
2340 /* If one of the operands is a PLUS or a MINUS, see if we can
2341 simplify this by the associative law. This will, for example,
2342 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
2343 Don't use the associative law for floating point.
2344 The inaccuracy makes it nonassociative,
2345 and subtle programs can break if operations are associated. */
2347 if (INTEGRAL_MODE_P (mode)
2348 && (plus_minus_operand_p (op0)
2349 || plus_minus_operand_p (op1))
2350 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2351 return tem;
2352 break;
2354 case MULT:
2355 if (trueop1 == constm1_rtx)
2356 return simplify_gen_unary (NEG, mode, op0, mode);
2358 if (GET_CODE (op0) == NEG)
2360 rtx temp = simplify_unary_operation (NEG, mode, op1, mode);
2361 /* If op1 is a MULT as well and simplify_unary_operation
2362 just moved the NEG to the second operand, simplify_gen_binary
2363 below could, through simplify_associative_operation, move
2364 the NEG around again and recurse endlessly. */
2365 if (temp
2366 && GET_CODE (op1) == MULT
2367 && GET_CODE (temp) == MULT
2368 && XEXP (op1, 0) == XEXP (temp, 0)
2369 && GET_CODE (XEXP (temp, 1)) == NEG
2370 && XEXP (op1, 1) == XEXP (XEXP (temp, 1), 0))
2371 temp = NULL_RTX;
2372 if (temp)
2373 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), temp);
2375 if (GET_CODE (op1) == NEG)
2377 rtx temp = simplify_unary_operation (NEG, mode, op0, mode);
2378 /* If op0 is a MULT as well and simplify_unary_operation
2379 just moved the NEG to the second operand, simplify_gen_binary
2380 below could, through simplify_associative_operation, move
2381 the NEG around again and recurse endlessly. */
2382 if (temp
2383 && GET_CODE (op0) == MULT
2384 && GET_CODE (temp) == MULT
2385 && XEXP (op0, 0) == XEXP (temp, 0)
2386 && GET_CODE (XEXP (temp, 1)) == NEG
2387 && XEXP (op0, 1) == XEXP (XEXP (temp, 1), 0))
2388 temp = NULL_RTX;
2389 if (temp)
2390 return simplify_gen_binary (MULT, mode, temp, XEXP (op1, 0));
2393 /* Maybe simplify x * 0 to 0. The reduction is not valid if
2394 x is NaN, since x * 0 is then also NaN. Nor is it valid
2395 when the mode has signed zeros, since multiplying a negative
2396 number by 0 will give -0, not 0. */
2397 if (!HONOR_NANS (mode)
2398 && !HONOR_SIGNED_ZEROS (mode)
2399 && trueop1 == CONST0_RTX (mode)
2400 && ! side_effects_p (op0))
2401 return op1;
2403 /* In IEEE floating point, x*1 is not equivalent to x for
2404 signalling NaNs. */
2405 if (!HONOR_SNANS (mode)
2406 && trueop1 == CONST1_RTX (mode))
2407 return op0;
2409 /* Convert multiply by constant power of two into shift. */
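/* For example, (mult x (const_int 16)) becomes (ashift x (const_int 4)). */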
2410 if (CONST_SCALAR_INT_P (trueop1))
2412 val = wi::exact_log2 (std::make_pair (trueop1, mode));
2413 if (val >= 0)
2414 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
2417 /* x*2 is x+x and x*(-1) is -x */
2418 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
2419 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
2420 && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
2421 && GET_MODE (op0) == mode)
2423 REAL_VALUE_TYPE d;
2424 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2426 if (REAL_VALUES_EQUAL (d, dconst2))
2427 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
2429 if (!HONOR_SNANS (mode)
2430 && REAL_VALUES_EQUAL (d, dconstm1))
2431 return simplify_gen_unary (NEG, mode, op0, mode);
2434 /* Optimize -x * -x as x * x. */
2435 if (FLOAT_MODE_P (mode)
2436 && GET_CODE (op0) == NEG
2437 && GET_CODE (op1) == NEG
2438 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2439 && !side_effects_p (XEXP (op0, 0)))
2440 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2442 /* Likewise, optimize abs(x) * abs(x) as x * x. */
2443 if (SCALAR_FLOAT_MODE_P (mode)
2444 && GET_CODE (op0) == ABS
2445 && GET_CODE (op1) == ABS
2446 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2447 && !side_effects_p (XEXP (op0, 0)))
2448 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2450 /* Reassociate multiplication, but for floating point MULTs
2451 only when the user specifies unsafe math optimizations. */
2452 if (! FLOAT_MODE_P (mode)
2453 || flag_unsafe_math_optimizations)
2455 tem = simplify_associative_operation (code, mode, op0, op1);
2456 if (tem)
2457 return tem;
2459 break;
2461 case IOR:
2462 if (trueop1 == CONST0_RTX (mode))
2463 return op0;
2464 if (INTEGRAL_MODE_P (mode)
2465 && trueop1 == CONSTM1_RTX (mode)
2466 && !side_effects_p (op0))
2467 return op1;
2468 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2469 return op0;
2470 /* A | (~A) -> -1 */
2471 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2472 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2473 && ! side_effects_p (op0)
2474 && SCALAR_INT_MODE_P (mode))
2475 return constm1_rtx;
2477 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
2478 if (CONST_INT_P (op1)
2479 && HWI_COMPUTABLE_MODE_P (mode)
2480 && (nonzero_bits (op0, mode) & ~UINTVAL (op1)) == 0
2481 && !side_effects_p (op0))
2482 return op1;
2484 /* Canonicalize (X & C1) | C2. */
2485 if (GET_CODE (op0) == AND
2486 && CONST_INT_P (trueop1)
2487 && CONST_INT_P (XEXP (op0, 1)))
2489 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
2490 HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
2491 HOST_WIDE_INT c2 = INTVAL (trueop1);
2493 /* If (C1&C2) == C1, then (X&C1)|C2 becomes X. */
2494 if ((c1 & c2) == c1
2495 && !side_effects_p (XEXP (op0, 0)))
2496 return trueop1;
2498 /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
2499 if (((c1|c2) & mask) == mask)
2500 return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);
2502 /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2. */
2503 if (((c1 & ~c2) & mask) != (c1 & mask))
2505 tem = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2506 gen_int_mode (c1 & ~c2, mode));
2507 return simplify_gen_binary (IOR, mode, tem, op1);
2511 /* Convert (A & B) | A to A. */
2512 if (GET_CODE (op0) == AND
2513 && (rtx_equal_p (XEXP (op0, 0), op1)
2514 || rtx_equal_p (XEXP (op0, 1), op1))
2515 && ! side_effects_p (XEXP (op0, 0))
2516 && ! side_effects_p (XEXP (op0, 1)))
2517 return op1;
2519 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2520 mode size to (rotate A CX). */
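/* For example, in SImode (ior (ashift x (const_int 24))
   (lshiftrt x (const_int 8))) becomes (rotate x (const_int 24)). */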
2522 if (GET_CODE (op1) == ASHIFT
2523 || GET_CODE (op1) == SUBREG)
2525 opleft = op1;
2526 opright = op0;
2528 else
2530 opright = op1;
2531 opleft = op0;
2534 if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
2535 && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
2536 && CONST_INT_P (XEXP (opleft, 1))
2537 && CONST_INT_P (XEXP (opright, 1))
2538 && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
2539 == GET_MODE_PRECISION (mode)))
2540 return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
2542 /* Same, but for ashift that has been "simplified" to a wider mode
2543 by simplify_shift_const. */
2545 if (GET_CODE (opleft) == SUBREG
2546 && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
2547 && GET_CODE (opright) == LSHIFTRT
2548 && GET_CODE (XEXP (opright, 0)) == SUBREG
2549 && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
2550 && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
2551 && (GET_MODE_SIZE (GET_MODE (opleft))
2552 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
2553 && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
2554 SUBREG_REG (XEXP (opright, 0)))
2555 && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
2556 && CONST_INT_P (XEXP (opright, 1))
2557 && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
2558 == GET_MODE_PRECISION (mode)))
2559 return gen_rtx_ROTATE (mode, XEXP (opright, 0),
2560 XEXP (SUBREG_REG (opleft), 1));
2562 /* If we have (ior (and X C1) C2), simplify this by making
2563 C1 as small as possible if C1 actually changes. */
2564 if (CONST_INT_P (op1)
2565 && (HWI_COMPUTABLE_MODE_P (mode)
2566 || INTVAL (op1) > 0)
2567 && GET_CODE (op0) == AND
2568 && CONST_INT_P (XEXP (op0, 1))
2569 && CONST_INT_P (op1)
2570 && (UINTVAL (XEXP (op0, 1)) & UINTVAL (op1)) != 0)
2572 rtx tmp = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2573 gen_int_mode (UINTVAL (XEXP (op0, 1))
2574 & ~UINTVAL (op1),
2575 mode));
2576 return simplify_gen_binary (IOR, mode, tmp, op1);
2579 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2580 a (sign_extend (plus ...)). If so, check that OP1 is a CONST_INT and
2581 that the PLUS does not affect any of the bits in OP1; then we can do
2582 the IOR as a PLUS and we can associate. This is valid if OP1
2583 can be safely shifted left C bits. */
2584 if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
2585 && GET_CODE (XEXP (op0, 0)) == PLUS
2586 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
2587 && CONST_INT_P (XEXP (op0, 1))
2588 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
2590 int count = INTVAL (XEXP (op0, 1));
2591 HOST_WIDE_INT mask = INTVAL (trueop1) << count;
2593 if (mask >> count == INTVAL (trueop1)
2594 && trunc_int_for_mode (mask, mode) == mask
2595 && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
2596 return simplify_gen_binary (ASHIFTRT, mode,
2597 plus_constant (mode, XEXP (op0, 0),
2598 mask),
2599 XEXP (op0, 1));
2602 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
2603 if (tem)
2604 return tem;
2606 tem = simplify_associative_operation (code, mode, op0, op1);
2607 if (tem)
2608 return tem;
2609 break;
2611 case XOR:
2612 if (trueop1 == CONST0_RTX (mode))
2613 return op0;
2614 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2615 return simplify_gen_unary (NOT, mode, op0, mode);
2616 if (rtx_equal_p (trueop0, trueop1)
2617 && ! side_effects_p (op0)
2618 && GET_MODE_CLASS (mode) != MODE_CC)
2619 return CONST0_RTX (mode);
2621 /* Canonicalize XOR of the most significant bit to PLUS. */
2622 if (CONST_SCALAR_INT_P (op1)
2623 && mode_signbit_p (mode, op1))
2624 return simplify_gen_binary (PLUS, mode, op0, op1);
2625 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2626 if (CONST_SCALAR_INT_P (op1)
2627 && GET_CODE (op0) == PLUS
2628 && CONST_SCALAR_INT_P (XEXP (op0, 1))
2629 && mode_signbit_p (mode, XEXP (op0, 1)))
2630 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2631 simplify_gen_binary (XOR, mode, op1,
2632 XEXP (op0, 1)));
2634 /* If we are XORing two things that have no bits in common,
2635 convert them into an IOR. This helps to detect rotation encoded
2636 using those methods and possibly other simplifications. */
2638 if (HWI_COMPUTABLE_MODE_P (mode)
2639 && (nonzero_bits (op0, mode)
2640 & nonzero_bits (op1, mode)) == 0)
2641 return (simplify_gen_binary (IOR, mode, op0, op1));
2643 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2644 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2645 (NOT y). */
2647 int num_negated = 0;
2649 if (GET_CODE (op0) == NOT)
2650 num_negated++, op0 = XEXP (op0, 0);
2651 if (GET_CODE (op1) == NOT)
2652 num_negated++, op1 = XEXP (op1, 0);
2654 if (num_negated == 2)
2655 return simplify_gen_binary (XOR, mode, op0, op1);
2656 else if (num_negated == 1)
2657 return simplify_gen_unary (NOT, mode,
2658 simplify_gen_binary (XOR, mode, op0, op1),
2659 mode);
2662 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2663 correspond to a machine insn or result in further simplifications
2664 if B is a constant. */
2666 if (GET_CODE (op0) == AND
2667 && rtx_equal_p (XEXP (op0, 1), op1)
2668 && ! side_effects_p (op1))
2669 return simplify_gen_binary (AND, mode,
2670 simplify_gen_unary (NOT, mode,
2671 XEXP (op0, 0), mode),
2672 op1);
2674 else if (GET_CODE (op0) == AND
2675 && rtx_equal_p (XEXP (op0, 0), op1)
2676 && ! side_effects_p (op1))
2677 return simplify_gen_binary (AND, mode,
2678 simplify_gen_unary (NOT, mode,
2679 XEXP (op0, 1), mode),
2680 op1);
2682 /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
2683 we can transform like this:
2684 (A&B)^C == ~(A&B)&C | ~C&(A&B)
2685 == (~A|~B)&C | ~C&(A&B) * DeMorgan's Law
2686 == ~A&C | ~B&C | A&(~C&B) * Distribute and re-order
2687 Attempt a few simplifications when B and C are both constants. */
2688 if (GET_CODE (op0) == AND
2689 && CONST_INT_P (op1)
2690 && CONST_INT_P (XEXP (op0, 1)))
2692 rtx a = XEXP (op0, 0);
2693 rtx b = XEXP (op0, 1);
2694 rtx c = op1;
2695 HOST_WIDE_INT bval = INTVAL (b);
2696 HOST_WIDE_INT cval = INTVAL (c);
2698 rtx na_c
2699 = simplify_binary_operation (AND, mode,
2700 simplify_gen_unary (NOT, mode, a, mode),
2701 c);
2702 if ((~cval & bval) == 0)
2704 /* Try to simplify ~A&C | ~B&C. */
2705 if (na_c != NULL_RTX)
2706 return simplify_gen_binary (IOR, mode, na_c,
2707 gen_int_mode (~bval & cval, mode));
2709 else
2711 /* If ~A&C is zero, simplify A&(~C&B) | ~B&C. */
2712 if (na_c == const0_rtx)
2714 rtx a_nc_b = simplify_gen_binary (AND, mode, a,
2715 gen_int_mode (~cval & bval,
2716 mode));
2717 return simplify_gen_binary (IOR, mode, a_nc_b,
2718 gen_int_mode (~bval & cval,
2719 mode));
2724 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2725 comparison if STORE_FLAG_VALUE is 1. */
2726 if (STORE_FLAG_VALUE == 1
2727 && trueop1 == const1_rtx
2728 && COMPARISON_P (op0)
2729 && (reversed = reversed_comparison (op0, mode)))
2730 return reversed;
2732 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2733 is (lt foo (const_int 0)), so we can perform the above
2734 simplification if STORE_FLAG_VALUE is 1. */
2736 if (STORE_FLAG_VALUE == 1
2737 && trueop1 == const1_rtx
2738 && GET_CODE (op0) == LSHIFTRT
2739 && CONST_INT_P (XEXP (op0, 1))
2740 && INTVAL (XEXP (op0, 1)) == GET_MODE_PRECISION (mode) - 1)
2741 return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);
2743 /* (xor (comparison foo bar) (const_int sign-bit))
2744 when STORE_FLAG_VALUE is the sign bit. */
2745 if (val_signbit_p (mode, STORE_FLAG_VALUE)
2746 && trueop1 == const_true_rtx
2747 && COMPARISON_P (op0)
2748 && (reversed = reversed_comparison (op0, mode)))
2749 return reversed;
2751 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
2752 if (tem)
2753 return tem;
2755 tem = simplify_associative_operation (code, mode, op0, op1);
2756 if (tem)
2757 return tem;
2758 break;
2760 case AND:
2761 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2762 return trueop1;
2763 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2764 return op0;
2765 if (HWI_COMPUTABLE_MODE_P (mode))
2767 HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
2768 HOST_WIDE_INT nzop1;
2769 if (CONST_INT_P (trueop1))
2771 HOST_WIDE_INT val1 = INTVAL (trueop1);
2772 /* If we are turning off bits already known off in OP0, we need
2773 not do an AND. */
2774 if ((nzop0 & ~val1) == 0)
2775 return op0;
2777 nzop1 = nonzero_bits (trueop1, mode);
2778 /* If we are clearing all the nonzero bits, the result is zero. */
2779 if ((nzop1 & nzop0) == 0
2780 && !side_effects_p (op0) && !side_effects_p (op1))
2781 return CONST0_RTX (mode);
2783 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
2784 && GET_MODE_CLASS (mode) != MODE_CC)
2785 return op0;
2786 /* A & (~A) -> 0 */
2787 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2788 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2789 && ! side_effects_p (op0)
2790 && GET_MODE_CLASS (mode) != MODE_CC)
2791 return CONST0_RTX (mode);
2793 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
2794 there are no nonzero bits of C outside of X's mode. */
2795 if ((GET_CODE (op0) == SIGN_EXTEND
2796 || GET_CODE (op0) == ZERO_EXTEND)
2797 && CONST_INT_P (trueop1)
2798 && HWI_COMPUTABLE_MODE_P (mode)
2799 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
2800 & UINTVAL (trueop1)) == 0)
2802 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
2803 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
2804 gen_int_mode (INTVAL (trueop1),
2805 imode));
2806 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
2809 /* Transform (and (truncate X) C) into (truncate (and X C)). This way
2810 we might be able to further simplify the AND with X and potentially
2811 remove the truncation altogether. */
2812 if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
2814 rtx x = XEXP (op0, 0);
2815 enum machine_mode xmode = GET_MODE (x);
2816 tem = simplify_gen_binary (AND, xmode, x,
2817 gen_int_mode (INTVAL (trueop1), xmode));
2818 return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
2821 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
2822 if (GET_CODE (op0) == IOR
2823 && CONST_INT_P (trueop1)
2824 && CONST_INT_P (XEXP (op0, 1)))
2826 HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
2827 return simplify_gen_binary (IOR, mode,
2828 simplify_gen_binary (AND, mode,
2829 XEXP (op0, 0), op1),
2830 gen_int_mode (tmp, mode));
2833 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
2834 insn (and may simplify more). */
2835 if (GET_CODE (op0) == XOR
2836 && rtx_equal_p (XEXP (op0, 0), op1)
2837 && ! side_effects_p (op1))
2838 return simplify_gen_binary (AND, mode,
2839 simplify_gen_unary (NOT, mode,
2840 XEXP (op0, 1), mode),
2841 op1);
2843 if (GET_CODE (op0) == XOR
2844 && rtx_equal_p (XEXP (op0, 1), op1)
2845 && ! side_effects_p (op1))
2846 return simplify_gen_binary (AND, mode,
2847 simplify_gen_unary (NOT, mode,
2848 XEXP (op0, 0), mode),
2849 op1);
2851 /* Similarly for (~(A ^ B)) & A. */
2852 if (GET_CODE (op0) == NOT
2853 && GET_CODE (XEXP (op0, 0)) == XOR
2854 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
2855 && ! side_effects_p (op1))
2856 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
2858 if (GET_CODE (op0) == NOT
2859 && GET_CODE (XEXP (op0, 0)) == XOR
2860 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
2861 && ! side_effects_p (op1))
2862 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
2864 /* Convert (A | B) & A to A. */
2865 if (GET_CODE (op0) == IOR
2866 && (rtx_equal_p (XEXP (op0, 0), op1)
2867 || rtx_equal_p (XEXP (op0, 1), op1))
2868 && ! side_effects_p (XEXP (op0, 0))
2869 && ! side_effects_p (XEXP (op0, 1)))
2870 return op1;
2872 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
2873 ((A & N) + B) & M -> (A + B) & M
2874 Similarly if (N & M) == 0,
2875 ((A | N) + B) & M -> (A + B) & M
2876 and for - instead of + and/or ^ instead of |.
2877 Also, if (N & M) == 0, then
2878 (A +- N) & M -> A & M. */
2879 if (CONST_INT_P (trueop1)
2880 && HWI_COMPUTABLE_MODE_P (mode)
2881 && ~UINTVAL (trueop1)
2882 && (UINTVAL (trueop1) & (UINTVAL (trueop1) + 1)) == 0
2883 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
2885 rtx pmop[2];
2886 int which;
2888 pmop[0] = XEXP (op0, 0);
2889 pmop[1] = XEXP (op0, 1);
2891 if (CONST_INT_P (pmop[1])
2892 && (UINTVAL (pmop[1]) & UINTVAL (trueop1)) == 0)
2893 return simplify_gen_binary (AND, mode, pmop[0], op1);
2895 for (which = 0; which < 2; which++)
2897 tem = pmop[which];
2898 switch (GET_CODE (tem))
2900 case AND:
2901 if (CONST_INT_P (XEXP (tem, 1))
2902 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1))
2903 == UINTVAL (trueop1))
2904 pmop[which] = XEXP (tem, 0);
2905 break;
2906 case IOR:
2907 case XOR:
2908 if (CONST_INT_P (XEXP (tem, 1))
2909 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1)) == 0)
2910 pmop[which] = XEXP (tem, 0);
2911 break;
2912 default:
2913 break;
2917 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
2919 tem = simplify_gen_binary (GET_CODE (op0), mode,
2920 pmop[0], pmop[1]);
2921 return simplify_gen_binary (code, mode, tem, op1);
2925 /* (and X (ior (not X) Y)) -> (and X Y) */
2926 if (GET_CODE (op1) == IOR
2927 && GET_CODE (XEXP (op1, 0)) == NOT
2928 && op0 == XEXP (XEXP (op1, 0), 0))
2929 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));
2931 /* (and (ior (not X) Y) X) -> (and X Y) */
2932 if (GET_CODE (op0) == IOR
2933 && GET_CODE (XEXP (op0, 0)) == NOT
2934 && op1 == XEXP (XEXP (op0, 0), 0))
2935 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));
2937 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
2938 if (tem)
2939 return tem;
2941 tem = simplify_associative_operation (code, mode, op0, op1);
2942 if (tem)
2943 return tem;
2944 break;
2946 case UDIV:
2947 /* 0/x is 0 (or x&0 if x has side-effects). */
2948 if (trueop0 == CONST0_RTX (mode))
2950 if (side_effects_p (op1))
2951 return simplify_gen_binary (AND, mode, op1, trueop0);
2952 return trueop0;
2954 /* x/1 is x. */
2955 if (trueop1 == CONST1_RTX (mode))
2957 tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
2958 if (tem)
2959 return tem;
2961 /* Convert divide by power of two into shift. */
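/* For example, (udiv x (const_int 8)) becomes (lshiftrt x (const_int 3)). */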
2962 if (CONST_INT_P (trueop1)
2963 && (val = exact_log2 (UINTVAL (trueop1))) > 0)
2964 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
2965 break;
2967 case DIV:
2968 /* Handle floating point and integers separately. */
2969 if (SCALAR_FLOAT_MODE_P (mode))
2971 /* Maybe change 0.0 / x to 0.0. This transformation isn't
2972 safe for modes with NaNs, since 0.0 / 0.0 will then be
2973 NaN rather than 0.0. Nor is it safe for modes with signed
2974 zeros, since dividing 0 by a negative number gives -0.0. */
2975 if (trueop0 == CONST0_RTX (mode)
2976 && !HONOR_NANS (mode)
2977 && !HONOR_SIGNED_ZEROS (mode)
2978 && ! side_effects_p (op1))
2979 return op0;
2980 /* x/1.0 is x. */
2981 if (trueop1 == CONST1_RTX (mode)
2982 && !HONOR_SNANS (mode))
2983 return op0;
2985 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
2986 && trueop1 != CONST0_RTX (mode))
2988 REAL_VALUE_TYPE d;
2989 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2991 /* x/-1.0 is -x. */
2992 if (REAL_VALUES_EQUAL (d, dconstm1)
2993 && !HONOR_SNANS (mode))
2994 return simplify_gen_unary (NEG, mode, op0, mode);
2996 /* Change FP division by a constant into multiplication.
2997 Only do this with -freciprocal-math. */
2998 if (flag_reciprocal_math
2999 && !REAL_VALUES_EQUAL (d, dconst0))
3001 REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
3002 tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
3003 return simplify_gen_binary (MULT, mode, op0, tem);
3007 else if (SCALAR_INT_MODE_P (mode))
3009 /* 0/x is 0 (or x&0 if x has side-effects). */
3010 if (trueop0 == CONST0_RTX (mode)
3011 && !cfun->can_throw_non_call_exceptions)
3013 if (side_effects_p (op1))
3014 return simplify_gen_binary (AND, mode, op1, trueop0);
3015 return trueop0;
3017 /* x/1 is x. */
3018 if (trueop1 == CONST1_RTX (mode))
3020 tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3021 if (tem)
3022 return tem;
3024 /* x/-1 is -x. */
3025 if (trueop1 == constm1_rtx)
3027 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3028 if (x)
3029 return simplify_gen_unary (NEG, mode, x, mode);
3032 break;
3034 case UMOD:
3035 /* 0%x is 0 (or x&0 if x has side-effects). */
3036 if (trueop0 == CONST0_RTX (mode))
3038 if (side_effects_p (op1))
3039 return simplify_gen_binary (AND, mode, op1, trueop0);
3040 return trueop0;
3042 /* x%1 is 0 (or x&0 if x has side-effects). */
3043 if (trueop1 == CONST1_RTX (mode))
3045 if (side_effects_p (op0))
3046 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3047 return CONST0_RTX (mode);
3049 /* Implement modulus by power of two as AND. */
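/* For example, (umod x (const_int 8)) becomes (and x (const_int 7)). */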
3050 if (CONST_INT_P (trueop1)
3051 && exact_log2 (UINTVAL (trueop1)) > 0)
3052 return simplify_gen_binary (AND, mode, op0,
3053 gen_int_mode (INTVAL (op1) - 1, mode));
3054 break;
3056 case MOD:
3057 /* 0%x is 0 (or x&0 if x has side-effects). */
3058 if (trueop0 == CONST0_RTX (mode))
3060 if (side_effects_p (op1))
3061 return simplify_gen_binary (AND, mode, op1, trueop0);
3062 return trueop0;
3064 /* x%1 and x%-1 are 0 (or x&0 if x has side-effects). */
3065 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
3067 if (side_effects_p (op0))
3068 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3069 return CONST0_RTX (mode);
3071 break;
3073 case ROTATERT:
3074 case ROTATE:
3075 /* Canonicalize rotates by constant amount. If op1 is bitsize / 2,
3076 prefer left rotation; if op1 is from bitsize / 2 + 1 to
3077 bitsize - 1, use the other rotate direction with an amount of
3078 1 .. bitsize / 2 - 1 instead. */
3079 if (CONST_INT_P (trueop1)
3080 && IN_RANGE (INTVAL (trueop1),
3081 GET_MODE_BITSIZE (mode) / 2 + (code == ROTATE),
3082 GET_MODE_BITSIZE (mode) - 1))
3083 return simplify_gen_binary (code == ROTATE ? ROTATERT : ROTATE,
3084 mode, op0, GEN_INT (GET_MODE_BITSIZE (mode)
3085 - INTVAL (trueop1)));
3086 /* FALLTHRU */
3087 case ASHIFTRT:
3088 if (trueop1 == CONST0_RTX (mode))
3089 return op0;
3090 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3091 return op0;
3092 /* Rotating ~0 always results in ~0. */
3093 if (CONST_INT_P (trueop0) && width <= HOST_BITS_PER_WIDE_INT
3094 && UINTVAL (trueop0) == GET_MODE_MASK (mode)
3095 && ! side_effects_p (op1))
3096 return op0;
3097 canonicalize_shift:
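/* On targets where SHIFT_COUNT_TRUNCATED holds, only the low-order
   bits of the count are used, so reduce a constant count modulo the
   mode bitsize. */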
3098 if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
3100 val = INTVAL (op1) & (GET_MODE_BITSIZE (mode) - 1);
3101 if (val != INTVAL (op1))
3102 return simplify_gen_binary (code, mode, op0, GEN_INT (val));
3104 break;
3106 case ASHIFT:
3107 case SS_ASHIFT:
3108 case US_ASHIFT:
3109 if (trueop1 == CONST0_RTX (mode))
3110 return op0;
3111 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3112 return op0;
3113 goto canonicalize_shift;
3115 case LSHIFTRT:
3116 if (trueop1 == CONST0_RTX (mode))
3117 return op0;
3118 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3119 return op0;
3120 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
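/* When CLZ (0) is defined as the mode precision (a power of two),
   shifting the CLZ result right by log2 of that precision yields 1
   exactly when X is zero, which is (eq X 0) given
   STORE_FLAG_VALUE == 1. */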
3121 if (GET_CODE (op0) == CLZ
3122 && CONST_INT_P (trueop1)
3123 && STORE_FLAG_VALUE == 1
3124 && INTVAL (trueop1) < (HOST_WIDE_INT)width)
3126 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
3127 unsigned HOST_WIDE_INT zero_val = 0;
3129 if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
3130 && zero_val == GET_MODE_PRECISION (imode)
3131 && INTVAL (trueop1) == exact_log2 (zero_val))
3132 return simplify_gen_relational (EQ, mode, imode,
3133 XEXP (op0, 0), const0_rtx);
3135 goto canonicalize_shift;
3137 case SMIN:
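/* (smin X C) is C when C is the most negative value of the mode. */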
3138 if (width <= HOST_BITS_PER_WIDE_INT
3139 && mode_signbit_p (mode, trueop1)
3140 && ! side_effects_p (op0))
3141 return op1;
3142 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3143 return op0;
3144 tem = simplify_associative_operation (code, mode, op0, op1);
3145 if (tem)
3146 return tem;
3147 break;
3149 case SMAX:
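/* (smax X C) is C when C is the largest signed value of the mode,
   i.e. GET_MODE_MASK (mode) >> 1. */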
3150 if (width <= HOST_BITS_PER_WIDE_INT
3151 && CONST_INT_P (trueop1)
3152 && (UINTVAL (trueop1) == GET_MODE_MASK (mode) >> 1)
3153 && ! side_effects_p (op0))
3154 return op1;
3155 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3156 return op0;
3157 tem = simplify_associative_operation (code, mode, op0, op1);
3158 if (tem)
3159 return tem;
3160 break;
3162 case UMIN:
3163 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
3164 return op1;
3165 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3166 return op0;
3167 tem = simplify_associative_operation (code, mode, op0, op1);
3168 if (tem)
3169 return tem;
3170 break;
3172 case UMAX:
3173 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
3174 return op1;
3175 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3176 return op0;
3177 tem = simplify_associative_operation (code, mode, op0, op1);
3178 if (tem)
3179 return tem;
3180 break;
3182 case SS_PLUS:
3183 case US_PLUS:
3184 case SS_MINUS:
3185 case US_MINUS:
3186 case SS_MULT:
3187 case US_MULT:
3188 case SS_DIV:
3189 case US_DIV:
3190 /* ??? There are simplifications that can be done. */
3191 return 0;
3193 case VEC_SELECT:
3194 if (!VECTOR_MODE_P (mode))
3196 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3197 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
3198 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3199 gcc_assert (XVECLEN (trueop1, 0) == 1);
3200 gcc_assert (CONST_INT_P (XVECEXP (trueop1, 0, 0)));
3202 if (GET_CODE (trueop0) == CONST_VECTOR)
3203 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
3204 (trueop1, 0, 0)));
3206 /* Extract a scalar element from a nested VEC_SELECT expression
3207 (with an optional nested VEC_CONCAT expression). Some targets
3208 (i386) extract a scalar element from a vector using a chain of
3209 nested VEC_SELECT expressions. When the input operand is a memory
3210 operand, this operation can be simplified to a simple scalar
3211 load from an offset memory address. */
3212 if (GET_CODE (trueop0) == VEC_SELECT)
3214 rtx op0 = XEXP (trueop0, 0);
3215 rtx op1 = XEXP (trueop0, 1);
3217 enum machine_mode opmode = GET_MODE (op0);
3218 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
3219 int n_elts = GET_MODE_SIZE (opmode) / elt_size;
3221 int i = INTVAL (XVECEXP (trueop1, 0, 0));
3222 int elem;
3224 rtvec vec;
3225 rtx tmp_op, tmp;
3227 gcc_assert (GET_CODE (op1) == PARALLEL);
3228 gcc_assert (i < n_elts);
3230 /* Select the element pointed to by the nested selector. */
3231 elem = INTVAL (XVECEXP (op1, 0, i));
3233 /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */
3234 if (GET_CODE (op0) == VEC_CONCAT)
3236 rtx op00 = XEXP (op0, 0);
3237 rtx op01 = XEXP (op0, 1);
3239 enum machine_mode mode00, mode01;
3240 int n_elts00, n_elts01;
3242 mode00 = GET_MODE (op00);
3243 mode01 = GET_MODE (op01);
3245 /* Find out the number of elements of each operand. */
3246 if (VECTOR_MODE_P (mode00))
3248 elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode00));
3249 n_elts00 = GET_MODE_SIZE (mode00) / elt_size;
3251 else
3252 n_elts00 = 1;
3254 if (VECTOR_MODE_P (mode01))
3256 elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode01));
3257 n_elts01 = GET_MODE_SIZE (mode01) / elt_size;
3259 else
3260 n_elts01 = 1;
3262 gcc_assert (n_elts == n_elts00 + n_elts01);
3264 /* Select correct operand of VEC_CONCAT
3265 and adjust selector. */
3266 if (elem < n_elts01)
3267 tmp_op = op00;
3268 else
3270 tmp_op = op01;
3271 elem -= n_elts00;
3274 else
3275 tmp_op = op0;
3277 vec = rtvec_alloc (1);
3278 RTVEC_ELT (vec, 0) = GEN_INT (elem);
3280 tmp = gen_rtx_fmt_ee (code, mode,
3281 tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
3282 return tmp;
3284 if (GET_CODE (trueop0) == VEC_DUPLICATE
3285 && GET_MODE (XEXP (trueop0, 0)) == mode)
3286 return XEXP (trueop0, 0);
3288 else
3290 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3291 gcc_assert (GET_MODE_INNER (mode)
3292 == GET_MODE_INNER (GET_MODE (trueop0)));
3293 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3295 if (GET_CODE (trueop0) == CONST_VECTOR)
3297 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3298 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3299 rtvec v = rtvec_alloc (n_elts);
3300 unsigned int i;
3302 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
3303 for (i = 0; i < n_elts; i++)
3305 rtx x = XVECEXP (trueop1, 0, i);
3307 gcc_assert (CONST_INT_P (x));
3308 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
3309 INTVAL (x));
3312 return gen_rtx_CONST_VECTOR (mode, v);
3315 /* Recognize the identity. */
3316 if (GET_MODE (trueop0) == mode)
3318 bool maybe_ident = true;
3319 for (int i = 0; i < XVECLEN (trueop1, 0); i++)
3321 rtx j = XVECEXP (trueop1, 0, i);
3322 if (!CONST_INT_P (j) || INTVAL (j) != i)
3324 maybe_ident = false;
3325 break;
3328 if (maybe_ident)
3329 return trueop0;
3332 /* If we build {a,b} then permute it, build the result directly. */
3333 if (XVECLEN (trueop1, 0) == 2
3334 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3335 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3336 && GET_CODE (trueop0) == VEC_CONCAT
3337 && GET_CODE (XEXP (trueop0, 0)) == VEC_CONCAT
3338 && GET_MODE (XEXP (trueop0, 0)) == mode
3339 && GET_CODE (XEXP (trueop0, 1)) == VEC_CONCAT
3340 && GET_MODE (XEXP (trueop0, 1)) == mode)
3342 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3343 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3344 rtx subop0, subop1;
3346 gcc_assert (i0 < 4 && i1 < 4);
3347 subop0 = XEXP (XEXP (trueop0, i0 / 2), i0 % 2);
3348 subop1 = XEXP (XEXP (trueop0, i1 / 2), i1 % 2);
3350 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3353 if (XVECLEN (trueop1, 0) == 2
3354 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3355 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3356 && GET_CODE (trueop0) == VEC_CONCAT
3357 && GET_MODE (trueop0) == mode)
3359 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3360 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3361 rtx subop0, subop1;
3363 gcc_assert (i0 < 2 && i1 < 2);
3364 subop0 = XEXP (trueop0, i0);
3365 subop1 = XEXP (trueop0, i1);
3367 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3371 if (XVECLEN (trueop1, 0) == 1
3372 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3373 && GET_CODE (trueop0) == VEC_CONCAT)
3375 rtx vec = trueop0;
3376 int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
3378 /* Try to find the element in the VEC_CONCAT. */
3379 while (GET_MODE (vec) != mode
3380 && GET_CODE (vec) == VEC_CONCAT)
3382 HOST_WIDE_INT vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
3383 if (offset < vec_size)
3384 vec = XEXP (vec, 0);
3385 else
3387 offset -= vec_size;
3388 vec = XEXP (vec, 1);
3390 vec = avoid_constant_pool_reference (vec);
3393 if (GET_MODE (vec) == mode)
3394 return vec;
3397 /* If we select elements in a vec_merge that all come from the same
3398 operand, select from that operand directly. */
3399 if (GET_CODE (op0) == VEC_MERGE)
3401 rtx trueop02 = avoid_constant_pool_reference (XEXP (op0, 2));
3402 if (CONST_INT_P (trueop02))
3404 unsigned HOST_WIDE_INT sel = UINTVAL (trueop02);
3405 bool all_operand0 = true;
3406 bool all_operand1 = true;
3407 for (int i = 0; i < XVECLEN (trueop1, 0); i++)
3409 rtx j = XVECEXP (trueop1, 0, i);
3410 if (sel & (1 << UINTVAL (j)))
3411 all_operand1 = false;
3412 else
3413 all_operand0 = false;
3415 if (all_operand0 && !side_effects_p (XEXP (op0, 1)))
3416 return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 0), op1);
3417 if (all_operand1 && !side_effects_p (XEXP (op0, 0)))
3418 return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 1), op1);
3422 /* If we have two nested selects that are inverses of each
3423 other, replace them with the source operand. */
3424 if (GET_CODE (trueop0) == VEC_SELECT
3425 && GET_MODE (XEXP (trueop0, 0)) == mode)
3427 rtx op0_subop1 = XEXP (trueop0, 1);
3428 gcc_assert (GET_CODE (op0_subop1) == PARALLEL);
3429 gcc_assert (XVECLEN (trueop1, 0) == GET_MODE_NUNITS (mode));
3431 /* Apply the outer ordering vector to the inner one. (The inner
3432 ordering vector is expressly permitted to be of a different
3433 length than the outer one.) If the result is { 0, 1, ..., n-1 }
3434 then the two VEC_SELECTs cancel. */
3435 for (int i = 0; i < XVECLEN (trueop1, 0); ++i)
3437 rtx x = XVECEXP (trueop1, 0, i);
3438 if (!CONST_INT_P (x))
3439 return 0;
3440 rtx y = XVECEXP (op0_subop1, 0, INTVAL (x));
3441 if (!CONST_INT_P (y) || i != INTVAL (y))
3442 return 0;
3444 return XEXP (trueop0, 0);
3447 return 0;
3448 case VEC_CONCAT:
3450 enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
3451 ? GET_MODE (trueop0)
3452 : GET_MODE_INNER (mode));
3453 enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
3454 ? GET_MODE (trueop1)
3455 : GET_MODE_INNER (mode));
3457 gcc_assert (VECTOR_MODE_P (mode));
3458 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
3459 == GET_MODE_SIZE (mode));
3461 if (VECTOR_MODE_P (op0_mode))
3462 gcc_assert (GET_MODE_INNER (mode)
3463 == GET_MODE_INNER (op0_mode));
3464 else
3465 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
3467 if (VECTOR_MODE_P (op1_mode))
3468 gcc_assert (GET_MODE_INNER (mode)
3469 == GET_MODE_INNER (op1_mode));
3470 else
3471 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
3473 if ((GET_CODE (trueop0) == CONST_VECTOR
3474 || CONST_SCALAR_INT_P (trueop0)
3475 || CONST_DOUBLE_AS_FLOAT_P (trueop0))
3476 && (GET_CODE (trueop1) == CONST_VECTOR
3477 || CONST_SCALAR_INT_P (trueop1)
3478 || CONST_DOUBLE_AS_FLOAT_P (trueop1)))
3480 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3481 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3482 rtvec v = rtvec_alloc (n_elts);
3483 unsigned int i;
3484 unsigned in_n_elts = 1;
3486 if (VECTOR_MODE_P (op0_mode))
3487 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
3488 for (i = 0; i < n_elts; i++)
3490 if (i < in_n_elts)
3492 if (!VECTOR_MODE_P (op0_mode))
3493 RTVEC_ELT (v, i) = trueop0;
3494 else
3495 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
3497 else
3499 if (!VECTOR_MODE_P (op1_mode))
3500 RTVEC_ELT (v, i) = trueop1;
3501 else
3502 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
3503 i - in_n_elts);
3507 return gen_rtx_CONST_VECTOR (mode, v);
3510 /* Try to merge two VEC_SELECTs from the same vector into a single one.
3511 Restrict the transformation to avoid generating a VEC_SELECT with a
3512 mode unrelated to its operand. */
3513 if (GET_CODE (trueop0) == VEC_SELECT
3514 && GET_CODE (trueop1) == VEC_SELECT
3515 && rtx_equal_p (XEXP (trueop0, 0), XEXP (trueop1, 0))
3516 && GET_MODE (XEXP (trueop0, 0)) == mode)
3518 rtx par0 = XEXP (trueop0, 1);
3519 rtx par1 = XEXP (trueop1, 1);
3520 int len0 = XVECLEN (par0, 0);
3521 int len1 = XVECLEN (par1, 0);
3522 rtvec vec = rtvec_alloc (len0 + len1);
3523 for (int i = 0; i < len0; i++)
3524 RTVEC_ELT (vec, i) = XVECEXP (par0, 0, i);
3525 for (int i = 0; i < len1; i++)
3526 RTVEC_ELT (vec, len0 + i) = XVECEXP (par1, 0, i);
3527 return simplify_gen_binary (VEC_SELECT, mode, XEXP (trueop0, 0),
3528 gen_rtx_PARALLEL (VOIDmode, vec));
3531 return 0;
3533 default:
3534 gcc_unreachable ();
3537 return 0;
3540 rtx
3541 simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
3542 rtx op0, rtx op1)
3544 unsigned int width = GET_MODE_PRECISION (mode);
3546 if (VECTOR_MODE_P (mode)
3547 && code != VEC_CONCAT
3548 && GET_CODE (op0) == CONST_VECTOR
3549 && GET_CODE (op1) == CONST_VECTOR)
3551 unsigned n_elts = GET_MODE_NUNITS (mode);
3552 enum machine_mode op0mode = GET_MODE (op0);
3553 unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
3554 enum machine_mode op1mode = GET_MODE (op1);
3555 unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
3556 rtvec v = rtvec_alloc (n_elts);
3557 unsigned int i;
3559 gcc_assert (op0_n_elts == n_elts);
3560 gcc_assert (op1_n_elts == n_elts);
3561 for (i = 0; i < n_elts; i++)
3563 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
3564 CONST_VECTOR_ELT (op0, i),
3565 CONST_VECTOR_ELT (op1, i));
3566 if (!x)
3567 return 0;
3568 RTVEC_ELT (v, i) = x;
3571 return gen_rtx_CONST_VECTOR (mode, v);
3574 if (VECTOR_MODE_P (mode)
3575 && code == VEC_CONCAT
3576 && (CONST_SCALAR_INT_P (op0)
3577 || GET_CODE (op0) == CONST_FIXED
3578 || CONST_DOUBLE_AS_FLOAT_P (op0))
3579 && (CONST_SCALAR_INT_P (op1)
3580 || CONST_DOUBLE_AS_FLOAT_P (op1)
3581 || GET_CODE (op1) == CONST_FIXED))
3583 unsigned n_elts = GET_MODE_NUNITS (mode);
3584 rtvec v = rtvec_alloc (n_elts);
3586 gcc_assert (n_elts >= 2);
3587 if (n_elts == 2)
3589 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
3590 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
3592 RTVEC_ELT (v, 0) = op0;
3593 RTVEC_ELT (v, 1) = op1;
3595 else
3597 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
3598 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
3599 unsigned i;
3601 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
3602 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
3603 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
3605 for (i = 0; i < op0_n_elts; ++i)
3606 RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
3607 for (i = 0; i < op1_n_elts; ++i)
3608 RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
3611 return gen_rtx_CONST_VECTOR (mode, v);
3614 if (SCALAR_FLOAT_MODE_P (mode)
3615 && CONST_DOUBLE_AS_FLOAT_P (op0)
3616 && CONST_DOUBLE_AS_FLOAT_P (op1)
3617 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
3619 if (code == AND
3620 || code == IOR
3621 || code == XOR)
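/* Apply the logical operation to the target bit image of the two
   constants (via real_to_target / real_from_target) rather than to
   their numeric values. */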
3623 long tmp0[4];
3624 long tmp1[4];
3625 REAL_VALUE_TYPE r;
3626 int i;
3628 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
3629 GET_MODE (op0));
3630 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
3631 GET_MODE (op1));
3632 for (i = 0; i < 4; i++)
3634 switch (code)
3636 case AND:
3637 tmp0[i] &= tmp1[i];
3638 break;
3639 case IOR:
3640 tmp0[i] |= tmp1[i];
3641 break;
3642 case XOR:
3643 tmp0[i] ^= tmp1[i];
3644 break;
3645 default:
3646 gcc_unreachable ();
3649 real_from_target (&r, tmp0, mode);
3650 return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
3652 else
3654 REAL_VALUE_TYPE f0, f1, value, result;
3655 bool inexact;
3657 REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
3658 REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
3659 real_convert (&f0, mode, &f0);
3660 real_convert (&f1, mode, &f1);
3662 if (HONOR_SNANS (mode)
3663 && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
3664 return 0;
3666 if (code == DIV
3667 && REAL_VALUES_EQUAL (f1, dconst0)
3668 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
3669 return 0;
3671 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3672 && flag_trapping_math
3673 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
3675 int s0 = REAL_VALUE_NEGATIVE (f0);
3676 int s1 = REAL_VALUE_NEGATIVE (f1);
3678 switch (code)
3680 case PLUS:
3681 /* Inf + -Inf = NaN plus exception. */
3682 if (s0 != s1)
3683 return 0;
3684 break;
3685 case MINUS:
3686 /* Inf - Inf = NaN plus exception. */
3687 if (s0 == s1)
3688 return 0;
3689 break;
3690 case DIV:
3691 /* Inf / Inf = NaN plus exception. */
3692 return 0;
3693 default:
3694 break;
3698 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3699 && flag_trapping_math
3700 && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
3701 || (REAL_VALUE_ISINF (f1)
3702 && REAL_VALUES_EQUAL (f0, dconst0))))
3703 /* Inf * 0 = NaN plus exception. */
3704 return 0;
3706 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
3707 &f0, &f1);
3708 real_convert (&result, mode, &value);
3710 /* Don't constant fold this floating point operation if
3711 the result has overflowed and flag_trapping_math is set. */
3713 if (flag_trapping_math
3714 && MODE_HAS_INFINITIES (mode)
3715 && REAL_VALUE_ISINF (result)
3716 && !REAL_VALUE_ISINF (f0)
3717 && !REAL_VALUE_ISINF (f1))
3718 /* Overflow plus exception. */
3719 return 0;
3721 /* Don't constant fold this floating point operation if the
3722 result may depend upon the run-time rounding mode and
3723 flag_rounding_math is set, or if GCC's software emulation
3724 is unable to accurately represent the result. */
3726 if ((flag_rounding_math
3727 || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
3728 && (inexact || !real_identical (&result, &value)))
3729 return NULL_RTX;
3731 return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
3735 /* We can fold some multi-word operations. */
3736 if (GET_MODE_CLASS (mode) == MODE_INT
3737 && CONST_SCALAR_INT_P (op0)
3738 && CONST_SCALAR_INT_P (op1))
3740 wide_int result;
3741 bool overflow;
3742 rtx_mode_t pop0 = std::make_pair (op0, mode);
3743 rtx_mode_t pop1 = std::make_pair (op1, mode);
3745 #if TARGET_SUPPORTS_WIDE_INT == 0
3746 /* This assert keeps the simplification from producing a result
3747 that cannot be represented in a CONST_DOUBLE but a lot of
3748 upstream callers expect that this function never fails to
3749 simplify something, so if you added this to the test
3750 above, the code would die later anyway. If this assert
3751 triggers, you just need to make the port support wide int. */
3752 gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT);
3753 #endif
3754 switch (code)
3756 case MINUS:
3757 result = wi::sub (pop0, pop1);
3758 break;
3760 case PLUS:
3761 result = wi::add (pop0, pop1);
3762 break;
3764 case MULT:
3765 result = wi::mul (pop0, pop1);
3766 break;
3768 case DIV:
3769 result = wi::div_trunc (pop0, pop1, SIGNED, &overflow);
3770 if (overflow)
3771 return NULL_RTX;
3772 break;
3774 case MOD:
3775 result = wi::mod_trunc (pop0, pop1, SIGNED, &overflow);
3776 if (overflow)
3777 return NULL_RTX;
3778 break;
3780 case UDIV:
3781 result = wi::div_trunc (pop0, pop1, UNSIGNED, &overflow);
3782 if (overflow)
3783 return NULL_RTX;
3784 break;
3786 case UMOD:
3787 result = wi::mod_trunc (pop0, pop1, UNSIGNED, &overflow);
3788 if (overflow)
3789 return NULL_RTX;
3790 break;
3792 case AND:
3793 result = wi::bit_and (pop0, pop1);
3794 break;
3796 case IOR:
3797 result = wi::bit_or (pop0, pop1);
3798 break;
3800 case XOR:
3801 result = wi::bit_xor (pop0, pop1);
3802 break;
3804 case SMIN:
3805 result = wi::smin (pop0, pop1);
3806 break;
3808 case SMAX:
3809 result = wi::smax (pop0, pop1);
3810 break;
3812 case UMIN:
3813 result = wi::umin (pop0, pop1);
3814 break;
3816 case UMAX:
3817 result = wi::umax (pop0, pop1);
3818 break;
3820 case LSHIFTRT:
3821 case ASHIFTRT:
3822 case ASHIFT:
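/* If the target truncates out-of-range shift counts, reduce the count
   modulo the precision; otherwise refuse to fold an out-of-range shift.  */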
3824 wide_int wop1 = pop1;
3825 if (SHIFT_COUNT_TRUNCATED)
3826 wop1 = wi::umod_trunc (wop1, width);
3827 else if (wi::geu_p (wop1, width))
3828 return NULL_RTX;
3830 switch (code)
3832 case LSHIFTRT:
3833 result = wi::lrshift (pop0, wop1);
3834 break;
3836 case ASHIFTRT:
3837 result = wi::arshift (pop0, wop1);
3838 break;
3840 case ASHIFT:
3841 result = wi::lshift (pop0, wop1);
3842 break;
3844 default:
3845 gcc_unreachable ();
3847 break;
3849 case ROTATE:
3850 case ROTATERT:
3852 if (wi::neg_p (pop1))
3853 return NULL_RTX;
3855 switch (code)
3857 case ROTATE:
3858 result = wi::lrotate (pop0, pop1);
3859 break;
3861 case ROTATERT:
3862 result = wi::rrotate (pop0, pop1);
3863 break;
3865 default:
3866 gcc_unreachable ();
3868 break;
3870 default:
3871 return NULL_RTX;
3873 return immed_wide_int_const (result, mode);
3876 return NULL_RTX;
3881 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
3882 PLUS or MINUS.
3884 Rather than test for specific cases, we do this by a brute-force method
3885 and do all possible simplifications until no more changes occur. Then
3886 we rebuild the operation. */
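/* For instance, given (plus (minus x y) (plus y (const_int 1))), the loop
   below collects the operands x, -y, y and (const_int 1); the y terms
   cancel, and the result is rebuilt as (plus x (const_int 1)).  */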
3888 struct simplify_plus_minus_op_data
3890 rtx op;
3891 short neg;
3894 static bool
3895 simplify_plus_minus_op_data_cmp (rtx x, rtx y)
3897 int result;
3899 result = (commutative_operand_precedence (y)
3900 - commutative_operand_precedence (x));
3901 if (result)
3902 return result > 0;
3904 /* Group together equal REGs to do more simplification. */
3905 if (REG_P (x) && REG_P (y))
3906 return REGNO (x) > REGNO (y);
3907 else
3908 return false;
3911 static rtx
3912 simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
3913 rtx op1)
3915 struct simplify_plus_minus_op_data ops[8];
3916 rtx result, tem;
3917 int n_ops = 2, input_ops = 2;
3918 int changed, n_constants = 0, canonicalized = 0;
3919 int i, j;
3921 memset (ops, 0, sizeof ops);
3923 /* Set up the two operands and then expand them until nothing has been
3924 changed. If we run out of room in our array, give up; this should
3925 almost never happen. */
3927 ops[0].op = op0;
3928 ops[0].neg = 0;
3929 ops[1].op = op1;
3930 ops[1].neg = (code == MINUS);
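/* Flatten nested PLUS, MINUS, NEG, NOT and CONST expressions into the
   ops array, recording negation in the NEG field, until nothing changes.  */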
3934 changed = 0;
3936 for (i = 0; i < n_ops; i++)
3938 rtx this_op = ops[i].op;
3939 int this_neg = ops[i].neg;
3940 enum rtx_code this_code = GET_CODE (this_op);
3942 switch (this_code)
3944 case PLUS:
3945 case MINUS:
3946 if (n_ops == 7)
3947 return NULL_RTX;
3949 ops[n_ops].op = XEXP (this_op, 1);
3950 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
3951 n_ops++;
3953 ops[i].op = XEXP (this_op, 0);
3954 input_ops++;
3955 changed = 1;
3956 canonicalized |= this_neg;
3957 break;
3959 case NEG:
3960 ops[i].op = XEXP (this_op, 0);
3961 ops[i].neg = ! this_neg;
3962 changed = 1;
3963 canonicalized = 1;
3964 break;
3966 case CONST:
3967 if (n_ops < 7
3968 && GET_CODE (XEXP (this_op, 0)) == PLUS
3969 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
3970 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
3972 ops[i].op = XEXP (XEXP (this_op, 0), 0);
3973 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
3974 ops[n_ops].neg = this_neg;
3975 n_ops++;
3976 changed = 1;
3977 canonicalized = 1;
3979 break;
3981 case NOT:
3982 /* ~a -> (-a - 1) */
3983 if (n_ops != 7)
3985 ops[n_ops].op = CONSTM1_RTX (mode);
3986 ops[n_ops++].neg = this_neg;
3987 ops[i].op = XEXP (this_op, 0);
3988 ops[i].neg = !this_neg;
3989 changed = 1;
3990 canonicalized = 1;
3992 break;
3994 case CONST_INT:
3995 n_constants++;
3996 if (this_neg)
3998 ops[i].op = neg_const_int (mode, this_op);
3999 ops[i].neg = 0;
4000 changed = 1;
4001 canonicalized = 1;
4003 break;
4005 default:
4006 break;
4010 while (changed);
4012 if (n_constants > 1)
4013 canonicalized = 1;
4015 gcc_assert (n_ops >= 2);
4017 /* If we only have two operands, we can avoid the loops. */
4018 if (n_ops == 2)
4020 enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
4021 rtx lhs, rhs;
4023 /* Get the two operands. Be careful with the order, especially for
4024 the cases where code == MINUS. */
4025 if (ops[0].neg && ops[1].neg)
4027 lhs = gen_rtx_NEG (mode, ops[0].op);
4028 rhs = ops[1].op;
4030 else if (ops[0].neg)
4032 lhs = ops[1].op;
4033 rhs = ops[0].op;
4035 else
4037 lhs = ops[0].op;
4038 rhs = ops[1].op;
4041 return simplify_const_binary_operation (code, mode, lhs, rhs);
4044 /* Now simplify each pair of operands until nothing changes. */
4047 /* Insertion sort is good enough for an eight-element array. */
4048 for (i = 1; i < n_ops; i++)
4050 struct simplify_plus_minus_op_data save;
4051 j = i - 1;
4052 if (!simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op))
4053 continue;
4055 canonicalized = 1;
4056 save = ops[i];
4058 ops[j + 1] = ops[j];
4059 while (j-- && simplify_plus_minus_op_data_cmp (ops[j].op, save.op));
4060 ops[j + 1] = save;
4063 changed = 0;
4064 for (i = n_ops - 1; i > 0; i--)
4065 for (j = i - 1; j >= 0; j--)
4067 rtx lhs = ops[j].op, rhs = ops[i].op;
4068 int lneg = ops[j].neg, rneg = ops[i].neg;
4070 if (lhs != 0 && rhs != 0)
4072 enum rtx_code ncode = PLUS;
4074 if (lneg != rneg)
4076 ncode = MINUS;
4077 if (lneg)
4078 tem = lhs, lhs = rhs, rhs = tem;
4080 else if (swap_commutative_operands_p (lhs, rhs))
4081 tem = lhs, lhs = rhs, rhs = tem;
4083 if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
4084 && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
4086 rtx tem_lhs, tem_rhs;
4088 tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
4089 tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
4090 tem = simplify_binary_operation (ncode, mode, tem_lhs, tem_rhs);
4092 if (tem && !CONSTANT_P (tem))
4093 tem = gen_rtx_CONST (GET_MODE (tem), tem);
4095 else
4096 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
4098 /* Reject "simplifications" that just wrap the two
4099 arguments in a CONST. Failure to do so can result
4100 in infinite recursion with simplify_binary_operation
4101 when it calls us to simplify CONST operations. */
4102 if (tem
4103 && ! (GET_CODE (tem) == CONST
4104 && GET_CODE (XEXP (tem, 0)) == ncode
4105 && XEXP (XEXP (tem, 0), 0) == lhs
4106 && XEXP (XEXP (tem, 0), 1) == rhs))
4108 lneg &= rneg;
4109 if (GET_CODE (tem) == NEG)
4110 tem = XEXP (tem, 0), lneg = !lneg;
4111 if (CONST_INT_P (tem) && lneg)
4112 tem = neg_const_int (mode, tem), lneg = 0;
4114 ops[i].op = tem;
4115 ops[i].neg = lneg;
4116 ops[j].op = NULL_RTX;
4117 changed = 1;
4118 canonicalized = 1;
4123 /* If nothing changed, fail. */
4124 if (!canonicalized)
4125 return NULL_RTX;
4127 /* Pack all the operands to the lower-numbered entries. */
4128 for (i = 0, j = 0; j < n_ops; j++)
4129 if (ops[j].op)
4131 ops[i] = ops[j];
4132 i++;
4134 n_ops = i;
4136 while (changed);
4138 /* Create (minus -C X) instead of (neg (const (plus X C))). */
4139 if (n_ops == 2
4140 && CONST_INT_P (ops[1].op)
4141 && CONSTANT_P (ops[0].op)
4142 && ops[0].neg)
4143 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
4145 /* We suppressed creation of trivial CONST expressions in the
4146 combination loop to avoid recursion. Create one manually now.
4147 The combination loop should have ensured that there is exactly
4148 one CONST_INT, and the sort will have ensured that it is last
4149 in the array and that any other constant will be next-to-last. */
4151 if (n_ops > 1
4152 && CONST_INT_P (ops[n_ops - 1].op)
4153 && CONSTANT_P (ops[n_ops - 2].op))
4155 rtx value = ops[n_ops - 1].op;
4156 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
4157 value = neg_const_int (mode, value);
4158 ops[n_ops - 2].op = plus_constant (mode, ops[n_ops - 2].op,
4159 INTVAL (value));
4160 n_ops--;
4163 /* Put a non-negated operand first, if possible. */
4165 for (i = 0; i < n_ops && ops[i].neg; i++)
4166 continue;
4167 if (i == n_ops)
4168 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
4169 else if (i != 0)
4171 tem = ops[0].op;
4172 ops[0] = ops[i];
4173 ops[i].op = tem;
4174 ops[i].neg = 1;
4177 /* Now make the result by performing the requested operations. */
4178 result = ops[0].op;
4179 for (i = 1; i < n_ops; i++)
4180 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
4181 mode, result, ops[i].op);
4183 return result;
4186 /* Check whether an operand is suitable for calling simplify_plus_minus. */
4187 static bool
4188 plus_minus_operand_p (const_rtx x)
4190 return GET_CODE (x) == PLUS
4191 || GET_CODE (x) == MINUS
4192 || (GET_CODE (x) == CONST
4193 && GET_CODE (XEXP (x, 0)) == PLUS
4194 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
4195 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
4198 /* Like simplify_binary_operation except used for relational operators.
4199 MODE is the mode of the result. If MODE is VOIDmode, the operands must
4200 not both be VOIDmode.
4202 CMP_MODE specifies the mode in which the comparison is done, so it is
4203 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
4204 the operands or, if both are VOIDmode, the operands are compared in
4205 "infinite precision". */
4207 simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
4208 enum machine_mode cmp_mode, rtx op0, rtx op1)
4210 rtx tem, trueop0, trueop1;
4212 if (cmp_mode == VOIDmode)
4213 cmp_mode = GET_MODE (op0);
4214 if (cmp_mode == VOIDmode)
4215 cmp_mode = GET_MODE (op1);
4217 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
4218 if (tem)
4220 if (SCALAR_FLOAT_MODE_P (mode))
4222 if (tem == const0_rtx)
4223 return CONST0_RTX (mode);
4224 #ifdef FLOAT_STORE_FLAG_VALUE
4226 REAL_VALUE_TYPE val;
4227 val = FLOAT_STORE_FLAG_VALUE (mode);
4228 return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
4230 #else
4231 return NULL_RTX;
4232 #endif
4234 if (VECTOR_MODE_P (mode))
4236 if (tem == const0_rtx)
4237 return CONST0_RTX (mode);
4238 #ifdef VECTOR_STORE_FLAG_VALUE
4240 int i, units;
4241 rtvec v;
4243 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
4244 if (val == NULL_RTX)
4245 return NULL_RTX;
4246 if (val == const1_rtx)
4247 return CONST1_RTX (mode);
4249 units = GET_MODE_NUNITS (mode);
4250 v = rtvec_alloc (units);
4251 for (i = 0; i < units; i++)
4252 RTVEC_ELT (v, i) = val;
4253 return gen_rtx_raw_CONST_VECTOR (mode, v);
4255 #else
4256 return NULL_RTX;
4257 #endif
4260 return tem;
4263 /* For the following tests, ensure const0_rtx is op1. */
4264 if (swap_commutative_operands_p (op0, op1)
4265 || (op0 == const0_rtx && op1 != const0_rtx))
4266 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
4268 /* If op0 is a compare, extract the comparison arguments from it. */
4269 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4270 return simplify_gen_relational (code, mode, VOIDmode,
4271 XEXP (op0, 0), XEXP (op0, 1));
4273 if (GET_MODE_CLASS (cmp_mode) == MODE_CC
4274 || CC0_P (op0))
4275 return NULL_RTX;
4277 trueop0 = avoid_constant_pool_reference (op0);
4278 trueop1 = avoid_constant_pool_reference (op1);
4279 return simplify_relational_operation_1 (code, mode, cmp_mode,
4280 trueop0, trueop1);
4283 /* This part of simplify_relational_operation is only used when CMP_MODE
4284 is not in class MODE_CC (i.e. it is a real comparison).
4286 MODE is the mode of the result, while CMP_MODE specifies the mode
4287 in which the comparison is done, so it is the mode of the operands. */
4289 static rtx
4290 simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
4291 enum machine_mode cmp_mode, rtx op0, rtx op1)
4293 enum rtx_code op0code = GET_CODE (op0);
4295 if (op1 == const0_rtx && COMPARISON_P (op0))
4297 /* If op0 is a comparison, extract the comparison arguments
4298 from it. */
4299 if (code == NE)
4301 if (GET_MODE (op0) == mode)
4302 return simplify_rtx (op0);
4303 else
4304 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
4305 XEXP (op0, 0), XEXP (op0, 1));
4307 else if (code == EQ)
4309 enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
4310 if (new_code != UNKNOWN)
4311 return simplify_gen_relational (new_code, mode, VOIDmode,
4312 XEXP (op0, 0), XEXP (op0, 1));
4316 /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
4317 (GEU/LTU a -C). Likewise for (LTU/GEU (PLUS a C) a). */
4318 if ((code == LTU || code == GEU)
4319 && GET_CODE (op0) == PLUS
4320 && CONST_INT_P (XEXP (op0, 1))
4321 && (rtx_equal_p (op1, XEXP (op0, 0))
4322 || rtx_equal_p (op1, XEXP (op0, 1)))
4323 /* (LTU/GEU (PLUS a 0) 0) is not the same as (GEU/LTU a 0). */
4324 && XEXP (op0, 1) != const0_rtx)
4326 rtx new_cmp
4327 = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
4328 return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
4329 cmp_mode, XEXP (op0, 0), new_cmp);
4332 /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a). */
4333 if ((code == LTU || code == GEU)
4334 && GET_CODE (op0) == PLUS
4335 && rtx_equal_p (op1, XEXP (op0, 1))
4336 /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b). */
4337 && !rtx_equal_p (op1, XEXP (op0, 0)))
4338 return simplify_gen_relational (code, mode, cmp_mode, op0,
4339 copy_rtx (XEXP (op0, 0)));
4341 if (op1 == const0_rtx)
4343 /* Canonicalize (GTU x 0) as (NE x 0). */
4344 if (code == GTU)
4345 return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
4346 /* Canonicalize (LEU x 0) as (EQ x 0). */
4347 if (code == LEU)
4348 return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
4350 else if (op1 == const1_rtx)
4352 switch (code)
4354 case GE:
4355 /* Canonicalize (GE x 1) as (GT x 0). */
4356 return simplify_gen_relational (GT, mode, cmp_mode,
4357 op0, const0_rtx);
4358 case GEU:
4359 /* Canonicalize (GEU x 1) as (NE x 0). */
4360 return simplify_gen_relational (NE, mode, cmp_mode,
4361 op0, const0_rtx);
4362 case LT:
4363 /* Canonicalize (LT x 1) as (LE x 0). */
4364 return simplify_gen_relational (LE, mode, cmp_mode,
4365 op0, const0_rtx);
4366 case LTU:
4367 /* Canonicalize (LTU x 1) as (EQ x 0). */
4368 return simplify_gen_relational (EQ, mode, cmp_mode,
4369 op0, const0_rtx);
4370 default:
4371 break;
4374 else if (op1 == constm1_rtx)
4376 /* Canonicalize (LE x -1) as (LT x 0). */
4377 if (code == LE)
4378 return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
4379 /* Canonicalize (GT x -1) as (GE x 0). */
4380 if (code == GT)
4381 return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
4384 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
4385 if ((code == EQ || code == NE)
4386 && (op0code == PLUS || op0code == MINUS)
4387 && CONSTANT_P (op1)
4388 && CONSTANT_P (XEXP (op0, 1))
4389 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
4391 rtx x = XEXP (op0, 0);
4392 rtx c = XEXP (op0, 1);
4393 enum rtx_code invcode = op0code == PLUS ? MINUS : PLUS;
4394 rtx tem = simplify_gen_binary (invcode, cmp_mode, op1, c);
4396 /* Detect an infinitely recursive condition, where we oscillate at this
4397 simplification case between:
4398 A + B == C <---> C - B == A,
4399 where A, B, and C are all constants with non-simplifiable expressions,
4400 usually SYMBOL_REFs. */
4401 if (GET_CODE (tem) == invcode
4402 && CONSTANT_P (x)
4403 && rtx_equal_p (c, XEXP (tem, 1)))
4404 return NULL_RTX;
4406 return simplify_gen_relational (code, mode, cmp_mode, x, tem);
4409 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0)) is
4410 the same as (zero_extract:SI FOO (const_int 1) BAR). */
4411 if (code == NE
4412 && op1 == const0_rtx
4413 && GET_MODE_CLASS (mode) == MODE_INT
4414 && cmp_mode != VOIDmode
4415 /* ??? Work-around BImode bugs in the ia64 backend. */
4416 && mode != BImode
4417 && cmp_mode != BImode
4418 && nonzero_bits (op0, cmp_mode) == 1
4419 && STORE_FLAG_VALUE == 1)
4420 return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
4421 ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
4422 : lowpart_subreg (mode, op0, cmp_mode);
4424 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
4425 if ((code == EQ || code == NE)
4426 && op1 == const0_rtx
4427 && op0code == XOR)
4428 return simplify_gen_relational (code, mode, cmp_mode,
4429 XEXP (op0, 0), XEXP (op0, 1));
4431 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
4432 if ((code == EQ || code == NE)
4433 && op0code == XOR
4434 && rtx_equal_p (XEXP (op0, 0), op1)
4435 && !side_effects_p (XEXP (op0, 0)))
4436 return simplify_gen_relational (code, mode, cmp_mode,
4437 XEXP (op0, 1), const0_rtx);
4439 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
4440 if ((code == EQ || code == NE)
4441 && op0code == XOR
4442 && rtx_equal_p (XEXP (op0, 1), op1)
4443 && !side_effects_p (XEXP (op0, 1)))
4444 return simplify_gen_relational (code, mode, cmp_mode,
4445 XEXP (op0, 0), const0_rtx);
4447 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
4448 if ((code == EQ || code == NE)
4449 && op0code == XOR
4450 && CONST_SCALAR_INT_P (op1)
4451 && CONST_SCALAR_INT_P (XEXP (op0, 1)))
4452 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4453 simplify_gen_binary (XOR, cmp_mode,
4454 XEXP (op0, 1), op1));
4456 /* (eq/ne (bswap x) C1) simplifies to (eq/ne x C2) with C2 swapped. */
4457 if ((code == EQ || code == NE)
4458 && GET_CODE (op0) == BSWAP
4459 && CONST_SCALAR_INT_P (op1))
4460 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4461 simplify_gen_unary (BSWAP, cmp_mode,
4462 op1, cmp_mode));
4464 /* (eq/ne (bswap x) (bswap y)) simplifies to (eq/ne x y). */
4465 if ((code == EQ || code == NE)
4466 && GET_CODE (op0) == BSWAP
4467 && GET_CODE (op1) == BSWAP)
4468 return simplify_gen_relational (code, mode, cmp_mode,
4469 XEXP (op0, 0), XEXP (op1, 0));
4471 if (op0code == POPCOUNT && op1 == const0_rtx)
4472 switch (code)
4474 case EQ:
4475 case LE:
4476 case LEU:
4477 /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */
4478 return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
4479 XEXP (op0, 0), const0_rtx);
4481 case NE:
4482 case GT:
4483 case GTU:
4484 /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */
4485 return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
4486 XEXP (op0, 0), const0_rtx);
4488 default:
4489 break;
4492 return NULL_RTX;
4495 enum
4497 CMP_EQ = 1,
4498 CMP_LT = 2,
4499 CMP_GT = 4,
4500 CMP_LTU = 8,
4501 CMP_GTU = 16
4505 /* Convert the known results for EQ, LT, GT, LTU, GTU contained in
4506 KNOWN_RESULT to a CONST_INT, based on the requested comparison CODE.
4507 For KNOWN_RESULT to make sense it should be either CMP_EQ, or the
4508 logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
4509 For floating-point comparisons, assume that the operands were ordered. */
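/* For instance, comparison_result (LE, CMP_GT | CMP_GTU) is const0_rtx,
   since a result already known to be "greater than" contradicts LE, while
   comparison_result (NE, CMP_GT | CMP_GTU) is const_true_rtx.  */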
4511 static rtx
4512 comparison_result (enum rtx_code code, int known_results)
4514 switch (code)
4516 case EQ:
4517 case UNEQ:
4518 return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
4519 case NE:
4520 case LTGT:
4521 return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;
4523 case LT:
4524 case UNLT:
4525 return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
4526 case GE:
4527 case UNGE:
4528 return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;
4530 case GT:
4531 case UNGT:
4532 return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
4533 case LE:
4534 case UNLE:
4535 return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;
4537 case LTU:
4538 return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
4539 case GEU:
4540 return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;
4542 case GTU:
4543 return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
4544 case LEU:
4545 return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;
4547 case ORDERED:
4548 return const_true_rtx;
4549 case UNORDERED:
4550 return const0_rtx;
4551 default:
4552 gcc_unreachable ();
4556 /* Check if the given comparison (done in the given MODE) is actually
4557 a tautology or a contradiction. If the mode is VOIDmode, the
4558 comparison is done in "infinite precision". If no simplification
4559 is possible, this function returns zero. Otherwise, it returns
4560 either const_true_rtx or const0_rtx. */
4563 simplify_const_relational_operation (enum rtx_code code,
4564 enum machine_mode mode,
4565 rtx op0, rtx op1)
4567 rtx tem;
4568 rtx trueop0;
4569 rtx trueop1;
4571 gcc_assert (mode != VOIDmode
4572 || (GET_MODE (op0) == VOIDmode
4573 && GET_MODE (op1) == VOIDmode));
4575 /* If op0 is a compare, extract the comparison arguments from it. */
4576 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4578 op1 = XEXP (op0, 1);
4579 op0 = XEXP (op0, 0);
4581 if (GET_MODE (op0) != VOIDmode)
4582 mode = GET_MODE (op0);
4583 else if (GET_MODE (op1) != VOIDmode)
4584 mode = GET_MODE (op1);
4585 else
4586 return 0;
4589 /* We can't simplify MODE_CC values since we don't know what the
4590 actual comparison is. */
4591 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
4592 return 0;
4594 /* Make sure the constant is second. */
4595 if (swap_commutative_operands_p (op0, op1))
4597 tem = op0, op0 = op1, op1 = tem;
4598 code = swap_condition (code);
4601 trueop0 = avoid_constant_pool_reference (op0);
4602 trueop1 = avoid_constant_pool_reference (op1);
4604 /* For integer comparisons of A and B maybe we can simplify A - B and can
4605 then simplify a comparison of that with zero. If A and B are both either
4606 a register or a CONST_INT, this can't help; testing for these cases will
4607 prevent infinite recursion here and speed things up.
4609 We can only do this for EQ and NE comparisons, as otherwise we may
4610 lose or introduce overflow which we cannot disregard as undefined, since
4611 we do not know the signedness of the operation on either the left or
4612 the right hand side of the comparison. */
4614 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
4615 && (code == EQ || code == NE)
4616 && ! ((REG_P (op0) || CONST_INT_P (trueop0))
4617 && (REG_P (op1) || CONST_INT_P (trueop1)))
4618 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
4619 /* We cannot do this if tem is a nonzero address. */
4620 && ! nonzero_address_p (tem))
4621 return simplify_const_relational_operation (signed_condition (code),
4622 mode, tem, const0_rtx);
4624 if (! HONOR_NANS (mode) && code == ORDERED)
4625 return const_true_rtx;
4627 if (! HONOR_NANS (mode) && code == UNORDERED)
4628 return const0_rtx;
4630 /* For modes without NaNs, if the two operands are equal, we know the
4631 result except if they have side-effects. Even with NaNs we know
4632 the result of unordered comparisons and, if signaling NaNs are
4633 irrelevant, also the result of LT/GT/LTGT. */
4634 if ((! HONOR_NANS (GET_MODE (trueop0))
4635 || code == UNEQ || code == UNLE || code == UNGE
4636 || ((code == LT || code == GT || code == LTGT)
4637 && ! HONOR_SNANS (GET_MODE (trueop0))))
4638 && rtx_equal_p (trueop0, trueop1)
4639 && ! side_effects_p (trueop0))
4640 return comparison_result (code, CMP_EQ);
4642 /* If the operands are floating-point constants, see if we can fold
4643 the result. */
4644 if (CONST_DOUBLE_AS_FLOAT_P (trueop0)
4645 && CONST_DOUBLE_AS_FLOAT_P (trueop1)
4646 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
4648 REAL_VALUE_TYPE d0, d1;
4650 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
4651 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
4653 /* Comparisons are unordered iff at least one of the values is NaN. */
4654 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
4655 switch (code)
4657 case UNEQ:
4658 case UNLT:
4659 case UNGT:
4660 case UNLE:
4661 case UNGE:
4662 case NE:
4663 case UNORDERED:
4664 return const_true_rtx;
4665 case EQ:
4666 case LT:
4667 case GT:
4668 case LE:
4669 case GE:
4670 case LTGT:
4671 case ORDERED:
4672 return const0_rtx;
4673 default:
4674 return 0;
4677 return comparison_result (code,
4678 (REAL_VALUES_EQUAL (d0, d1) ? CMP_EQ :
4679 REAL_VALUES_LESS (d0, d1) ? CMP_LT : CMP_GT));
4682 /* Otherwise, see if the operands are both integers. */
4683 if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
4684 && CONST_SCALAR_INT_P (trueop0) && CONST_SCALAR_INT_P (trueop1))
4686 /* It would be nice if we really had a mode here. However, the
4687 largest int representable on the target is as good as
4688 infinite. */
4689 enum machine_mode cmode = (mode == VOIDmode) ? MAX_MODE_INT : mode;
4690 rtx_mode_t ptrueop0 = std::make_pair (trueop0, cmode);
4691 rtx_mode_t ptrueop1 = std::make_pair (trueop1, cmode);
4693 if (wi::eq_p (ptrueop0, ptrueop1))
4694 return comparison_result (code, CMP_EQ);
4695 else
4697 int cr = wi::lts_p (ptrueop0, ptrueop1) ? CMP_LT : CMP_GT;
4698 cr |= wi::ltu_p (ptrueop0, ptrueop1) ? CMP_LTU : CMP_GTU;
4699 return comparison_result (code, cr);
4703 /* Optimize comparisons with upper and lower bounds. */
4704 if (HWI_COMPUTABLE_MODE_P (mode)
4705 && CONST_INT_P (trueop1))
4707 int sign;
4708 unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, mode);
4709 HOST_WIDE_INT val = INTVAL (trueop1);
4710 HOST_WIDE_INT mmin, mmax;
4712 if (code == GEU
4713 || code == LEU
4714 || code == GTU
4715 || code == LTU)
4716 sign = 0;
4717 else
4718 sign = 1;
4720 /* Get a reduced range if the sign bit is zero. */
4721 if (nonzero <= (GET_MODE_MASK (mode) >> 1))
4723 mmin = 0;
4724 mmax = nonzero;
4726 else
4728 rtx mmin_rtx, mmax_rtx;
4729 get_mode_bounds (mode, sign, mode, &mmin_rtx, &mmax_rtx);
4731 mmin = INTVAL (mmin_rtx);
4732 mmax = INTVAL (mmax_rtx);
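/* For a signed comparison, known copies of the sign bit in TRUEOP0 narrow
   the range its value can take; shrink the bounds accordingly.  */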
4733 if (sign)
4735 unsigned int sign_copies = num_sign_bit_copies (trueop0, mode);
4737 mmin >>= (sign_copies - 1);
4738 mmax >>= (sign_copies - 1);
4742 switch (code)
4744 /* x >= y is always true for y <= mmin, always false for y > mmax. */
4745 case GEU:
4746 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
4747 return const_true_rtx;
4748 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
4749 return const0_rtx;
4750 break;
4751 case GE:
4752 if (val <= mmin)
4753 return const_true_rtx;
4754 if (val > mmax)
4755 return const0_rtx;
4756 break;
4758 /* x <= y is always true for y >= mmax, always false for y < mmin. */
4759 case LEU:
4760 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
4761 return const_true_rtx;
4762 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
4763 return const0_rtx;
4764 break;
4765 case LE:
4766 if (val >= mmax)
4767 return const_true_rtx;
4768 if (val < mmin)
4769 return const0_rtx;
4770 break;
4772 case EQ:
4773 /* x == y is always false for y out of range. */
4774 if (val < mmin || val > mmax)
4775 return const0_rtx;
4776 break;
4778 /* x > y is always false for y >= mmax, always true for y < mmin. */
4779 case GTU:
4780 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
4781 return const0_rtx;
4782 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
4783 return const_true_rtx;
4784 break;
4785 case GT:
4786 if (val >= mmax)
4787 return const0_rtx;
4788 if (val < mmin)
4789 return const_true_rtx;
4790 break;
4792 /* x < y is always false for y <= mmin, always true for y > mmax. */
4793 case LTU:
4794 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
4795 return const0_rtx;
4796 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
4797 return const_true_rtx;
4798 break;
4799 case LT:
4800 if (val <= mmin)
4801 return const0_rtx;
4802 if (val > mmax)
4803 return const_true_rtx;
4804 break;
4806 case NE:
4807 /* x != y is always true for y out of range. */
4808 if (val < mmin || val > mmax)
4809 return const_true_rtx;
4810 break;
4812 default:
4813 break;
4817 /* Optimize integer comparisons with zero. */
4818 if (trueop1 == const0_rtx)
4820 /* Some addresses are known to be nonzero. We don't know
4821 their sign, but equality comparisons are known. */
4822 if (nonzero_address_p (trueop0))
4824 if (code == EQ || code == LEU)
4825 return const0_rtx;
4826 if (code == NE || code == GTU)
4827 return const_true_rtx;
4830 /* See if the first operand is an IOR with a constant. If so, we
4831 may be able to determine the result of this comparison. */
4832 if (GET_CODE (op0) == IOR)
4834 rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
4835 if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
4837 int sign_bitnum = GET_MODE_PRECISION (mode) - 1;
4838 int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
4839 && (UINTVAL (inner_const)
4840 & ((unsigned HOST_WIDE_INT) 1
4841 << sign_bitnum)));
4843 switch (code)
4845 case EQ:
4846 case LEU:
4847 return const0_rtx;
4848 case NE:
4849 case GTU:
4850 return const_true_rtx;
4851 case LT:
4852 case LE:
4853 if (has_sign)
4854 return const_true_rtx;
4855 break;
4856 case GT:
4857 case GE:
4858 if (has_sign)
4859 return const0_rtx;
4860 break;
4861 default:
4862 break;
4868 /* Optimize comparison of ABS with zero. */
4869 if (trueop1 == CONST0_RTX (mode)
4870 && (GET_CODE (trueop0) == ABS
4871 || (GET_CODE (trueop0) == FLOAT_EXTEND
4872 && GET_CODE (XEXP (trueop0, 0)) == ABS)))
4874 switch (code)
4876 case LT:
4877 /* Optimize abs(x) < 0.0. */
4878 if (!HONOR_SNANS (mode)
4879 && (!INTEGRAL_MODE_P (mode)
4880 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
4882 if (INTEGRAL_MODE_P (mode)
4883 && (issue_strict_overflow_warning
4884 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
4885 warning (OPT_Wstrict_overflow,
4886 ("assuming signed overflow does not occur when "
4887 "assuming abs (x) < 0 is false"));
4888 return const0_rtx;
4890 break;
4892 case GE:
4893 /* Optimize abs(x) >= 0.0. */
4894 if (!HONOR_NANS (mode)
4895 && (!INTEGRAL_MODE_P (mode)
4896 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
4898 if (INTEGRAL_MODE_P (mode)
4899 && (issue_strict_overflow_warning
4900 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
4901 warning (OPT_Wstrict_overflow,
4902 ("assuming signed overflow does not occur when "
4903 "assuming abs (x) >= 0 is true"));
4904 return const_true_rtx;
4906 break;
4908 case UNGE:
4909 /* Optimize ! (abs(x) < 0.0). */
4910 return const_true_rtx;
4912 default:
4913 break;
4917 return 0;
4920 /* Simplify CODE, an operation with result mode MODE and three operands,
4921 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
4922 a constant. Return 0 if no simplification is possible. */
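/* For instance, (if_then_else (const_int 1) A B) folds to A below, and
   (fma (neg X) (neg Y) Z) is rewritten as (fma X Y Z).  */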
4925 simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
4926 enum machine_mode op0_mode, rtx op0, rtx op1,
4927 rtx op2)
4929 unsigned int width = GET_MODE_PRECISION (mode);
4930 bool any_change = false;
4931 rtx tem, trueop2;
4933 /* VOIDmode means "infinite" precision. */
4934 if (width == 0)
4935 width = HOST_BITS_PER_WIDE_INT;
4937 switch (code)
4939 case FMA:
4940 /* Simplify negations around the multiplication. */
4941 /* -a * -b + c => a * b + c. */
4942 if (GET_CODE (op0) == NEG)
4944 tem = simplify_unary_operation (NEG, mode, op1, mode);
4945 if (tem)
4946 op1 = tem, op0 = XEXP (op0, 0), any_change = true;
4948 else if (GET_CODE (op1) == NEG)
4950 tem = simplify_unary_operation (NEG, mode, op0, mode);
4951 if (tem)
4952 op0 = tem, op1 = XEXP (op1, 0), any_change = true;
4955 /* Canonicalize the two multiplication operands. */
4956 /* a * -b + c => -b * a + c. */
4957 if (swap_commutative_operands_p (op0, op1))
4958 tem = op0, op0 = op1, op1 = tem, any_change = true;
4960 if (any_change)
4961 return gen_rtx_FMA (mode, op0, op1, op2);
4962 return NULL_RTX;
4964 case SIGN_EXTRACT:
4965 case ZERO_EXTRACT:
4966 if (CONST_INT_P (op0)
4967 && CONST_INT_P (op1)
4968 && CONST_INT_P (op2)
4969 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
4970 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
4972 /* Extracting a bit-field from a constant. */
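/* For example, with !BITS_BIG_ENDIAN,
   (zero_extract (const_int 44) (const_int 3) (const_int 2))
   takes bits 2..4 of 0b101100 and yields (const_int 3).  */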
4973 unsigned HOST_WIDE_INT val = UINTVAL (op0);
4974 HOST_WIDE_INT op1val = INTVAL (op1);
4975 HOST_WIDE_INT op2val = INTVAL (op2);
4976 if (BITS_BIG_ENDIAN)
4977 val >>= GET_MODE_PRECISION (op0_mode) - op2val - op1val;
4978 else
4979 val >>= op2val;
4981 if (HOST_BITS_PER_WIDE_INT != op1val)
4983 /* First zero-extend. */
4984 val &= ((unsigned HOST_WIDE_INT) 1 << op1val) - 1;
4985 /* If desired, propagate sign bit. */
4986 if (code == SIGN_EXTRACT
4987 && (val & ((unsigned HOST_WIDE_INT) 1 << (op1val - 1)))
4988 != 0)
4989 val |= ~ (((unsigned HOST_WIDE_INT) 1 << op1val) - 1);
4992 return gen_int_mode (val, mode);
4994 break;
4996 case IF_THEN_ELSE:
4997 if (CONST_INT_P (op0))
4998 return op0 != const0_rtx ? op1 : op2;
5000 /* Convert c ? a : a into "a". */
5001 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
5002 return op1;
5004 /* Convert a != b ? a : b into "a". */
5005 if (GET_CODE (op0) == NE
5006 && ! side_effects_p (op0)
5007 && ! HONOR_NANS (mode)
5008 && ! HONOR_SIGNED_ZEROS (mode)
5009 && ((rtx_equal_p (XEXP (op0, 0), op1)
5010 && rtx_equal_p (XEXP (op0, 1), op2))
5011 || (rtx_equal_p (XEXP (op0, 0), op2)
5012 && rtx_equal_p (XEXP (op0, 1), op1))))
5013 return op1;
5015 /* Convert a == b ? a : b into "b". */
5016 if (GET_CODE (op0) == EQ
5017 && ! side_effects_p (op0)
5018 && ! HONOR_NANS (mode)
5019 && ! HONOR_SIGNED_ZEROS (mode)
5020 && ((rtx_equal_p (XEXP (op0, 0), op1)
5021 && rtx_equal_p (XEXP (op0, 1), op2))
5022 || (rtx_equal_p (XEXP (op0, 0), op2)
5023 && rtx_equal_p (XEXP (op0, 1), op1))))
5024 return op2;
5026 if (COMPARISON_P (op0) && ! side_effects_p (op0))
5028 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
5029 ? GET_MODE (XEXP (op0, 1))
5030 : GET_MODE (XEXP (op0, 0)));
5031 rtx temp;
5033 /* Look for happy constants in op1 and op2. */
5034 if (CONST_INT_P (op1) && CONST_INT_P (op2))
5036 HOST_WIDE_INT t = INTVAL (op1);
5037 HOST_WIDE_INT f = INTVAL (op2);
5039 if (t == STORE_FLAG_VALUE && f == 0)
5040 code = GET_CODE (op0);
5041 else if (t == 0 && f == STORE_FLAG_VALUE)
5043 enum rtx_code tmp;
5044 tmp = reversed_comparison_code (op0, NULL_RTX);
5045 if (tmp == UNKNOWN)
5046 break;
5047 code = tmp;
5049 else
5050 break;
5052 return simplify_gen_relational (code, mode, cmp_mode,
5053 XEXP (op0, 0), XEXP (op0, 1));
5056 if (cmp_mode == VOIDmode)
5057 cmp_mode = op0_mode;
5058 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
5059 cmp_mode, XEXP (op0, 0),
5060 XEXP (op0, 1));
5062 /* See if any simplifications were possible. */
5063 if (temp)
5065 if (CONST_INT_P (temp))
5066 return temp == const0_rtx ? op2 : op1;
5067 else if (temp)
5068 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
5071 break;
5073 case VEC_MERGE:
5074 gcc_assert (GET_MODE (op0) == mode);
5075 gcc_assert (GET_MODE (op1) == mode);
5076 gcc_assert (VECTOR_MODE_P (mode));
5077 trueop2 = avoid_constant_pool_reference (op2);
5078 if (CONST_INT_P (trueop2))
5080 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
5081 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
5082 unsigned HOST_WIDE_INT sel = UINTVAL (trueop2);
5083 unsigned HOST_WIDE_INT mask;
5084 if (n_elts == HOST_BITS_PER_WIDE_INT)
5085 mask = -1;
5086 else
5087 mask = ((unsigned HOST_WIDE_INT) 1 << n_elts) - 1;
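/* Only the low n_elts selector bits matter: if none are set every element
   comes from op1, and if all are set every element comes from op0.  */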
5089 if (!(sel & mask) && !side_effects_p (op0))
5090 return op1;
5091 if ((sel & mask) == mask && !side_effects_p (op1))
5092 return op0;
5094 rtx trueop0 = avoid_constant_pool_reference (op0);
5095 rtx trueop1 = avoid_constant_pool_reference (op1);
5096 if (GET_CODE (trueop0) == CONST_VECTOR
5097 && GET_CODE (trueop1) == CONST_VECTOR)
5099 rtvec v = rtvec_alloc (n_elts);
5100 unsigned int i;
5102 for (i = 0; i < n_elts; i++)
5103 RTVEC_ELT (v, i) = ((sel & ((unsigned HOST_WIDE_INT) 1 << i))
5104 ? CONST_VECTOR_ELT (trueop0, i)
5105 : CONST_VECTOR_ELT (trueop1, i));
5106 return gen_rtx_CONST_VECTOR (mode, v);
5109 /* Replace (vec_merge (vec_merge a b m) c n) with (vec_merge b c n)
5110 if no element from a appears in the result. */
5111 if (GET_CODE (op0) == VEC_MERGE)
5113 tem = avoid_constant_pool_reference (XEXP (op0, 2));
5114 if (CONST_INT_P (tem))
5116 unsigned HOST_WIDE_INT sel0 = UINTVAL (tem);
5117 if (!(sel & sel0 & mask) && !side_effects_p (XEXP (op0, 0)))
5118 return simplify_gen_ternary (code, mode, mode,
5119 XEXP (op0, 1), op1, op2);
5120 if (!(sel & ~sel0 & mask) && !side_effects_p (XEXP (op0, 1)))
5121 return simplify_gen_ternary (code, mode, mode,
5122 XEXP (op0, 0), op1, op2);
5125 if (GET_CODE (op1) == VEC_MERGE)
5127 tem = avoid_constant_pool_reference (XEXP (op1, 2));
5128 if (CONST_INT_P (tem))
5130 unsigned HOST_WIDE_INT sel1 = UINTVAL (tem);
5131 if (!(~sel & sel1 & mask) && !side_effects_p (XEXP (op1, 0)))
5132 return simplify_gen_ternary (code, mode, mode,
5133 op0, XEXP (op1, 1), op2);
5134 if (!(~sel & ~sel1 & mask) && !side_effects_p (XEXP (op1, 1)))
5135 return simplify_gen_ternary (code, mode, mode,
5136 op0, XEXP (op1, 0), op2);
5141 if (rtx_equal_p (op0, op1)
5142 && !side_effects_p (op2) && !side_effects_p (op1))
5143 return op0;
5145 break;
5147 default:
5148 gcc_unreachable ();
5151 return 0;
5154 /* Evaluate a SUBREG of a CONST_INT or CONST_WIDE_INT or CONST_DOUBLE
5155 or CONST_FIXED or CONST_VECTOR, returning another CONST_INT or
5156 CONST_WIDE_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.
5158 Works by unpacking OP into a collection of 8-bit values
5159 represented as a little-endian array of 'unsigned char', selecting by BYTE,
5160 and then repacking them again for OUTERMODE. */
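/* For example, on a little-endian target,
   simplify_immed_subreg (HImode, GEN_INT (0x12345678), SImode, 0)
   unpacks the SImode constant into bytes 78 56 34 12, keeps the two
   low-order bytes, and repacks them as (const_int 0x5678).  */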
5162 static rtx
5163 simplify_immed_subreg (enum machine_mode outermode, rtx op,
5164 enum machine_mode innermode, unsigned int byte)
5166 enum {
5167 value_bit = 8,
5168 value_mask = (1 << value_bit) - 1
5170 unsigned char value[MAX_BITSIZE_MODE_ANY_MODE / value_bit];
5171 int value_start;
5172 int i;
5173 int elem;
5175 int num_elem;
5176 rtx * elems;
5177 int elem_bitsize;
5178 rtx result_s;
5179 rtvec result_v = NULL;
5180 enum mode_class outer_class;
5181 enum machine_mode outer_submode;
5182 int max_bitsize;
5184 /* Some ports misuse CCmode. */
5185 if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
5186 return op;
5188 /* We have no way to represent a complex constant at the rtl level. */
5189 if (COMPLEX_MODE_P (outermode))
5190 return NULL_RTX;
5192 /* We support any size mode. */
5193 max_bitsize = MAX (GET_MODE_BITSIZE (outermode),
5194 GET_MODE_BITSIZE (innermode));
5196 /* Unpack the value. */
5198 if (GET_CODE (op) == CONST_VECTOR)
5200 num_elem = CONST_VECTOR_NUNITS (op);
5201 elems = &CONST_VECTOR_ELT (op, 0);
5202 elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
5204 else
5206 num_elem = 1;
5207 elems = &op;
5208 elem_bitsize = max_bitsize;
5210 /* If this asserts, it is too complicated; reducing value_bit may help. */
5211 gcc_assert (BITS_PER_UNIT % value_bit == 0);
5212 /* I don't know how to handle endianness of sub-units. */
5213 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
5215 for (elem = 0; elem < num_elem; elem++)
5217 unsigned char * vp;
5218 rtx el = elems[elem];
5220 /* Vectors are kept in target memory order. (This is probably
5221 a mistake.) */
5223 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5224 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5225 / BITS_PER_UNIT);
5226 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5227 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5228 unsigned bytele = (subword_byte % UNITS_PER_WORD
5229 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5230 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
5233 switch (GET_CODE (el))
5235 case CONST_INT:
5236 for (i = 0;
5237 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5238 i += value_bit)
5239 *vp++ = INTVAL (el) >> i;
5240 /* CONST_INTs are always logically sign-extended. */
5241 for (; i < elem_bitsize; i += value_bit)
5242 *vp++ = INTVAL (el) < 0 ? -1 : 0;
5243 break;
5245 case CONST_WIDE_INT:
5247 rtx_mode_t val = std::make_pair (el, innermode);
5248 unsigned char extend = wi::sign_mask (val);
5250 for (i = 0; i < elem_bitsize; i += value_bit)
5251 *vp++ = wi::extract_uhwi (val, i, value_bit);
5252 for (; i < elem_bitsize; i += value_bit)
5253 *vp++ = extend;
5255 break;
5257 case CONST_DOUBLE:
5258 if (TARGET_SUPPORTS_WIDE_INT == 0 && GET_MODE (el) == VOIDmode)
5260 unsigned char extend = 0;
5261 /* If this triggers, someone should have generated a
5262 CONST_INT instead. */
5263 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
5265 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5266 *vp++ = CONST_DOUBLE_LOW (el) >> i;
5267 while (i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize)
5269 *vp++
5270 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
5271 i += value_bit;
5274 if (CONST_DOUBLE_HIGH (el) >> (HOST_BITS_PER_WIDE_INT - 1))
5275 extend = -1;
5276 for (; i < elem_bitsize; i += value_bit)
5277 *vp++ = extend;
5279 else
5281 /* This is big enough for anything on the platform. */
5282 long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32];
5283 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
5285 gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
5286 gcc_assert (bitsize <= elem_bitsize);
5287 gcc_assert (bitsize % value_bit == 0);
5289 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
5290 GET_MODE (el));
5292 /* real_to_target produces its result in words affected by
5293 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5294 and use WORDS_BIG_ENDIAN instead; see the documentation
5295 of SUBREG in rtl.texi. */
5296 for (i = 0; i < bitsize; i += value_bit)
5298 int ibase;
5299 if (WORDS_BIG_ENDIAN)
5300 ibase = bitsize - 1 - i;
5301 else
5302 ibase = i;
5303 *vp++ = tmp[ibase / 32] >> i % 32;
5306 /* It shouldn't matter what's done here, so fill it with
5307 zero. */
5308 for (; i < elem_bitsize; i += value_bit)
5309 *vp++ = 0;
5311 break;
5313 case CONST_FIXED:
5314 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
5316 for (i = 0; i < elem_bitsize; i += value_bit)
5317 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5319 else
5321 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5322 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5323 for (; i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize;
5324 i += value_bit)
5325 *vp++ = CONST_FIXED_VALUE_HIGH (el)
5326 >> (i - HOST_BITS_PER_WIDE_INT);
5327 for (; i < elem_bitsize; i += value_bit)
5328 *vp++ = 0;
5330 break;
5332 default:
5333 gcc_unreachable ();
5337 /* Now, pick the right byte to start with. */
5338 /* Renumber BYTE so that the least-significant byte is byte 0. A special
5339 case is paradoxical SUBREGs, which shouldn't be adjusted since they
5340 will already have offset 0. */
5341 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
5343 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
5344 - byte);
5345 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5346 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5347 byte = (subword_byte % UNITS_PER_WORD
5348 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5351 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
5352 so if it's become negative it will instead be very large.) */
5353 gcc_assert (byte < GET_MODE_SIZE (innermode));
5355 /* Convert from bytes to chunks of size value_bit. */
5356 value_start = byte * (BITS_PER_UNIT / value_bit);
5358 /* Re-pack the value. */
5360 if (VECTOR_MODE_P (outermode))
5362 num_elem = GET_MODE_NUNITS (outermode);
5363 result_v = rtvec_alloc (num_elem);
5364 elems = &RTVEC_ELT (result_v, 0);
5365 outer_submode = GET_MODE_INNER (outermode);
5367 else
5369 num_elem = 1;
5370 elems = &result_s;
5371 outer_submode = outermode;
5374 outer_class = GET_MODE_CLASS (outer_submode);
5375 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
5377 gcc_assert (elem_bitsize % value_bit == 0);
5378 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
5380 for (elem = 0; elem < num_elem; elem++)
5382 unsigned char *vp;
5384 /* Vectors are stored in target memory order. (This is probably
5385 a mistake.) */
5387 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5388 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5389 / BITS_PER_UNIT);
5390 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5391 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5392 unsigned bytele = (subword_byte % UNITS_PER_WORD
5393 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5394 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
5397 switch (outer_class)
5399 case MODE_INT:
5400 case MODE_PARTIAL_INT:
5402 int u;
5403 int base = 0;
5404 int units
5405 = (GET_MODE_BITSIZE (outer_submode) + HOST_BITS_PER_WIDE_INT - 1)
5406 / HOST_BITS_PER_WIDE_INT;
5407 HOST_WIDE_INT tmp[MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_WIDE_INT];
5408 wide_int r;
5410 for (u = 0; u < units; u++)
5412 unsigned HOST_WIDE_INT buf = 0;
5413 for (i = 0;
5414 i < HOST_BITS_PER_WIDE_INT && base + i < elem_bitsize;
5415 i += value_bit)
5416 buf |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5418 tmp[u] = buf;
5419 base += HOST_BITS_PER_WIDE_INT;
5421 gcc_assert (GET_MODE_PRECISION (outer_submode)
5422 <= MAX_BITSIZE_MODE_ANY_INT);
5423 r = wide_int::from_array (tmp, units,
5424 GET_MODE_PRECISION (outer_submode));
5425 elems[elem] = immed_wide_int_const (r, outer_submode);
5427 break;
5429 case MODE_FLOAT:
5430 case MODE_DECIMAL_FLOAT:
5432 REAL_VALUE_TYPE r;
5433 long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32];
5435 /* real_from_target wants its input in words affected by
5436 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5437 and use WORDS_BIG_ENDIAN instead; see the documentation
5438 of SUBREG in rtl.texi. */
5439 for (i = 0; i < max_bitsize / 32; i++)
5440 tmp[i] = 0;
5441 for (i = 0; i < elem_bitsize; i += value_bit)
5443 int ibase;
5444 if (WORDS_BIG_ENDIAN)
5445 ibase = elem_bitsize - 1 - i;
5446 else
5447 ibase = i;
5448 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
5451 real_from_target (&r, tmp, outer_submode);
5452 elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
5454 break;
5456 case MODE_FRACT:
5457 case MODE_UFRACT:
5458 case MODE_ACCUM:
5459 case MODE_UACCUM:
5461 FIXED_VALUE_TYPE f;
5462 f.data.low = 0;
5463 f.data.high = 0;
5464 f.mode = outer_submode;
5466 for (i = 0;
5467 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5468 i += value_bit)
5469 f.data.low |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5470 for (; i < elem_bitsize; i += value_bit)
5471 f.data.high |= ((unsigned HOST_WIDE_INT)(*vp++ & value_mask)
5472 << (i - HOST_BITS_PER_WIDE_INT));
5474 elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
5476 break;
5478 default:
5479 gcc_unreachable ();
5482 if (VECTOR_MODE_P (outermode))
5483 return gen_rtx_CONST_VECTOR (outermode, result_v);
5484 else
5485 return result_s;
5488 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
5489 Return 0 if no simplifications are possible. */
5491 simplify_subreg (enum machine_mode outermode, rtx op,
5492 enum machine_mode innermode, unsigned int byte)
5494 /* Little bit of sanity checking. */
5495 gcc_assert (innermode != VOIDmode);
5496 gcc_assert (outermode != VOIDmode);
5497 gcc_assert (innermode != BLKmode);
5498 gcc_assert (outermode != BLKmode);
5500 gcc_assert (GET_MODE (op) == innermode
5501 || GET_MODE (op) == VOIDmode);
5503 if ((byte % GET_MODE_SIZE (outermode)) != 0)
5504 return NULL_RTX;
5506 if (byte >= GET_MODE_SIZE (innermode))
5507 return NULL_RTX;
5509 if (outermode == innermode && !byte)
5510 return op;
5512 if (CONST_SCALAR_INT_P (op)
5513 || CONST_DOUBLE_AS_FLOAT_P (op)
5514 || GET_CODE (op) == CONST_FIXED
5515 || GET_CODE (op) == CONST_VECTOR)
5516 return simplify_immed_subreg (outermode, op, innermode, byte);
5518 /* Changing mode twice with SUBREG => just change it once,
5519 or not at all if changing back to op's starting mode. */
5520 if (GET_CODE (op) == SUBREG)
5522 enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
5523 int final_offset = byte + SUBREG_BYTE (op);
5524 rtx newx;
5526 if (outermode == innermostmode
5527 && byte == 0 && SUBREG_BYTE (op) == 0)
5528 return SUBREG_REG (op);
5530 /* The SUBREG_BYTE represents the offset, as if the value were stored
5531 in memory. An irritating exception is the paradoxical subreg, where
5532 we define SUBREG_BYTE to be 0. On big endian machines, this
5533 value should be negative. For a moment, undo this exception. */
5534 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5536 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
5537 if (WORDS_BIG_ENDIAN)
5538 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5539 if (BYTES_BIG_ENDIAN)
5540 final_offset += difference % UNITS_PER_WORD;
5542 if (SUBREG_BYTE (op) == 0
5543 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
5545 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
5546 if (WORDS_BIG_ENDIAN)
5547 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5548 if (BYTES_BIG_ENDIAN)
5549 final_offset += difference % UNITS_PER_WORD;
5552 /* See whether resulting subreg will be paradoxical. */
5553 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
5555 /* In nonparadoxical subregs we can't handle negative offsets. */
5556 if (final_offset < 0)
5557 return NULL_RTX;
5558 /* Bail out in case resulting subreg would be incorrect. */
5559 if (final_offset % GET_MODE_SIZE (outermode)
5560 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
5561 return NULL_RTX;
5563 else
5565 int offset = 0;
5566 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
5568 /* In a paradoxical subreg, see if we are still looking at the lower part.
5569 If so, our SUBREG_BYTE will be 0. */
5570 if (WORDS_BIG_ENDIAN)
5571 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5572 if (BYTES_BIG_ENDIAN)
5573 offset += difference % UNITS_PER_WORD;
5574 if (offset == final_offset)
5575 final_offset = 0;
5576 else
5577 return NULL_RTX;
5580 /* Recurse for further possible simplifications. */
5581 newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
5582 final_offset);
5583 if (newx)
5584 return newx;
5585 if (validate_subreg (outermode, innermostmode,
5586 SUBREG_REG (op), final_offset))
5588 newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
5589 if (SUBREG_PROMOTED_VAR_P (op)
5590 && SUBREG_PROMOTED_UNSIGNED_P (op) >= 0
5591 && GET_MODE_CLASS (outermode) == MODE_INT
5592 && IN_RANGE (GET_MODE_SIZE (outermode),
5593 GET_MODE_SIZE (innermode),
5594 GET_MODE_SIZE (innermostmode))
5595 && subreg_lowpart_p (newx))
5597 SUBREG_PROMOTED_VAR_P (newx) = 1;
5598 SUBREG_PROMOTED_UNSIGNED_SET
5599 (newx, SUBREG_PROMOTED_UNSIGNED_P (op));
5601 return newx;
5603 return NULL_RTX;
5606 /* SUBREG of a hard register => just change the register number
5607 and/or mode. If the hard register is not valid in that mode,
5608 suppress this simplification. If the hard register is the stack,
5609 frame, or argument pointer, leave this as a SUBREG. */
5611 if (REG_P (op) && HARD_REGISTER_P (op))
5613 unsigned int regno, final_regno;
5615 regno = REGNO (op);
5616 final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
5617 if (HARD_REGISTER_NUM_P (final_regno))
5619 rtx x;
5620 int final_offset = byte;
5622 /* Adjust offset for paradoxical subregs. */
5623 if (byte == 0
5624 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5626 int difference = (GET_MODE_SIZE (innermode)
5627 - GET_MODE_SIZE (outermode));
5628 if (WORDS_BIG_ENDIAN)
5629 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5630 if (BYTES_BIG_ENDIAN)
5631 final_offset += difference % UNITS_PER_WORD;
5634 x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);
5636 /* Propagate original regno. We don't have any way to specify
5637 the offset inside original regno, so do so only for lowpart.
5638 The information is used only by alias analysis, which cannot
5639 grok partial registers anyway. */
5641 if (subreg_lowpart_offset (outermode, innermode) == byte)
5642 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
5643 return x;
5647 /* If we have a SUBREG of a register that we are replacing and we are
5648 replacing it with a MEM, make a new MEM and try replacing the
5649 SUBREG with it. Don't do this if the MEM has a mode-dependent address
5650 or if we would be widening it. */
5652 if (MEM_P (op)
5653 && ! mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op))
5654 /* Allow splitting of volatile memory references in case we don't
5655 have an instruction to move the whole thing. */
5656 && (! MEM_VOLATILE_P (op)
5657 || ! have_insn_for (SET, innermode))
5658 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
5659 return adjust_address_nv (op, outermode, byte);
5661 /* Handle complex values represented as CONCAT
5662 of real and imaginary part. */
5663 if (GET_CODE (op) == CONCAT)
5665 unsigned int part_size, final_offset;
5666 rtx part, res;
5668 part_size = GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)));
5669 if (byte < part_size)
5671 part = XEXP (op, 0);
5672 final_offset = byte;
5674 else
5676 part = XEXP (op, 1);
5677 final_offset = byte - part_size;
5680 if (final_offset + GET_MODE_SIZE (outermode) > part_size)
5681 return NULL_RTX;
5683 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
5684 if (res)
5685 return res;
5686 if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
5687 return gen_rtx_SUBREG (outermode, part, final_offset);
5688 return NULL_RTX;
5691 /* A SUBREG resulting from a zero extension may fold to zero if
5692 it extracts higher bits than the ZERO_EXTEND's source provides. */
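/* A worked example: on a little-endian target,
   (subreg:SI (zero_extend:DI (reg:SI x)) 4) reads bits 32..63 of the
   zero-extended value; bitpos (32) is not below the SImode precision
   of the ZERO_EXTEND's source, so the SUBREG folds to (const_int 0).  */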
5693 if (GET_CODE (op) == ZERO_EXTEND && SCALAR_INT_MODE_P (innermode))
5695 unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
5696 if (bitpos >= GET_MODE_PRECISION (GET_MODE (XEXP (op, 0))))
5697 return CONST0_RTX (outermode);
5700 if (SCALAR_INT_MODE_P (outermode)
5701 && SCALAR_INT_MODE_P (innermode)
5702 && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode)
5703 && byte == subreg_lowpart_offset (outermode, innermode))
5705 rtx tem = simplify_truncation (outermode, op, innermode);
5706 if (tem)
5707 return tem;
5710 return NULL_RTX;
5713 /* Make a SUBREG operation or equivalent if it folds. */
5715 rtx
5716 simplify_gen_subreg (enum machine_mode outermode, rtx op,
5717 enum machine_mode innermode, unsigned int byte)
5719 rtx newx;
5721 newx = simplify_subreg (outermode, op, innermode, byte);
5722 if (newx)
5723 return newx;
5725 if (GET_CODE (op) == SUBREG
5726 || GET_CODE (op) == CONCAT
5727 || GET_MODE (op) == VOIDmode)
5728 return NULL_RTX;
5730 if (validate_subreg (outermode, innermode, op, byte))
5731 return gen_rtx_SUBREG (outermode, op, byte);
5733 return NULL_RTX;
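/* A minimal usage sketch (illustrative only; the helper name
   example_lowpart_si is hypothetical and not part of this file): a
   caller that wants the low SImode word of a DImode value asks for the
   lowpart byte offset and lets simplify_gen_subreg either fold the
   subreg or build a SUBREG rtx, getting NULL_RTX back when no valid
   subreg can be formed.  */

static rtx
example_lowpart_si (rtx di_val)
{
  /* Byte offset of the low part of a DImode value, endian-correct.  */
  unsigned int byte = subreg_lowpart_offset (SImode, DImode);

  /* Fold if possible, otherwise generate (subreg:SI di_val byte),
     or return NULL_RTX if such a subreg would not be valid.  */
  return simplify_gen_subreg (SImode, di_val, DImode, byte);
}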
5736 /* Simplify X, an rtx expression.
5738 Return the simplified expression or NULL if no simplifications
5739 were possible.
5741 This is the preferred entry point into the simplification routines;
5742 however, we still allow passes to call the more specific routines.
5744 Right now GCC has three (yes, three) major bodies of RTL simplification
5745 code that need to be unified.
5747 1. fold_rtx in cse.c. This code uses various CSE specific
5748 information to aid in RTL simplification.
5750 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
5751 it uses combine specific information to aid in RTL
5752 simplification.
5754 3. The routines in this file.
5757 Long term we want to only have one body of simplification code; to
5758 get to that state I recommend the following steps:
5760 1. Pore over fold_rtx & simplify_rtx and move any simplifications
5761 that do not depend on pass-specific state into these routines.
5763 2. As code is moved by #1, change fold_rtx & simplify_rtx to
5764 use this routine whenever possible.
5766 3. Allow for pass dependent state to be provided to these
5767 routines and add simplifications based on the pass dependent
5768 state. Remove code from cse.c & combine.c that becomes
5769 redundant/dead.
5771 It will take time, but ultimately the compiler will be easier to
5772 maintain and improve. It's totally silly that when we add a
5773 simplification it needs to be added to 4 places (3 for RTL
5774 simplification and 1 for tree simplification). */
5776 rtx
5777 simplify_rtx (const_rtx x)
5779 const enum rtx_code code = GET_CODE (x);
5780 const enum machine_mode mode = GET_MODE (x);
5782 switch (GET_RTX_CLASS (code))
5784 case RTX_UNARY:
5785 return simplify_unary_operation (code, mode,
5786 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
5787 case RTX_COMM_ARITH:
5788 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
5789 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
5791 /* Fall through.... */
5793 case RTX_BIN_ARITH:
5794 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
5796 case RTX_TERNARY:
5797 case RTX_BITFIELD_OPS:
5798 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
5799 XEXP (x, 0), XEXP (x, 1),
5800 XEXP (x, 2));
5802 case RTX_COMPARE:
5803 case RTX_COMM_COMPARE:
5804 return simplify_relational_operation (code, mode,
5805 ((GET_MODE (XEXP (x, 0))
5806 != VOIDmode)
5807 ? GET_MODE (XEXP (x, 0))
5808 : GET_MODE (XEXP (x, 1))),
5809 XEXP (x, 0),
5810 XEXP (x, 1));
5812 case RTX_EXTRA:
5813 if (code == SUBREG)
5814 return simplify_subreg (mode, SUBREG_REG (x),
5815 GET_MODE (SUBREG_REG (x)),
5816 SUBREG_BYTE (x));
5817 break;
5819 case RTX_OBJ:
5820 if (code == LO_SUM)
5822 /* Convert (lo_sum (high FOO) FOO) to FOO. */
5823 if (GET_CODE (XEXP (x, 0)) == HIGH
5824 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
5825 return XEXP (x, 1);
5827 break;
5829 default:
5830 break;
5832 return NULL;
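/* A minimal usage sketch of this entry point (illustrative only):

	rtx tem = simplify_rtx (x);
	if (tem)
	  x = tem;

   callers keep the original expression whenever NULL is returned.  */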