[official-gcc.git] / gcc / simplify-rtx.c
1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987-2015 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "tm.h"
25 #include "rtl.h"
26 #include "input.h"
27 #include "alias.h"
28 #include "symtab.h"
29 #include "tree.h"
30 #include "fold-const.h"
31 #include "varasm.h"
32 #include "tm_p.h"
33 #include "regs.h"
34 #include "hard-reg-set.h"
35 #include "flags.h"
36 #include "insn-config.h"
37 #include "recog.h"
38 #include "function.h"
39 #include "insn-codes.h"
40 #include "optabs.h"
41 #include "expmed.h"
42 #include "dojump.h"
43 #include "explow.h"
44 #include "calls.h"
45 #include "emit-rtl.h"
46 #include "stmt.h"
47 #include "expr.h"
48 #include "diagnostic-core.h"
49 #include "target.h"
50 #include "predict.h"
52 /* Simplification and canonicalization of RTL. */
54 /* Much code operates on (low, high) pairs; the low value is an
55 unsigned wide int, the high value a signed wide int. We
56 occasionally need to sign extend from low to high as if low were a
57 signed wide int. */
58 #define HWI_SIGN_EXTEND(low) \
59 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
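/* As a small illustration (assuming, for this sketch only, a 64-bit
   HOST_WIDE_INT host), sign extending a negative low word fills the
   high word with all ones:

     unsigned HOST_WIDE_INT low = (unsigned HOST_WIDE_INT) -5;
     HOST_WIDE_INT high = HWI_SIGN_EXTEND (low);      => high == -1

   whereas a non-negative low word such as 0x7f gives high == 0.  */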
61 static rtx neg_const_int (machine_mode, const_rtx);
62 static bool plus_minus_operand_p (const_rtx);
63 static bool simplify_plus_minus_op_data_cmp (rtx, rtx);
64 static rtx simplify_plus_minus (enum rtx_code, machine_mode, rtx, rtx);
65 static rtx simplify_immed_subreg (machine_mode, rtx, machine_mode,
66 unsigned int);
67 static rtx simplify_associative_operation (enum rtx_code, machine_mode,
68 rtx, rtx);
69 static rtx simplify_relational_operation_1 (enum rtx_code, machine_mode,
70 machine_mode, rtx, rtx);
71 static rtx simplify_unary_operation_1 (enum rtx_code, machine_mode, rtx);
72 static rtx simplify_binary_operation_1 (enum rtx_code, machine_mode,
73 rtx, rtx, rtx, rtx);
75 /* Negate a CONST_INT rtx, truncating (because a conversion from a
76 maximally negative number can overflow). */
77 static rtx
78 neg_const_int (machine_mode mode, const_rtx i)
80 return gen_int_mode (-(unsigned HOST_WIDE_INT) INTVAL (i), mode);
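/* For instance, negating (const_int -128) in QImode wraps back to
   (const_int -128): the unsigned negation yields 128, which
   gen_int_mode then truncates into QImode's range.  Doing the
   negation on the unsigned type also sidesteps the undefined
   behaviour of negating the most negative signed HOST_WIDE_INT.  */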
83 /* Test whether expression, X, is an immediate constant that represents
84 the most significant bit of machine mode MODE. */
86 bool
87 mode_signbit_p (machine_mode mode, const_rtx x)
89 unsigned HOST_WIDE_INT val;
90 unsigned int width;
92 if (GET_MODE_CLASS (mode) != MODE_INT)
93 return false;
95 width = GET_MODE_PRECISION (mode);
96 if (width == 0)
97 return false;
99 if (width <= HOST_BITS_PER_WIDE_INT
100 && CONST_INT_P (x))
101 val = INTVAL (x);
102 #if TARGET_SUPPORTS_WIDE_INT
103 else if (CONST_WIDE_INT_P (x))
105 unsigned int i;
106 unsigned int elts = CONST_WIDE_INT_NUNITS (x);
107 if (elts != (width + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT)
108 return false;
109 for (i = 0; i < elts - 1; i++)
110 if (CONST_WIDE_INT_ELT (x, i) != 0)
111 return false;
112 val = CONST_WIDE_INT_ELT (x, elts - 1);
113 width %= HOST_BITS_PER_WIDE_INT;
114 if (width == 0)
115 width = HOST_BITS_PER_WIDE_INT;
117 #else
118 else if (width <= HOST_BITS_PER_DOUBLE_INT
119 && CONST_DOUBLE_AS_INT_P (x)
120 && CONST_DOUBLE_LOW (x) == 0)
122 val = CONST_DOUBLE_HIGH (x);
123 width -= HOST_BITS_PER_WIDE_INT;
125 #endif
126 else
127 /* X is not an integer constant. */
128 return false;
130 if (width < HOST_BITS_PER_WIDE_INT)
131 val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
132 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
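/* As an illustrative call, mode_signbit_p (SImode,
   gen_int_mode (0x80000000, SImode)) returns true, because after
   masking to 32 bits only bit 31 is set; any other SImode constant
   returns false.  */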
135 /* Test whether VAL is equal to the most significant bit of mode MODE
136 (after masking with the mode mask of MODE). Returns false if the
137 precision of MODE is too large to handle. */
139 bool
140 val_signbit_p (machine_mode mode, unsigned HOST_WIDE_INT val)
142 unsigned int width;
144 if (GET_MODE_CLASS (mode) != MODE_INT)
145 return false;
147 width = GET_MODE_PRECISION (mode);
148 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
149 return false;
151 val &= GET_MODE_MASK (mode);
152 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
155 /* Test whether the most significant bit of mode MODE is set in VAL.
156 Returns false if the precision of MODE is too large to handle. */
157 bool
158 val_signbit_known_set_p (machine_mode mode, unsigned HOST_WIDE_INT val)
160 unsigned int width;
162 if (GET_MODE_CLASS (mode) != MODE_INT)
163 return false;
165 width = GET_MODE_PRECISION (mode);
166 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
167 return false;
169 val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
170 return val != 0;
173 /* Test whether the most significant bit of mode MODE is clear in VAL.
174 Returns false if the precision of MODE is too large to handle. */
175 bool
176 val_signbit_known_clear_p (machine_mode mode, unsigned HOST_WIDE_INT val)
178 unsigned int width;
180 if (GET_MODE_CLASS (mode) != MODE_INT)
181 return false;
183 width = GET_MODE_PRECISION (mode);
184 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
185 return false;
187 val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
188 return val == 0;
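/* Illustration of the three predicates above in SImode:
   val_signbit_p is true only for 0x80000000 exactly,
   val_signbit_known_set_p is true for any value with bit 31 set
   (e.g. 0x80000001), and val_signbit_known_clear_p is true for any
   value with bit 31 clear (e.g. 0x7fffffff).  */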
191 /* Make a binary operation by properly ordering the operands and
192 seeing if the expression folds. */
195 simplify_gen_binary (enum rtx_code code, machine_mode mode, rtx op0,
196 rtx op1)
198 rtx tem;
200 /* If this simplifies, do it. */
201 tem = simplify_binary_operation (code, mode, op0, op1);
202 if (tem)
203 return tem;
205 /* Put complex operands first and constants second if commutative. */
206 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
207 && swap_commutative_operands_p (op0, op1))
208 std::swap (op0, op1);
210 return gen_rtx_fmt_ee (code, mode, op0, op1);
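/* A minimal usage sketch (the operands are purely illustrative):

     rtx sum = simplify_gen_binary (PLUS, SImode,
                                    gen_int_mode (2, SImode),
                                    gen_int_mode (3, SImode));

   folds directly to (const_int 5).  With non-constant operands such
   as (reg:SI 100) and (const_int 3), no folding happens and the
   commutative canonicalization above places the constant second,
   giving (plus:SI (reg:SI 100) (const_int 3)).  */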
213 /* If X is a MEM referencing the constant pool, return the real value.
214 Otherwise return X. */
216 avoid_constant_pool_reference (rtx x)
218 rtx c, tmp, addr;
219 machine_mode cmode;
220 HOST_WIDE_INT offset = 0;
222 switch (GET_CODE (x))
224 case MEM:
225 break;
227 case FLOAT_EXTEND:
228 /* Handle float extensions of constant pool references. */
229 tmp = XEXP (x, 0);
230 c = avoid_constant_pool_reference (tmp);
231 if (c != tmp && CONST_DOUBLE_AS_FLOAT_P (c))
233 REAL_VALUE_TYPE d;
235 REAL_VALUE_FROM_CONST_DOUBLE (d, c);
236 return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
238 return x;
240 default:
241 return x;
244 if (GET_MODE (x) == BLKmode)
245 return x;
247 addr = XEXP (x, 0);
249 /* Call target hook to avoid the effects of -fpic etc.... */
250 addr = targetm.delegitimize_address (addr);
252 /* Split the address into a base and integer offset. */
253 if (GET_CODE (addr) == CONST
254 && GET_CODE (XEXP (addr, 0)) == PLUS
255 && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
257 offset = INTVAL (XEXP (XEXP (addr, 0), 1));
258 addr = XEXP (XEXP (addr, 0), 0);
261 if (GET_CODE (addr) == LO_SUM)
262 addr = XEXP (addr, 1);
264 /* If this is a constant pool reference, we can turn it into its
265 constant and hope that simplifications happen. */
266 if (GET_CODE (addr) == SYMBOL_REF
267 && CONSTANT_POOL_ADDRESS_P (addr))
269 c = get_pool_constant (addr);
270 cmode = get_pool_mode (addr);
272 /* If we're accessing the constant in a different mode than it was
273 originally stored, attempt to fix that up via subreg simplifications.
274 If that fails we have no choice but to return the original memory. */
275 if ((offset != 0 || cmode != GET_MODE (x))
276 && offset >= 0 && offset < GET_MODE_SIZE (cmode))
278 rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
279 if (tem && CONSTANT_P (tem))
280 return tem;
282 else
283 return c;
286 return x;
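/* For example, if X is (mem:DF (symbol_ref ...)) and the symbol is a
   constant pool entry holding 1.0, the (const_double:DF 1.0) itself
   is returned; a MEM that does not reference the constant pool is
   returned unchanged.  */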
289 /* Simplify a MEM based on its attributes. This is the default
290 delegitimize_address target hook, and it's recommended that every
291 overrider call it. */
294 delegitimize_mem_from_attrs (rtx x)
296 /* MEMs without MEM_OFFSETs may have been offset, so we can't just
297 use their base addresses as equivalent. */
298 if (MEM_P (x)
299 && MEM_EXPR (x)
300 && MEM_OFFSET_KNOWN_P (x))
302 tree decl = MEM_EXPR (x);
303 machine_mode mode = GET_MODE (x);
304 HOST_WIDE_INT offset = 0;
306 switch (TREE_CODE (decl))
308 default:
309 decl = NULL;
310 break;
312 case VAR_DECL:
313 break;
315 case ARRAY_REF:
316 case ARRAY_RANGE_REF:
317 case COMPONENT_REF:
318 case BIT_FIELD_REF:
319 case REALPART_EXPR:
320 case IMAGPART_EXPR:
321 case VIEW_CONVERT_EXPR:
323 HOST_WIDE_INT bitsize, bitpos;
324 tree toffset;
325 int unsignedp, volatilep = 0;
327 decl = get_inner_reference (decl, &bitsize, &bitpos, &toffset,
328 &mode, &unsignedp, &volatilep, false);
329 if (bitsize != GET_MODE_BITSIZE (mode)
330 || (bitpos % BITS_PER_UNIT)
331 || (toffset && !tree_fits_shwi_p (toffset)))
332 decl = NULL;
333 else
335 offset += bitpos / BITS_PER_UNIT;
336 if (toffset)
337 offset += tree_to_shwi (toffset);
339 break;
343 if (decl
344 && mode == GET_MODE (x)
345 && TREE_CODE (decl) == VAR_DECL
346 && (TREE_STATIC (decl)
347 || DECL_THREAD_LOCAL_P (decl))
348 && DECL_RTL_SET_P (decl)
349 && MEM_P (DECL_RTL (decl)))
351 rtx newx;
353 offset += MEM_OFFSET (x);
355 newx = DECL_RTL (decl);
357 if (MEM_P (newx))
359 rtx n = XEXP (newx, 0), o = XEXP (x, 0);
361 /* Avoid creating a new MEM needlessly if we already had
362 the same address. We do if there's no OFFSET and the
363 old address X is identical to NEWX, or if X is of the
364 form (plus NEWX OFFSET), or the NEWX is of the form
365 (plus Y (const_int Z)) and X is that with the offset
366 added: (plus Y (const_int Z+OFFSET)). */
367 if (!((offset == 0
368 || (GET_CODE (o) == PLUS
369 && GET_CODE (XEXP (o, 1)) == CONST_INT
370 && (offset == INTVAL (XEXP (o, 1))
371 || (GET_CODE (n) == PLUS
372 && GET_CODE (XEXP (n, 1)) == CONST_INT
373 && (INTVAL (XEXP (n, 1)) + offset
374 == INTVAL (XEXP (o, 1)))
375 && (n = XEXP (n, 0))))
376 && (o = XEXP (o, 0))))
377 && rtx_equal_p (o, n)))
378 x = adjust_address_nv (newx, mode, offset);
380 else if (GET_MODE (x) == GET_MODE (newx)
381 && offset == 0)
382 x = newx;
386 return x;
389 /* Make a unary operation by first seeing if it folds and otherwise making
390 the specified operation. */
393 simplify_gen_unary (enum rtx_code code, machine_mode mode, rtx op,
394 machine_mode op_mode)
396 rtx tem;
398 /* If this simplifies, use it. */
399 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
400 return tem;
402 return gen_rtx_fmt_e (code, mode, op);
405 /* Likewise for ternary operations. */
408 simplify_gen_ternary (enum rtx_code code, machine_mode mode,
409 machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
411 rtx tem;
413 /* If this simplifies, use it. */
414 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
415 op0, op1, op2)))
416 return tem;
418 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
421 /* Likewise, for relational operations.
422 CMP_MODE specifies mode comparison is done in. */
425 simplify_gen_relational (enum rtx_code code, machine_mode mode,
426 machine_mode cmp_mode, rtx op0, rtx op1)
428 rtx tem;
430 if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
431 op0, op1)))
432 return tem;
434 return gen_rtx_fmt_ee (code, mode, op0, op1);
437 /* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
438 and simplify the result. If FN is non-NULL, call this callback on each
439 X; if it returns non-NULL, replace X with its return value and simplify the
440 result. */
443 simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
444 rtx (*fn) (rtx, const_rtx, void *), void *data)
446 enum rtx_code code = GET_CODE (x);
447 machine_mode mode = GET_MODE (x);
448 machine_mode op_mode;
449 const char *fmt;
450 rtx op0, op1, op2, newx, op;
451 rtvec vec, newvec;
452 int i, j;
454 if (__builtin_expect (fn != NULL, 0))
456 newx = fn (x, old_rtx, data);
457 if (newx)
458 return newx;
460 else if (rtx_equal_p (x, old_rtx))
461 return copy_rtx ((rtx) data);
463 switch (GET_RTX_CLASS (code))
465 case RTX_UNARY:
466 op0 = XEXP (x, 0);
467 op_mode = GET_MODE (op0);
468 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
469 if (op0 == XEXP (x, 0))
470 return x;
471 return simplify_gen_unary (code, mode, op0, op_mode);
473 case RTX_BIN_ARITH:
474 case RTX_COMM_ARITH:
475 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
476 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
477 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
478 return x;
479 return simplify_gen_binary (code, mode, op0, op1);
481 case RTX_COMPARE:
482 case RTX_COMM_COMPARE:
483 op0 = XEXP (x, 0);
484 op1 = XEXP (x, 1);
485 op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
486 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
487 op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
488 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
489 return x;
490 return simplify_gen_relational (code, mode, op_mode, op0, op1);
492 case RTX_TERNARY:
493 case RTX_BITFIELD_OPS:
494 op0 = XEXP (x, 0);
495 op_mode = GET_MODE (op0);
496 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
497 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
498 op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
499 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
500 return x;
501 if (op_mode == VOIDmode)
502 op_mode = GET_MODE (op0);
503 return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
505 case RTX_EXTRA:
506 if (code == SUBREG)
508 op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
509 if (op0 == SUBREG_REG (x))
510 return x;
511 op0 = simplify_gen_subreg (GET_MODE (x), op0,
512 GET_MODE (SUBREG_REG (x)),
513 SUBREG_BYTE (x));
514 return op0 ? op0 : x;
516 break;
518 case RTX_OBJ:
519 if (code == MEM)
521 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
522 if (op0 == XEXP (x, 0))
523 return x;
524 return replace_equiv_address_nv (x, op0);
526 else if (code == LO_SUM)
528 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
529 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
531 /* (lo_sum (high x) y) -> y where x and y have the same base. */
532 if (GET_CODE (op0) == HIGH)
534 rtx base0, base1, offset0, offset1;
535 split_const (XEXP (op0, 0), &base0, &offset0);
536 split_const (op1, &base1, &offset1);
537 if (rtx_equal_p (base0, base1))
538 return op1;
541 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
542 return x;
543 return gen_rtx_LO_SUM (mode, op0, op1);
545 break;
547 default:
548 break;
551 newx = x;
552 fmt = GET_RTX_FORMAT (code);
553 for (i = 0; fmt[i]; i++)
554 switch (fmt[i])
556 case 'E':
557 vec = XVEC (x, i);
558 newvec = XVEC (newx, i);
559 for (j = 0; j < GET_NUM_ELEM (vec); j++)
561 op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
562 old_rtx, fn, data);
563 if (op != RTVEC_ELT (vec, j))
565 if (newvec == vec)
567 newvec = shallow_copy_rtvec (vec);
568 if (x == newx)
569 newx = shallow_copy_rtx (x);
570 XVEC (newx, i) = newvec;
572 RTVEC_ELT (newvec, j) = op;
575 break;
577 case 'e':
578 if (XEXP (x, i))
580 op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
581 if (op != XEXP (x, i))
583 if (x == newx)
584 newx = shallow_copy_rtx (x);
585 XEXP (newx, i) = op;
588 break;
590 return newx;
593 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
594 resulting RTX. Return a new RTX which is as simplified as possible. */
597 simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
599 return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
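/* A small illustrative use (the register number is hypothetical):
   with X = (plus:SI (reg:SI 100) (const_int 3)),

     rtx folded = simplify_replace_rtx (x, old_reg, GEN_INT (4));

   where OLD_REG is (reg:SI 100), yields (const_int 7), because each
   rebuilt subexpression is passed back through the simplify_gen_*
   routines above.  */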
602 /* Try to simplify a MODE truncation of OP, which has OP_MODE.
603 Only handle cases where the truncated value is inherently an rvalue.
605 RTL provides two ways of truncating a value:
607 1. a lowpart subreg. This form is only a truncation when both
608 the outer and inner modes (here MODE and OP_MODE respectively)
609 are scalar integers, and only then when the subreg is used as
610 an rvalue.
612 It is only valid to form such truncating subregs if the
613 truncation requires no action by the target. The onus for
614 proving this is on the creator of the subreg -- e.g. the
615 caller to simplify_subreg or simplify_gen_subreg -- and typically
616 involves either TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode.
618 2. a TRUNCATE. This form handles both scalar and compound integers.
620 The first form is preferred where valid. However, the TRUNCATE
621 handling in simplify_unary_operation turns the second form into the
622 first form when TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode allow,
623 so it is generally safe to form rvalue truncations using:
625 simplify_gen_unary (TRUNCATE, ...)
627 and leave simplify_unary_operation to work out which representation
628 should be used.
630 Because of the proof requirements on (1), simplify_truncation must
631 also use simplify_gen_unary (TRUNCATE, ...) to truncate parts of OP,
632 regardless of whether the outer truncation came from a SUBREG or a
633 TRUNCATE. For example, if the caller has proven that an SImode
634 truncation of:
636 (and:DI X Y)
638 is a no-op and can be represented as a subreg, it does not follow
639 that SImode truncations of X and Y are also no-ops. On a target
640 like 64-bit MIPS that requires SImode values to be stored in
641 sign-extended form, an SImode truncation of:
643 (and:DI (reg:DI X) (const_int 63))
645 is trivially a no-op because only the lower 6 bits can be set.
646 However, X is still an arbitrary 64-bit number and so we cannot
647 assume that truncating it too is a no-op. */
649 static rtx
650 simplify_truncation (machine_mode mode, rtx op,
651 machine_mode op_mode)
653 unsigned int precision = GET_MODE_UNIT_PRECISION (mode);
654 unsigned int op_precision = GET_MODE_UNIT_PRECISION (op_mode);
655 gcc_assert (precision <= op_precision);
657 /* Optimize truncations of zero and sign extended values. */
658 if (GET_CODE (op) == ZERO_EXTEND
659 || GET_CODE (op) == SIGN_EXTEND)
661 /* There are three possibilities. If MODE is the same as the
662 origmode, we can omit both the extension and the subreg.
663 If MODE is not larger than the origmode, we can apply the
664 truncation without the extension. Finally, if the outermode
665 is larger than the origmode, we can just extend to the appropriate
666 mode. */
667 machine_mode origmode = GET_MODE (XEXP (op, 0));
668 if (mode == origmode)
669 return XEXP (op, 0);
670 else if (precision <= GET_MODE_UNIT_PRECISION (origmode))
671 return simplify_gen_unary (TRUNCATE, mode,
672 XEXP (op, 0), origmode);
673 else
674 return simplify_gen_unary (GET_CODE (op), mode,
675 XEXP (op, 0), origmode);
678 /* If the machine can perform operations in the truncated mode, distribute
679 the truncation, i.e. simplify (truncate:QI (op:SI (x:SI) (y:SI))) into
680 (op:QI (truncate:QI (x:SI)) (truncate:QI (y:SI))). */
681 if (1
682 #ifdef WORD_REGISTER_OPERATIONS
683 && precision >= BITS_PER_WORD
684 #endif
685 && (GET_CODE (op) == PLUS
686 || GET_CODE (op) == MINUS
687 || GET_CODE (op) == MULT))
689 rtx op0 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0), op_mode);
690 if (op0)
692 rtx op1 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 1), op_mode);
693 if (op1)
694 return simplify_gen_binary (GET_CODE (op), mode, op0, op1);
698 /* Simplify (truncate:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C)) into
699 (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
700 the outer subreg is effectively a truncation to the original mode. */
701 if ((GET_CODE (op) == LSHIFTRT
702 || GET_CODE (op) == ASHIFTRT)
703 /* Ensure that OP_MODE is at least twice as wide as MODE
704 to avoid the possibility that an outer LSHIFTRT shifts by more
705 than the sign extension's sign_bit_copies and introduces zeros
706 into the high bits of the result. */
707 && 2 * precision <= op_precision
708 && CONST_INT_P (XEXP (op, 1))
709 && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
710 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
711 && UINTVAL (XEXP (op, 1)) < precision)
712 return simplify_gen_binary (ASHIFTRT, mode,
713 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
715 /* Likewise (truncate:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C)) into
716 (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
717 the outer subreg is effectively a truncation to the original mode. */
718 if ((GET_CODE (op) == LSHIFTRT
719 || GET_CODE (op) == ASHIFTRT)
720 && CONST_INT_P (XEXP (op, 1))
721 && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
722 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
723 && UINTVAL (XEXP (op, 1)) < precision)
724 return simplify_gen_binary (LSHIFTRT, mode,
725 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
727 /* Likewise (truncate:QI (ashift:SI (zero_extend:SI (x:QI)) C)) into
728 (ashift:QI (x:QI) C), where C is a suitable small constant and
729 the outer subreg is effectively a truncation to the original mode. */
730 if (GET_CODE (op) == ASHIFT
731 && CONST_INT_P (XEXP (op, 1))
732 && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
733 || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
734 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
735 && UINTVAL (XEXP (op, 1)) < precision)
736 return simplify_gen_binary (ASHIFT, mode,
737 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
739 /* Recognize a word extraction from a multi-word subreg. */
740 if ((GET_CODE (op) == LSHIFTRT
741 || GET_CODE (op) == ASHIFTRT)
742 && SCALAR_INT_MODE_P (mode)
743 && SCALAR_INT_MODE_P (op_mode)
744 && precision >= BITS_PER_WORD
745 && 2 * precision <= op_precision
746 && CONST_INT_P (XEXP (op, 1))
747 && (INTVAL (XEXP (op, 1)) & (precision - 1)) == 0
748 && UINTVAL (XEXP (op, 1)) < op_precision)
750 int byte = subreg_lowpart_offset (mode, op_mode);
751 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
752 return simplify_gen_subreg (mode, XEXP (op, 0), op_mode,
753 (WORDS_BIG_ENDIAN
754 ? byte - shifted_bytes
755 : byte + shifted_bytes));
758 /* If we have a TRUNCATE of a right shift of MEM, make a new MEM
759 and try replacing the TRUNCATE and shift with it. Don't do this
760 if the MEM has a mode-dependent address. */
761 if ((GET_CODE (op) == LSHIFTRT
762 || GET_CODE (op) == ASHIFTRT)
763 && SCALAR_INT_MODE_P (op_mode)
764 && MEM_P (XEXP (op, 0))
765 && CONST_INT_P (XEXP (op, 1))
766 && (INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (mode)) == 0
767 && INTVAL (XEXP (op, 1)) > 0
768 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (op_mode)
769 && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0),
770 MEM_ADDR_SPACE (XEXP (op, 0)))
771 && ! MEM_VOLATILE_P (XEXP (op, 0))
772 && (GET_MODE_SIZE (mode) >= UNITS_PER_WORD
773 || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
775 int byte = subreg_lowpart_offset (mode, op_mode);
776 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
777 return adjust_address_nv (XEXP (op, 0), mode,
778 (WORDS_BIG_ENDIAN
779 ? byte - shifted_bytes
780 : byte + shifted_bytes));
783 /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
784 (OP:SI foo:SI) if OP is NEG or ABS. */
785 if ((GET_CODE (op) == ABS
786 || GET_CODE (op) == NEG)
787 && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
788 || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
789 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
790 return simplify_gen_unary (GET_CODE (op), mode,
791 XEXP (XEXP (op, 0), 0), mode);
793 /* (truncate:A (subreg:B (truncate:C X) 0)) is
794 (truncate:A X). */
795 if (GET_CODE (op) == SUBREG
796 && SCALAR_INT_MODE_P (mode)
797 && SCALAR_INT_MODE_P (op_mode)
798 && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (op)))
799 && GET_CODE (SUBREG_REG (op)) == TRUNCATE
800 && subreg_lowpart_p (op))
802 rtx inner = XEXP (SUBREG_REG (op), 0);
803 if (GET_MODE_PRECISION (mode)
804 <= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op))))
805 return simplify_gen_unary (TRUNCATE, mode, inner, GET_MODE (inner));
806 else
807 /* If subreg above is paradoxical and C is narrower
808 than A, return (subreg:A (truncate:C X) 0). */
809 return simplify_gen_subreg (mode, SUBREG_REG (op),
810 GET_MODE (SUBREG_REG (op)), 0);
813 /* (truncate:A (truncate:B X)) is (truncate:A X). */
814 if (GET_CODE (op) == TRUNCATE)
815 return simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0),
816 GET_MODE (XEXP (op, 0)));
818 return NULL_RTX;
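/* As a concrete instance of the shift cases above, truncating
   (lshiftrt:SI (zero_extend:SI (x:QI)) (const_int 2)) back to QImode
   simplifies to (lshiftrt:QI (x:QI) (const_int 2)); the extension is
   dropped because the truncation returns to the original mode and
   the shift count (2) is smaller than the QImode precision.  */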
821 /* Try to simplify a unary operation CODE whose output mode is to be
822 MODE with input operand OP whose mode was originally OP_MODE.
823 Return zero if no simplification can be made. */
825 simplify_unary_operation (enum rtx_code code, machine_mode mode,
826 rtx op, machine_mode op_mode)
828 rtx trueop, tem;
830 trueop = avoid_constant_pool_reference (op);
832 tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
833 if (tem)
834 return tem;
836 return simplify_unary_operation_1 (code, mode, op);
839 /* Perform some simplifications we can do even if the operands
840 aren't constant. */
841 static rtx
842 simplify_unary_operation_1 (enum rtx_code code, machine_mode mode, rtx op)
844 enum rtx_code reversed;
845 rtx temp;
847 switch (code)
849 case NOT:
850 /* (not (not X)) == X. */
851 if (GET_CODE (op) == NOT)
852 return XEXP (op, 0);
854 /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
855 comparison is all ones. */
856 if (COMPARISON_P (op)
857 && (mode == BImode || STORE_FLAG_VALUE == -1)
858 && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
859 return simplify_gen_relational (reversed, mode, VOIDmode,
860 XEXP (op, 0), XEXP (op, 1));
862 /* (not (plus X -1)) can become (neg X). */
863 if (GET_CODE (op) == PLUS
864 && XEXP (op, 1) == constm1_rtx)
865 return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
867 /* Similarly, (not (neg X)) is (plus X -1). */
868 if (GET_CODE (op) == NEG)
869 return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
870 CONSTM1_RTX (mode));
872 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
873 if (GET_CODE (op) == XOR
874 && CONST_INT_P (XEXP (op, 1))
875 && (temp = simplify_unary_operation (NOT, mode,
876 XEXP (op, 1), mode)) != 0)
877 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
879 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
880 if (GET_CODE (op) == PLUS
881 && CONST_INT_P (XEXP (op, 1))
882 && mode_signbit_p (mode, XEXP (op, 1))
883 && (temp = simplify_unary_operation (NOT, mode,
884 XEXP (op, 1), mode)) != 0)
885 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
888 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
889 operands other than 1, but that is not valid. We could do a
890 similar simplification for (not (lshiftrt C X)) where C is
891 just the sign bit, but this doesn't seem common enough to
892 bother with. */
893 if (GET_CODE (op) == ASHIFT
894 && XEXP (op, 0) == const1_rtx)
896 temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
897 return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
900 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
901 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
902 so we can perform the above simplification. */
903 if (STORE_FLAG_VALUE == -1
904 && GET_CODE (op) == ASHIFTRT
905 && CONST_INT_P (XEXP (op, 1))
906 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
907 return simplify_gen_relational (GE, mode, VOIDmode,
908 XEXP (op, 0), const0_rtx);
911 if (GET_CODE (op) == SUBREG
912 && subreg_lowpart_p (op)
913 && (GET_MODE_SIZE (GET_MODE (op))
914 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
915 && GET_CODE (SUBREG_REG (op)) == ASHIFT
916 && XEXP (SUBREG_REG (op), 0) == const1_rtx)
918 machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
919 rtx x;
921 x = gen_rtx_ROTATE (inner_mode,
922 simplify_gen_unary (NOT, inner_mode, const1_rtx,
923 inner_mode),
924 XEXP (SUBREG_REG (op), 1));
925 temp = rtl_hooks.gen_lowpart_no_emit (mode, x);
926 if (temp)
927 return temp;
930 /* Apply De Morgan's laws to reduce number of patterns for machines
931 with negating logical insns (and-not, nand, etc.). If result has
932 only one NOT, put it first, since that is how the patterns are
933 coded. */
934 if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
936 rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
937 machine_mode op_mode;
939 op_mode = GET_MODE (in1);
940 in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);
942 op_mode = GET_MODE (in2);
943 if (op_mode == VOIDmode)
944 op_mode = mode;
945 in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);
947 if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
949 rtx tem = in2;
950 in2 = in1; in1 = tem;
953 return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
954 mode, in1, in2);
957 /* (not (bswap x)) -> (bswap (not x)). */
958 if (GET_CODE (op) == BSWAP)
960 rtx x = simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
961 return simplify_gen_unary (BSWAP, mode, x, mode);
963 break;
965 case NEG:
966 /* (neg (neg X)) == X. */
967 if (GET_CODE (op) == NEG)
968 return XEXP (op, 0);
970 /* (neg (plus X 1)) can become (not X). */
971 if (GET_CODE (op) == PLUS
972 && XEXP (op, 1) == const1_rtx)
973 return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
975 /* Similarly, (neg (not X)) is (plus X 1). */
976 if (GET_CODE (op) == NOT)
977 return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
978 CONST1_RTX (mode));
980 /* (neg (minus X Y)) can become (minus Y X). This transformation
981 isn't safe for modes with signed zeros, since if X and Y are
982 both +0, (minus Y X) is the same as (minus X Y). If the
983 rounding mode is towards +infinity (or -infinity) then the two
984 expressions will be rounded differently. */
985 if (GET_CODE (op) == MINUS
986 && !HONOR_SIGNED_ZEROS (mode)
987 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
988 return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
990 if (GET_CODE (op) == PLUS
991 && !HONOR_SIGNED_ZEROS (mode)
992 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
994 /* (neg (plus A C)) is simplified to (minus -C A). */
995 if (CONST_SCALAR_INT_P (XEXP (op, 1))
996 || CONST_DOUBLE_AS_FLOAT_P (XEXP (op, 1)))
998 temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
999 if (temp)
1000 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
1003 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
1004 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
1005 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
1008 /* (neg (mult A B)) becomes (mult A (neg B)).
1009 This works even for floating-point values. */
1010 if (GET_CODE (op) == MULT
1011 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1013 temp = simplify_gen_unary (NEG, mode, XEXP (op, 1), mode);
1014 return simplify_gen_binary (MULT, mode, XEXP (op, 0), temp);
1017 /* NEG commutes with ASHIFT since it is multiplication. Only do
1018 this if we can then eliminate the NEG (e.g., if the operand
1019 is a constant). */
1020 if (GET_CODE (op) == ASHIFT)
1022 temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
1023 if (temp)
1024 return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
1027 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
1028 C is equal to the width of MODE minus 1. */
1029 if (GET_CODE (op) == ASHIFTRT
1030 && CONST_INT_P (XEXP (op, 1))
1031 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
1032 return simplify_gen_binary (LSHIFTRT, mode,
1033 XEXP (op, 0), XEXP (op, 1));
1035 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
1036 C is equal to the width of MODE minus 1. */
1037 if (GET_CODE (op) == LSHIFTRT
1038 && CONST_INT_P (XEXP (op, 1))
1039 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
1040 return simplify_gen_binary (ASHIFTRT, mode,
1041 XEXP (op, 0), XEXP (op, 1));
1043 /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
1044 if (GET_CODE (op) == XOR
1045 && XEXP (op, 1) == const1_rtx
1046 && nonzero_bits (XEXP (op, 0), mode) == 1)
1047 return plus_constant (mode, XEXP (op, 0), -1);
1049 /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1. */
1050 /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1. */
1051 if (GET_CODE (op) == LT
1052 && XEXP (op, 1) == const0_rtx
1053 && SCALAR_INT_MODE_P (GET_MODE (XEXP (op, 0))))
1055 machine_mode inner = GET_MODE (XEXP (op, 0));
1056 int isize = GET_MODE_PRECISION (inner);
1057 if (STORE_FLAG_VALUE == 1)
1059 temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
1060 GEN_INT (isize - 1));
1061 if (mode == inner)
1062 return temp;
1063 if (GET_MODE_PRECISION (mode) > isize)
1064 return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
1065 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
1067 else if (STORE_FLAG_VALUE == -1)
1069 temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
1070 GEN_INT (isize - 1));
1071 if (mode == inner)
1072 return temp;
1073 if (GET_MODE_PRECISION (mode) > isize)
1074 return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
1075 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
1078 break;
1080 case TRUNCATE:
1081 /* Don't optimize (lshiftrt (mult ...)) as it would interfere
1082 with the umulXi3_highpart patterns. */
1083 if (GET_CODE (op) == LSHIFTRT
1084 && GET_CODE (XEXP (op, 0)) == MULT)
1085 break;
1087 if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
1089 if (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op)))
1091 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1092 if (temp)
1093 return temp;
1095 /* We can't handle truncation to a partial integer mode here
1096 because we don't know the real bitsize of the partial
1097 integer mode. */
1098 break;
1101 if (GET_MODE (op) != VOIDmode)
1103 temp = simplify_truncation (mode, op, GET_MODE (op));
1104 if (temp)
1105 return temp;
1108 /* If we know that the value is already truncated, we can
1109 replace the TRUNCATE with a SUBREG. */
1110 if (GET_MODE_NUNITS (mode) == 1
1111 && (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op))
1112 || truncated_to_mode (mode, op)))
1114 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1115 if (temp)
1116 return temp;
1119 /* A truncate of a comparison can be replaced with a subreg if
1120 STORE_FLAG_VALUE permits. This is like the previous test,
1121 but it works even if the comparison is done in a mode larger
1122 than HOST_BITS_PER_WIDE_INT. */
1123 if (HWI_COMPUTABLE_MODE_P (mode)
1124 && COMPARISON_P (op)
1125 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
1127 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1128 if (temp)
1129 return temp;
1132 /* A truncate of a memory is just loading the low part of the memory
1133 if we are not changing the meaning of the address. */
1134 if (GET_CODE (op) == MEM
1135 && !VECTOR_MODE_P (mode)
1136 && !MEM_VOLATILE_P (op)
1137 && !mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op)))
1139 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1140 if (temp)
1141 return temp;
1144 break;
1146 case FLOAT_TRUNCATE:
1147 if (DECIMAL_FLOAT_MODE_P (mode))
1148 break;
1150 /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */
1151 if (GET_CODE (op) == FLOAT_EXTEND
1152 && GET_MODE (XEXP (op, 0)) == mode)
1153 return XEXP (op, 0);
1155 /* (float_truncate:SF (float_truncate:DF foo:XF))
1156 = (float_truncate:SF foo:XF).
1157 This may eliminate double rounding, so it is unsafe.
1159 (float_truncate:SF (float_extend:XF foo:DF))
1160 = (float_truncate:SF foo:DF).
1162 (float_truncate:DF (float_extend:XF foo:SF))
1163 = (float_extend:DF foo:SF). */
1164 if ((GET_CODE (op) == FLOAT_TRUNCATE
1165 && flag_unsafe_math_optimizations)
1166 || GET_CODE (op) == FLOAT_EXTEND)
1167 return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op,
1168 0)))
1169 > GET_MODE_SIZE (mode)
1170 ? FLOAT_TRUNCATE : FLOAT_EXTEND,
1171 mode,
1172 XEXP (op, 0), mode);
1174 /* (float_truncate (float x)) is (float x) */
1175 if ((GET_CODE (op) == FLOAT || GET_CODE (op) == UNSIGNED_FLOAT)
1176 && (flag_unsafe_math_optimizations
1177 || (SCALAR_FLOAT_MODE_P (GET_MODE (op))
1178 && ((unsigned)significand_size (GET_MODE (op))
1179 >= (GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))
1180 - num_sign_bit_copies (XEXP (op, 0),
1181 GET_MODE (XEXP (op, 0))))))))
1182 return simplify_gen_unary (GET_CODE (op), mode,
1183 XEXP (op, 0),
1184 GET_MODE (XEXP (op, 0)));
1186 /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
1187 (OP:SF foo:SF) if OP is NEG or ABS. */
1188 if ((GET_CODE (op) == ABS
1189 || GET_CODE (op) == NEG)
1190 && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
1191 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
1192 return simplify_gen_unary (GET_CODE (op), mode,
1193 XEXP (XEXP (op, 0), 0), mode);
1195 /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
1196 is (float_truncate:SF x). */
1197 if (GET_CODE (op) == SUBREG
1198 && subreg_lowpart_p (op)
1199 && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
1200 return SUBREG_REG (op);
1201 break;
1203 case FLOAT_EXTEND:
1204 if (DECIMAL_FLOAT_MODE_P (mode))
1205 break;
1207 /* (float_extend (float_extend x)) is (float_extend x)
1209 (float_extend (float x)) is (float x) assuming that double
1210 rounding can't happen.  */
1212 if (GET_CODE (op) == FLOAT_EXTEND
1213 || ((GET_CODE (op) == FLOAT || GET_CODE (op) == UNSIGNED_FLOAT)
1214 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1215 && ((unsigned)significand_size (GET_MODE (op))
1216 >= (GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))
1217 - num_sign_bit_copies (XEXP (op, 0),
1218 GET_MODE (XEXP (op, 0)))))))
1219 return simplify_gen_unary (GET_CODE (op), mode,
1220 XEXP (op, 0),
1221 GET_MODE (XEXP (op, 0)));
1223 break;
1225 case ABS:
1226 /* (abs (neg <foo>)) -> (abs <foo>) */
1227 if (GET_CODE (op) == NEG)
1228 return simplify_gen_unary (ABS, mode, XEXP (op, 0),
1229 GET_MODE (XEXP (op, 0)));
1231 /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
1232 do nothing. */
1233 if (GET_MODE (op) == VOIDmode)
1234 break;
1236 /* If operand is something known to be positive, ignore the ABS. */
1237 if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
1238 || val_signbit_known_clear_p (GET_MODE (op),
1239 nonzero_bits (op, GET_MODE (op))))
1240 return op;
1242 /* If operand is known to be only -1 or 0, convert ABS to NEG. */
1243 if (num_sign_bit_copies (op, mode) == GET_MODE_PRECISION (mode))
1244 return gen_rtx_NEG (mode, op);
1246 break;
1248 case FFS:
1249 /* (ffs (*_extend <X>)) = (ffs <X>) */
1250 if (GET_CODE (op) == SIGN_EXTEND
1251 || GET_CODE (op) == ZERO_EXTEND)
1252 return simplify_gen_unary (FFS, mode, XEXP (op, 0),
1253 GET_MODE (XEXP (op, 0)));
1254 break;
1256 case POPCOUNT:
1257 switch (GET_CODE (op))
1259 case BSWAP:
1260 case ZERO_EXTEND:
1261 /* (popcount (zero_extend <X>)) = (popcount <X>) */
1262 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1263 GET_MODE (XEXP (op, 0)));
1265 case ROTATE:
1266 case ROTATERT:
1267 /* Rotations don't affect popcount. */
1268 if (!side_effects_p (XEXP (op, 1)))
1269 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1270 GET_MODE (XEXP (op, 0)));
1271 break;
1273 default:
1274 break;
1276 break;
1278 case PARITY:
1279 switch (GET_CODE (op))
1281 case NOT:
1282 case BSWAP:
1283 case ZERO_EXTEND:
1284 case SIGN_EXTEND:
1285 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1286 GET_MODE (XEXP (op, 0)));
1288 case ROTATE:
1289 case ROTATERT:
1290 /* Rotations don't affect parity. */
1291 if (!side_effects_p (XEXP (op, 1)))
1292 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1293 GET_MODE (XEXP (op, 0)));
1294 break;
1296 default:
1297 break;
1299 break;
1301 case BSWAP:
1302 /* (bswap (bswap x)) -> x. */
1303 if (GET_CODE (op) == BSWAP)
1304 return XEXP (op, 0);
1305 break;
1307 case FLOAT:
1308 /* (float (sign_extend <X>)) = (float <X>). */
1309 if (GET_CODE (op) == SIGN_EXTEND)
1310 return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
1311 GET_MODE (XEXP (op, 0)));
1312 break;
1314 case SIGN_EXTEND:
1315 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
1316 becomes just the MINUS if its mode is MODE. This allows
1317 folding switch statements on machines using casesi (such as
1318 the VAX). */
1319 if (GET_CODE (op) == TRUNCATE
1320 && GET_MODE (XEXP (op, 0)) == mode
1321 && GET_CODE (XEXP (op, 0)) == MINUS
1322 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
1323 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
1324 return XEXP (op, 0);
1326 /* Extending a widening multiplication should be canonicalized to
1327 a wider widening multiplication. */
1328 if (GET_CODE (op) == MULT)
1330 rtx lhs = XEXP (op, 0);
1331 rtx rhs = XEXP (op, 1);
1332 enum rtx_code lcode = GET_CODE (lhs);
1333 enum rtx_code rcode = GET_CODE (rhs);
1335 /* Widening multiplies usually extend both operands, but sometimes
1336 they use a shift to extract a portion of a register. */
1337 if ((lcode == SIGN_EXTEND
1338 || (lcode == ASHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1339 && (rcode == SIGN_EXTEND
1340 || (rcode == ASHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1342 machine_mode lmode = GET_MODE (lhs);
1343 machine_mode rmode = GET_MODE (rhs);
1344 int bits;
1346 if (lcode == ASHIFTRT)
1347 /* Number of bits not shifted off the end. */
1348 bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
1349 else /* lcode == SIGN_EXTEND */
1350 /* Size of inner mode. */
1351 bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
1353 if (rcode == ASHIFTRT)
1354 bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
1355 else /* rcode == SIGN_EXTEND */
1356 bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
1358 /* We can only widen multiplies if the result is mathematically
1359 equivalent. I.e. if overflow was impossible. */
1360 if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
1361 return simplify_gen_binary
1362 (MULT, mode,
1363 simplify_gen_unary (SIGN_EXTEND, mode, lhs, lmode),
1364 simplify_gen_unary (SIGN_EXTEND, mode, rhs, rmode));
1368 /* Check for a sign extension of a subreg of a promoted
1369 variable, where the promotion is sign-extended, and the
1370 target mode is the same as the variable's promotion. */
1371 if (GET_CODE (op) == SUBREG
1372 && SUBREG_PROMOTED_VAR_P (op)
1373 && SUBREG_PROMOTED_SIGNED_P (op)
1374 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
1376 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1377 if (temp)
1378 return temp;
1381 /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
1382 (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1383 if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
1385 gcc_assert (GET_MODE_PRECISION (mode)
1386 > GET_MODE_PRECISION (GET_MODE (op)));
1387 return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
1388 GET_MODE (XEXP (op, 0)));
1391 /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
1392 is (sign_extend:M (subreg:O <X>)) if there is mode with
1393 GET_MODE_BITSIZE (N) - I bits.
1394 (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1395 is similarly (zero_extend:M (subreg:O <X>)). */
1396 if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT)
1397 && GET_CODE (XEXP (op, 0)) == ASHIFT
1398 && CONST_INT_P (XEXP (op, 1))
1399 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1400 && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
1402 machine_mode tmode
1403 = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
1404 - INTVAL (XEXP (op, 1)), MODE_INT, 1);
1405 gcc_assert (GET_MODE_BITSIZE (mode)
1406 > GET_MODE_BITSIZE (GET_MODE (op)));
1407 if (tmode != BLKmode)
1409 rtx inner =
1410 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1411 if (inner)
1412 return simplify_gen_unary (GET_CODE (op) == ASHIFTRT
1413 ? SIGN_EXTEND : ZERO_EXTEND,
1414 mode, inner, tmode);
1418 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1419 /* As we do not know which address space the pointer is referring to,
1420 we can do this only if the target does not support different pointer
1421 or address modes depending on the address space. */
1422 if (target_default_pointer_address_modes_p ()
1423 && ! POINTERS_EXTEND_UNSIGNED
1424 && mode == Pmode && GET_MODE (op) == ptr_mode
1425 && (CONSTANT_P (op)
1426 || (GET_CODE (op) == SUBREG
1427 && REG_P (SUBREG_REG (op))
1428 && REG_POINTER (SUBREG_REG (op))
1429 && GET_MODE (SUBREG_REG (op)) == Pmode)))
1430 return convert_memory_address (Pmode, op);
1431 #endif
1432 break;
1434 case ZERO_EXTEND:
1435 /* Check for a zero extension of a subreg of a promoted
1436 variable, where the promotion is zero-extended, and the
1437 target mode is the same as the variable's promotion. */
1438 if (GET_CODE (op) == SUBREG
1439 && SUBREG_PROMOTED_VAR_P (op)
1440 && SUBREG_PROMOTED_UNSIGNED_P (op)
1441 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
1443 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1444 if (temp)
1445 return temp;
1448 /* Extending a widening multiplication should be canonicalized to
1449 a wider widening multiplication. */
1450 if (GET_CODE (op) == MULT)
1452 rtx lhs = XEXP (op, 0);
1453 rtx rhs = XEXP (op, 1);
1454 enum rtx_code lcode = GET_CODE (lhs);
1455 enum rtx_code rcode = GET_CODE (rhs);
1457 /* Widening multiplies usually extend both operands, but sometimes
1458 they use a shift to extract a portion of a register. */
1459 if ((lcode == ZERO_EXTEND
1460 || (lcode == LSHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1461 && (rcode == ZERO_EXTEND
1462 || (rcode == LSHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1464 machine_mode lmode = GET_MODE (lhs);
1465 machine_mode rmode = GET_MODE (rhs);
1466 int bits;
1468 if (lcode == LSHIFTRT)
1469 /* Number of bits not shifted off the end. */
1470 bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
1471 else /* lcode == ZERO_EXTEND */
1472 /* Size of inner mode. */
1473 bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
1475 if (rcode == LSHIFTRT)
1476 bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
1477 else /* rcode == ZERO_EXTEND */
1478 bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
1480 /* We can only widen multiplies if the result is mathematically
1481 equivalent. I.e. if overflow was impossible. */
1482 if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
1483 return simplify_gen_binary
1484 (MULT, mode,
1485 simplify_gen_unary (ZERO_EXTEND, mode, lhs, lmode),
1486 simplify_gen_unary (ZERO_EXTEND, mode, rhs, rmode));
1490 /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1491 if (GET_CODE (op) == ZERO_EXTEND)
1492 return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0),
1493 GET_MODE (XEXP (op, 0)));
1495 /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1496 is (zero_extend:M (subreg:O <X>)) if there is mode with
1497 GET_MODE_PRECISION (N) - I bits. */
1498 if (GET_CODE (op) == LSHIFTRT
1499 && GET_CODE (XEXP (op, 0)) == ASHIFT
1500 && CONST_INT_P (XEXP (op, 1))
1501 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1502 && GET_MODE_PRECISION (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
1504 machine_mode tmode
1505 = mode_for_size (GET_MODE_PRECISION (GET_MODE (op))
1506 - INTVAL (XEXP (op, 1)), MODE_INT, 1);
1507 if (tmode != BLKmode)
1509 rtx inner =
1510 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1511 if (inner)
1512 return simplify_gen_unary (ZERO_EXTEND, mode, inner, tmode);
1516 /* (zero_extend:M (subreg:N <X:O>)) is <X:O> (for M == O) or
1517 (zero_extend:M <X:O>), if X doesn't have any non-zero bits outside
1518 of mode N. E.g.
1519 (zero_extend:SI (subreg:QI (and:SI (reg:SI) (const_int 63)) 0)) is
1520 (and:SI (reg:SI) (const_int 63)). */
1521 if (GET_CODE (op) == SUBREG
1522 && GET_MODE_PRECISION (GET_MODE (op))
1523 < GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
1524 && GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
1525 <= HOST_BITS_PER_WIDE_INT
1526 && GET_MODE_PRECISION (mode)
1527 >= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
1528 && subreg_lowpart_p (op)
1529 && (nonzero_bits (SUBREG_REG (op), GET_MODE (SUBREG_REG (op)))
1530 & ~GET_MODE_MASK (GET_MODE (op))) == 0)
1532 if (GET_MODE_PRECISION (mode)
1533 == GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op))))
1534 return SUBREG_REG (op);
1535 return simplify_gen_unary (ZERO_EXTEND, mode, SUBREG_REG (op),
1536 GET_MODE (SUBREG_REG (op)));
1539 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1540 /* As we do not know which address space the pointer is referring to,
1541 we can do this only if the target does not support different pointer
1542 or address modes depending on the address space. */
1543 if (target_default_pointer_address_modes_p ()
1544 && POINTERS_EXTEND_UNSIGNED > 0
1545 && mode == Pmode && GET_MODE (op) == ptr_mode
1546 && (CONSTANT_P (op)
1547 || (GET_CODE (op) == SUBREG
1548 && REG_P (SUBREG_REG (op))
1549 && REG_POINTER (SUBREG_REG (op))
1550 && GET_MODE (SUBREG_REG (op)) == Pmode)))
1551 return convert_memory_address (Pmode, op);
1552 #endif
1553 break;
1555 default:
1556 break;
1559 return 0;
1562 /* Try to compute the value of a unary operation CODE whose output mode is to
1563 be MODE with input operand OP whose mode was originally OP_MODE.
1564 Return zero if the value cannot be computed. */
1566 simplify_const_unary_operation (enum rtx_code code, machine_mode mode,
1567 rtx op, machine_mode op_mode)
1569 unsigned int width = GET_MODE_PRECISION (mode);
1571 if (code == VEC_DUPLICATE)
1573 gcc_assert (VECTOR_MODE_P (mode));
1574 if (GET_MODE (op) != VOIDmode)
1576 if (!VECTOR_MODE_P (GET_MODE (op)))
1577 gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
1578 else
1579 gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
1580 (GET_MODE (op)));
1582 if (CONST_SCALAR_INT_P (op) || CONST_DOUBLE_AS_FLOAT_P (op)
1583 || GET_CODE (op) == CONST_VECTOR)
1585 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1586 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1587 rtvec v = rtvec_alloc (n_elts);
1588 unsigned int i;
1590 if (GET_CODE (op) != CONST_VECTOR)
1591 for (i = 0; i < n_elts; i++)
1592 RTVEC_ELT (v, i) = op;
1593 else
1595 machine_mode inmode = GET_MODE (op);
1596 int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
1597 unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
1599 gcc_assert (in_n_elts < n_elts);
1600 gcc_assert ((n_elts % in_n_elts) == 0);
1601 for (i = 0; i < n_elts; i++)
1602 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
1604 return gen_rtx_CONST_VECTOR (mode, v);
1608 if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
1610 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1611 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1612 machine_mode opmode = GET_MODE (op);
1613 int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
1614 unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
1615 rtvec v = rtvec_alloc (n_elts);
1616 unsigned int i;
1618 gcc_assert (op_n_elts == n_elts);
1619 for (i = 0; i < n_elts; i++)
1621 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
1622 CONST_VECTOR_ELT (op, i),
1623 GET_MODE_INNER (opmode));
1624 if (!x)
1625 return 0;
1626 RTVEC_ELT (v, i) = x;
1628 return gen_rtx_CONST_VECTOR (mode, v);
1631 /* The order of these tests is critical so that, for example, we don't
1632 check the wrong mode (input vs. output) for a conversion operation,
1633 such as FIX. At some point, this should be simplified. */
1635 if (code == FLOAT && CONST_SCALAR_INT_P (op))
1637 REAL_VALUE_TYPE d;
1639 if (op_mode == VOIDmode)
1641 /* CONST_INTs have VOIDmode as their mode. We assume that all
1642 the bits of the constant are significant, though this is a
1643 dangerous assumption: CONST_INTs are often created and used
1644 with garbage in the bits outside of the precision of the
1645 implied mode of the const_int. */
1646 op_mode = MAX_MODE_INT;
1649 real_from_integer (&d, mode, std::make_pair (op, op_mode), SIGNED);
1650 d = real_value_truncate (mode, d);
1651 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1653 else if (code == UNSIGNED_FLOAT && CONST_SCALAR_INT_P (op))
1655 REAL_VALUE_TYPE d;
1657 if (op_mode == VOIDmode)
1659 /* CONST_INTs have VOIDmode as their mode. We assume that all
1660 the bits of the constant are significant, though this is a
1661 dangerous assumption: CONST_INTs are often created and used
1662 with garbage in the bits outside of the precision of the
1663 implied mode of the const_int. */
1664 op_mode = MAX_MODE_INT;
1667 real_from_integer (&d, mode, std::make_pair (op, op_mode), UNSIGNED);
1668 d = real_value_truncate (mode, d);
1669 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1672 if (CONST_SCALAR_INT_P (op) && width > 0)
1674 wide_int result;
1675 machine_mode imode = op_mode == VOIDmode ? mode : op_mode;
1676 rtx_mode_t op0 = std::make_pair (op, imode);
1677 int int_value;
1679 #if TARGET_SUPPORTS_WIDE_INT == 0
1680 /* This assert keeps the simplification from producing a result
1681 that cannot be represented in a CONST_DOUBLE, but a lot of
1682 upstream callers expect that this function never fails to
1683 simplify something, so if you added this to the test above
1684 the code would die later anyway. If this assert fires, you
1685 just need to make the port support wide int. */
1686 gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT);
1687 #endif
1689 switch (code)
1691 case NOT:
1692 result = wi::bit_not (op0);
1693 break;
1695 case NEG:
1696 result = wi::neg (op0);
1697 break;
1699 case ABS:
1700 result = wi::abs (op0);
1701 break;
1703 case FFS:
1704 result = wi::shwi (wi::ffs (op0), mode);
1705 break;
1707 case CLZ:
1708 if (wi::ne_p (op0, 0))
1709 int_value = wi::clz (op0);
1710 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, int_value))
1711 int_value = GET_MODE_PRECISION (mode);
1712 result = wi::shwi (int_value, mode);
1713 break;
1715 case CLRSB:
1716 result = wi::shwi (wi::clrsb (op0), mode);
1717 break;
1719 case CTZ:
1720 if (wi::ne_p (op0, 0))
1721 int_value = wi::ctz (op0);
1722 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, int_value))
1723 int_value = GET_MODE_PRECISION (mode);
1724 result = wi::shwi (int_value, mode);
1725 break;
1727 case POPCOUNT:
1728 result = wi::shwi (wi::popcount (op0), mode);
1729 break;
1731 case PARITY:
1732 result = wi::shwi (wi::parity (op0), mode);
1733 break;
1735 case BSWAP:
1736 result = wide_int (op0).bswap ();
1737 break;
1739 case TRUNCATE:
1740 case ZERO_EXTEND:
1741 result = wide_int::from (op0, width, UNSIGNED);
1742 break;
1744 case SIGN_EXTEND:
1745 result = wide_int::from (op0, width, SIGNED);
1746 break;
1748 case SQRT:
1749 default:
1750 return 0;
1753 return immed_wide_int_const (result, mode);
1756 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1757 && SCALAR_FLOAT_MODE_P (mode)
1758 && SCALAR_FLOAT_MODE_P (GET_MODE (op)))
1760 REAL_VALUE_TYPE d;
1761 REAL_VALUE_FROM_CONST_DOUBLE (d, op);
1763 switch (code)
1765 case SQRT:
1766 return 0;
1767 case ABS:
1768 d = real_value_abs (&d);
1769 break;
1770 case NEG:
1771 d = real_value_negate (&d);
1772 break;
1773 case FLOAT_TRUNCATE:
1774 d = real_value_truncate (mode, d);
1775 break;
1776 case FLOAT_EXTEND:
1777 /* All this does is change the mode, unless changing
1778 mode class. */
1779 if (GET_MODE_CLASS (mode) != GET_MODE_CLASS (GET_MODE (op)))
1780 real_convert (&d, mode, &d);
1781 break;
1782 case FIX:
1783 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
1784 break;
1785 case NOT:
1787 long tmp[4];
1788 int i;
1790 real_to_target (tmp, &d, GET_MODE (op));
1791 for (i = 0; i < 4; i++)
1792 tmp[i] = ~tmp[i];
1793 real_from_target (&d, tmp, mode);
1794 break;
1796 default:
1797 gcc_unreachable ();
1799 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1801 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1802 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1803 && GET_MODE_CLASS (mode) == MODE_INT
1804 && width > 0)
1806 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1807 operators are intentionally left unspecified (to ease implementation
1808 by target backends), for consistency, this routine implements the
1809 same semantics for constant folding as used by the middle-end. */
1811 /* This was formerly used only for non-IEEE float.
1812 eggert@twinsun.com says it is safe for IEEE also. */
1813 REAL_VALUE_TYPE x, t;
1814 REAL_VALUE_FROM_CONST_DOUBLE (x, op);
1815 wide_int wmax, wmin;
1816 /* This is part of the abi to real_to_integer, but we check
1817 things before making this call. */
1818 bool fail;
1820 switch (code)
1822 case FIX:
1823 if (REAL_VALUE_ISNAN (x))
1824 return const0_rtx;
1826 /* Test against the signed upper bound. */
1827 wmax = wi::max_value (width, SIGNED);
1828 real_from_integer (&t, VOIDmode, wmax, SIGNED);
1829 if (REAL_VALUES_LESS (t, x))
1830 return immed_wide_int_const (wmax, mode);
1832 /* Test against the signed lower bound. */
1833 wmin = wi::min_value (width, SIGNED);
1834 real_from_integer (&t, VOIDmode, wmin, SIGNED);
1835 if (REAL_VALUES_LESS (x, t))
1836 return immed_wide_int_const (wmin, mode);
1838 return immed_wide_int_const (real_to_integer (&x, &fail, width), mode);
1839 break;
1841 case UNSIGNED_FIX:
1842 if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
1843 return const0_rtx;
1845 /* Test against the unsigned upper bound. */
1846 wmax = wi::max_value (width, UNSIGNED);
1847 real_from_integer (&t, VOIDmode, wmax, UNSIGNED);
1848 if (REAL_VALUES_LESS (t, x))
1849 return immed_wide_int_const (wmax, mode);
1851 return immed_wide_int_const (real_to_integer (&x, &fail, width),
1852 mode);
1853 break;
1855 default:
1856 gcc_unreachable ();
1860 return NULL_RTX;
1863 /* Subroutine of simplify_binary_operation to simplify a binary operation
1864 CODE that can commute with byte swapping, with result mode MODE and
1865 operating on OP0 and OP1. CODE is currently one of AND, IOR or XOR.
1866 Return zero if no simplification or canonicalization is possible. */
1868 static rtx
1869 simplify_byte_swapping_operation (enum rtx_code code, machine_mode mode,
1870 rtx op0, rtx op1)
1872 rtx tem;
1874 /* (op (bswap x) C1) -> (bswap (op x C2)) with C2 swapped. */
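/* E.g., in SImode, (and (bswap x) (const_int 0xff)) becomes
   (bswap (and x (const_int 0xff000000))). */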
1875 if (GET_CODE (op0) == BSWAP && CONST_SCALAR_INT_P (op1))
1877 tem = simplify_gen_binary (code, mode, XEXP (op0, 0),
1878 simplify_gen_unary (BSWAP, mode, op1, mode));
1879 return simplify_gen_unary (BSWAP, mode, tem, mode);
1882 /* (op (bswap x) (bswap y)) -> (bswap (op x y)). */
1883 if (GET_CODE (op0) == BSWAP && GET_CODE (op1) == BSWAP)
1885 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), XEXP (op1, 0));
1886 return simplify_gen_unary (BSWAP, mode, tem, mode);
1889 return NULL_RTX;
1892 /* Subroutine of simplify_binary_operation to simplify a commutative,
1893 associative binary operation CODE with result mode MODE, operating
1894 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
1895 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
1896 canonicalization is possible. */
1898 static rtx
1899 simplify_associative_operation (enum rtx_code code, machine_mode mode,
1900 rtx op0, rtx op1)
1902 rtx tem;
1904 /* Linearize the operator to the left. */
1905 if (GET_CODE (op1) == code)
1907 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)". */
1908 if (GET_CODE (op0) == code)
1910 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
1911 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
1914 /* "a op (b op c)" becomes "(b op c) op a". */
1915 if (! swap_commutative_operands_p (op1, op0))
1916 return simplify_gen_binary (code, mode, op1, op0);
1918 std::swap (op0, op1);
1921 if (GET_CODE (op0) == code)
1923 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
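/* E.g. (and (and x (const_int 4)) y) is canonicalized as
   (and (and x y) (const_int 4)), assuming y is not a constant. */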
1924 if (swap_commutative_operands_p (XEXP (op0, 1), op1))
1926 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
1927 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1930 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
1931 tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
1932 if (tem != 0)
1933 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
1935 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
1936 tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
1937 if (tem != 0)
1938 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1941 return 0;
1945 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
1946 and OP1. Return 0 if no simplification is possible.
1948 Don't use this for relational operations such as EQ or LT.
1949 Use simplify_relational_operation instead. */
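/* For instance, simplify_binary_operation (PLUS, SImode, x, const0_rtx)
   simplifies to x, and (MULT, SImode, x, const0_rtx) to const0_rtx when
   x has no side effects. */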
1951 simplify_binary_operation (enum rtx_code code, machine_mode mode,
1952 rtx op0, rtx op1)
1954 rtx trueop0, trueop1;
1955 rtx tem;
1957 /* Relational operations don't work here. We must know the mode
1958 of the operands in order to do the comparison correctly.
1959 Assuming a full word can give incorrect results.
1960 Consider comparing 128 with -128 in QImode. */
1961 gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
1962 gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
1964 /* Make sure the constant is second. */
1965 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
1966 && swap_commutative_operands_p (op0, op1))
1967 std::swap (op0, op1);
1969 trueop0 = avoid_constant_pool_reference (op0);
1970 trueop1 = avoid_constant_pool_reference (op1);
1972 tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
1973 if (tem)
1974 return tem;
1975 return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
1978 /* Subroutine of simplify_binary_operation. Simplify a binary operation
1979 CODE with result mode MODE, operating on OP0 and OP1. If OP0 and/or
1980 OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
1981 actual constants. */
1983 static rtx
1984 simplify_binary_operation_1 (enum rtx_code code, machine_mode mode,
1985 rtx op0, rtx op1, rtx trueop0, rtx trueop1)
1987 rtx tem, reversed, opleft, opright;
1988 HOST_WIDE_INT val;
1989 unsigned int width = GET_MODE_PRECISION (mode);
1991 /* Even if we can't compute a constant result,
1992 there are some cases worth simplifying. */
1994 switch (code)
1996 case PLUS:
1997 /* Maybe simplify x + 0 to x. The two expressions are equivalent
1998 when x is NaN, infinite, or finite and nonzero. They aren't
1999 when x is -0 and the rounding mode is not towards -infinity,
2000 since (-0) + 0 is then +0, not -0. */
2001 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
2002 return op0;
2004 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
2005 transformations are safe even for IEEE. */
2006 if (GET_CODE (op0) == NEG)
2007 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
2008 else if (GET_CODE (op1) == NEG)
2009 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
2011 /* (~a) + 1 -> -a */
2012 if (INTEGRAL_MODE_P (mode)
2013 && GET_CODE (op0) == NOT
2014 && trueop1 == const1_rtx)
2015 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
2017 /* Handle both-operands-constant cases. We can only add
2018 CONST_INTs to constants since the sum of relocatable symbols
2019 can't be handled by most assemblers. Don't add CONST_INT
2020 to CONST_INT since overflow won't be computed properly if wider
2021 than HOST_BITS_PER_WIDE_INT. */
2023 if ((GET_CODE (op0) == CONST
2024 || GET_CODE (op0) == SYMBOL_REF
2025 || GET_CODE (op0) == LABEL_REF)
2026 && CONST_INT_P (op1))
2027 return plus_constant (mode, op0, INTVAL (op1));
2028 else if ((GET_CODE (op1) == CONST
2029 || GET_CODE (op1) == SYMBOL_REF
2030 || GET_CODE (op1) == LABEL_REF)
2031 && CONST_INT_P (op0))
2032 return plus_constant (mode, op1, INTVAL (op0));
2034 /* See if this is something like X * C - X or vice versa or
2035 if the multiplication is written as a shift. If so, we can
2036 distribute and make a new multiply, shift, or maybe just
2037 have X (if C is 2 in the example above). But don't make
2038 something more expensive than we had before. */
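/* For example, cost permitting, (plus (mult x (const_int 4)) x)
   becomes (mult x (const_int 5)). */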
2040 if (SCALAR_INT_MODE_P (mode))
2042 rtx lhs = op0, rhs = op1;
2044 wide_int coeff0 = wi::one (GET_MODE_PRECISION (mode));
2045 wide_int coeff1 = wi::one (GET_MODE_PRECISION (mode));
2047 if (GET_CODE (lhs) == NEG)
2049 coeff0 = wi::minus_one (GET_MODE_PRECISION (mode));
2050 lhs = XEXP (lhs, 0);
2052 else if (GET_CODE (lhs) == MULT
2053 && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
2055 coeff0 = std::make_pair (XEXP (lhs, 1), mode);
2056 lhs = XEXP (lhs, 0);
2058 else if (GET_CODE (lhs) == ASHIFT
2059 && CONST_INT_P (XEXP (lhs, 1))
2060 && INTVAL (XEXP (lhs, 1)) >= 0
2061 && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (mode))
2063 coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
2064 GET_MODE_PRECISION (mode));
2065 lhs = XEXP (lhs, 0);
2068 if (GET_CODE (rhs) == NEG)
2070 coeff1 = wi::minus_one (GET_MODE_PRECISION (mode));
2071 rhs = XEXP (rhs, 0);
2073 else if (GET_CODE (rhs) == MULT
2074 && CONST_INT_P (XEXP (rhs, 1)))
2076 coeff1 = std::make_pair (XEXP (rhs, 1), mode);
2077 rhs = XEXP (rhs, 0);
2079 else if (GET_CODE (rhs) == ASHIFT
2080 && CONST_INT_P (XEXP (rhs, 1))
2081 && INTVAL (XEXP (rhs, 1)) >= 0
2082 && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (mode))
2084 coeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
2085 GET_MODE_PRECISION (mode));
2086 rhs = XEXP (rhs, 0);
2089 if (rtx_equal_p (lhs, rhs))
2091 rtx orig = gen_rtx_PLUS (mode, op0, op1);
2092 rtx coeff;
2093 bool speed = optimize_function_for_speed_p (cfun);
2095 coeff = immed_wide_int_const (coeff0 + coeff1, mode);
2097 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2098 return set_src_cost (tem, speed) <= set_src_cost (orig, speed)
2099 ? tem : 0;
2103 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
2104 if (CONST_SCALAR_INT_P (op1)
2105 && GET_CODE (op0) == XOR
2106 && CONST_SCALAR_INT_P (XEXP (op0, 1))
2107 && mode_signbit_p (mode, op1))
2108 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2109 simplify_gen_binary (XOR, mode, op1,
2110 XEXP (op0, 1)));
2112 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
2113 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2114 && GET_CODE (op0) == MULT
2115 && GET_CODE (XEXP (op0, 0)) == NEG)
2117 rtx in1, in2;
2119 in1 = XEXP (XEXP (op0, 0), 0);
2120 in2 = XEXP (op0, 1);
2121 return simplify_gen_binary (MINUS, mode, op1,
2122 simplify_gen_binary (MULT, mode,
2123 in1, in2));
2126 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
2127 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
2128 is 1. */
2129 if (COMPARISON_P (op0)
2130 && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
2131 || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
2132 && (reversed = reversed_comparison (op0, mode)))
2133 return
2134 simplify_gen_unary (NEG, mode, reversed, mode);
2136 /* If one of the operands is a PLUS or a MINUS, see if we can
2137 simplify this by the associative law.
2138 Don't use the associative law for floating point.
2139 The inaccuracy makes it nonassociative,
2140 and subtle programs can break if operations are associated. */
2142 if (INTEGRAL_MODE_P (mode)
2143 && (plus_minus_operand_p (op0)
2144 || plus_minus_operand_p (op1))
2145 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2146 return tem;
2148 /* Reassociate floating point addition only when the user
2149 specifies associative math operations. */
2150 if (FLOAT_MODE_P (mode)
2151 && flag_associative_math)
2153 tem = simplify_associative_operation (code, mode, op0, op1);
2154 if (tem)
2155 return tem;
2157 break;
2159 case COMPARE:
2160 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
2161 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
2162 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
2163 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
2165 rtx xop00 = XEXP (op0, 0);
2166 rtx xop10 = XEXP (op1, 0);
2168 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
2169 return xop00;
2171 if (REG_P (xop00) && REG_P (xop10)
2172 && GET_MODE (xop00) == GET_MODE (xop10)
2173 && REGNO (xop00) == REGNO (xop10)
2174 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
2175 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
2176 return xop00;
2178 break;
2180 case MINUS:
2181 /* We can't assume x-x is 0 even with non-IEEE floating point,
2182 but since it is zero except in very strange circumstances, we
2183 will treat it as zero with -ffinite-math-only. */
2184 if (rtx_equal_p (trueop0, trueop1)
2185 && ! side_effects_p (op0)
2186 && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
2187 return CONST0_RTX (mode);
2189 /* Change subtraction from zero into negation. (0 - x) is the
2190 same as -x when x is NaN, infinite, or finite and nonzero.
2191 But if the mode has signed zeros, and does not round towards
2192 -infinity, then 0 - 0 is 0, not -0. */
2193 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
2194 return simplify_gen_unary (NEG, mode, op1, mode);
2196 /* (-1 - a) is ~a. */
2197 if (trueop0 == constm1_rtx)
2198 return simplify_gen_unary (NOT, mode, op1, mode);
2200 /* Subtracting 0 has no effect unless the mode has signed zeros
2201 and supports rounding towards -infinity. In such a case,
2202 0 - 0 is -0. */
2203 if (!(HONOR_SIGNED_ZEROS (mode)
2204 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
2205 && trueop1 == CONST0_RTX (mode))
2206 return op0;
2208 /* See if this is something like X * C - X or vice versa or
2209 if the multiplication is written as a shift. If so, we can
2210 distribute and make a new multiply, shift, or maybe just
2211 have X (if C is 2 in the example above). But don't make
2212 something more expensive than we had before. */
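/* For example, cost permitting, (minus (mult x (const_int 7)) x)
   becomes (mult x (const_int 6)). */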
2214 if (SCALAR_INT_MODE_P (mode))
2216 rtx lhs = op0, rhs = op1;
2218 wide_int coeff0 = wi::one (GET_MODE_PRECISION (mode));
2219 wide_int negcoeff1 = wi::minus_one (GET_MODE_PRECISION (mode));
2221 if (GET_CODE (lhs) == NEG)
2223 coeff0 = wi::minus_one (GET_MODE_PRECISION (mode));
2224 lhs = XEXP (lhs, 0);
2226 else if (GET_CODE (lhs) == MULT
2227 && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
2229 coeff0 = std::make_pair (XEXP (lhs, 1), mode);
2230 lhs = XEXP (lhs, 0);
2232 else if (GET_CODE (lhs) == ASHIFT
2233 && CONST_INT_P (XEXP (lhs, 1))
2234 && INTVAL (XEXP (lhs, 1)) >= 0
2235 && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (mode))
2237 coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
2238 GET_MODE_PRECISION (mode));
2239 lhs = XEXP (lhs, 0);
2242 if (GET_CODE (rhs) == NEG)
2244 negcoeff1 = wi::one (GET_MODE_PRECISION (mode));
2245 rhs = XEXP (rhs, 0);
2247 else if (GET_CODE (rhs) == MULT
2248 && CONST_INT_P (XEXP (rhs, 1)))
2250 negcoeff1 = wi::neg (std::make_pair (XEXP (rhs, 1), mode));
2251 rhs = XEXP (rhs, 0);
2253 else if (GET_CODE (rhs) == ASHIFT
2254 && CONST_INT_P (XEXP (rhs, 1))
2255 && INTVAL (XEXP (rhs, 1)) >= 0
2256 && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (mode))
2258 negcoeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
2259 GET_MODE_PRECISION (mode));
2260 negcoeff1 = -negcoeff1;
2261 rhs = XEXP (rhs, 0);
2264 if (rtx_equal_p (lhs, rhs))
2266 rtx orig = gen_rtx_MINUS (mode, op0, op1);
2267 rtx coeff;
2268 bool speed = optimize_function_for_speed_p (cfun);
2270 coeff = immed_wide_int_const (coeff0 + negcoeff1, mode);
2272 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2273 return set_src_cost (tem, speed) <= set_src_cost (orig, speed)
2274 ? tem : 0;
2278 /* (a - (-b)) -> (a + b). True even for IEEE. */
2279 if (GET_CODE (op1) == NEG)
2280 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
2282 /* (-x - c) may be simplified as (-c - x). */
2283 if (GET_CODE (op0) == NEG
2284 && (CONST_SCALAR_INT_P (op1) || CONST_DOUBLE_AS_FLOAT_P (op1)))
2286 tem = simplify_unary_operation (NEG, mode, op1, mode);
2287 if (tem)
2288 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
2291 /* Don't let a relocatable value get a negative coeff. */
2292 if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode)
2293 return simplify_gen_binary (PLUS, mode,
2294 op0,
2295 neg_const_int (mode, op1));
2297 /* (x - (x & y)) -> (x & ~y) */
2298 if (INTEGRAL_MODE_P (mode) && GET_CODE (op1) == AND)
2300 if (rtx_equal_p (op0, XEXP (op1, 0)))
2302 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
2303 GET_MODE (XEXP (op1, 1)));
2304 return simplify_gen_binary (AND, mode, op0, tem);
2306 if (rtx_equal_p (op0, XEXP (op1, 1)))
2308 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
2309 GET_MODE (XEXP (op1, 0)));
2310 return simplify_gen_binary (AND, mode, op0, tem);
2314 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
2315 by reversing the comparison code if valid. */
2316 if (STORE_FLAG_VALUE == 1
2317 && trueop0 == const1_rtx
2318 && COMPARISON_P (op1)
2319 && (reversed = reversed_comparison (op1, mode)))
2320 return reversed;
2322 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
2323 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2324 && GET_CODE (op1) == MULT
2325 && GET_CODE (XEXP (op1, 0)) == NEG)
2327 rtx in1, in2;
2329 in1 = XEXP (XEXP (op1, 0), 0);
2330 in2 = XEXP (op1, 1);
2331 return simplify_gen_binary (PLUS, mode,
2332 simplify_gen_binary (MULT, mode,
2333 in1, in2),
2334 op0);
2337 /* Canonicalize (minus (neg A) (mult B C)) to
2338 (minus (mult (neg B) C) A). */
2339 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2340 && GET_CODE (op1) == MULT
2341 && GET_CODE (op0) == NEG)
2343 rtx in1, in2;
2345 in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
2346 in2 = XEXP (op1, 1);
2347 return simplify_gen_binary (MINUS, mode,
2348 simplify_gen_binary (MULT, mode,
2349 in1, in2),
2350 XEXP (op0, 0));
2353 /* If one of the operands is a PLUS or a MINUS, see if we can
2354 simplify this by the associative law. This will, for example,
2355 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
2356 Don't use the associative law for floating point.
2357 The inaccuracy makes it nonassociative,
2358 and subtle programs can break if operations are associated. */
2360 if (INTEGRAL_MODE_P (mode)
2361 && (plus_minus_operand_p (op0)
2362 || plus_minus_operand_p (op1))
2363 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2364 return tem;
2365 break;
2367 case MULT:
2368 if (trueop1 == constm1_rtx)
2369 return simplify_gen_unary (NEG, mode, op0, mode);
2371 if (GET_CODE (op0) == NEG)
2373 rtx temp = simplify_unary_operation (NEG, mode, op1, mode);
2374 /* If op1 is a MULT as well and simplify_unary_operation
2375 just moved the NEG to the second operand, simplify_gen_binary
2376 below could, through simplify_associative_operation, move
2377 the NEG around again and recurse endlessly. */
2378 if (temp
2379 && GET_CODE (op1) == MULT
2380 && GET_CODE (temp) == MULT
2381 && XEXP (op1, 0) == XEXP (temp, 0)
2382 && GET_CODE (XEXP (temp, 1)) == NEG
2383 && XEXP (op1, 1) == XEXP (XEXP (temp, 1), 0))
2384 temp = NULL_RTX;
2385 if (temp)
2386 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), temp);
2388 if (GET_CODE (op1) == NEG)
2390 rtx temp = simplify_unary_operation (NEG, mode, op0, mode);
2391 /* If op0 is a MULT as well and simplify_unary_operation
2392 just moved the NEG to the second operand, simplify_gen_binary
2393 below could, through simplify_associative_operation, move
2394 the NEG around again and recurse endlessly. */
2395 if (temp
2396 && GET_CODE (op0) == MULT
2397 && GET_CODE (temp) == MULT
2398 && XEXP (op0, 0) == XEXP (temp, 0)
2399 && GET_CODE (XEXP (temp, 1)) == NEG
2400 && XEXP (op0, 1) == XEXP (XEXP (temp, 1), 0))
2401 temp = NULL_RTX;
2402 if (temp)
2403 return simplify_gen_binary (MULT, mode, temp, XEXP (op1, 0));
2406 /* Maybe simplify x * 0 to 0. The reduction is not valid if
2407 x is NaN, since x * 0 is then also NaN. Nor is it valid
2408 when the mode has signed zeros, since multiplying a negative
2409 number by 0 will give -0, not 0. */
2410 if (!HONOR_NANS (mode)
2411 && !HONOR_SIGNED_ZEROS (mode)
2412 && trueop1 == CONST0_RTX (mode)
2413 && ! side_effects_p (op0))
2414 return op1;
2416 /* In IEEE floating point, x*1 is not equivalent to x for
2417 signalling NaNs. */
2418 if (!HONOR_SNANS (mode)
2419 && trueop1 == CONST1_RTX (mode))
2420 return op0;
2422 /* Convert multiply by constant power of two into shift. */
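/* E.g. (mult:SI x (const_int 8)) becomes (ashift:SI x (const_int 3)). */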
2423 if (CONST_SCALAR_INT_P (trueop1))
2425 val = wi::exact_log2 (std::make_pair (trueop1, mode));
2426 if (val >= 0)
2427 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
2430 /* x*2 is x+x and x*(-1) is -x */
2431 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
2432 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
2433 && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
2434 && GET_MODE (op0) == mode)
2436 REAL_VALUE_TYPE d;
2437 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2439 if (REAL_VALUES_EQUAL (d, dconst2))
2440 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
2442 if (!HONOR_SNANS (mode)
2443 && REAL_VALUES_EQUAL (d, dconstm1))
2444 return simplify_gen_unary (NEG, mode, op0, mode);
2447 /* Optimize -x * -x as x * x. */
2448 if (FLOAT_MODE_P (mode)
2449 && GET_CODE (op0) == NEG
2450 && GET_CODE (op1) == NEG
2451 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2452 && !side_effects_p (XEXP (op0, 0)))
2453 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2455 /* Likewise, optimize abs(x) * abs(x) as x * x. */
2456 if (SCALAR_FLOAT_MODE_P (mode)
2457 && GET_CODE (op0) == ABS
2458 && GET_CODE (op1) == ABS
2459 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2460 && !side_effects_p (XEXP (op0, 0)))
2461 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2463 /* Reassociate multiplication, but for floating point MULTs
2464 only when the user specifies unsafe math optimizations. */
2465 if (! FLOAT_MODE_P (mode)
2466 || flag_unsafe_math_optimizations)
2468 tem = simplify_associative_operation (code, mode, op0, op1);
2469 if (tem)
2470 return tem;
2472 break;
2474 case IOR:
2475 if (trueop1 == CONST0_RTX (mode))
2476 return op0;
2477 if (INTEGRAL_MODE_P (mode)
2478 && trueop1 == CONSTM1_RTX (mode)
2479 && !side_effects_p (op0))
2480 return op1;
2481 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2482 return op0;
2483 /* A | (~A) -> -1 */
2484 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2485 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2486 && ! side_effects_p (op0)
2487 && SCALAR_INT_MODE_P (mode))
2488 return constm1_rtx;
2490 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
2491 if (CONST_INT_P (op1)
2492 && HWI_COMPUTABLE_MODE_P (mode)
2493 && (nonzero_bits (op0, mode) & ~UINTVAL (op1)) == 0
2494 && !side_effects_p (op0))
2495 return op1;
2497 /* Canonicalize (X & C1) | C2. */
2498 if (GET_CODE (op0) == AND
2499 && CONST_INT_P (trueop1)
2500 && CONST_INT_P (XEXP (op0, 1)))
2502 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
2503 HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
2504 HOST_WIDE_INT c2 = INTVAL (trueop1);
2506 /* If (C1&C2) == C1, then (X&C1)|C2 becomes X. */
2507 if ((c1 & c2) == c1
2508 && !side_effects_p (XEXP (op0, 0)))
2509 return trueop1;
2511 /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
2512 if (((c1|c2) & mask) == mask)
2513 return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);
2515 /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2. */
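/* E.g. (ior (and x (const_int 0xff)) (const_int 0x0f)) becomes
   (ior (and x (const_int 0xf0)) (const_int 0x0f)). */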
2516 if (((c1 & ~c2) & mask) != (c1 & mask))
2518 tem = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2519 gen_int_mode (c1 & ~c2, mode));
2520 return simplify_gen_binary (IOR, mode, tem, op1);
2524 /* Convert (A & B) | A to A. */
2525 if (GET_CODE (op0) == AND
2526 && (rtx_equal_p (XEXP (op0, 0), op1)
2527 || rtx_equal_p (XEXP (op0, 1), op1))
2528 && ! side_effects_p (XEXP (op0, 0))
2529 && ! side_effects_p (XEXP (op0, 1)))
2530 return op1;
2532 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2533 mode size to (rotate A CX). */
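/* E.g., assuming a 32-bit SImode, (ior (ashift:SI x (const_int 8))
   (lshiftrt:SI x (const_int 24))) becomes (rotate:SI x (const_int 8)). */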
2535 if (GET_CODE (op1) == ASHIFT
2536 || GET_CODE (op1) == SUBREG)
2538 opleft = op1;
2539 opright = op0;
2541 else
2543 opright = op1;
2544 opleft = op0;
2547 if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
2548 && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
2549 && CONST_INT_P (XEXP (opleft, 1))
2550 && CONST_INT_P (XEXP (opright, 1))
2551 && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
2552 == GET_MODE_PRECISION (mode)))
2553 return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
2555 /* Same, but for ashift that has been "simplified" to a wider mode
2556 by simplify_shift_const. */
2558 if (GET_CODE (opleft) == SUBREG
2559 && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
2560 && GET_CODE (opright) == LSHIFTRT
2561 && GET_CODE (XEXP (opright, 0)) == SUBREG
2562 && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
2563 && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
2564 && (GET_MODE_SIZE (GET_MODE (opleft))
2565 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
2566 && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
2567 SUBREG_REG (XEXP (opright, 0)))
2568 && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
2569 && CONST_INT_P (XEXP (opright, 1))
2570 && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
2571 == GET_MODE_PRECISION (mode)))
2572 return gen_rtx_ROTATE (mode, XEXP (opright, 0),
2573 XEXP (SUBREG_REG (opleft), 1));
2575 /* If we have (ior (and X C1) C2), simplify this by making
2576 C1 as small as possible if C1 actually changes. */
2577 if (CONST_INT_P (op1)
2578 && (HWI_COMPUTABLE_MODE_P (mode)
2579 || INTVAL (op1) > 0)
2580 && GET_CODE (op0) == AND
2581 && CONST_INT_P (XEXP (op0, 1))
2582 && CONST_INT_P (op1)
2583 && (UINTVAL (XEXP (op0, 1)) & UINTVAL (op1)) != 0)
2585 rtx tmp = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2586 gen_int_mode (UINTVAL (XEXP (op0, 1))
2587 & ~UINTVAL (op1),
2588 mode));
2589 return simplify_gen_binary (IOR, mode, tmp, op1);
2592 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2593 a (sign_extend (plus ...)). Then check if OP1 is a CONST_INT and
2594 the PLUS does not affect any of the bits in OP1: then we can do
2595 the IOR as a PLUS and we can associate. This is valid if OP1
2596 can be safely shifted left C bits. */
2597 if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
2598 && GET_CODE (XEXP (op0, 0)) == PLUS
2599 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
2600 && CONST_INT_P (XEXP (op0, 1))
2601 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
2603 int count = INTVAL (XEXP (op0, 1));
2604 HOST_WIDE_INT mask = INTVAL (trueop1) << count;
2606 if (mask >> count == INTVAL (trueop1)
2607 && trunc_int_for_mode (mask, mode) == mask
2608 && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
2609 return simplify_gen_binary (ASHIFTRT, mode,
2610 plus_constant (mode, XEXP (op0, 0),
2611 mask),
2612 XEXP (op0, 1));
2615 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
2616 if (tem)
2617 return tem;
2619 tem = simplify_associative_operation (code, mode, op0, op1);
2620 if (tem)
2621 return tem;
2622 break;
2624 case XOR:
2625 if (trueop1 == CONST0_RTX (mode))
2626 return op0;
2627 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2628 return simplify_gen_unary (NOT, mode, op0, mode);
2629 if (rtx_equal_p (trueop0, trueop1)
2630 && ! side_effects_p (op0)
2631 && GET_MODE_CLASS (mode) != MODE_CC)
2632 return CONST0_RTX (mode);
2634 /* Canonicalize XOR of the most significant bit to PLUS. */
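/* E.g., assuming a 32-bit SImode, (xor x (const_int 0x80000000)) becomes
   (plus x (const_int 0x80000000)). */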
2635 if (CONST_SCALAR_INT_P (op1)
2636 && mode_signbit_p (mode, op1))
2637 return simplify_gen_binary (PLUS, mode, op0, op1);
2638 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2639 if (CONST_SCALAR_INT_P (op1)
2640 && GET_CODE (op0) == PLUS
2641 && CONST_SCALAR_INT_P (XEXP (op0, 1))
2642 && mode_signbit_p (mode, XEXP (op0, 1)))
2643 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2644 simplify_gen_binary (XOR, mode, op1,
2645 XEXP (op0, 1)));
2647 /* If we are XORing two things that have no bits in common,
2648 convert them into an IOR. This helps to detect rotation encoded
2649 using those methods and possibly other simplifications. */
2651 if (HWI_COMPUTABLE_MODE_P (mode)
2652 && (nonzero_bits (op0, mode)
2653 & nonzero_bits (op1, mode)) == 0)
2654 return (simplify_gen_binary (IOR, mode, op0, op1));
2656 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2657 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2658 (NOT y). */
2660 int num_negated = 0;
2662 if (GET_CODE (op0) == NOT)
2663 num_negated++, op0 = XEXP (op0, 0);
2664 if (GET_CODE (op1) == NOT)
2665 num_negated++, op1 = XEXP (op1, 0);
2667 if (num_negated == 2)
2668 return simplify_gen_binary (XOR, mode, op0, op1);
2669 else if (num_negated == 1)
2670 return simplify_gen_unary (NOT, mode,
2671 simplify_gen_binary (XOR, mode, op0, op1),
2672 mode);
2675 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2676 correspond to a machine insn or result in further simplifications
2677 if B is a constant. */
2679 if (GET_CODE (op0) == AND
2680 && rtx_equal_p (XEXP (op0, 1), op1)
2681 && ! side_effects_p (op1))
2682 return simplify_gen_binary (AND, mode,
2683 simplify_gen_unary (NOT, mode,
2684 XEXP (op0, 0), mode),
2685 op1);
2687 else if (GET_CODE (op0) == AND
2688 && rtx_equal_p (XEXP (op0, 0), op1)
2689 && ! side_effects_p (op1))
2690 return simplify_gen_binary (AND, mode,
2691 simplify_gen_unary (NOT, mode,
2692 XEXP (op0, 1), mode),
2693 op1);
2695 /* Given (xor (ior (xor A B) C) D), where B, C and D are
2696 constants, simplify to (xor (ior A C) (B&~C)^D), canceling
2697 out bits inverted twice and not set by C. Similarly, given
2698 (xor (and (xor A B) C) D), simplify without inverting C in
2699 the xor operand: (xor (and A C) (B&C)^D).
2701 else if ((GET_CODE (op0) == IOR || GET_CODE (op0) == AND)
2702 && GET_CODE (XEXP (op0, 0)) == XOR
2703 && CONST_INT_P (op1)
2704 && CONST_INT_P (XEXP (op0, 1))
2705 && CONST_INT_P (XEXP (XEXP (op0, 0), 1)))
2707 enum rtx_code op = GET_CODE (op0);
2708 rtx a = XEXP (XEXP (op0, 0), 0);
2709 rtx b = XEXP (XEXP (op0, 0), 1);
2710 rtx c = XEXP (op0, 1);
2711 rtx d = op1;
2712 HOST_WIDE_INT bval = INTVAL (b);
2713 HOST_WIDE_INT cval = INTVAL (c);
2714 HOST_WIDE_INT dval = INTVAL (d);
2715 HOST_WIDE_INT xcval;
2717 if (op == IOR)
2718 xcval = ~cval;
2719 else
2720 xcval = cval;
2722 return simplify_gen_binary (XOR, mode,
2723 simplify_gen_binary (op, mode, a, c),
2724 gen_int_mode ((bval & xcval) ^ dval,
2725 mode));
2728 /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
2729 we can transform like this:
2730 (A&B)^C == ~(A&B)&C | ~C&(A&B)
2731 == (~A|~B)&C | ~C&(A&B) * DeMorgan's Law
2732 == ~A&C | ~B&C | A&(~C&B) * Distribute and re-order
2733 Attempt a few simplifications when B and C are both constants. */
2734 if (GET_CODE (op0) == AND
2735 && CONST_INT_P (op1)
2736 && CONST_INT_P (XEXP (op0, 1)))
2738 rtx a = XEXP (op0, 0);
2739 rtx b = XEXP (op0, 1);
2740 rtx c = op1;
2741 HOST_WIDE_INT bval = INTVAL (b);
2742 HOST_WIDE_INT cval = INTVAL (c);
2744 /* Instead of computing ~A&C, we compute its negated value,
2745 ~(A|~C). If it yields -1, ~A&C is zero, so we can
2746 optimize for sure. If it does not simplify, we still try
2747 to compute ~A&C below, but since that always allocates
2748 RTL, we don't try that before committing to returning a
2749 simplified expression. */
2750 rtx n_na_c = simplify_binary_operation (IOR, mode, a,
2751 GEN_INT (~cval));
2753 if ((~cval & bval) == 0)
2755 rtx na_c = NULL_RTX;
2756 if (n_na_c)
2757 na_c = simplify_gen_unary (NOT, mode, n_na_c, mode);
2758 else
2760 /* If ~A does not simplify, don't bother: we don't
2761 want to simplify 2 operations into 3, and if na_c
2762 were to simplify with na, n_na_c would have
2763 simplified as well. */
2764 rtx na = simplify_unary_operation (NOT, mode, a, mode);
2765 if (na)
2766 na_c = simplify_gen_binary (AND, mode, na, c);
2769 /* Try to simplify ~A&C | ~B&C. */
2770 if (na_c != NULL_RTX)
2771 return simplify_gen_binary (IOR, mode, na_c,
2772 gen_int_mode (~bval & cval, mode));
2774 else
2776 /* If ~A&C is zero, simplify A&(~C&B) | ~B&C. */
2777 if (n_na_c == CONSTM1_RTX (mode))
2779 rtx a_nc_b = simplify_gen_binary (AND, mode, a,
2780 gen_int_mode (~cval & bval,
2781 mode));
2782 return simplify_gen_binary (IOR, mode, a_nc_b,
2783 gen_int_mode (~bval & cval,
2784 mode));
2789 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2790 comparison if STORE_FLAG_VALUE is 1. */
2791 if (STORE_FLAG_VALUE == 1
2792 && trueop1 == const1_rtx
2793 && COMPARISON_P (op0)
2794 && (reversed = reversed_comparison (op0, mode)))
2795 return reversed;
2797 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2798 is (lt foo (const_int 0)), so we can perform the above
2799 simplification if STORE_FLAG_VALUE is 1. */
2801 if (STORE_FLAG_VALUE == 1
2802 && trueop1 == const1_rtx
2803 && GET_CODE (op0) == LSHIFTRT
2804 && CONST_INT_P (XEXP (op0, 1))
2805 && INTVAL (XEXP (op0, 1)) == GET_MODE_PRECISION (mode) - 1)
2806 return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);
2808 /* (xor (comparison foo bar) (const_int sign-bit))
2809 when STORE_FLAG_VALUE is the sign bit. */
2810 if (val_signbit_p (mode, STORE_FLAG_VALUE)
2811 && trueop1 == const_true_rtx
2812 && COMPARISON_P (op0)
2813 && (reversed = reversed_comparison (op0, mode)))
2814 return reversed;
2816 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
2817 if (tem)
2818 return tem;
2820 tem = simplify_associative_operation (code, mode, op0, op1);
2821 if (tem)
2822 return tem;
2823 break;
2825 case AND:
2826 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2827 return trueop1;
2828 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2829 return op0;
2830 if (HWI_COMPUTABLE_MODE_P (mode))
2832 HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
2833 HOST_WIDE_INT nzop1;
2834 if (CONST_INT_P (trueop1))
2836 HOST_WIDE_INT val1 = INTVAL (trueop1);
2837 /* If we are turning off bits already known off in OP0, we need
2838 not do an AND. */
2839 if ((nzop0 & ~val1) == 0)
2840 return op0;
2842 nzop1 = nonzero_bits (trueop1, mode);
2843 /* If we are clearing all the nonzero bits, the result is zero. */
2844 if ((nzop1 & nzop0) == 0
2845 && !side_effects_p (op0) && !side_effects_p (op1))
2846 return CONST0_RTX (mode);
2848 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
2849 && GET_MODE_CLASS (mode) != MODE_CC)
2850 return op0;
2851 /* A & (~A) -> 0 */
2852 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2853 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2854 && ! side_effects_p (op0)
2855 && GET_MODE_CLASS (mode) != MODE_CC)
2856 return CONST0_RTX (mode);
2858 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
2859 there are no nonzero bits of C outside of X's mode. */
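/* E.g. (and:SI (zero_extend:SI (reg:QI x)) (const_int 0x7f)) becomes
   (zero_extend:SI (and:QI (reg:QI x) (const_int 0x7f))). */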
2860 if ((GET_CODE (op0) == SIGN_EXTEND
2861 || GET_CODE (op0) == ZERO_EXTEND)
2862 && CONST_INT_P (trueop1)
2863 && HWI_COMPUTABLE_MODE_P (mode)
2864 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
2865 & UINTVAL (trueop1)) == 0)
2867 machine_mode imode = GET_MODE (XEXP (op0, 0));
2868 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
2869 gen_int_mode (INTVAL (trueop1),
2870 imode));
2871 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
2874 /* Transform (and (truncate X) C) into (truncate (and X C)). This way
2875 we might be able to further simplify the AND with X and potentially
2876 remove the truncation altogether. */
2877 if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
2879 rtx x = XEXP (op0, 0);
2880 machine_mode xmode = GET_MODE (x);
2881 tem = simplify_gen_binary (AND, xmode, x,
2882 gen_int_mode (INTVAL (trueop1), xmode));
2883 return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
2886 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
2887 if (GET_CODE (op0) == IOR
2888 && CONST_INT_P (trueop1)
2889 && CONST_INT_P (XEXP (op0, 1)))
2891 HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
2892 return simplify_gen_binary (IOR, mode,
2893 simplify_gen_binary (AND, mode,
2894 XEXP (op0, 0), op1),
2895 gen_int_mode (tmp, mode));
2898 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
2899 insn (and may simplify more). */
2900 if (GET_CODE (op0) == XOR
2901 && rtx_equal_p (XEXP (op0, 0), op1)
2902 && ! side_effects_p (op1))
2903 return simplify_gen_binary (AND, mode,
2904 simplify_gen_unary (NOT, mode,
2905 XEXP (op0, 1), mode),
2906 op1);
2908 if (GET_CODE (op0) == XOR
2909 && rtx_equal_p (XEXP (op0, 1), op1)
2910 && ! side_effects_p (op1))
2911 return simplify_gen_binary (AND, mode,
2912 simplify_gen_unary (NOT, mode,
2913 XEXP (op0, 0), mode),
2914 op1);
2916 /* Similarly for (~(A ^ B)) & A. */
2917 if (GET_CODE (op0) == NOT
2918 && GET_CODE (XEXP (op0, 0)) == XOR
2919 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
2920 && ! side_effects_p (op1))
2921 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
2923 if (GET_CODE (op0) == NOT
2924 && GET_CODE (XEXP (op0, 0)) == XOR
2925 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
2926 && ! side_effects_p (op1))
2927 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
2929 /* Convert (A | B) & A to A. */
2930 if (GET_CODE (op0) == IOR
2931 && (rtx_equal_p (XEXP (op0, 0), op1)
2932 || rtx_equal_p (XEXP (op0, 1), op1))
2933 && ! side_effects_p (XEXP (op0, 0))
2934 && ! side_effects_p (XEXP (op0, 1)))
2935 return op1;
2937 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
2938 ((A & N) + B) & M -> (A + B) & M
2939 Similarly if (N & M) == 0,
2940 ((A | N) + B) & M -> (A + B) & M
2941 and for - instead of + and/or ^ instead of |.
2942 Also, if (N & M) == 0, then
2943 (A +- N) & M -> A & M. */
2944 if (CONST_INT_P (trueop1)
2945 && HWI_COMPUTABLE_MODE_P (mode)
2946 && ~UINTVAL (trueop1)
2947 && (UINTVAL (trueop1) & (UINTVAL (trueop1) + 1)) == 0
2948 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
2950 rtx pmop[2];
2951 int which;
2953 pmop[0] = XEXP (op0, 0);
2954 pmop[1] = XEXP (op0, 1);
2956 if (CONST_INT_P (pmop[1])
2957 && (UINTVAL (pmop[1]) & UINTVAL (trueop1)) == 0)
2958 return simplify_gen_binary (AND, mode, pmop[0], op1);
2960 for (which = 0; which < 2; which++)
2962 tem = pmop[which];
2963 switch (GET_CODE (tem))
2965 case AND:
2966 if (CONST_INT_P (XEXP (tem, 1))
2967 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1))
2968 == UINTVAL (trueop1))
2969 pmop[which] = XEXP (tem, 0);
2970 break;
2971 case IOR:
2972 case XOR:
2973 if (CONST_INT_P (XEXP (tem, 1))
2974 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1)) == 0)
2975 pmop[which] = XEXP (tem, 0);
2976 break;
2977 default:
2978 break;
2982 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
2984 tem = simplify_gen_binary (GET_CODE (op0), mode,
2985 pmop[0], pmop[1]);
2986 return simplify_gen_binary (code, mode, tem, op1);
2990 /* (and X (ior (not X) Y)) -> (and X Y) */
2991 if (GET_CODE (op1) == IOR
2992 && GET_CODE (XEXP (op1, 0)) == NOT
2993 && rtx_equal_p (op0, XEXP (XEXP (op1, 0), 0)))
2994 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));
2996 /* (and (ior (not X) Y) X) -> (and X Y) */
2997 if (GET_CODE (op0) == IOR
2998 && GET_CODE (XEXP (op0, 0)) == NOT
2999 && rtx_equal_p (op1, XEXP (XEXP (op0, 0), 0)))
3000 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));
3002 /* (and X (ior Y (not X))) -> (and X Y) */
3003 if (GET_CODE (op1) == IOR
3004 && GET_CODE (XEXP (op1, 1)) == NOT
3005 && rtx_equal_p (op0, XEXP (XEXP (op1, 1), 0)))
3006 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 0));
3008 /* (and (ior Y (not X)) X) -> (and X Y) */
3009 if (GET_CODE (op0) == IOR
3010 && GET_CODE (XEXP (op0, 1)) == NOT
3011 && rtx_equal_p (op1, XEXP (XEXP (op0, 1), 0)))
3012 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 0));
3014 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
3015 if (tem)
3016 return tem;
3018 tem = simplify_associative_operation (code, mode, op0, op1);
3019 if (tem)
3020 return tem;
3021 break;
3023 case UDIV:
3024 /* 0/x is 0 (or x&0 if x has side-effects). */
3025 if (trueop0 == CONST0_RTX (mode))
3027 if (side_effects_p (op1))
3028 return simplify_gen_binary (AND, mode, op1, trueop0);
3029 return trueop0;
3031 /* x/1 is x. */
3032 if (trueop1 == CONST1_RTX (mode))
3034 tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3035 if (tem)
3036 return tem;
3038 /* Convert divide by power of two into shift. */
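/* E.g. (udiv:SI x (const_int 8)) becomes (lshiftrt:SI x (const_int 3)). */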
3039 if (CONST_INT_P (trueop1)
3040 && (val = exact_log2 (UINTVAL (trueop1))) > 0)
3041 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
3042 break;
3044 case DIV:
3045 /* Handle floating point and integers separately. */
3046 if (SCALAR_FLOAT_MODE_P (mode))
3048 /* Maybe change 0.0 / x to 0.0. This transformation isn't
3049 safe for modes with NaNs, since 0.0 / 0.0 will then be
3050 NaN rather than 0.0. Nor is it safe for modes with signed
3051 zeros, since dividing 0 by a negative number gives -0.0 */
3052 if (trueop0 == CONST0_RTX (mode)
3053 && !HONOR_NANS (mode)
3054 && !HONOR_SIGNED_ZEROS (mode)
3055 && ! side_effects_p (op1))
3056 return op0;
3057 /* x/1.0 is x. */
3058 if (trueop1 == CONST1_RTX (mode)
3059 && !HONOR_SNANS (mode))
3060 return op0;
3062 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
3063 && trueop1 != CONST0_RTX (mode))
3065 REAL_VALUE_TYPE d;
3066 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
3068 /* x/-1.0 is -x. */
3069 if (REAL_VALUES_EQUAL (d, dconstm1)
3070 && !HONOR_SNANS (mode))
3071 return simplify_gen_unary (NEG, mode, op0, mode);
3073 /* Change FP division by a constant into multiplication.
3074 Only do this with -freciprocal-math. */
3075 if (flag_reciprocal_math
3076 && !REAL_VALUES_EQUAL (d, dconst0))
3078 REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
3079 tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
3080 return simplify_gen_binary (MULT, mode, op0, tem);
3084 else if (SCALAR_INT_MODE_P (mode))
3086 /* 0/x is 0 (or x&0 if x has side-effects). */
3087 if (trueop0 == CONST0_RTX (mode)
3088 && !cfun->can_throw_non_call_exceptions)
3090 if (side_effects_p (op1))
3091 return simplify_gen_binary (AND, mode, op1, trueop0);
3092 return trueop0;
3094 /* x/1 is x. */
3095 if (trueop1 == CONST1_RTX (mode))
3097 tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3098 if (tem)
3099 return tem;
3101 /* x/-1 is -x. */
3102 if (trueop1 == constm1_rtx)
3104 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3105 if (x)
3106 return simplify_gen_unary (NEG, mode, x, mode);
3109 break;
3111 case UMOD:
3112 /* 0%x is 0 (or x&0 if x has side-effects). */
3113 if (trueop0 == CONST0_RTX (mode))
3115 if (side_effects_p (op1))
3116 return simplify_gen_binary (AND, mode, op1, trueop0);
3117 return trueop0;
3119 /* x%1 is 0 (or x&0 if x has side-effects). */
3120 if (trueop1 == CONST1_RTX (mode))
3122 if (side_effects_p (op0))
3123 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3124 return CONST0_RTX (mode);
3126 /* Implement modulus by power of two as AND. */
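/* E.g. (umod:SI x (const_int 8)) becomes (and:SI x (const_int 7)). */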
3127 if (CONST_INT_P (trueop1)
3128 && exact_log2 (UINTVAL (trueop1)) > 0)
3129 return simplify_gen_binary (AND, mode, op0,
3130 gen_int_mode (INTVAL (op1) - 1, mode));
3131 break;
3133 case MOD:
3134 /* 0%x is 0 (or x&0 if x has side-effects). */
3135 if (trueop0 == CONST0_RTX (mode))
3137 if (side_effects_p (op1))
3138 return simplify_gen_binary (AND, mode, op1, trueop0);
3139 return trueop0;
3142 /* x%1 and x%-1 are 0 (or x&0 if x has side-effects). */
3142 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
3144 if (side_effects_p (op0))
3145 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3146 return CONST0_RTX (mode);
3148 break;
3150 case ROTATERT:
3151 case ROTATE:
3152 /* Canonicalize rotates by constant amount. If op1 is bitsize / 2,
3153 prefer left rotation; if op1 is from bitsize / 2 + 1 to
3154 bitsize - 1, use other direction of rotate with 1 .. bitsize / 2 - 1
3155 amount instead. */
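/* E.g., assuming a 32-bit SImode and that the target provides both rotate
   patterns, (rotate:SI x (const_int 24)) is rewritten as
   (rotatert:SI x (const_int 8)). */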
3156 #if defined(HAVE_rotate) && defined(HAVE_rotatert)
3157 if (CONST_INT_P (trueop1)
3158 && IN_RANGE (INTVAL (trueop1),
3159 GET_MODE_PRECISION (mode) / 2 + (code == ROTATE),
3160 GET_MODE_PRECISION (mode) - 1))
3161 return simplify_gen_binary (code == ROTATE ? ROTATERT : ROTATE,
3162 mode, op0, GEN_INT (GET_MODE_PRECISION (mode)
3163 - INTVAL (trueop1)));
3164 #endif
3165 /* FALLTHRU */
3166 case ASHIFTRT:
3167 if (trueop1 == CONST0_RTX (mode))
3168 return op0;
3169 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3170 return op0;
3171 /* Rotating ~0 always results in ~0. */
3172 if (CONST_INT_P (trueop0) && width <= HOST_BITS_PER_WIDE_INT
3173 && UINTVAL (trueop0) == GET_MODE_MASK (mode)
3174 && ! side_effects_p (op1))
3175 return op0;
3176 /* Given:
3177 scalar modes M1, M2
3178 scalar constants c1, c2
3179 size (M2) > size (M1)
3180 c1 == size (M2) - size (M1)
3181 optimize:
3182 (ashiftrt:M1 (subreg:M1 (lshiftrt:M2 (reg:M2) (const_int <c1>))
3183 <low_part>)
3184 (const_int <c2>))
3186 (subreg:M1 (ashiftrt:M2 (reg:M2) (const_int <c1 + c2>))
3187 <low_part>). */
3188 if (code == ASHIFTRT
3189 && !VECTOR_MODE_P (mode)
3190 && SUBREG_P (op0)
3191 && CONST_INT_P (op1)
3192 && GET_CODE (SUBREG_REG (op0)) == LSHIFTRT
3193 && !VECTOR_MODE_P (GET_MODE (SUBREG_REG (op0)))
3194 && CONST_INT_P (XEXP (SUBREG_REG (op0), 1))
3195 && (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0)))
3196 > GET_MODE_BITSIZE (mode))
3197 && (INTVAL (XEXP (SUBREG_REG (op0), 1))
3198 == (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0)))
3199 - GET_MODE_BITSIZE (mode)))
3200 && subreg_lowpart_p (op0))
3202 rtx tmp = GEN_INT (INTVAL (XEXP (SUBREG_REG (op0), 1))
3203 + INTVAL (op1));
3204 machine_mode inner_mode = GET_MODE (SUBREG_REG (op0));
3205 tmp = simplify_gen_binary (ASHIFTRT,
3206 GET_MODE (SUBREG_REG (op0)),
3207 XEXP (SUBREG_REG (op0), 0),
3208 tmp);
3209 return simplify_gen_subreg (mode, tmp, inner_mode,
3210 subreg_lowpart_offset (mode,
3211 inner_mode));
3213 canonicalize_shift:
3214 if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
3216 val = INTVAL (op1) & (GET_MODE_PRECISION (mode) - 1);
3217 if (val != INTVAL (op1))
3218 return simplify_gen_binary (code, mode, op0, GEN_INT (val));
3220 break;
3222 case ASHIFT:
3223 case SS_ASHIFT:
3224 case US_ASHIFT:
3225 if (trueop1 == CONST0_RTX (mode))
3226 return op0;
3227 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3228 return op0;
3229 goto canonicalize_shift;
3231 case LSHIFTRT:
3232 if (trueop1 == CONST0_RTX (mode))
3233 return op0;
3234 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3235 return op0;
3236 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
3237 if (GET_CODE (op0) == CLZ
3238 && CONST_INT_P (trueop1)
3239 && STORE_FLAG_VALUE == 1
3240 && INTVAL (trueop1) < (HOST_WIDE_INT)width)
3242 machine_mode imode = GET_MODE (XEXP (op0, 0));
3243 unsigned HOST_WIDE_INT zero_val = 0;
3245 if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
3246 && zero_val == GET_MODE_PRECISION (imode)
3247 && INTVAL (trueop1) == exact_log2 (zero_val))
3248 return simplify_gen_relational (EQ, mode, imode,
3249 XEXP (op0, 0), const0_rtx);
3251 goto canonicalize_shift;
3253 case SMIN:
3254 if (width <= HOST_BITS_PER_WIDE_INT
3255 && mode_signbit_p (mode, trueop1)
3256 && ! side_effects_p (op0))
3257 return op1;
3258 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3259 return op0;
3260 tem = simplify_associative_operation (code, mode, op0, op1);
3261 if (tem)
3262 return tem;
3263 break;
3265 case SMAX:
3266 if (width <= HOST_BITS_PER_WIDE_INT
3267 && CONST_INT_P (trueop1)
3268 && (UINTVAL (trueop1) == GET_MODE_MASK (mode) >> 1)
3269 && ! side_effects_p (op0))
3270 return op1;
3271 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3272 return op0;
3273 tem = simplify_associative_operation (code, mode, op0, op1);
3274 if (tem)
3275 return tem;
3276 break;
3278 case UMIN:
3279 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
3280 return op1;
3281 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3282 return op0;
3283 tem = simplify_associative_operation (code, mode, op0, op1);
3284 if (tem)
3285 return tem;
3286 break;
3288 case UMAX:
3289 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
3290 return op1;
3291 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3292 return op0;
3293 tem = simplify_associative_operation (code, mode, op0, op1);
3294 if (tem)
3295 return tem;
3296 break;
3298 case SS_PLUS:
3299 case US_PLUS:
3300 case SS_MINUS:
3301 case US_MINUS:
3302 case SS_MULT:
3303 case US_MULT:
3304 case SS_DIV:
3305 case US_DIV:
3306 /* ??? There are simplifications that can be done. */
3307 return 0;
3309 case VEC_SELECT:
3310 if (!VECTOR_MODE_P (mode))
3312 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3313 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
3314 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3315 gcc_assert (XVECLEN (trueop1, 0) == 1);
3316 gcc_assert (CONST_INT_P (XVECEXP (trueop1, 0, 0)));
3318 if (GET_CODE (trueop0) == CONST_VECTOR)
3319 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
3320 (trueop1, 0, 0)));
3322 /* Extract a scalar element from a nested VEC_SELECT expression
3323 (with optional nested VEC_CONCAT expression). Some targets
3324 (i386) extract a scalar element from a vector using a chain of
3325 nested VEC_SELECT expressions. When the input operand is a memory
3326 operand, this operation can be simplified to a simple scalar
3327 load from an offsetted memory address. */
3328 if (GET_CODE (trueop0) == VEC_SELECT)
3330 rtx op0 = XEXP (trueop0, 0);
3331 rtx op1 = XEXP (trueop0, 1);
3333 machine_mode opmode = GET_MODE (op0);
3334 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
3335 int n_elts = GET_MODE_SIZE (opmode) / elt_size;
3337 int i = INTVAL (XVECEXP (trueop1, 0, 0));
3338 int elem;
3340 rtvec vec;
3341 rtx tmp_op, tmp;
3343 gcc_assert (GET_CODE (op1) == PARALLEL);
3344 gcc_assert (i < n_elts);
3346 /* Select the element pointed to by the nested selector. */
3347 elem = INTVAL (XVECEXP (op1, 0, i));
3349 /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */
3350 if (GET_CODE (op0) == VEC_CONCAT)
3352 rtx op00 = XEXP (op0, 0);
3353 rtx op01 = XEXP (op0, 1);
3355 machine_mode mode00, mode01;
3356 int n_elts00, n_elts01;
3358 mode00 = GET_MODE (op00);
3359 mode01 = GET_MODE (op01);
3361 /* Find out number of elements of each operand. */
3362 if (VECTOR_MODE_P (mode00))
3364 elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode00));
3365 n_elts00 = GET_MODE_SIZE (mode00) / elt_size;
3367 else
3368 n_elts00 = 1;
3370 if (VECTOR_MODE_P (mode01))
3372 elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode01));
3373 n_elts01 = GET_MODE_SIZE (mode01) / elt_size;
3375 else
3376 n_elts01 = 1;
3378 gcc_assert (n_elts == n_elts00 + n_elts01);
3380 /* Select correct operand of VEC_CONCAT
3381 and adjust selector. */
3382 if (elem < n_elts01)
3383 tmp_op = op00;
3384 else
3386 tmp_op = op01;
3387 elem -= n_elts00;
3390 else
3391 tmp_op = op0;
3393 vec = rtvec_alloc (1);
3394 RTVEC_ELT (vec, 0) = GEN_INT (elem);
3396 tmp = gen_rtx_fmt_ee (code, mode,
3397 tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
3398 return tmp;
3400 if (GET_CODE (trueop0) == VEC_DUPLICATE
3401 && GET_MODE (XEXP (trueop0, 0)) == mode)
3402 return XEXP (trueop0, 0);
3404 else
3406 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3407 gcc_assert (GET_MODE_INNER (mode)
3408 == GET_MODE_INNER (GET_MODE (trueop0)));
3409 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3411 if (GET_CODE (trueop0) == CONST_VECTOR)
3413 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3414 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3415 rtvec v = rtvec_alloc (n_elts);
3416 unsigned int i;
3418 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
3419 for (i = 0; i < n_elts; i++)
3421 rtx x = XVECEXP (trueop1, 0, i);
3423 gcc_assert (CONST_INT_P (x));
3424 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
3425 INTVAL (x));
3428 return gen_rtx_CONST_VECTOR (mode, v);
3431 /* Recognize the identity. */
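/* E.g. (vec_select:V4SI x (parallel [0 1 2 3])) is just x when x has
   mode V4SI. */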
3432 if (GET_MODE (trueop0) == mode)
3434 bool maybe_ident = true;
3435 for (int i = 0; i < XVECLEN (trueop1, 0); i++)
3437 rtx j = XVECEXP (trueop1, 0, i);
3438 if (!CONST_INT_P (j) || INTVAL (j) != i)
3440 maybe_ident = false;
3441 break;
3444 if (maybe_ident)
3445 return trueop0;
3448 /* If we build {a,b} then permute it, build the result directly. */
3449 if (XVECLEN (trueop1, 0) == 2
3450 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3451 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3452 && GET_CODE (trueop0) == VEC_CONCAT
3453 && GET_CODE (XEXP (trueop0, 0)) == VEC_CONCAT
3454 && GET_MODE (XEXP (trueop0, 0)) == mode
3455 && GET_CODE (XEXP (trueop0, 1)) == VEC_CONCAT
3456 && GET_MODE (XEXP (trueop0, 1)) == mode)
3458 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3459 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3460 rtx subop0, subop1;
3462 gcc_assert (i0 < 4 && i1 < 4);
3463 subop0 = XEXP (XEXP (trueop0, i0 / 2), i0 % 2);
3464 subop1 = XEXP (XEXP (trueop0, i1 / 2), i1 % 2);
3466 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3469 if (XVECLEN (trueop1, 0) == 2
3470 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3471 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3472 && GET_CODE (trueop0) == VEC_CONCAT
3473 && GET_MODE (trueop0) == mode)
3475 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3476 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3477 rtx subop0, subop1;
3479 gcc_assert (i0 < 2 && i1 < 2);
3480 subop0 = XEXP (trueop0, i0);
3481 subop1 = XEXP (trueop0, i1);
3483 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3486 /* If we select one half of a vec_concat, return that. */
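/* E.g. (vec_select:V2SI (vec_concat:V4SI a b) (parallel [0 1])) is a,
   and selecting (parallel [2 3]) is b, assuming a and b have mode V2SI
   and no side effects are involved. */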
3487 if (GET_CODE (trueop0) == VEC_CONCAT
3488 && CONST_INT_P (XVECEXP (trueop1, 0, 0)))
3490 rtx subop0 = XEXP (trueop0, 0);
3491 rtx subop1 = XEXP (trueop0, 1);
3492 machine_mode mode0 = GET_MODE (subop0);
3493 machine_mode mode1 = GET_MODE (subop1);
3494 int li = GET_MODE_SIZE (GET_MODE_INNER (mode0));
3495 int l0 = GET_MODE_SIZE (mode0) / li;
3496 int l1 = GET_MODE_SIZE (mode1) / li;
3497 int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3498 if (i0 == 0 && !side_effects_p (op1) && mode == mode0)
3500 bool success = true;
3501 for (int i = 1; i < l0; ++i)
3503 rtx j = XVECEXP (trueop1, 0, i);
3504 if (!CONST_INT_P (j) || INTVAL (j) != i)
3506 success = false;
3507 break;
3510 if (success)
3511 return subop0;
3513 if (i0 == l0 && !side_effects_p (op0) && mode == mode1)
3515 bool success = true;
3516 for (int i = 1; i < l1; ++i)
3518 rtx j = XVECEXP (trueop1, 0, i);
3519 if (!CONST_INT_P (j) || INTVAL (j) != i0 + i)
3521 success = false;
3522 break;
3525 if (success)
3526 return subop1;
3531 if (XVECLEN (trueop1, 0) == 1
3532 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3533 && GET_CODE (trueop0) == VEC_CONCAT)
3535 rtx vec = trueop0;
3536 int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
3538 /* Try to find the element in the VEC_CONCAT. */
3539 while (GET_MODE (vec) != mode
3540 && GET_CODE (vec) == VEC_CONCAT)
3542 HOST_WIDE_INT vec_size;
3544 if (CONST_INT_P (XEXP (vec, 0)))
3546 /* vec_concat of two const_ints doesn't make sense with
3547 respect to modes. */
3548 if (CONST_INT_P (XEXP (vec, 1)))
3549 return 0;
3551 vec_size = GET_MODE_SIZE (GET_MODE (trueop0))
3552 - GET_MODE_SIZE (GET_MODE (XEXP (vec, 1)));
3554 else
3555 vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
3557 if (offset < vec_size)
3558 vec = XEXP (vec, 0);
3559 else
3561 offset -= vec_size;
3562 vec = XEXP (vec, 1);
3564 vec = avoid_constant_pool_reference (vec);
3567 if (GET_MODE (vec) == mode)
3568 return vec;
3571 /* If we select elements in a vec_merge that all come from the same
3572 operand, select from that operand directly. */
3573 if (GET_CODE (op0) == VEC_MERGE)
3575 rtx trueop02 = avoid_constant_pool_reference (XEXP (op0, 2));
3576 if (CONST_INT_P (trueop02))
3578 unsigned HOST_WIDE_INT sel = UINTVAL (trueop02);
3579 bool all_operand0 = true;
3580 bool all_operand1 = true;
3581 for (int i = 0; i < XVECLEN (trueop1, 0); i++)
3583 rtx j = XVECEXP (trueop1, 0, i);
3584 if (sel & (1 << UINTVAL (j)))
3585 all_operand1 = false;
3586 else
3587 all_operand0 = false;
3589 if (all_operand0 && !side_effects_p (XEXP (op0, 1)))
3590 return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 0), op1);
3591 if (all_operand1 && !side_effects_p (XEXP (op0, 0)))
3592 return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 1), op1);
3596 /* If we have two nested selects that are inverses of each
3597 other, replace them with the source operand. */
3598 if (GET_CODE (trueop0) == VEC_SELECT
3599 && GET_MODE (XEXP (trueop0, 0)) == mode)
3601 rtx op0_subop1 = XEXP (trueop0, 1);
3602 gcc_assert (GET_CODE (op0_subop1) == PARALLEL);
3603 gcc_assert (XVECLEN (trueop1, 0) == GET_MODE_NUNITS (mode));
3605 /* Apply the outer ordering vector to the inner one. (The inner
3606 ordering vector is expressly permitted to be of a different
3607 length than the outer one.) If the result is { 0, 1, ..., n-1 }
3608 then the two VEC_SELECTs cancel. */
3609 for (int i = 0; i < XVECLEN (trueop1, 0); ++i)
3611 rtx x = XVECEXP (trueop1, 0, i);
3612 if (!CONST_INT_P (x))
3613 return 0;
3614 rtx y = XVECEXP (op0_subop1, 0, INTVAL (x));
3615 if (!CONST_INT_P (y) || i != INTVAL (y))
3616 return 0;
3618 return XEXP (trueop0, 0);
3621 return 0;
3622 case VEC_CONCAT:
3624 machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
3625 ? GET_MODE (trueop0)
3626 : GET_MODE_INNER (mode));
3627 machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
3628 ? GET_MODE (trueop1)
3629 : GET_MODE_INNER (mode));
3631 gcc_assert (VECTOR_MODE_P (mode));
3632 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
3633 == GET_MODE_SIZE (mode));
3635 if (VECTOR_MODE_P (op0_mode))
3636 gcc_assert (GET_MODE_INNER (mode)
3637 == GET_MODE_INNER (op0_mode));
3638 else
3639 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
3641 if (VECTOR_MODE_P (op1_mode))
3642 gcc_assert (GET_MODE_INNER (mode)
3643 == GET_MODE_INNER (op1_mode));
3644 else
3645 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
3647 if ((GET_CODE (trueop0) == CONST_VECTOR
3648 || CONST_SCALAR_INT_P (trueop0)
3649 || CONST_DOUBLE_AS_FLOAT_P (trueop0))
3650 && (GET_CODE (trueop1) == CONST_VECTOR
3651 || CONST_SCALAR_INT_P (trueop1)
3652 || CONST_DOUBLE_AS_FLOAT_P (trueop1)))
3654 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3655 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3656 rtvec v = rtvec_alloc (n_elts);
3657 unsigned int i;
3658 unsigned in_n_elts = 1;
3660 if (VECTOR_MODE_P (op0_mode))
3661 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
3662 for (i = 0; i < n_elts; i++)
3664 if (i < in_n_elts)
3666 if (!VECTOR_MODE_P (op0_mode))
3667 RTVEC_ELT (v, i) = trueop0;
3668 else
3669 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
3671 else
3673 if (!VECTOR_MODE_P (op1_mode))
3674 RTVEC_ELT (v, i) = trueop1;
3675 else
3676 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
3677 i - in_n_elts);
3681 return gen_rtx_CONST_VECTOR (mode, v);
3684 /* Try to merge two VEC_SELECTs from the same vector into a single one.
3685 Restrict the transformation to avoid generating a VEC_SELECT with a
3686 mode unrelated to its operand. */
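/* For example, the concatenation of (vec_select x { 0, 1 }) and
(vec_select x { 2, 3 }) becomes the single (vec_select x { 0, 1, 2, 3 }). */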
3687 if (GET_CODE (trueop0) == VEC_SELECT
3688 && GET_CODE (trueop1) == VEC_SELECT
3689 && rtx_equal_p (XEXP (trueop0, 0), XEXP (trueop1, 0))
3690 && GET_MODE (XEXP (trueop0, 0)) == mode)
3692 rtx par0 = XEXP (trueop0, 1);
3693 rtx par1 = XEXP (trueop1, 1);
3694 int len0 = XVECLEN (par0, 0);
3695 int len1 = XVECLEN (par1, 0);
3696 rtvec vec = rtvec_alloc (len0 + len1);
3697 for (int i = 0; i < len0; i++)
3698 RTVEC_ELT (vec, i) = XVECEXP (par0, 0, i);
3699 for (int i = 0; i < len1; i++)
3700 RTVEC_ELT (vec, len0 + i) = XVECEXP (par1, 0, i);
3701 return simplify_gen_binary (VEC_SELECT, mode, XEXP (trueop0, 0),
3702 gen_rtx_PARALLEL (VOIDmode, vec));
3705 return 0;
3707 default:
3708 gcc_unreachable ();
3711 return 0;
3715 simplify_const_binary_operation (enum rtx_code code, machine_mode mode,
3716 rtx op0, rtx op1)
3718 unsigned int width = GET_MODE_PRECISION (mode);
3720 if (VECTOR_MODE_P (mode)
3721 && code != VEC_CONCAT
3722 && GET_CODE (op0) == CONST_VECTOR
3723 && GET_CODE (op1) == CONST_VECTOR)
3725 unsigned n_elts = GET_MODE_NUNITS (mode);
3726 machine_mode op0mode = GET_MODE (op0);
3727 unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
3728 machine_mode op1mode = GET_MODE (op1);
3729 unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
3730 rtvec v = rtvec_alloc (n_elts);
3731 unsigned int i;
3733 gcc_assert (op0_n_elts == n_elts);
3734 gcc_assert (op1_n_elts == n_elts);
3735 for (i = 0; i < n_elts; i++)
3737 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
3738 CONST_VECTOR_ELT (op0, i),
3739 CONST_VECTOR_ELT (op1, i));
3740 if (!x)
3741 return 0;
3742 RTVEC_ELT (v, i) = x;
3745 return gen_rtx_CONST_VECTOR (mode, v);
3748 if (VECTOR_MODE_P (mode)
3749 && code == VEC_CONCAT
3750 && (CONST_SCALAR_INT_P (op0)
3751 || GET_CODE (op0) == CONST_FIXED
3752 || CONST_DOUBLE_AS_FLOAT_P (op0))
3753 && (CONST_SCALAR_INT_P (op1)
3754 || CONST_DOUBLE_AS_FLOAT_P (op1)
3755 || GET_CODE (op1) == CONST_FIXED))
3757 unsigned n_elts = GET_MODE_NUNITS (mode);
3758 rtvec v = rtvec_alloc (n_elts);
3760 gcc_assert (n_elts >= 2);
3761 if (n_elts == 2)
3763 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
3764 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
3766 RTVEC_ELT (v, 0) = op0;
3767 RTVEC_ELT (v, 1) = op1;
3769 else
3771 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
3772 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
3773 unsigned i;
3775 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
3776 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
3777 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
3779 for (i = 0; i < op0_n_elts; ++i)
3780 RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
3781 for (i = 0; i < op1_n_elts; ++i)
3782 RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
3785 return gen_rtx_CONST_VECTOR (mode, v);
3788 if (SCALAR_FLOAT_MODE_P (mode)
3789 && CONST_DOUBLE_AS_FLOAT_P (op0)
3790 && CONST_DOUBLE_AS_FLOAT_P (op1)
3791 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
3793 if (code == AND
3794 || code == IOR
3795 || code == XOR)
3797 long tmp0[4];
3798 long tmp1[4];
3799 REAL_VALUE_TYPE r;
3800 int i;
3802 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
3803 GET_MODE (op0));
3804 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
3805 GET_MODE (op1));
3806 for (i = 0; i < 4; i++)
3808 switch (code)
3810 case AND:
3811 tmp0[i] &= tmp1[i];
3812 break;
3813 case IOR:
3814 tmp0[i] |= tmp1[i];
3815 break;
3816 case XOR:
3817 tmp0[i] ^= tmp1[i];
3818 break;
3819 default:
3820 gcc_unreachable ();
3823 real_from_target (&r, tmp0, mode);
3824 return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
3826 else
3828 REAL_VALUE_TYPE f0, f1, value, result;
3829 bool inexact;
3831 REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
3832 REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
3833 real_convert (&f0, mode, &f0);
3834 real_convert (&f1, mode, &f1);
3836 if (HONOR_SNANS (mode)
3837 && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
3838 return 0;
3840 if (code == DIV
3841 && REAL_VALUES_EQUAL (f1, dconst0)
3842 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
3843 return 0;
3845 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3846 && flag_trapping_math
3847 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
3849 int s0 = REAL_VALUE_NEGATIVE (f0);
3850 int s1 = REAL_VALUE_NEGATIVE (f1);
3852 switch (code)
3854 case PLUS:
3855 /* Inf + -Inf = NaN plus exception. */
3856 if (s0 != s1)
3857 return 0;
3858 break;
3859 case MINUS:
3860 /* Inf - Inf = NaN plus exception. */
3861 if (s0 == s1)
3862 return 0;
3863 break;
3864 case DIV:
3865 /* Inf / Inf = NaN plus exception. */
3866 return 0;
3867 default:
3868 break;
3872 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3873 && flag_trapping_math
3874 && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
3875 || (REAL_VALUE_ISINF (f1)
3876 && REAL_VALUES_EQUAL (f0, dconst0))))
3877 /* Inf * 0 = NaN plus exception. */
3878 return 0;
3880 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
3881 &f0, &f1);
3882 real_convert (&result, mode, &value);
3884 /* Don't constant fold this floating point operation if
3885 the result has overflowed and flag_trapping_math is set. */
3887 if (flag_trapping_math
3888 && MODE_HAS_INFINITIES (mode)
3889 && REAL_VALUE_ISINF (result)
3890 && !REAL_VALUE_ISINF (f0)
3891 && !REAL_VALUE_ISINF (f1))
3892 /* Overflow plus exception. */
3893 return 0;
3895 /* Don't constant fold this floating point operation if the
3896 result may depend upon the run-time rounding mode and
3897 flag_rounding_math is set, or if GCC's software emulation
3898 is unable to accurately represent the result. */
3900 if ((flag_rounding_math
3901 || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
3902 && (inexact || !real_identical (&result, &value)))
3903 return NULL_RTX;
3905 return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
3909 /* We can fold some multi-word operations. */
3910 if ((GET_MODE_CLASS (mode) == MODE_INT
3911 || GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
3912 && CONST_SCALAR_INT_P (op0)
3913 && CONST_SCALAR_INT_P (op1))
3915 wide_int result;
3916 bool overflow;
3917 rtx_mode_t pop0 = std::make_pair (op0, mode);
3918 rtx_mode_t pop1 = std::make_pair (op1, mode);
3920 #if TARGET_SUPPORTS_WIDE_INT == 0
3921 /* This assert keeps the simplification from producing a result
3922 that cannot be represented in a CONST_DOUBLE, but a lot of
3923 upstream callers expect that this function never fails to
3924 simplify something, so if you added this to the test
3925 above, the code would die later anyway. If this assert
3926 fires, you just need to make the port support wide int. */
3927 gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT);
3928 #endif
3929 switch (code)
3931 case MINUS:
3932 result = wi::sub (pop0, pop1);
3933 break;
3935 case PLUS:
3936 result = wi::add (pop0, pop1);
3937 break;
3939 case MULT:
3940 result = wi::mul (pop0, pop1);
3941 break;
3943 case DIV:
3944 result = wi::div_trunc (pop0, pop1, SIGNED, &overflow);
3945 if (overflow)
3946 return NULL_RTX;
3947 break;
3949 case MOD:
3950 result = wi::mod_trunc (pop0, pop1, SIGNED, &overflow);
3951 if (overflow)
3952 return NULL_RTX;
3953 break;
3955 case UDIV:
3956 result = wi::div_trunc (pop0, pop1, UNSIGNED, &overflow);
3957 if (overflow)
3958 return NULL_RTX;
3959 break;
3961 case UMOD:
3962 result = wi::mod_trunc (pop0, pop1, UNSIGNED, &overflow);
3963 if (overflow)
3964 return NULL_RTX;
3965 break;
3967 case AND:
3968 result = wi::bit_and (pop0, pop1);
3969 break;
3971 case IOR:
3972 result = wi::bit_or (pop0, pop1);
3973 break;
3975 case XOR:
3976 result = wi::bit_xor (pop0, pop1);
3977 break;
3979 case SMIN:
3980 result = wi::smin (pop0, pop1);
3981 break;
3983 case SMAX:
3984 result = wi::smax (pop0, pop1);
3985 break;
3987 case UMIN:
3988 result = wi::umin (pop0, pop1);
3989 break;
3991 case UMAX:
3992 result = wi::umax (pop0, pop1);
3993 break;
3995 case LSHIFTRT:
3996 case ASHIFTRT:
3997 case ASHIFT:
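/* Shift counts at or above the precision are either reduced modulo the
width (when the target defines SHIFT_COUNT_TRUNCATED) or left unfolded,
since their result is target-dependent. */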
3999 wide_int wop1 = pop1;
4000 if (SHIFT_COUNT_TRUNCATED)
4001 wop1 = wi::umod_trunc (wop1, width);
4002 else if (wi::geu_p (wop1, width))
4003 return NULL_RTX;
4005 switch (code)
4007 case LSHIFTRT:
4008 result = wi::lrshift (pop0, wop1);
4009 break;
4011 case ASHIFTRT:
4012 result = wi::arshift (pop0, wop1);
4013 break;
4015 case ASHIFT:
4016 result = wi::lshift (pop0, wop1);
4017 break;
4019 default:
4020 gcc_unreachable ();
4022 break;
4024 case ROTATE:
4025 case ROTATERT:
4027 if (wi::neg_p (pop1))
4028 return NULL_RTX;
4030 switch (code)
4032 case ROTATE:
4033 result = wi::lrotate (pop0, pop1);
4034 break;
4036 case ROTATERT:
4037 result = wi::rrotate (pop0, pop1);
4038 break;
4040 default:
4041 gcc_unreachable ();
4043 break;
4045 default:
4046 return NULL_RTX;
4048 return immed_wide_int_const (result, mode);
4051 return NULL_RTX;
4056 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
4057 PLUS or MINUS.
4059 Rather than testing for specific cases, we do this by a brute-force method
4060 and do all possible simplifications until no more changes occur. Then
4061 we rebuild the operation. */
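/* For example, (plus (minus a b) b) is first decomposed into the terms
{ +a, -b, +b }; the combination loop below cancels the matching
+b/-b pair, and the result is rebuilt as just a. */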
4063 struct simplify_plus_minus_op_data
4065 rtx op;
4066 short neg;
4069 static bool
4070 simplify_plus_minus_op_data_cmp (rtx x, rtx y)
4072 int result;
4074 result = (commutative_operand_precedence (y)
4075 - commutative_operand_precedence (x));
4076 if (result)
4077 return result > 0;
4079 /* Group together equal REGs to do more simplification. */
4080 if (REG_P (x) && REG_P (y))
4081 return REGNO (x) > REGNO (y);
4082 else
4083 return false;
4086 static rtx
4087 simplify_plus_minus (enum rtx_code code, machine_mode mode, rtx op0,
4088 rtx op1)
4090 struct simplify_plus_minus_op_data ops[16];
4091 rtx result, tem;
4092 int n_ops = 2;
4093 int changed, n_constants, canonicalized = 0;
4094 int i, j;
4096 memset (ops, 0, sizeof ops);
4098 /* Set up the two operands and then expand them until nothing has been
4099 changed. If we run out of room in our array, give up; this should
4100 almost never happen. */
4102 ops[0].op = op0;
4103 ops[0].neg = 0;
4104 ops[1].op = op1;
4105 ops[1].neg = (code == MINUS);
4109 changed = 0;
4110 n_constants = 0;
4112 for (i = 0; i < n_ops; i++)
4114 rtx this_op = ops[i].op;
4115 int this_neg = ops[i].neg;
4116 enum rtx_code this_code = GET_CODE (this_op);
4118 switch (this_code)
4120 case PLUS:
4121 case MINUS:
4122 if (n_ops == ARRAY_SIZE (ops))
4123 return NULL_RTX;
4125 ops[n_ops].op = XEXP (this_op, 1);
4126 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
4127 n_ops++;
4129 ops[i].op = XEXP (this_op, 0);
4130 changed = 1;
4131 canonicalized |= this_neg || i != n_ops - 2;
4132 break;
4134 case NEG:
4135 ops[i].op = XEXP (this_op, 0);
4136 ops[i].neg = ! this_neg;
4137 changed = 1;
4138 canonicalized = 1;
4139 break;
4141 case CONST:
4142 if (n_ops != ARRAY_SIZE (ops)
4143 && GET_CODE (XEXP (this_op, 0)) == PLUS
4144 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
4145 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
4147 ops[i].op = XEXP (XEXP (this_op, 0), 0);
4148 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
4149 ops[n_ops].neg = this_neg;
4150 n_ops++;
4151 changed = 1;
4152 canonicalized = 1;
4154 break;
4156 case NOT:
4157 /* ~a -> (-a - 1) */
4158 if (n_ops != ARRAY_SIZE (ops))
4160 ops[n_ops].op = CONSTM1_RTX (mode);
4161 ops[n_ops++].neg = this_neg;
4162 ops[i].op = XEXP (this_op, 0);
4163 ops[i].neg = !this_neg;
4164 changed = 1;
4165 canonicalized = 1;
4167 break;
4169 case CONST_INT:
4170 n_constants++;
4171 if (this_neg)
4173 ops[i].op = neg_const_int (mode, this_op);
4174 ops[i].neg = 0;
4175 changed = 1;
4176 canonicalized = 1;
4178 break;
4180 default:
4181 break;
4185 while (changed);
4187 if (n_constants > 1)
4188 canonicalized = 1;
4190 gcc_assert (n_ops >= 2);
4192 /* If we only have two operands, we can avoid the loops. */
4193 if (n_ops == 2)
4195 enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
4196 rtx lhs, rhs;
4198 /* Get the two operands. Be careful with the order, especially for
4199 the cases where code == MINUS. */
4200 if (ops[0].neg && ops[1].neg)
4202 lhs = gen_rtx_NEG (mode, ops[0].op);
4203 rhs = ops[1].op;
4205 else if (ops[0].neg)
4207 lhs = ops[1].op;
4208 rhs = ops[0].op;
4210 else
4212 lhs = ops[0].op;
4213 rhs = ops[1].op;
4216 return simplify_const_binary_operation (code, mode, lhs, rhs);
4219 /* Now simplify each pair of operands until nothing changes. */
4222 /* Insertion sort is good enough for a small array. */
4223 for (i = 1; i < n_ops; i++)
4225 struct simplify_plus_minus_op_data save;
4226 j = i - 1;
4227 if (!simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op))
4228 continue;
4230 canonicalized = 1;
4231 save = ops[i];
4233 ops[j + 1] = ops[j];
4234 while (j-- && simplify_plus_minus_op_data_cmp (ops[j].op, save.op));
4235 ops[j + 1] = save;
4238 changed = 0;
4239 for (i = n_ops - 1; i > 0; i--)
4240 for (j = i - 1; j >= 0; j--)
4242 rtx lhs = ops[j].op, rhs = ops[i].op;
4243 int lneg = ops[j].neg, rneg = ops[i].neg;
4245 if (lhs != 0 && rhs != 0)
4247 enum rtx_code ncode = PLUS;
4249 if (lneg != rneg)
4251 ncode = MINUS;
4252 if (lneg)
4253 std::swap (lhs, rhs);
4255 else if (swap_commutative_operands_p (lhs, rhs))
4256 std::swap (lhs, rhs);
4258 if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
4259 && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
4261 rtx tem_lhs, tem_rhs;
4263 tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
4264 tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
4265 tem = simplify_binary_operation (ncode, mode, tem_lhs, tem_rhs);
4267 if (tem && !CONSTANT_P (tem))
4268 tem = gen_rtx_CONST (GET_MODE (tem), tem);
4270 else
4271 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
4273 if (tem)
4275 /* Reject "simplifications" that just wrap the two
4276 arguments in a CONST. Failure to do so can result
4277 in infinite recursion with simplify_binary_operation
4278 when it calls us to simplify CONST operations.
4279 Also, if we find such a simplification, don't try
4280 any more combinations with this rhs: We must have
4281 something like symbol+offset, i.e. one of the
4282 trivial CONST expressions we handle later. */
4283 if (GET_CODE (tem) == CONST
4284 && GET_CODE (XEXP (tem, 0)) == ncode
4285 && XEXP (XEXP (tem, 0), 0) == lhs
4286 && XEXP (XEXP (tem, 0), 1) == rhs)
4287 break;
4288 lneg &= rneg;
4289 if (GET_CODE (tem) == NEG)
4290 tem = XEXP (tem, 0), lneg = !lneg;
4291 if (CONST_INT_P (tem) && lneg)
4292 tem = neg_const_int (mode, tem), lneg = 0;
4294 ops[i].op = tem;
4295 ops[i].neg = lneg;
4296 ops[j].op = NULL_RTX;
4297 changed = 1;
4298 canonicalized = 1;
4303 /* If nothing changed, fail. */
4304 if (!canonicalized)
4305 return NULL_RTX;
4307 /* Pack all the operands to the lower-numbered entries. */
4308 for (i = 0, j = 0; j < n_ops; j++)
4309 if (ops[j].op)
4311 ops[i] = ops[j];
4312 i++;
4314 n_ops = i;
4316 while (changed);
4318 /* Create (minus -C X) instead of (neg (const (plus X C))). */
4319 if (n_ops == 2
4320 && CONST_INT_P (ops[1].op)
4321 && CONSTANT_P (ops[0].op)
4322 && ops[0].neg)
4323 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
4325 /* We suppressed creation of trivial CONST expressions in the
4326 combination loop to avoid recursion. Create one manually now.
4327 The combination loop should have ensured that there is exactly
4328 one CONST_INT, and the sort will have ensured that it is last
4329 in the array and that any other constant will be next-to-last. */
4331 if (n_ops > 1
4332 && CONST_INT_P (ops[n_ops - 1].op)
4333 && CONSTANT_P (ops[n_ops - 2].op))
4335 rtx value = ops[n_ops - 1].op;
4336 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
4337 value = neg_const_int (mode, value);
4338 ops[n_ops - 2].op = plus_constant (mode, ops[n_ops - 2].op,
4339 INTVAL (value));
4340 n_ops--;
4343 /* Put a non-negated operand first, if possible. */
4345 for (i = 0; i < n_ops && ops[i].neg; i++)
4346 continue;
4347 if (i == n_ops)
4348 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
4349 else if (i != 0)
4351 tem = ops[0].op;
4352 ops[0] = ops[i];
4353 ops[i].op = tem;
4354 ops[i].neg = 1;
4357 /* Now make the result by performing the requested operations. */
4358 result = ops[0].op;
4359 for (i = 1; i < n_ops; i++)
4360 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
4361 mode, result, ops[i].op);
4363 return result;
4366 /* Check whether an operand is suitable for calling simplify_plus_minus. */
4367 static bool
4368 plus_minus_operand_p (const_rtx x)
4370 return GET_CODE (x) == PLUS
4371 || GET_CODE (x) == MINUS
4372 || (GET_CODE (x) == CONST
4373 && GET_CODE (XEXP (x, 0)) == PLUS
4374 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
4375 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
4378 /* Like simplify_binary_operation except used for relational operators.
4379 MODE is the mode of the result. If MODE is VOIDmode, both operands must
4380 not also be VOIDmode.
4382 CMP_MODE specifies the mode in which the comparison is done, so it is
4383 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
4384 the operands or, if both are VOIDmode, the operands are compared in
4385 "infinite precision". */
4387 simplify_relational_operation (enum rtx_code code, machine_mode mode,
4388 machine_mode cmp_mode, rtx op0, rtx op1)
4390 rtx tem, trueop0, trueop1;
4392 if (cmp_mode == VOIDmode)
4393 cmp_mode = GET_MODE (op0);
4394 if (cmp_mode == VOIDmode)
4395 cmp_mode = GET_MODE (op1);
4397 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
4398 if (tem)
4400 if (SCALAR_FLOAT_MODE_P (mode))
4402 if (tem == const0_rtx)
4403 return CONST0_RTX (mode);
4404 #ifdef FLOAT_STORE_FLAG_VALUE
4406 REAL_VALUE_TYPE val;
4407 val = FLOAT_STORE_FLAG_VALUE (mode);
4408 return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
4410 #else
4411 return NULL_RTX;
4412 #endif
4414 if (VECTOR_MODE_P (mode))
4416 if (tem == const0_rtx)
4417 return CONST0_RTX (mode);
4418 #ifdef VECTOR_STORE_FLAG_VALUE
4420 int i, units;
4421 rtvec v;
4423 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
4424 if (val == NULL_RTX)
4425 return NULL_RTX;
4426 if (val == const1_rtx)
4427 return CONST1_RTX (mode);
4429 units = GET_MODE_NUNITS (mode);
4430 v = rtvec_alloc (units);
4431 for (i = 0; i < units; i++)
4432 RTVEC_ELT (v, i) = val;
4433 return gen_rtx_raw_CONST_VECTOR (mode, v);
4435 #else
4436 return NULL_RTX;
4437 #endif
4440 return tem;
4443 /* For the following tests, ensure const0_rtx is op1. */
4444 if (swap_commutative_operands_p (op0, op1)
4445 || (op0 == const0_rtx && op1 != const0_rtx))
4446 std::swap (op0, op1), code = swap_condition (code);
4448 /* If op0 is a compare, extract the comparison arguments from it. */
4449 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4450 return simplify_gen_relational (code, mode, VOIDmode,
4451 XEXP (op0, 0), XEXP (op0, 1));
4453 if (GET_MODE_CLASS (cmp_mode) == MODE_CC
4454 || CC0_P (op0))
4455 return NULL_RTX;
4457 trueop0 = avoid_constant_pool_reference (op0);
4458 trueop1 = avoid_constant_pool_reference (op1);
4459 return simplify_relational_operation_1 (code, mode, cmp_mode,
4460 trueop0, trueop1);
4463 /* This part of simplify_relational_operation is only used when CMP_MODE
4464 is not in class MODE_CC (i.e. it is a real comparison).
4466 MODE is the mode of the result, while CMP_MODE specifies the mode
4467 in which the comparison is done, so it is the mode of the operands. */
4469 static rtx
4470 simplify_relational_operation_1 (enum rtx_code code, machine_mode mode,
4471 machine_mode cmp_mode, rtx op0, rtx op1)
4473 enum rtx_code op0code = GET_CODE (op0);
4475 if (op1 == const0_rtx && COMPARISON_P (op0))
4477 /* If op0 is a comparison, extract the comparison arguments
4478 from it. */
4479 if (code == NE)
4481 if (GET_MODE (op0) == mode)
4482 return simplify_rtx (op0);
4483 else
4484 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
4485 XEXP (op0, 0), XEXP (op0, 1));
4487 else if (code == EQ)
4489 enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
4490 if (new_code != UNKNOWN)
4491 return simplify_gen_relational (new_code, mode, VOIDmode,
4492 XEXP (op0, 0), XEXP (op0, 1));
4496 /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
4497 (GEU/LTU a -C). Likewise for (LTU/GEU (PLUS a C) a). */
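/* For example, (ltu (plus a 1) 1) holds only when a + 1 wraps around to
zero, i.e. when a is the maximal unsigned value, which is exactly
(geu a -1). */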
4498 if ((code == LTU || code == GEU)
4499 && GET_CODE (op0) == PLUS
4500 && CONST_INT_P (XEXP (op0, 1))
4501 && (rtx_equal_p (op1, XEXP (op0, 0))
4502 || rtx_equal_p (op1, XEXP (op0, 1)))
4503 /* (LTU/GEU (PLUS a 0) 0) is not the same as (GEU/LTU a 0). */
4504 && XEXP (op0, 1) != const0_rtx)
4506 rtx new_cmp
4507 = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
4508 return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
4509 cmp_mode, XEXP (op0, 0), new_cmp);
4512 /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a). */
4513 if ((code == LTU || code == GEU)
4514 && GET_CODE (op0) == PLUS
4515 && rtx_equal_p (op1, XEXP (op0, 1))
4516 /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b). */
4517 && !rtx_equal_p (op1, XEXP (op0, 0)))
4518 return simplify_gen_relational (code, mode, cmp_mode, op0,
4519 copy_rtx (XEXP (op0, 0)));
4521 if (op1 == const0_rtx)
4523 /* Canonicalize (GTU x 0) as (NE x 0). */
4524 if (code == GTU)
4525 return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
4526 /* Canonicalize (LEU x 0) as (EQ x 0). */
4527 if (code == LEU)
4528 return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
4530 else if (op1 == const1_rtx)
4532 switch (code)
4534 case GE:
4535 /* Canonicalize (GE x 1) as (GT x 0). */
4536 return simplify_gen_relational (GT, mode, cmp_mode,
4537 op0, const0_rtx);
4538 case GEU:
4539 /* Canonicalize (GEU x 1) as (NE x 0). */
4540 return simplify_gen_relational (NE, mode, cmp_mode,
4541 op0, const0_rtx);
4542 case LT:
4543 /* Canonicalize (LT x 1) as (LE x 0). */
4544 return simplify_gen_relational (LE, mode, cmp_mode,
4545 op0, const0_rtx);
4546 case LTU:
4547 /* Canonicalize (LTU x 1) as (EQ x 0). */
4548 return simplify_gen_relational (EQ, mode, cmp_mode,
4549 op0, const0_rtx);
4550 default:
4551 break;
4554 else if (op1 == constm1_rtx)
4556 /* Canonicalize (LE x -1) as (LT x 0). */
4557 if (code == LE)
4558 return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
4559 /* Canonicalize (GT x -1) as (GE x 0). */
4560 if (code == GT)
4561 return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
4564 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
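/* For example, (eq (plus x 4) 10) becomes (eq x 6). */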
4565 if ((code == EQ || code == NE)
4566 && (op0code == PLUS || op0code == MINUS)
4567 && CONSTANT_P (op1)
4568 && CONSTANT_P (XEXP (op0, 1))
4569 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
4571 rtx x = XEXP (op0, 0);
4572 rtx c = XEXP (op0, 1);
4573 enum rtx_code invcode = op0code == PLUS ? MINUS : PLUS;
4574 rtx tem = simplify_gen_binary (invcode, cmp_mode, op1, c);
4576 /* Detect infinite recursion, where this simplification would keep
4577 oscillating between:
4578 A + B == C <---> C - B == A,
4579 where A, B, and C are all constants with non-simplifiable expressions,
4580 usually SYMBOL_REFs. */
4581 if (GET_CODE (tem) == invcode
4582 && CONSTANT_P (x)
4583 && rtx_equal_p (c, XEXP (tem, 1)))
4584 return NULL_RTX;
4586 return simplify_gen_relational (code, mode, cmp_mode, x, tem);
4589 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0)) is
4590 the same as (zero_extract:SI FOO (const_int 1) BAR). */
4591 if (code == NE
4592 && op1 == const0_rtx
4593 && GET_MODE_CLASS (mode) == MODE_INT
4594 && cmp_mode != VOIDmode
4595 /* ??? Work-around BImode bugs in the ia64 backend. */
4596 && mode != BImode
4597 && cmp_mode != BImode
4598 && nonzero_bits (op0, cmp_mode) == 1
4599 && STORE_FLAG_VALUE == 1)
4600 return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
4601 ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
4602 : lowpart_subreg (mode, op0, cmp_mode);
4604 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
4605 if ((code == EQ || code == NE)
4606 && op1 == const0_rtx
4607 && op0code == XOR)
4608 return simplify_gen_relational (code, mode, cmp_mode,
4609 XEXP (op0, 0), XEXP (op0, 1));
4611 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
4612 if ((code == EQ || code == NE)
4613 && op0code == XOR
4614 && rtx_equal_p (XEXP (op0, 0), op1)
4615 && !side_effects_p (XEXP (op0, 0)))
4616 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 1),
4617 CONST0_RTX (mode));
4619 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
4620 if ((code == EQ || code == NE)
4621 && op0code == XOR
4622 && rtx_equal_p (XEXP (op0, 1), op1)
4623 && !side_effects_p (XEXP (op0, 1)))
4624 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4625 CONST0_RTX (mode));
4627 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
4628 if ((code == EQ || code == NE)
4629 && op0code == XOR
4630 && CONST_SCALAR_INT_P (op1)
4631 && CONST_SCALAR_INT_P (XEXP (op0, 1)))
4632 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4633 simplify_gen_binary (XOR, cmp_mode,
4634 XEXP (op0, 1), op1));
4636 /* (eq/ne (and x y) x) simplifies to (eq/ne (and (not y) x) 0), which
4637 can be implemented with a BICS instruction on some targets, or
4638 constant-folded if y is a constant. */
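/* (and x y) equals x exactly when every bit set in x is also set in y,
i.e. when (and (not y) x) is zero, which is the form tested below. */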
4639 if ((code == EQ || code == NE)
4640 && op0code == AND
4641 && rtx_equal_p (XEXP (op0, 0), op1)
4642 && !side_effects_p (op1)
4643 && op1 != CONST0_RTX (cmp_mode))
4645 rtx not_y = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 1), cmp_mode);
4646 rtx lhs = simplify_gen_binary (AND, cmp_mode, not_y, XEXP (op0, 0));
4648 return simplify_gen_relational (code, mode, cmp_mode, lhs,
4649 CONST0_RTX (cmp_mode));
4652 /* Likewise for (eq/ne (and x y) y). */
4653 if ((code == EQ || code == NE)
4654 && op0code == AND
4655 && rtx_equal_p (XEXP (op0, 1), op1)
4656 && !side_effects_p (op1)
4657 && op1 != CONST0_RTX (cmp_mode))
4659 rtx not_x = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 0), cmp_mode);
4660 rtx lhs = simplify_gen_binary (AND, cmp_mode, not_x, XEXP (op0, 1));
4662 return simplify_gen_relational (code, mode, cmp_mode, lhs,
4663 CONST0_RTX (cmp_mode));
4666 /* (eq/ne (bswap x) C1) simplifies to (eq/ne x C2) with C2 swapped. */
4667 if ((code == EQ || code == NE)
4668 && GET_CODE (op0) == BSWAP
4669 && CONST_SCALAR_INT_P (op1))
4670 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4671 simplify_gen_unary (BSWAP, cmp_mode,
4672 op1, cmp_mode));
4674 /* (eq/ne (bswap x) (bswap y)) simplifies to (eq/ne x y). */
4675 if ((code == EQ || code == NE)
4676 && GET_CODE (op0) == BSWAP
4677 && GET_CODE (op1) == BSWAP)
4678 return simplify_gen_relational (code, mode, cmp_mode,
4679 XEXP (op0, 0), XEXP (op1, 0));
4681 if (op0code == POPCOUNT && op1 == const0_rtx)
4682 switch (code)
4684 case EQ:
4685 case LE:
4686 case LEU:
4687 /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */
4688 return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
4689 XEXP (op0, 0), const0_rtx);
4691 case NE:
4692 case GT:
4693 case GTU:
4694 /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */
4695 return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
4696 XEXP (op0, 0), const0_rtx);
4698 default:
4699 break;
4702 return NULL_RTX;
4705 enum
4707 CMP_EQ = 1,
4708 CMP_LT = 2,
4709 CMP_GT = 4,
4710 CMP_LTU = 8,
4711 CMP_GTU = 16
4715 /* Convert the known results for EQ, LT, GT, LTU, GTU contained in
4716 KNOWN_RESULTS to a CONST_INT, based on the requested comparison CODE.
4717 For KNOWN_RESULTS to make sense it should be either CMP_EQ, or the
4718 logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
4719 For floating-point comparisons, assume that the operands were ordered. */
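/* For example, comparing -1 with 0 yields CMP_LT | CMP_GTU: -1 is less
than 0 as a signed value but greater than 0 as an unsigned value. */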
4721 static rtx
4722 comparison_result (enum rtx_code code, int known_results)
4724 switch (code)
4726 case EQ:
4727 case UNEQ:
4728 return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
4729 case NE:
4730 case LTGT:
4731 return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;
4733 case LT:
4734 case UNLT:
4735 return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
4736 case GE:
4737 case UNGE:
4738 return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;
4740 case GT:
4741 case UNGT:
4742 return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
4743 case LE:
4744 case UNLE:
4745 return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;
4747 case LTU:
4748 return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
4749 case GEU:
4750 return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;
4752 case GTU:
4753 return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
4754 case LEU:
4755 return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;
4757 case ORDERED:
4758 return const_true_rtx;
4759 case UNORDERED:
4760 return const0_rtx;
4761 default:
4762 gcc_unreachable ();
4766 /* Check if the given comparison (done in the given MODE) is actually
4767 a tautology or a contradiction. If the mode is VOIDmode, the
4768 comparison is done in "infinite precision". If no simplification
4769 is possible, this function returns zero. Otherwise, it returns
4770 either const_true_rtx or const0_rtx. */
4773 simplify_const_relational_operation (enum rtx_code code,
4774 machine_mode mode,
4775 rtx op0, rtx op1)
4777 rtx tem;
4778 rtx trueop0;
4779 rtx trueop1;
4781 gcc_assert (mode != VOIDmode
4782 || (GET_MODE (op0) == VOIDmode
4783 && GET_MODE (op1) == VOIDmode));
4785 /* If op0 is a compare, extract the comparison arguments from it. */
4786 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4788 op1 = XEXP (op0, 1);
4789 op0 = XEXP (op0, 0);
4791 if (GET_MODE (op0) != VOIDmode)
4792 mode = GET_MODE (op0);
4793 else if (GET_MODE (op1) != VOIDmode)
4794 mode = GET_MODE (op1);
4795 else
4796 return 0;
4799 /* We can't simplify MODE_CC values since we don't know what the
4800 actual comparison is. */
4801 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
4802 return 0;
4804 /* Make sure the constant is second. */
4805 if (swap_commutative_operands_p (op0, op1))
4807 std::swap (op0, op1);
4808 code = swap_condition (code);
4811 trueop0 = avoid_constant_pool_reference (op0);
4812 trueop1 = avoid_constant_pool_reference (op1);
4814 /* For integer comparisons of A and B maybe we can simplify A - B and can
4815 then simplify a comparison of that with zero. If A and B are both either
4816 a register or a CONST_INT, this can't help; testing for these cases will
4817 prevent infinite recursion here and speed things up.
4819 We can only do this for EQ and NE comparisons; otherwise we may
4820 lose or introduce overflow, which we cannot disregard as undefined
4821 since we do not know the signedness of the operation on either the
4822 left or the right hand side of the comparison. */
4824 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
4825 && (code == EQ || code == NE)
4826 && ! ((REG_P (op0) || CONST_INT_P (trueop0))
4827 && (REG_P (op1) || CONST_INT_P (trueop1)))
4828 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
4829 /* We cannot do this if tem is a nonzero address. */
4830 && ! nonzero_address_p (tem))
4831 return simplify_const_relational_operation (signed_condition (code),
4832 mode, tem, const0_rtx);
4834 if (! HONOR_NANS (mode) && code == ORDERED)
4835 return const_true_rtx;
4837 if (! HONOR_NANS (mode) && code == UNORDERED)
4838 return const0_rtx;
4840 /* For modes without NaNs, if the two operands are equal, we know the
4841 result except if they have side-effects. Even with NaNs we know
4842 the result of unordered comparisons and, if signaling NaNs are
4843 irrelevant, also the result of LT/GT/LTGT. */
4844 if ((! HONOR_NANS (trueop0)
4845 || code == UNEQ || code == UNLE || code == UNGE
4846 || ((code == LT || code == GT || code == LTGT)
4847 && ! HONOR_SNANS (trueop0)))
4848 && rtx_equal_p (trueop0, trueop1)
4849 && ! side_effects_p (trueop0))
4850 return comparison_result (code, CMP_EQ);
4852 /* If the operands are floating-point constants, see if we can fold
4853 the result. */
4854 if (CONST_DOUBLE_AS_FLOAT_P (trueop0)
4855 && CONST_DOUBLE_AS_FLOAT_P (trueop1)
4856 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
4858 REAL_VALUE_TYPE d0, d1;
4860 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
4861 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
4863 /* Comparisons are unordered iff at least one of the values is NaN. */
4864 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
4865 switch (code)
4867 case UNEQ:
4868 case UNLT:
4869 case UNGT:
4870 case UNLE:
4871 case UNGE:
4872 case NE:
4873 case UNORDERED:
4874 return const_true_rtx;
4875 case EQ:
4876 case LT:
4877 case GT:
4878 case LE:
4879 case GE:
4880 case LTGT:
4881 case ORDERED:
4882 return const0_rtx;
4883 default:
4884 return 0;
4887 return comparison_result (code,
4888 (REAL_VALUES_EQUAL (d0, d1) ? CMP_EQ :
4889 REAL_VALUES_LESS (d0, d1) ? CMP_LT : CMP_GT));
4892 /* Otherwise, see if the operands are both integers. */
4893 if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
4894 && CONST_SCALAR_INT_P (trueop0) && CONST_SCALAR_INT_P (trueop1))
4896 /* It would be nice if we really had a mode here. However, the
4897 largest int representable on the target is as good as
4898 infinite. */
4899 machine_mode cmode = (mode == VOIDmode) ? MAX_MODE_INT : mode;
4900 rtx_mode_t ptrueop0 = std::make_pair (trueop0, cmode);
4901 rtx_mode_t ptrueop1 = std::make_pair (trueop1, cmode);
4903 if (wi::eq_p (ptrueop0, ptrueop1))
4904 return comparison_result (code, CMP_EQ);
4905 else
4907 int cr = wi::lts_p (ptrueop0, ptrueop1) ? CMP_LT : CMP_GT;
4908 cr |= wi::ltu_p (ptrueop0, ptrueop1) ? CMP_LTU : CMP_GTU;
4909 return comparison_result (code, cr);
4913 /* Optimize comparisons with upper and lower bounds. */
4914 if (HWI_COMPUTABLE_MODE_P (mode)
4915 && CONST_INT_P (trueop1))
4917 int sign;
4918 unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, mode);
4919 HOST_WIDE_INT val = INTVAL (trueop1);
4920 HOST_WIDE_INT mmin, mmax;
4922 if (code == GEU
4923 || code == LEU
4924 || code == GTU
4925 || code == LTU)
4926 sign = 0;
4927 else
4928 sign = 1;
4930 /* Get a reduced range if the sign bit is zero. */
4931 if (nonzero <= (GET_MODE_MASK (mode) >> 1))
4933 mmin = 0;
4934 mmax = nonzero;
4936 else
4938 rtx mmin_rtx, mmax_rtx;
4939 get_mode_bounds (mode, sign, mode, &mmin_rtx, &mmax_rtx);
4941 mmin = INTVAL (mmin_rtx);
4942 mmax = INTVAL (mmax_rtx);
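/* If the top SIGN_COPIES bits of trueop0 are all copies of its sign
bit, the value fits in a correspondingly narrower signed range, so
shrink both bounds by SIGN_COPIES - 1 bits. */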
4943 if (sign)
4945 unsigned int sign_copies = num_sign_bit_copies (trueop0, mode);
4947 mmin >>= (sign_copies - 1);
4948 mmax >>= (sign_copies - 1);
4952 switch (code)
4954 /* x >= y is always true for y <= mmin, always false for y > mmax. */
4955 case GEU:
4956 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
4957 return const_true_rtx;
4958 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
4959 return const0_rtx;
4960 break;
4961 case GE:
4962 if (val <= mmin)
4963 return const_true_rtx;
4964 if (val > mmax)
4965 return const0_rtx;
4966 break;
4968 /* x <= y is always true for y >= mmax, always false for y < mmin. */
4969 case LEU:
4970 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
4971 return const_true_rtx;
4972 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
4973 return const0_rtx;
4974 break;
4975 case LE:
4976 if (val >= mmax)
4977 return const_true_rtx;
4978 if (val < mmin)
4979 return const0_rtx;
4980 break;
4982 case EQ:
4983 /* x == y is always false for y out of range. */
4984 if (val < mmin || val > mmax)
4985 return const0_rtx;
4986 break;
4988 /* x > y is always false for y >= mmax, always true for y < mmin. */
4989 case GTU:
4990 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
4991 return const0_rtx;
4992 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
4993 return const_true_rtx;
4994 break;
4995 case GT:
4996 if (val >= mmax)
4997 return const0_rtx;
4998 if (val < mmin)
4999 return const_true_rtx;
5000 break;
5002 /* x < y is always false for y <= mmin, always true for y > mmax. */
5003 case LTU:
5004 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
5005 return const0_rtx;
5006 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
5007 return const_true_rtx;
5008 break;
5009 case LT:
5010 if (val <= mmin)
5011 return const0_rtx;
5012 if (val > mmax)
5013 return const_true_rtx;
5014 break;
5016 case NE:
5017 /* x != y is always true for y out of range. */
5018 if (val < mmin || val > mmax)
5019 return const_true_rtx;
5020 break;
5022 default:
5023 break;
5027 /* Optimize integer comparisons with zero. */
5028 if (trueop1 == const0_rtx)
5030 /* Some addresses are known to be nonzero. We don't know
5031 their sign, but equality comparisons are known. */
5032 if (nonzero_address_p (trueop0))
5034 if (code == EQ || code == LEU)
5035 return const0_rtx;
5036 if (code == NE || code == GTU)
5037 return const_true_rtx;
5040 /* See if the first operand is an IOR with a constant. If so, we
5041 may be able to determine the result of this comparison. */
5042 if (GET_CODE (op0) == IOR)
5044 rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
5045 if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
5047 int sign_bitnum = GET_MODE_PRECISION (mode) - 1;
5048 int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
5049 && (UINTVAL (inner_const)
5050 & ((unsigned HOST_WIDE_INT) 1
5051 << sign_bitnum)));
5053 switch (code)
5055 case EQ:
5056 case LEU:
5057 return const0_rtx;
5058 case NE:
5059 case GTU:
5060 return const_true_rtx;
5061 case LT:
5062 case LE:
5063 if (has_sign)
5064 return const_true_rtx;
5065 break;
5066 case GT:
5067 case GE:
5068 if (has_sign)
5069 return const0_rtx;
5070 break;
5071 default:
5072 break;
5078 /* Optimize comparison of ABS with zero. */
5079 if (trueop1 == CONST0_RTX (mode)
5080 && (GET_CODE (trueop0) == ABS
5081 || (GET_CODE (trueop0) == FLOAT_EXTEND
5082 && GET_CODE (XEXP (trueop0, 0)) == ABS)))
5084 switch (code)
5086 case LT:
5087 /* Optimize abs(x) < 0.0. */
5088 if (!HONOR_SNANS (mode)
5089 && (!INTEGRAL_MODE_P (mode)
5090 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
5092 if (INTEGRAL_MODE_P (mode)
5093 && (issue_strict_overflow_warning
5094 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
5095 warning (OPT_Wstrict_overflow,
5096 ("assuming signed overflow does not occur when "
5097 "assuming abs (x) < 0 is false"));
5098 return const0_rtx;
5100 break;
5102 case GE:
5103 /* Optimize abs(x) >= 0.0. */
5104 if (!HONOR_NANS (mode)
5105 && (!INTEGRAL_MODE_P (mode)
5106 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
5108 if (INTEGRAL_MODE_P (mode)
5109 && (issue_strict_overflow_warning
5110 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
5111 warning (OPT_Wstrict_overflow,
5112 ("assuming signed overflow does not occur when "
5113 "assuming abs (x) >= 0 is true"));
5114 return const_true_rtx;
5116 break;
5118 case UNGE:
5119 /* Optimize ! (abs(x) < 0.0). */
5120 return const_true_rtx;
5122 default:
5123 break;
5127 return 0;
5130 /* Simplify CODE, an operation with result mode MODE and three operands,
5131 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
5132 a constant. Return 0 if no simplification is possible. */
5135 simplify_ternary_operation (enum rtx_code code, machine_mode mode,
5136 machine_mode op0_mode, rtx op0, rtx op1,
5137 rtx op2)
5139 unsigned int width = GET_MODE_PRECISION (mode);
5140 bool any_change = false;
5141 rtx tem, trueop2;
5143 /* VOIDmode means "infinite" precision. */
5144 if (width == 0)
5145 width = HOST_BITS_PER_WIDE_INT;
5147 switch (code)
5149 case FMA:
5150 /* Simplify negations around the multiplication. */
5151 /* -a * -b + c => a * b + c. */
5152 if (GET_CODE (op0) == NEG)
5154 tem = simplify_unary_operation (NEG, mode, op1, mode);
5155 if (tem)
5156 op1 = tem, op0 = XEXP (op0, 0), any_change = true;
5158 else if (GET_CODE (op1) == NEG)
5160 tem = simplify_unary_operation (NEG, mode, op0, mode);
5161 if (tem)
5162 op0 = tem, op1 = XEXP (op1, 0), any_change = true;
5165 /* Canonicalize the two multiplication operands. */
5166 /* a * -b + c => -b * a + c. */
5167 if (swap_commutative_operands_p (op0, op1))
5168 std::swap (op0, op1), any_change = true;
5170 if (any_change)
5171 return gen_rtx_FMA (mode, op0, op1, op2);
5172 return NULL_RTX;
5174 case SIGN_EXTRACT:
5175 case ZERO_EXTRACT:
5176 if (CONST_INT_P (op0)
5177 && CONST_INT_P (op1)
5178 && CONST_INT_P (op2)
5179 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
5180 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
5182 /* Extracting a bit-field from a constant */
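/* For example, with !BITS_BIG_ENDIAN, (zero_extract:SI (const_int 0xab)
(const_int 4) (const_int 4)) picks bits 4-7 of 0xab and folds to
(const_int 0xa). */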
5183 unsigned HOST_WIDE_INT val = UINTVAL (op0);
5184 HOST_WIDE_INT op1val = INTVAL (op1);
5185 HOST_WIDE_INT op2val = INTVAL (op2);
5186 if (BITS_BIG_ENDIAN)
5187 val >>= GET_MODE_PRECISION (op0_mode) - op2val - op1val;
5188 else
5189 val >>= op2val;
5191 if (HOST_BITS_PER_WIDE_INT != op1val)
5193 /* First zero-extend. */
5194 val &= ((unsigned HOST_WIDE_INT) 1 << op1val) - 1;
5195 /* If desired, propagate sign bit. */
5196 if (code == SIGN_EXTRACT
5197 && (val & ((unsigned HOST_WIDE_INT) 1 << (op1val - 1)))
5198 != 0)
5199 val |= ~ (((unsigned HOST_WIDE_INT) 1 << op1val) - 1);
5202 return gen_int_mode (val, mode);
5204 break;
5206 case IF_THEN_ELSE:
5207 if (CONST_INT_P (op0))
5208 return op0 != const0_rtx ? op1 : op2;
5210 /* Convert c ? a : a into "a". */
5211 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
5212 return op1;
5214 /* Convert a != b ? a : b into "a". */
5215 if (GET_CODE (op0) == NE
5216 && ! side_effects_p (op0)
5217 && ! HONOR_NANS (mode)
5218 && ! HONOR_SIGNED_ZEROS (mode)
5219 && ((rtx_equal_p (XEXP (op0, 0), op1)
5220 && rtx_equal_p (XEXP (op0, 1), op2))
5221 || (rtx_equal_p (XEXP (op0, 0), op2)
5222 && rtx_equal_p (XEXP (op0, 1), op1))))
5223 return op1;
5225 /* Convert a == b ? a : b into "b". */
5226 if (GET_CODE (op0) == EQ
5227 && ! side_effects_p (op0)
5228 && ! HONOR_NANS (mode)
5229 && ! HONOR_SIGNED_ZEROS (mode)
5230 && ((rtx_equal_p (XEXP (op0, 0), op1)
5231 && rtx_equal_p (XEXP (op0, 1), op2))
5232 || (rtx_equal_p (XEXP (op0, 0), op2)
5233 && rtx_equal_p (XEXP (op0, 1), op1))))
5234 return op2;
5236 if (COMPARISON_P (op0) && ! side_effects_p (op0))
5238 machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
5239 ? GET_MODE (XEXP (op0, 1))
5240 : GET_MODE (XEXP (op0, 0)));
5241 rtx temp;
5243 /* Look for happy constants in op1 and op2. */
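/* If the two arms are STORE_FLAG_VALUE and zero, the IF_THEN_ELSE is
equivalent to the comparison itself, reversed when the zero sits in
the "true" arm. */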
5244 if (CONST_INT_P (op1) && CONST_INT_P (op2))
5246 HOST_WIDE_INT t = INTVAL (op1);
5247 HOST_WIDE_INT f = INTVAL (op2);
5249 if (t == STORE_FLAG_VALUE && f == 0)
5250 code = GET_CODE (op0);
5251 else if (t == 0 && f == STORE_FLAG_VALUE)
5253 enum rtx_code tmp;
5254 tmp = reversed_comparison_code (op0, NULL_RTX);
5255 if (tmp == UNKNOWN)
5256 break;
5257 code = tmp;
5259 else
5260 break;
5262 return simplify_gen_relational (code, mode, cmp_mode,
5263 XEXP (op0, 0), XEXP (op0, 1));
5266 if (cmp_mode == VOIDmode)
5267 cmp_mode = op0_mode;
5268 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
5269 cmp_mode, XEXP (op0, 0),
5270 XEXP (op0, 1));
5272 /* See if any simplifications were possible. */
5273 if (temp)
5275 if (CONST_INT_P (temp))
5276 return temp == const0_rtx ? op2 : op1;
5277 else if (temp)
5278 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
5281 break;
5283 case VEC_MERGE:
5284 gcc_assert (GET_MODE (op0) == mode);
5285 gcc_assert (GET_MODE (op1) == mode);
5286 gcc_assert (VECTOR_MODE_P (mode));
5287 trueop2 = avoid_constant_pool_reference (op2);
5288 if (CONST_INT_P (trueop2))
5290 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
5291 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
5292 unsigned HOST_WIDE_INT sel = UINTVAL (trueop2);
5293 unsigned HOST_WIDE_INT mask;
5294 if (n_elts == HOST_BITS_PER_WIDE_INT)
5295 mask = -1;
5296 else
5297 mask = ((unsigned HOST_WIDE_INT) 1 << n_elts) - 1;
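/* If the selector picks no elements from op0 the result is just op1,
and if it picks every element the result is just op0. */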
5299 if (!(sel & mask) && !side_effects_p (op0))
5300 return op1;
5301 if ((sel & mask) == mask && !side_effects_p (op1))
5302 return op0;
5304 rtx trueop0 = avoid_constant_pool_reference (op0);
5305 rtx trueop1 = avoid_constant_pool_reference (op1);
5306 if (GET_CODE (trueop0) == CONST_VECTOR
5307 && GET_CODE (trueop1) == CONST_VECTOR)
5309 rtvec v = rtvec_alloc (n_elts);
5310 unsigned int i;
5312 for (i = 0; i < n_elts; i++)
5313 RTVEC_ELT (v, i) = ((sel & ((unsigned HOST_WIDE_INT) 1 << i))
5314 ? CONST_VECTOR_ELT (trueop0, i)
5315 : CONST_VECTOR_ELT (trueop1, i));
5316 return gen_rtx_CONST_VECTOR (mode, v);
5319 /* Replace (vec_merge (vec_merge a b m) c n) with (vec_merge b c n)
5320 if no element from a appears in the result. */
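/* For example, with four elements, m == 0b0011 and n == 0b1100: the
outer merge keeps only elements 2 and 3 of the inner merge, and those
come from b, so a is dead and can be dropped. */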
5321 if (GET_CODE (op0) == VEC_MERGE)
5323 tem = avoid_constant_pool_reference (XEXP (op0, 2));
5324 if (CONST_INT_P (tem))
5326 unsigned HOST_WIDE_INT sel0 = UINTVAL (tem);
5327 if (!(sel & sel0 & mask) && !side_effects_p (XEXP (op0, 0)))
5328 return simplify_gen_ternary (code, mode, mode,
5329 XEXP (op0, 1), op1, op2);
5330 if (!(sel & ~sel0 & mask) && !side_effects_p (XEXP (op0, 1)))
5331 return simplify_gen_ternary (code, mode, mode,
5332 XEXP (op0, 0), op1, op2);
5335 if (GET_CODE (op1) == VEC_MERGE)
5337 tem = avoid_constant_pool_reference (XEXP (op1, 2));
5338 if (CONST_INT_P (tem))
5340 unsigned HOST_WIDE_INT sel1 = UINTVAL (tem);
5341 if (!(~sel & sel1 & mask) && !side_effects_p (XEXP (op1, 0)))
5342 return simplify_gen_ternary (code, mode, mode,
5343 op0, XEXP (op1, 1), op2);
5344 if (!(~sel & ~sel1 & mask) && !side_effects_p (XEXP (op1, 1)))
5345 return simplify_gen_ternary (code, mode, mode,
5346 op0, XEXP (op1, 0), op2);
5350 /* Replace (vec_merge (vec_duplicate (vec_select a parallel (i))) a 1 << i)
5351 with a. */
5352 if (GET_CODE (op0) == VEC_DUPLICATE
5353 && GET_CODE (XEXP (op0, 0)) == VEC_SELECT
5354 && GET_CODE (XEXP (XEXP (op0, 0), 1)) == PARALLEL
5355 && mode_nunits[GET_MODE (XEXP (op0, 0))] == 1)
5357 tem = XVECEXP ((XEXP (XEXP (op0, 0), 1)), 0, 0);
5358 if (CONST_INT_P (tem) && CONST_INT_P (op2))
5360 if (XEXP (XEXP (op0, 0), 0) == op1
5361 && UINTVAL (op2) == HOST_WIDE_INT_1U << UINTVAL (tem))
5362 return op1;
5367 if (rtx_equal_p (op0, op1)
5368 && !side_effects_p (op2) && !side_effects_p (op1))
5369 return op0;
5371 break;
5373 default:
5374 gcc_unreachable ();
5377 return 0;
5380 /* Evaluate a SUBREG of a CONST_INT or CONST_WIDE_INT or CONST_DOUBLE
5381 or CONST_FIXED or CONST_VECTOR, returning another CONST_INT or
5382 CONST_WIDE_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.
5384 Works by unpacking OP into a collection of 8-bit values
5385 represented as a little-endian array of 'unsigned char', selecting by BYTE,
5386 and then repacking them again for OUTERMODE. */
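/* For example, on a little-endian 64-bit target,
(subreg:SI (const_int 0x1122334455667788) 0) selects the four
least-significant bytes and yields (const_int 0x55667788). */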
5388 static rtx
5389 simplify_immed_subreg (machine_mode outermode, rtx op,
5390 machine_mode innermode, unsigned int byte)
5392 enum {
5393 value_bit = 8,
5394 value_mask = (1 << value_bit) - 1
5396 unsigned char value[MAX_BITSIZE_MODE_ANY_MODE / value_bit];
5397 int value_start;
5398 int i;
5399 int elem;
5401 int num_elem;
5402 rtx * elems;
5403 int elem_bitsize;
5404 rtx result_s;
5405 rtvec result_v = NULL;
5406 enum mode_class outer_class;
5407 machine_mode outer_submode;
5408 int max_bitsize;
5410 /* Some ports misuse CCmode. */
5411 if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
5412 return op;
5414 /* We have no way to represent a complex constant at the rtl level. */
5415 if (COMPLEX_MODE_P (outermode))
5416 return NULL_RTX;
5418 /* We support any size mode. */
5419 max_bitsize = MAX (GET_MODE_BITSIZE (outermode),
5420 GET_MODE_BITSIZE (innermode));
5422 /* Unpack the value. */
5424 if (GET_CODE (op) == CONST_VECTOR)
5426 num_elem = CONST_VECTOR_NUNITS (op);
5427 elems = &CONST_VECTOR_ELT (op, 0);
5428 elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
5430 else
5432 num_elem = 1;
5433 elems = &op;
5434 elem_bitsize = max_bitsize;
5436 /* If this asserts, it is too complicated; reducing value_bit may help. */
5437 gcc_assert (BITS_PER_UNIT % value_bit == 0);
5438 /* I don't know how to handle endianness of sub-units. */
5439 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
5441 for (elem = 0; elem < num_elem; elem++)
5443 unsigned char * vp;
5444 rtx el = elems[elem];
5446 /* Vectors are kept in target memory order. (This is probably
5447 a mistake.) */
5449 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5450 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5451 / BITS_PER_UNIT);
5452 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5453 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5454 unsigned bytele = (subword_byte % UNITS_PER_WORD
5455 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5456 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
5459 switch (GET_CODE (el))
5461 case CONST_INT:
5462 for (i = 0;
5463 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5464 i += value_bit)
5465 *vp++ = INTVAL (el) >> i;
5466 /* CONST_INTs are always logically sign-extended. */
5467 for (; i < elem_bitsize; i += value_bit)
5468 *vp++ = INTVAL (el) < 0 ? -1 : 0;
5469 break;
5471 case CONST_WIDE_INT:
5473 rtx_mode_t val = std::make_pair (el, innermode);
5474 unsigned char extend = wi::sign_mask (val);
5476 for (i = 0; i < elem_bitsize; i += value_bit)
5477 *vp++ = wi::extract_uhwi (val, i, value_bit);
5478 for (; i < elem_bitsize; i += value_bit)
5479 *vp++ = extend;
5481 break;
5483 case CONST_DOUBLE:
5484 if (TARGET_SUPPORTS_WIDE_INT == 0 && GET_MODE (el) == VOIDmode)
5486 unsigned char extend = 0;
5487 /* If this triggers, someone should have generated a
5488 CONST_INT instead. */
5489 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
5491 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5492 *vp++ = CONST_DOUBLE_LOW (el) >> i;
5493 while (i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize)
5495 *vp++
5496 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
5497 i += value_bit;
5500 if (CONST_DOUBLE_HIGH (el) >> (HOST_BITS_PER_WIDE_INT - 1))
5501 extend = -1;
5502 for (; i < elem_bitsize; i += value_bit)
5503 *vp++ = extend;
5505 else
5507 /* This is big enough for anything on the platform. */
5508 long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32];
5509 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
5511 gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
5512 gcc_assert (bitsize <= elem_bitsize);
5513 gcc_assert (bitsize % value_bit == 0);
5515 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
5516 GET_MODE (el));
5518 /* real_to_target produces its result in words affected by
5519 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5520 and use WORDS_BIG_ENDIAN instead; see the documentation
5521 of SUBREG in rtl.texi. */
5522 for (i = 0; i < bitsize; i += value_bit)
5524 int ibase;
5525 if (WORDS_BIG_ENDIAN)
5526 ibase = bitsize - 1 - i;
5527 else
5528 ibase = i;
5529 *vp++ = tmp[ibase / 32] >> i % 32;
5532 /* It shouldn't matter what's done here, so fill it with
5533 zero. */
5534 for (; i < elem_bitsize; i += value_bit)
5535 *vp++ = 0;
5537 break;
5539 case CONST_FIXED:
5540 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
5542 for (i = 0; i < elem_bitsize; i += value_bit)
5543 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5545 else
5547 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5548 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5549 for (; i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize;
5550 i += value_bit)
5551 *vp++ = CONST_FIXED_VALUE_HIGH (el)
5552 >> (i - HOST_BITS_PER_WIDE_INT);
5553 for (; i < elem_bitsize; i += value_bit)
5554 *vp++ = 0;
5556 break;
5558 default:
5559 gcc_unreachable ();
5563 /* Now, pick the right byte to start with. */
5564 /* Renumber BYTE so that the least-significant byte is byte 0. A special
5565 case is paradoxical SUBREGs, which shouldn't be adjusted since they
5566 will already have offset 0. */
5567 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
5569 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
5570 - byte);
5571 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5572 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5573 byte = (subword_byte % UNITS_PER_WORD
5574 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5577 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
5578 so if it's become negative it will instead be very large.) */
5579 gcc_assert (byte < GET_MODE_SIZE (innermode));
5581 /* Convert from bytes to chunks of size value_bit. */
5582 value_start = byte * (BITS_PER_UNIT / value_bit);
5584 /* Re-pack the value. */
5586 if (VECTOR_MODE_P (outermode))
5588 num_elem = GET_MODE_NUNITS (outermode);
5589 result_v = rtvec_alloc (num_elem);
5590 elems = &RTVEC_ELT (result_v, 0);
5591 outer_submode = GET_MODE_INNER (outermode);
5593 else
5595 num_elem = 1;
5596 elems = &result_s;
5597 outer_submode = outermode;
5600 outer_class = GET_MODE_CLASS (outer_submode);
5601 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
5603 gcc_assert (elem_bitsize % value_bit == 0);
5604 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
5606 for (elem = 0; elem < num_elem; elem++)
5608 unsigned char *vp;
5610 /* Vectors are stored in target memory order. (This is probably
5611 a mistake.) */
5613 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5614 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5615 / BITS_PER_UNIT);
5616 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5617 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5618 unsigned bytele = (subword_byte % UNITS_PER_WORD
5619 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5620 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
5623 switch (outer_class)
5625 case MODE_INT:
5626 case MODE_PARTIAL_INT:
5628 int u;
5629 int base = 0;
5630 int units
5631 = (GET_MODE_BITSIZE (outer_submode) + HOST_BITS_PER_WIDE_INT - 1)
5632 / HOST_BITS_PER_WIDE_INT;
5633 HOST_WIDE_INT tmp[MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_WIDE_INT];
5634 wide_int r;
5636 if (GET_MODE_PRECISION (outer_submode) > MAX_BITSIZE_MODE_ANY_INT)
5637 return NULL_RTX;
5638 for (u = 0; u < units; u++)
5640 unsigned HOST_WIDE_INT buf = 0;
5641 for (i = 0;
5642 i < HOST_BITS_PER_WIDE_INT && base + i < elem_bitsize;
5643 i += value_bit)
5644 buf |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5646 tmp[u] = buf;
5647 base += HOST_BITS_PER_WIDE_INT;
5649 r = wide_int::from_array (tmp, units,
5650 GET_MODE_PRECISION (outer_submode));
5651 #if TARGET_SUPPORTS_WIDE_INT == 0
5652 /* Make sure r will fit into CONST_INT or CONST_DOUBLE. */
5653 if (wi::min_precision (r, SIGNED) > HOST_BITS_PER_DOUBLE_INT)
5654 return NULL_RTX;
5655 #endif
5656 elems[elem] = immed_wide_int_const (r, outer_submode);
5658 break;
5660 case MODE_FLOAT:
5661 case MODE_DECIMAL_FLOAT:
5663 REAL_VALUE_TYPE r;
5664 long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32];
5666 /* real_from_target wants its input in words affected by
5667 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5668 and use WORDS_BIG_ENDIAN instead; see the documentation
5669 of SUBREG in rtl.texi. */
5670 for (i = 0; i < max_bitsize / 32; i++)
5671 tmp[i] = 0;
5672 for (i = 0; i < elem_bitsize; i += value_bit)
5674 int ibase;
5675 if (WORDS_BIG_ENDIAN)
5676 ibase = elem_bitsize - 1 - i;
5677 else
5678 ibase = i;
5679 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
5682 real_from_target (&r, tmp, outer_submode);
5683 elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
5685 break;
5687 case MODE_FRACT:
5688 case MODE_UFRACT:
5689 case MODE_ACCUM:
5690 case MODE_UACCUM:
5692 FIXED_VALUE_TYPE f;
5693 f.data.low = 0;
5694 f.data.high = 0;
5695 f.mode = outer_submode;
5697 for (i = 0;
5698 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5699 i += value_bit)
5700 f.data.low |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5701 for (; i < elem_bitsize; i += value_bit)
5702 f.data.high |= ((unsigned HOST_WIDE_INT)(*vp++ & value_mask)
5703 << (i - HOST_BITS_PER_WIDE_INT));
5705 elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
5707 break;
5709 default:
5710 gcc_unreachable ();
5713 if (VECTOR_MODE_P (outermode))
5714 return gen_rtx_CONST_VECTOR (outermode, result_v);
5715 else
5716 return result_s;
5719 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
5720 Return 0 if no simplifications are possible. */
5721 rtx
5722 simplify_subreg (machine_mode outermode, rtx op,
5723 machine_mode innermode, unsigned int byte)
5725 /* Little bit of sanity checking. */
5726 gcc_assert (innermode != VOIDmode);
5727 gcc_assert (outermode != VOIDmode);
5728 gcc_assert (innermode != BLKmode);
5729 gcc_assert (outermode != BLKmode);
5731 gcc_assert (GET_MODE (op) == innermode
5732 || GET_MODE (op) == VOIDmode);
5734 if ((byte % GET_MODE_SIZE (outermode)) != 0)
5735 return NULL_RTX;
5737 if (byte >= GET_MODE_SIZE (innermode))
5738 return NULL_RTX;
5740 if (outermode == innermode && !byte)
5741 return op;
5743 if (CONST_SCALAR_INT_P (op)
5744 || CONST_DOUBLE_AS_FLOAT_P (op)
5745 || GET_CODE (op) == CONST_FIXED
5746 || GET_CODE (op) == CONST_VECTOR)
5747 return simplify_immed_subreg (outermode, op, innermode, byte);
5749 /* Changing mode twice with SUBREG => just change it once,
5750 or not at all if changing back to op's starting mode. */
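/* For instance, (subreg:QI (subreg:HI (reg:SI x) 0) 0) can be rewritten
   as (subreg:QI (reg:SI x) 0); when the outer mode and offset match the
   innermost register, that register itself is returned.  Here x stands
   for any pseudo register.  */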
5751 if (GET_CODE (op) == SUBREG)
5753 machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
5754 int final_offset = byte + SUBREG_BYTE (op);
5755 rtx newx;
5757 if (outermode == innermostmode
5758 && byte == 0 && SUBREG_BYTE (op) == 0)
5759 return SUBREG_REG (op);
5761 /* The SUBREG_BYTE represents the offset, as if the value were stored
5762 in memory. An irritating exception is the paradoxical subreg,
5763 where we define SUBREG_BYTE to be 0; on big-endian machines this
5764 value would otherwise be negative. For a moment, undo this exception. */
5765 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5767 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
5768 if (WORDS_BIG_ENDIAN)
5769 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5770 if (BYTES_BIG_ENDIAN)
5771 final_offset += difference % UNITS_PER_WORD;
5773 if (SUBREG_BYTE (op) == 0
5774 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
5776 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
5777 if (WORDS_BIG_ENDIAN)
5778 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5779 if (BYTES_BIG_ENDIAN)
5780 final_offset += difference % UNITS_PER_WORD;
5783 /* See whether the resulting subreg will be paradoxical. */
5784 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
5786 /* In nonparadoxical subregs we can't handle negative offsets. */
5787 if (final_offset < 0)
5788 return NULL_RTX;
5789 /* Bail out in case resulting subreg would be incorrect. */
5790 if (final_offset % GET_MODE_SIZE (outermode)
5791 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
5792 return NULL_RTX;
5794 else
5796 int offset = 0;
5797 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
5799 /* In a paradoxical subreg, see if we are still looking at the lower
5800 part. If so, our SUBREG_BYTE will be 0. */
5801 if (WORDS_BIG_ENDIAN)
5802 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5803 if (BYTES_BIG_ENDIAN)
5804 offset += difference % UNITS_PER_WORD;
5805 if (offset == final_offset)
5806 final_offset = 0;
5807 else
5808 return NULL_RTX;
5811 /* Recurse for further possible simplifications. */
5812 newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
5813 final_offset);
5814 if (newx)
5815 return newx;
5816 if (validate_subreg (outermode, innermostmode,
5817 SUBREG_REG (op), final_offset))
5819 newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
5820 if (SUBREG_PROMOTED_VAR_P (op)
5821 && SUBREG_PROMOTED_SIGN (op) >= 0
5822 && GET_MODE_CLASS (outermode) == MODE_INT
5823 && IN_RANGE (GET_MODE_SIZE (outermode),
5824 GET_MODE_SIZE (innermode),
5825 GET_MODE_SIZE (innermostmode))
5826 && subreg_lowpart_p (newx))
5828 SUBREG_PROMOTED_VAR_P (newx) = 1;
5829 SUBREG_PROMOTED_SET (newx, SUBREG_PROMOTED_GET (op));
5831 return newx;
5833 return NULL_RTX;
5836 /* SUBREG of a hard register => just change the register number
5837 and/or mode. If the hard register is not valid in that mode,
5838 suppress this simplification. If the hard register is the stack,
5839 frame, or argument pointer, leave this as a SUBREG. */
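/* For instance, a word-sized lowpart of a multi-word hard register can
   often be expressed directly as the overlapping hard register, when
   simplify_subreg_regno reports a valid register number for it.  */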
5841 if (REG_P (op) && HARD_REGISTER_P (op))
5843 unsigned int regno, final_regno;
5845 regno = REGNO (op);
5846 final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
5847 if (HARD_REGISTER_NUM_P (final_regno))
5849 rtx x;
5850 int final_offset = byte;
5852 /* Adjust offset for paradoxical subregs. */
5853 if (byte == 0
5854 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5856 int difference = (GET_MODE_SIZE (innermode)
5857 - GET_MODE_SIZE (outermode));
5858 if (WORDS_BIG_ENDIAN)
5859 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5860 if (BYTES_BIG_ENDIAN)
5861 final_offset += difference % UNITS_PER_WORD;
5864 x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);
5866 /* Propagate the original regno. We don't have any way to specify
5867 the offset inside the original regno, so do so only for the lowpart.
5868 The information is used only by alias analysis, which cannot
5869 grok partial registers anyway. */
5871 if (subreg_lowpart_offset (outermode, innermode) == byte)
5872 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
5873 return x;
5877 /* If we have a SUBREG of a register that we are replacing and we are
5878 replacing it with a MEM, make a new MEM and try replacing the
5879 SUBREG with it. Don't do this if the MEM has a mode-dependent address
5880 or if we would be widening it. */
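/* For instance, (subreg:SI (mem:DI A) 4), where A is any valid address,
   can become a narrower MEM whose address is A offset by 4 bytes, via
   adjust_address_nv below.  */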
5882 if (MEM_P (op)
5883 && ! mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op))
5884 /* Allow splitting of volatile memory references in case we don't
5885 have an instruction to move the whole thing. */
5886 && (! MEM_VOLATILE_P (op)
5887 || ! have_insn_for (SET, innermode))
5888 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
5889 return adjust_address_nv (op, outermode, byte);
5891 /* Handle complex values represented as CONCAT
5892 of real and imaginary part. */
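/* A subreg at an offset below the first part's size reads from the real
   part, XEXP (op, 0); otherwise it reads from the imaginary part,
   XEXP (op, 1).  */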
5893 if (GET_CODE (op) == CONCAT)
5895 unsigned int part_size, final_offset;
5896 rtx part, res;
5898 part_size = GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)));
5899 if (byte < part_size)
5901 part = XEXP (op, 0);
5902 final_offset = byte;
5904 else
5906 part = XEXP (op, 1);
5907 final_offset = byte - part_size;
5910 if (final_offset + GET_MODE_SIZE (outermode) > part_size)
5911 return NULL_RTX;
5913 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
5914 if (res)
5915 return res;
5916 if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
5917 return gen_rtx_SUBREG (outermode, part, final_offset);
5918 return NULL_RTX;
5921 /* A SUBREG resulting from a zero extension may fold to zero if
5922 it extracts higher bits than the ZERO_EXTEND's source provides. */
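/* For instance, on a typical little-endian target
   (subreg:SI (zero_extend:DI (reg:SI x)) 4) reads only bits that the
   extension filled with zeros, so it folds to (const_int 0).  */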
5923 if (GET_CODE (op) == ZERO_EXTEND && SCALAR_INT_MODE_P (innermode))
5925 unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
5926 if (bitpos >= GET_MODE_PRECISION (GET_MODE (XEXP (op, 0))))
5927 return CONST0_RTX (outermode);
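/* A lowpart SUBREG to a narrower scalar integer mode acts like a
   TRUNCATE; try the truncation simplifications on it.  */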
5930 if (SCALAR_INT_MODE_P (outermode)
5931 && SCALAR_INT_MODE_P (innermode)
5932 && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode)
5933 && byte == subreg_lowpart_offset (outermode, innermode))
5935 rtx tem = simplify_truncation (outermode, op, innermode);
5936 if (tem)
5937 return tem;
5940 return NULL_RTX;
5943 /* Make a SUBREG operation or equivalent if it folds. */
5945 rtx
5946 simplify_gen_subreg (machine_mode outermode, rtx op,
5947 machine_mode innermode, unsigned int byte)
5949 rtx newx;
5951 newx = simplify_subreg (outermode, op, innermode, byte);
5952 if (newx)
5953 return newx;
5955 if (GET_CODE (op) == SUBREG
5956 || GET_CODE (op) == CONCAT
5957 || GET_MODE (op) == VOIDmode)
5958 return NULL_RTX;
5960 if (validate_subreg (outermode, innermode, op, byte))
5961 return gen_rtx_SUBREG (outermode, op, byte);
5963 return NULL_RTX;
5966 /* Simplify X, an rtx expression.
5968 Return the simplified expression or NULL if no simplifications
5969 were possible.
5971 This is the preferred entry point into the simplification routines;
5972 however, we still allow passes to call the more specific routines.
5974 Right now GCC has three (yes, three) major bodies of RTL simplification
5975 code that need to be unified.
5977 1. fold_rtx in cse.c. This code uses various CSE specific
5978 information to aid in RTL simplification.
5980 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
5981 it uses combine specific information to aid in RTL
5982 simplification.
5984 3. The routines in this file.
5987 Long term we want to have only one body of simplification code; to
5988 get to that state I recommend the following steps:
5990 1. Pore over fold_rtx & simplify_rtx and move any simplifications
5991 which do not depend on pass-specific state into these routines.
5993 2. As code is moved by #1, change fold_rtx & simplify_rtx to
5994 use this routine whenever possible.
5996 3. Allow for pass dependent state to be provided to these
5997 routines and add simplifications based on the pass dependent
5998 state. Remove code from cse.c & combine.c that becomes
5999 redundant/dead.
6001 It will take time, but ultimately the compiler will be easier to
6002 maintain and improve. It's totally silly that when we add a
6003 simplification it needs to be added to 4 places (3 for RTL
6004 simplification and 1 for tree simplification). */
6006 rtx
6007 simplify_rtx (const_rtx x)
6009 const enum rtx_code code = GET_CODE (x);
6010 const machine_mode mode = GET_MODE (x);
6012 switch (GET_RTX_CLASS (code))
6014 case RTX_UNARY:
6015 return simplify_unary_operation (code, mode,
6016 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
6017 case RTX_COMM_ARITH:
6018 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
6019 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
6021 /* Fall through.... */
6023 case RTX_BIN_ARITH:
6024 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
6026 case RTX_TERNARY:
6027 case RTX_BITFIELD_OPS:
6028 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
6029 XEXP (x, 0), XEXP (x, 1),
6030 XEXP (x, 2));
6032 case RTX_COMPARE:
6033 case RTX_COMM_COMPARE:
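/* Comparison operands may be VOIDmode constants, so take the
   comparison mode from whichever operand has a mode.  */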
6034 return simplify_relational_operation (code, mode,
6035 ((GET_MODE (XEXP (x, 0))
6036 != VOIDmode)
6037 ? GET_MODE (XEXP (x, 0))
6038 : GET_MODE (XEXP (x, 1))),
6039 XEXP (x, 0),
6040 XEXP (x, 1));
6042 case RTX_EXTRA:
6043 if (code == SUBREG)
6044 return simplify_subreg (mode, SUBREG_REG (x),
6045 GET_MODE (SUBREG_REG (x)),
6046 SUBREG_BYTE (x));
6047 break;
6049 case RTX_OBJ:
6050 if (code == LO_SUM)
6052 /* Convert (lo_sum (high FOO) FOO) to FOO. */
6053 if (GET_CODE (XEXP (x, 0)) == HIGH
6054 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
6055 return XEXP (x, 1);
6057 break;
6059 default:
6060 break;
6062 return NULL;