1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987-2015 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "backend.h"
25 #include "predict.h"
26 #include "rtl.h"
27 #include "alias.h"
28 #include "tree.h"
29 #include "fold-const.h"
30 #include "varasm.h"
31 #include "tm_p.h"
32 #include "regs.h"
33 #include "flags.h"
34 #include "insn-config.h"
35 #include "recog.h"
36 #include "insn-codes.h"
37 #include "optabs.h"
38 #include "expmed.h"
39 #include "dojump.h"
40 #include "explow.h"
41 #include "calls.h"
42 #include "emit-rtl.h"
43 #include "stmt.h"
44 #include "expr.h"
45 #include "diagnostic-core.h"
46 #include "target.h"
48 /* Simplification and canonicalization of RTL. */
50 /* Much code operates on (low, high) pairs; the low value is an
51 unsigned wide int, the high value a signed wide int. We
52 occasionally need to sign extend from low to high as if low were a
53 signed wide int. */
54 #define HWI_SIGN_EXTEND(low) \
55 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
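/* For example, if the most significant bit of LOW (viewed as a
   HOST_WIDE_INT) is set, HWI_SIGN_EXTEND (LOW) evaluates to
   (HOST_WIDE_INT) -1, i.e. all ones; otherwise it evaluates to zero,
   giving the high half of the sign-extended pair.  */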
57 static rtx neg_const_int (machine_mode, const_rtx);
58 static bool plus_minus_operand_p (const_rtx);
59 static rtx simplify_plus_minus (enum rtx_code, machine_mode, rtx, rtx);
60 static rtx simplify_immed_subreg (machine_mode, rtx, machine_mode,
61 unsigned int);
62 static rtx simplify_associative_operation (enum rtx_code, machine_mode,
63 rtx, rtx);
64 static rtx simplify_relational_operation_1 (enum rtx_code, machine_mode,
65 machine_mode, rtx, rtx);
66 static rtx simplify_unary_operation_1 (enum rtx_code, machine_mode, rtx);
67 static rtx simplify_binary_operation_1 (enum rtx_code, machine_mode,
68 rtx, rtx, rtx, rtx);
70 /* Negate a CONST_INT rtx, truncating (because a conversion from a
71 maximally negative number can overflow). */
72 static rtx
73 neg_const_int (machine_mode mode, const_rtx i)
75 return gen_int_mode (-(unsigned HOST_WIDE_INT) INTVAL (i), mode);
78 /* Test whether expression X is an immediate constant that represents
79 the most significant bit of machine mode MODE. */
81 bool
82 mode_signbit_p (machine_mode mode, const_rtx x)
84 unsigned HOST_WIDE_INT val;
85 unsigned int width;
87 if (GET_MODE_CLASS (mode) != MODE_INT)
88 return false;
90 width = GET_MODE_PRECISION (mode);
91 if (width == 0)
92 return false;
94 if (width <= HOST_BITS_PER_WIDE_INT
95 && CONST_INT_P (x))
96 val = INTVAL (x);
97 #if TARGET_SUPPORTS_WIDE_INT
98 else if (CONST_WIDE_INT_P (x))
100 unsigned int i;
101 unsigned int elts = CONST_WIDE_INT_NUNITS (x);
102 if (elts != (width + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT)
103 return false;
104 for (i = 0; i < elts - 1; i++)
105 if (CONST_WIDE_INT_ELT (x, i) != 0)
106 return false;
107 val = CONST_WIDE_INT_ELT (x, elts - 1);
108 width %= HOST_BITS_PER_WIDE_INT;
109 if (width == 0)
110 width = HOST_BITS_PER_WIDE_INT;
112 #else
113 else if (width <= HOST_BITS_PER_DOUBLE_INT
114 && CONST_DOUBLE_AS_INT_P (x)
115 && CONST_DOUBLE_LOW (x) == 0)
117 val = CONST_DOUBLE_HIGH (x);
118 width -= HOST_BITS_PER_WIDE_INT;
120 #endif
121 else
122 /* X is not an integer constant. */
123 return false;
125 if (width < HOST_BITS_PER_WIDE_INT)
126 val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
127 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
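/* For instance, mode_signbit_p (SImode, x) holds only when X is
   (const_int -2147483648), i.e. the SImode bit pattern 0x80000000.  */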
130 /* Test whether VAL is equal to the most significant bit of mode MODE
131 (after masking with the mode mask of MODE). Returns false if the
132 precision of MODE is too large to handle. */
134 bool
135 val_signbit_p (machine_mode mode, unsigned HOST_WIDE_INT val)
137 unsigned int width;
139 if (GET_MODE_CLASS (mode) != MODE_INT)
140 return false;
142 width = GET_MODE_PRECISION (mode);
143 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
144 return false;
146 val &= GET_MODE_MASK (mode);
147 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
150 /* Test whether the most significant bit of mode MODE is set in VAL.
151 Returns false if the precision of MODE is too large to handle. */
152 bool
153 val_signbit_known_set_p (machine_mode mode, unsigned HOST_WIDE_INT val)
155 unsigned int width;
157 if (GET_MODE_CLASS (mode) != MODE_INT)
158 return false;
160 width = GET_MODE_PRECISION (mode);
161 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
162 return false;
164 val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
165 return val != 0;
168 /* Test whether the most significant bit of mode MODE is clear in VAL.
169 Returns false if the precision of MODE is too large to handle. */
170 bool
171 val_signbit_known_clear_p (machine_mode mode, unsigned HOST_WIDE_INT val)
173 unsigned int width;
175 if (GET_MODE_CLASS (mode) != MODE_INT)
176 return false;
178 width = GET_MODE_PRECISION (mode);
179 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
180 return false;
182 val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
183 return val == 0;
186 /* Make a binary operation by properly ordering the operands and
187 seeing if the expression folds. */
190 simplify_gen_binary (enum rtx_code code, machine_mode mode, rtx op0,
191 rtx op1)
193 rtx tem;
195 /* If this simplifies, do it. */
196 tem = simplify_binary_operation (code, mode, op0, op1);
197 if (tem)
198 return tem;
200 /* Put complex operands first and constants second if commutative. */
201 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
202 && swap_commutative_operands_p (op0, op1))
203 std::swap (op0, op1);
205 return gen_rtx_fmt_ee (code, mode, op0, op1);
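/* For example, simplify_gen_binary (PLUS, SImode, (const_int 1),
   (reg:SI 100)) yields (plus:SI (reg:SI 100) (const_int 1)): nothing
   folds, so the commutative operands are reordered to place the
   constant second.  */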
208 /* If X is a MEM referencing the constant pool, return the real value.
209 Otherwise return X. */
211 avoid_constant_pool_reference (rtx x)
213 rtx c, tmp, addr;
214 machine_mode cmode;
215 HOST_WIDE_INT offset = 0;
217 switch (GET_CODE (x))
219 case MEM:
220 break;
222 case FLOAT_EXTEND:
223 /* Handle float extensions of constant pool references. */
224 tmp = XEXP (x, 0);
225 c = avoid_constant_pool_reference (tmp);
226 if (c != tmp && CONST_DOUBLE_AS_FLOAT_P (c))
227 return const_double_from_real_value (*CONST_DOUBLE_REAL_VALUE (c),
228 GET_MODE (x));
229 return x;
231 default:
232 return x;
235 if (GET_MODE (x) == BLKmode)
236 return x;
238 addr = XEXP (x, 0);
240 /* Call target hook to avoid the effects of -fpic etc.... */
241 addr = targetm.delegitimize_address (addr);
243 /* Split the address into a base and integer offset. */
244 if (GET_CODE (addr) == CONST
245 && GET_CODE (XEXP (addr, 0)) == PLUS
246 && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
248 offset = INTVAL (XEXP (XEXP (addr, 0), 1));
249 addr = XEXP (XEXP (addr, 0), 0);
252 if (GET_CODE (addr) == LO_SUM)
253 addr = XEXP (addr, 1);
255 /* If this is a constant pool reference, we can turn it into its
256 constant and hope that simplifications happen. */
257 if (GET_CODE (addr) == SYMBOL_REF
258 && CONSTANT_POOL_ADDRESS_P (addr))
260 c = get_pool_constant (addr);
261 cmode = get_pool_mode (addr);
263 /* If we're accessing the constant in a different mode than it was
264 originally stored, attempt to fix that up via subreg simplifications.
265 If that fails we have no choice but to return the original memory. */
266 if ((offset != 0 || cmode != GET_MODE (x))
267 && offset >= 0 && offset < GET_MODE_SIZE (cmode))
269 rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
270 if (tem && CONSTANT_P (tem))
271 return tem;
273 else
274 return c;
277 return x;
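/* For instance, if X is a (mem:SI (symbol_ref ...)) that addresses an
   SImode constant-pool entry holding 42, the result is (const_int 42);
   accesses at a different mode or offset go through simplify_subreg
   first, as above.  */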
280 /* Simplify a MEM based on its attributes. This is the default
281 delegitimize_address target hook, and it's recommended that every
282 overrider call it. */
285 delegitimize_mem_from_attrs (rtx x)
287 /* MEMs without MEM_OFFSETs may have been offset, so we can't just
288 use their base addresses as equivalent. */
289 if (MEM_P (x)
290 && MEM_EXPR (x)
291 && MEM_OFFSET_KNOWN_P (x))
293 tree decl = MEM_EXPR (x);
294 machine_mode mode = GET_MODE (x);
295 HOST_WIDE_INT offset = 0;
297 switch (TREE_CODE (decl))
299 default:
300 decl = NULL;
301 break;
303 case VAR_DECL:
304 break;
306 case ARRAY_REF:
307 case ARRAY_RANGE_REF:
308 case COMPONENT_REF:
309 case BIT_FIELD_REF:
310 case REALPART_EXPR:
311 case IMAGPART_EXPR:
312 case VIEW_CONVERT_EXPR:
314 HOST_WIDE_INT bitsize, bitpos;
315 tree toffset;
316 int unsignedp, volatilep = 0;
318 decl = get_inner_reference (decl, &bitsize, &bitpos, &toffset,
319 &mode, &unsignedp, &volatilep, false);
320 if (bitsize != GET_MODE_BITSIZE (mode)
321 || (bitpos % BITS_PER_UNIT)
322 || (toffset && !tree_fits_shwi_p (toffset)))
323 decl = NULL;
324 else
326 offset += bitpos / BITS_PER_UNIT;
327 if (toffset)
328 offset += tree_to_shwi (toffset);
330 break;
334 if (decl
335 && mode == GET_MODE (x)
336 && TREE_CODE (decl) == VAR_DECL
337 && (TREE_STATIC (decl)
338 || DECL_THREAD_LOCAL_P (decl))
339 && DECL_RTL_SET_P (decl)
340 && MEM_P (DECL_RTL (decl)))
342 rtx newx;
344 offset += MEM_OFFSET (x);
346 newx = DECL_RTL (decl);
348 if (MEM_P (newx))
350 rtx n = XEXP (newx, 0), o = XEXP (x, 0);
352 /* Avoid creating a new MEM needlessly if we already had
353 the same address. We do so if there's no OFFSET and the
354 old address X is identical to NEWX, or if X is of the
355 form (plus NEWX OFFSET), or the NEWX is of the form
356 (plus Y (const_int Z)) and X is that with the offset
357 added: (plus Y (const_int Z+OFFSET)). */
358 if (!((offset == 0
359 || (GET_CODE (o) == PLUS
360 && GET_CODE (XEXP (o, 1)) == CONST_INT
361 && (offset == INTVAL (XEXP (o, 1))
362 || (GET_CODE (n) == PLUS
363 && GET_CODE (XEXP (n, 1)) == CONST_INT
364 && (INTVAL (XEXP (n, 1)) + offset
365 == INTVAL (XEXP (o, 1)))
366 && (n = XEXP (n, 0))))
367 && (o = XEXP (o, 0))))
368 && rtx_equal_p (o, n)))
369 x = adjust_address_nv (newx, mode, offset);
371 else if (GET_MODE (x) == GET_MODE (newx)
372 && offset == 0)
373 x = newx;
377 return x;
380 /* Make a unary operation by first seeing if it folds and otherwise making
381 the specified operation. */
384 simplify_gen_unary (enum rtx_code code, machine_mode mode, rtx op,
385 machine_mode op_mode)
387 rtx tem;
389 /* If this simplifies, use it. */
390 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
391 return tem;
393 return gen_rtx_fmt_e (code, mode, op);
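/* E.g. simplify_gen_unary (NOT, SImode, (const_int 5), SImode) folds
   immediately to (const_int -6); a fresh (not:SI ...) rtx is built only
   when no folding applies.  */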
396 /* Likewise for ternary operations. */
399 simplify_gen_ternary (enum rtx_code code, machine_mode mode,
400 machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
402 rtx tem;
404 /* If this simplifies, use it. */
405 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
406 op0, op1, op2)))
407 return tem;
409 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
412 /* Likewise, for relational operations.
413 CMP_MODE specifies the mode the comparison is done in. */
416 simplify_gen_relational (enum rtx_code code, machine_mode mode,
417 machine_mode cmp_mode, rtx op0, rtx op1)
419 rtx tem;
421 if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
422 op0, op1)))
423 return tem;
425 return gen_rtx_fmt_ee (code, mode, op0, op1);
428 /* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
429 and simplify the result. If FN is non-NULL, call this callback on each
430 X; if it returns non-NULL, replace X with its return value and simplify the
431 result. */
434 simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
435 rtx (*fn) (rtx, const_rtx, void *), void *data)
437 enum rtx_code code = GET_CODE (x);
438 machine_mode mode = GET_MODE (x);
439 machine_mode op_mode;
440 const char *fmt;
441 rtx op0, op1, op2, newx, op;
442 rtvec vec, newvec;
443 int i, j;
445 if (__builtin_expect (fn != NULL, 0))
447 newx = fn (x, old_rtx, data);
448 if (newx)
449 return newx;
451 else if (rtx_equal_p (x, old_rtx))
452 return copy_rtx ((rtx) data);
454 switch (GET_RTX_CLASS (code))
456 case RTX_UNARY:
457 op0 = XEXP (x, 0);
458 op_mode = GET_MODE (op0);
459 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
460 if (op0 == XEXP (x, 0))
461 return x;
462 return simplify_gen_unary (code, mode, op0, op_mode);
464 case RTX_BIN_ARITH:
465 case RTX_COMM_ARITH:
466 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
467 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
468 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
469 return x;
470 return simplify_gen_binary (code, mode, op0, op1);
472 case RTX_COMPARE:
473 case RTX_COMM_COMPARE:
474 op0 = XEXP (x, 0);
475 op1 = XEXP (x, 1);
476 op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
477 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
478 op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
479 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
480 return x;
481 return simplify_gen_relational (code, mode, op_mode, op0, op1);
483 case RTX_TERNARY:
484 case RTX_BITFIELD_OPS:
485 op0 = XEXP (x, 0);
486 op_mode = GET_MODE (op0);
487 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
488 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
489 op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
490 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
491 return x;
492 if (op_mode == VOIDmode)
493 op_mode = GET_MODE (op0);
494 return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
496 case RTX_EXTRA:
497 if (code == SUBREG)
499 op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
500 if (op0 == SUBREG_REG (x))
501 return x;
502 op0 = simplify_gen_subreg (GET_MODE (x), op0,
503 GET_MODE (SUBREG_REG (x)),
504 SUBREG_BYTE (x));
505 return op0 ? op0 : x;
507 break;
509 case RTX_OBJ:
510 if (code == MEM)
512 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
513 if (op0 == XEXP (x, 0))
514 return x;
515 return replace_equiv_address_nv (x, op0);
517 else if (code == LO_SUM)
519 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
520 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
522 /* (lo_sum (high x) y) -> y where x and y have the same base. */
523 if (GET_CODE (op0) == HIGH)
525 rtx base0, base1, offset0, offset1;
526 split_const (XEXP (op0, 0), &base0, &offset0);
527 split_const (op1, &base1, &offset1);
528 if (rtx_equal_p (base0, base1))
529 return op1;
532 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
533 return x;
534 return gen_rtx_LO_SUM (mode, op0, op1);
536 break;
538 default:
539 break;
542 newx = x;
543 fmt = GET_RTX_FORMAT (code);
544 for (i = 0; fmt[i]; i++)
545 switch (fmt[i])
547 case 'E':
548 vec = XVEC (x, i);
549 newvec = XVEC (newx, i);
550 for (j = 0; j < GET_NUM_ELEM (vec); j++)
552 op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
553 old_rtx, fn, data);
554 if (op != RTVEC_ELT (vec, j))
556 if (newvec == vec)
558 newvec = shallow_copy_rtvec (vec);
559 if (x == newx)
560 newx = shallow_copy_rtx (x);
561 XVEC (newx, i) = newvec;
563 RTVEC_ELT (newvec, j) = op;
566 break;
568 case 'e':
569 if (XEXP (x, i))
571 op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
572 if (op != XEXP (x, i))
574 if (x == newx)
575 newx = shallow_copy_rtx (x);
576 XEXP (newx, i) = op;
579 break;
581 return newx;
584 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
585 resulting RTX. Return a new RTX which is as simplified as possible. */
588 simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
590 return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
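/* E.g. simplify_replace_rtx ((plus:SI (reg:SI 1) (const_int 4)),
   (reg:SI 1), (const_int 8)) substitutes the register and folds the
   result down to (const_int 12).  */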
593 /* Try to simplify a MODE truncation of OP, which has OP_MODE.
594 Only handle cases where the truncated value is inherently an rvalue.
596 RTL provides two ways of truncating a value:
598 1. a lowpart subreg. This form is only a truncation when both
599 the outer and inner modes (here MODE and OP_MODE respectively)
600 are scalar integers, and only then when the subreg is used as
601 an rvalue.
603 It is only valid to form such truncating subregs if the
604 truncation requires no action by the target. The onus for
605 proving this is on the creator of the subreg -- e.g. the
606 caller to simplify_subreg or simplify_gen_subreg -- and typically
607 involves either TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode.
609 2. a TRUNCATE. This form handles both scalar and compound integers.
611 The first form is preferred where valid. However, the TRUNCATE
612 handling in simplify_unary_operation turns the second form into the
613 first form when TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode allow,
614 so it is generally safe to form rvalue truncations using:
616 simplify_gen_unary (TRUNCATE, ...)
618 and leave simplify_unary_operation to work out which representation
619 should be used.
621 Because of the proof requirements on (1), simplify_truncation must
622 also use simplify_gen_unary (TRUNCATE, ...) to truncate parts of OP,
623 regardless of whether the outer truncation came from a SUBREG or a
624 TRUNCATE. For example, if the caller has proven that an SImode
625 truncation of:
627 (and:DI X Y)
629 is a no-op and can be represented as a subreg, it does not follow
630 that SImode truncations of X and Y are also no-ops. On a target
631 like 64-bit MIPS that requires SImode values to be stored in
632 sign-extended form, an SImode truncation of:
634 (and:DI (reg:DI X) (const_int 63))
636 is trivially a no-op because only the lower 6 bits can be set.
637 However, X is still an arbitrary 64-bit number and so we cannot
638 assume that truncating it too is a no-op. */
640 static rtx
641 simplify_truncation (machine_mode mode, rtx op,
642 machine_mode op_mode)
644 unsigned int precision = GET_MODE_UNIT_PRECISION (mode);
645 unsigned int op_precision = GET_MODE_UNIT_PRECISION (op_mode);
646 gcc_assert (precision <= op_precision);
648 /* Optimize truncations of zero and sign extended values. */
649 if (GET_CODE (op) == ZERO_EXTEND
650 || GET_CODE (op) == SIGN_EXTEND)
652 /* There are three possibilities. If MODE is the same as the
653 origmode, we can omit both the extension and the subreg.
654 If MODE is not larger than the origmode, we can apply the
655 truncation without the extension. Finally, if the outermode
656 is larger than the origmode, we can just extend to the appropriate
657 mode. */
658 machine_mode origmode = GET_MODE (XEXP (op, 0));
659 if (mode == origmode)
660 return XEXP (op, 0);
661 else if (precision <= GET_MODE_UNIT_PRECISION (origmode))
662 return simplify_gen_unary (TRUNCATE, mode,
663 XEXP (op, 0), origmode);
664 else
665 return simplify_gen_unary (GET_CODE (op), mode,
666 XEXP (op, 0), origmode);
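/* For instance, (truncate:SI (sign_extend:DI (reg:SI 1))) collapses
   straight back to (reg:SI 1) by the first of the three cases above.  */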
669 /* If the machine can perform operations in the truncated mode, distribute
670 the truncation, i.e. simplify (truncate:QI (op:SI (x:SI) (y:SI))) into
671 (op:QI (truncate:QI (x:SI)) (truncate:QI (y:SI))). */
672 if (1
673 && (!WORD_REGISTER_OPERATIONS || precision >= BITS_PER_WORD)
674 && (GET_CODE (op) == PLUS
675 || GET_CODE (op) == MINUS
676 || GET_CODE (op) == MULT))
678 rtx op0 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0), op_mode);
679 if (op0)
681 rtx op1 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 1), op_mode);
682 if (op1)
683 return simplify_gen_binary (GET_CODE (op), mode, op0, op1);
687 /* Simplify (truncate:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C)) into
688 (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
689 the outer subreg is effectively a truncation to the original mode. */
690 if ((GET_CODE (op) == LSHIFTRT
691 || GET_CODE (op) == ASHIFTRT)
692 /* Ensure that OP_MODE is at least twice as wide as MODE
693 to avoid the possibility that an outer LSHIFTRT shifts by more
694 than the sign extension's sign_bit_copies and introduces zeros
695 into the high bits of the result. */
696 && 2 * precision <= op_precision
697 && CONST_INT_P (XEXP (op, 1))
698 && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
699 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
700 && UINTVAL (XEXP (op, 1)) < precision)
701 return simplify_gen_binary (ASHIFTRT, mode,
702 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
704 /* Likewise (truncate:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C)) into
705 (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
706 the outer subreg is effectively a truncation to the original mode. */
707 if ((GET_CODE (op) == LSHIFTRT
708 || GET_CODE (op) == ASHIFTRT)
709 && CONST_INT_P (XEXP (op, 1))
710 && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
711 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
712 && UINTVAL (XEXP (op, 1)) < precision)
713 return simplify_gen_binary (LSHIFTRT, mode,
714 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
716 /* Likewise (truncate:QI (ashift:SI (zero_extend:SI (x:QI)) C)) into
717 (ashift:QI (x:QI) C), where C is a suitable small constant and
718 the outer subreg is effectively a truncation to the original mode. */
719 if (GET_CODE (op) == ASHIFT
720 && CONST_INT_P (XEXP (op, 1))
721 && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
722 || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
723 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
724 && UINTVAL (XEXP (op, 1)) < precision)
725 return simplify_gen_binary (ASHIFT, mode,
726 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
728 /* Recognize a word extraction from a multi-word subreg. */
729 if ((GET_CODE (op) == LSHIFTRT
730 || GET_CODE (op) == ASHIFTRT)
731 && SCALAR_INT_MODE_P (mode)
732 && SCALAR_INT_MODE_P (op_mode)
733 && precision >= BITS_PER_WORD
734 && 2 * precision <= op_precision
735 && CONST_INT_P (XEXP (op, 1))
736 && (INTVAL (XEXP (op, 1)) & (precision - 1)) == 0
737 && UINTVAL (XEXP (op, 1)) < op_precision)
739 int byte = subreg_lowpart_offset (mode, op_mode);
740 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
741 return simplify_gen_subreg (mode, XEXP (op, 0), op_mode,
742 (WORDS_BIG_ENDIAN
743 ? byte - shifted_bytes
744 : byte + shifted_bytes));
747 /* If we have a TRUNCATE of a right shift of MEM, make a new MEM
748 and try replacing the TRUNCATE and shift with it. Don't do this
749 if the MEM has a mode-dependent address. */
750 if ((GET_CODE (op) == LSHIFTRT
751 || GET_CODE (op) == ASHIFTRT)
752 && SCALAR_INT_MODE_P (op_mode)
753 && MEM_P (XEXP (op, 0))
754 && CONST_INT_P (XEXP (op, 1))
755 && (INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (mode)) == 0
756 && INTVAL (XEXP (op, 1)) > 0
757 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (op_mode)
758 && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0),
759 MEM_ADDR_SPACE (XEXP (op, 0)))
760 && ! MEM_VOLATILE_P (XEXP (op, 0))
761 && (GET_MODE_SIZE (mode) >= UNITS_PER_WORD
762 || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
764 int byte = subreg_lowpart_offset (mode, op_mode);
765 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
766 return adjust_address_nv (XEXP (op, 0), mode,
767 (WORDS_BIG_ENDIAN
768 ? byte - shifted_bytes
769 : byte + shifted_bytes));
772 /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
773 (OP:SI foo:SI) if OP is NEG or ABS. */
774 if ((GET_CODE (op) == ABS
775 || GET_CODE (op) == NEG)
776 && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
777 || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
778 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
779 return simplify_gen_unary (GET_CODE (op), mode,
780 XEXP (XEXP (op, 0), 0), mode);
782 /* (truncate:A (subreg:B (truncate:C X) 0)) is
783 (truncate:A X). */
784 if (GET_CODE (op) == SUBREG
785 && SCALAR_INT_MODE_P (mode)
786 && SCALAR_INT_MODE_P (op_mode)
787 && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (op)))
788 && GET_CODE (SUBREG_REG (op)) == TRUNCATE
789 && subreg_lowpart_p (op))
791 rtx inner = XEXP (SUBREG_REG (op), 0);
792 if (GET_MODE_PRECISION (mode)
793 <= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op))))
794 return simplify_gen_unary (TRUNCATE, mode, inner, GET_MODE (inner));
795 else
796 /* If subreg above is paradoxical and C is narrower
797 than A, return (subreg:A (truncate:C X) 0). */
798 return simplify_gen_subreg (mode, SUBREG_REG (op),
799 GET_MODE (SUBREG_REG (op)), 0);
802 /* (truncate:A (truncate:B X)) is (truncate:A X). */
803 if (GET_CODE (op) == TRUNCATE)
804 return simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0),
805 GET_MODE (XEXP (op, 0)));
807 return NULL_RTX;
810 /* Try to simplify a unary operation CODE whose output mode is to be
811 MODE with input operand OP whose mode was originally OP_MODE.
812 Return zero if no simplification can be made. */
814 simplify_unary_operation (enum rtx_code code, machine_mode mode,
815 rtx op, machine_mode op_mode)
817 rtx trueop, tem;
819 trueop = avoid_constant_pool_reference (op);
821 tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
822 if (tem)
823 return tem;
825 return simplify_unary_operation_1 (code, mode, op);
828 /* Return true if FLOAT or UNSIGNED_FLOAT operation OP is known
829 to be exact. */
831 static bool
832 exact_int_to_float_conversion_p (const_rtx op)
834 int out_bits = significand_size (GET_MODE_INNER (GET_MODE (op)));
835 machine_mode op0_mode = GET_MODE (XEXP (op, 0));
836 /* Constants shouldn't reach here. */
837 gcc_assert (op0_mode != VOIDmode);
838 int in_prec = GET_MODE_UNIT_PRECISION (op0_mode);
839 int in_bits = in_prec;
840 if (HWI_COMPUTABLE_MODE_P (op0_mode))
842 unsigned HOST_WIDE_INT nonzero = nonzero_bits (XEXP (op, 0), op0_mode);
843 if (GET_CODE (op) == FLOAT)
844 in_bits -= num_sign_bit_copies (XEXP (op, 0), op0_mode);
845 else if (GET_CODE (op) == UNSIGNED_FLOAT)
846 in_bits = wi::min_precision (wi::uhwi (nonzero, in_prec), UNSIGNED);
847 else
848 gcc_unreachable ();
849 in_bits -= wi::ctz (wi::uhwi (nonzero, in_prec));
851 return in_bits <= out_bits;
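/* E.g. (float:SF (reg:SI ...)) is known to be exact when nonzero_bits
   shows the value fits in SFmode's 24-bit significand, say after
   (and:SI (reg:SI ...) (const_int 0xffff)).  */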
854 /* Perform some simplifications we can do even if the operands
855 aren't constant. */
856 static rtx
857 simplify_unary_operation_1 (enum rtx_code code, machine_mode mode, rtx op)
859 enum rtx_code reversed;
860 rtx temp;
862 switch (code)
864 case NOT:
865 /* (not (not X)) == X. */
866 if (GET_CODE (op) == NOT)
867 return XEXP (op, 0);
869 /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
870 comparison is all ones. */
871 if (COMPARISON_P (op)
872 && (mode == BImode || STORE_FLAG_VALUE == -1)
873 && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
874 return simplify_gen_relational (reversed, mode, VOIDmode,
875 XEXP (op, 0), XEXP (op, 1));
877 /* (not (plus X -1)) can become (neg X). */
878 if (GET_CODE (op) == PLUS
879 && XEXP (op, 1) == constm1_rtx)
880 return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
882 /* Similarly, (not (neg X)) is (plus X -1). */
883 if (GET_CODE (op) == NEG)
884 return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
885 CONSTM1_RTX (mode));
887 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
888 if (GET_CODE (op) == XOR
889 && CONST_INT_P (XEXP (op, 1))
890 && (temp = simplify_unary_operation (NOT, mode,
891 XEXP (op, 1), mode)) != 0)
892 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
894 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
895 if (GET_CODE (op) == PLUS
896 && CONST_INT_P (XEXP (op, 1))
897 && mode_signbit_p (mode, XEXP (op, 1))
898 && (temp = simplify_unary_operation (NOT, mode,
899 XEXP (op, 1), mode)) != 0)
900 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
903 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
904 operands other than 1, but that is not valid. We could do a
905 similar simplification for (not (lshiftrt C X)) where C is
906 just the sign bit, but this doesn't seem common enough to
907 bother with. */
908 if (GET_CODE (op) == ASHIFT
909 && XEXP (op, 0) == const1_rtx)
911 temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
912 return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
915 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
916 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
917 so we can perform the above simplification. */
918 if (STORE_FLAG_VALUE == -1
919 && GET_CODE (op) == ASHIFTRT
920 && CONST_INT_P (XEXP (op, 1))
921 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
922 return simplify_gen_relational (GE, mode, VOIDmode,
923 XEXP (op, 0), const0_rtx);
926 if (GET_CODE (op) == SUBREG
927 && subreg_lowpart_p (op)
928 && (GET_MODE_SIZE (GET_MODE (op))
929 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
930 && GET_CODE (SUBREG_REG (op)) == ASHIFT
931 && XEXP (SUBREG_REG (op), 0) == const1_rtx)
933 machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
934 rtx x;
936 x = gen_rtx_ROTATE (inner_mode,
937 simplify_gen_unary (NOT, inner_mode, const1_rtx,
938 inner_mode),
939 XEXP (SUBREG_REG (op), 1));
940 temp = rtl_hooks.gen_lowpart_no_emit (mode, x);
941 if (temp)
942 return temp;
945 /* Apply De Morgan's laws to reduce number of patterns for machines
946 with negating logical insns (and-not, nand, etc.). If result has
947 only one NOT, put it first, since that is how the patterns are
948 coded. */
949 if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
951 rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
952 machine_mode op_mode;
954 op_mode = GET_MODE (in1);
955 in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);
957 op_mode = GET_MODE (in2);
958 if (op_mode == VOIDmode)
959 op_mode = mode;
960 in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);
962 if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
963 std::swap (in1, in2);
965 return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
966 mode, in1, in2);
969 /* (not (bswap x)) -> (bswap (not x)). */
970 if (GET_CODE (op) == BSWAP)
972 rtx x = simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
973 return simplify_gen_unary (BSWAP, mode, x, mode);
975 break;
977 case NEG:
978 /* (neg (neg X)) == X. */
979 if (GET_CODE (op) == NEG)
980 return XEXP (op, 0);
982 /* (neg (x ? (neg y) : y)) == !x ? (neg y) : y.
983 If comparison is not reversible use
984 x ? y : (neg y). */
985 if (GET_CODE (op) == IF_THEN_ELSE)
987 rtx cond = XEXP (op, 0);
988 rtx true_rtx = XEXP (op, 1);
989 rtx false_rtx = XEXP (op, 2);
991 if ((GET_CODE (true_rtx) == NEG
992 && rtx_equal_p (XEXP (true_rtx, 0), false_rtx))
993 || (GET_CODE (false_rtx) == NEG
994 && rtx_equal_p (XEXP (false_rtx, 0), true_rtx)))
996 if (reversed_comparison_code (cond, NULL_RTX) != UNKNOWN)
997 temp = reversed_comparison (cond, mode);
998 else
1000 temp = cond;
1001 std::swap (true_rtx, false_rtx);
1003 return simplify_gen_ternary (IF_THEN_ELSE, mode,
1004 mode, temp, true_rtx, false_rtx);
1008 /* (neg (plus X 1)) can become (not X). */
1009 if (GET_CODE (op) == PLUS
1010 && XEXP (op, 1) == const1_rtx)
1011 return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
1013 /* Similarly, (neg (not X)) is (plus X 1). */
1014 if (GET_CODE (op) == NOT)
1015 return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
1016 CONST1_RTX (mode));
1018 /* (neg (minus X Y)) can become (minus Y X). This transformation
1019 isn't safe for modes with signed zeros, since if X and Y are
1020 both +0, (minus Y X) is the same as (minus X Y). If the
1021 rounding mode is towards +infinity (or -infinity) then the two
1022 expressions will be rounded differently. */
1023 if (GET_CODE (op) == MINUS
1024 && !HONOR_SIGNED_ZEROS (mode)
1025 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1026 return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
1028 if (GET_CODE (op) == PLUS
1029 && !HONOR_SIGNED_ZEROS (mode)
1030 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1032 /* (neg (plus A C)) is simplified to (minus -C A). */
1033 if (CONST_SCALAR_INT_P (XEXP (op, 1))
1034 || CONST_DOUBLE_AS_FLOAT_P (XEXP (op, 1)))
1036 temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
1037 if (temp)
1038 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
1041 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
1042 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
1043 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
1046 /* (neg (mult A B)) becomes (mult A (neg B)).
1047 This works even for floating-point values. */
1048 if (GET_CODE (op) == MULT
1049 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1051 temp = simplify_gen_unary (NEG, mode, XEXP (op, 1), mode);
1052 return simplify_gen_binary (MULT, mode, XEXP (op, 0), temp);
1055 /* NEG commutes with ASHIFT since it is multiplication. Only do
1056 this if we can then eliminate the NEG (e.g., if the operand
1057 is a constant). */
1058 if (GET_CODE (op) == ASHIFT)
1060 temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
1061 if (temp)
1062 return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
1065 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
1066 C is equal to the width of MODE minus 1. */
1067 if (GET_CODE (op) == ASHIFTRT
1068 && CONST_INT_P (XEXP (op, 1))
1069 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
1070 return simplify_gen_binary (LSHIFTRT, mode,
1071 XEXP (op, 0), XEXP (op, 1));
1073 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
1074 C is equal to the width of MODE minus 1. */
1075 if (GET_CODE (op) == LSHIFTRT
1076 && CONST_INT_P (XEXP (op, 1))
1077 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
1078 return simplify_gen_binary (ASHIFTRT, mode,
1079 XEXP (op, 0), XEXP (op, 1));
1081 /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
1082 if (GET_CODE (op) == XOR
1083 && XEXP (op, 1) == const1_rtx
1084 && nonzero_bits (XEXP (op, 0), mode) == 1)
1085 return plus_constant (mode, XEXP (op, 0), -1);
1087 /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1. */
1088 /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1. */
1089 if (GET_CODE (op) == LT
1090 && XEXP (op, 1) == const0_rtx
1091 && SCALAR_INT_MODE_P (GET_MODE (XEXP (op, 0))))
1093 machine_mode inner = GET_MODE (XEXP (op, 0));
1094 int isize = GET_MODE_PRECISION (inner);
1095 if (STORE_FLAG_VALUE == 1)
1097 temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
1098 GEN_INT (isize - 1));
1099 if (mode == inner)
1100 return temp;
1101 if (GET_MODE_PRECISION (mode) > isize)
1102 return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
1103 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
1105 else if (STORE_FLAG_VALUE == -1)
1107 temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
1108 GEN_INT (isize - 1));
1109 if (mode == inner)
1110 return temp;
1111 if (GET_MODE_PRECISION (mode) > isize)
1112 return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
1113 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
1116 break;
1118 case TRUNCATE:
1119 /* Don't optimize (lshiftrt (mult ...)) as it would interfere
1120 with the umulXi3_highpart patterns. */
1121 if (GET_CODE (op) == LSHIFTRT
1122 && GET_CODE (XEXP (op, 0)) == MULT)
1123 break;
1125 if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
1127 if (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op)))
1129 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1130 if (temp)
1131 return temp;
1133 /* We can't handle truncation to a partial integer mode here
1134 because we don't know the real bitsize of the partial
1135 integer mode. */
1136 break;
1139 if (GET_MODE (op) != VOIDmode)
1141 temp = simplify_truncation (mode, op, GET_MODE (op));
1142 if (temp)
1143 return temp;
1146 /* If we know that the value is already truncated, we can
1147 replace the TRUNCATE with a SUBREG. */
1148 if (GET_MODE_NUNITS (mode) == 1
1149 && (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op))
1150 || truncated_to_mode (mode, op)))
1152 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1153 if (temp)
1154 return temp;
1157 /* A truncate of a comparison can be replaced with a subreg if
1158 STORE_FLAG_VALUE permits. This is like the previous test,
1159 but it works even if the comparison is done in a mode larger
1160 than HOST_BITS_PER_WIDE_INT. */
1161 if (HWI_COMPUTABLE_MODE_P (mode)
1162 && COMPARISON_P (op)
1163 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
1165 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1166 if (temp)
1167 return temp;
1170 /* A truncate of a memory is just loading the low part of the memory
1171 if we are not changing the meaning of the address. */
1172 if (GET_CODE (op) == MEM
1173 && !VECTOR_MODE_P (mode)
1174 && !MEM_VOLATILE_P (op)
1175 && !mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op)))
1177 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1178 if (temp)
1179 return temp;
1182 break;
1184 case FLOAT_TRUNCATE:
1185 if (DECIMAL_FLOAT_MODE_P (mode))
1186 break;
1188 /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */
1189 if (GET_CODE (op) == FLOAT_EXTEND
1190 && GET_MODE (XEXP (op, 0)) == mode)
1191 return XEXP (op, 0);
1193 /* (float_truncate:SF (float_truncate:DF foo:XF))
1194 = (float_truncate:SF foo:XF).
1195 This may eliminate an intermediate rounding step and so change the result; hence it is only done under -funsafe-math-optimizations.
1197 (float_truncate:SF (float_extend:XF foo:DF))
1198 = (float_truncate:SF foo:DF).
1200 (float_truncate:DF (float_extend:XF foo:SF))
1201 = (float_extend:DF foo:SF). */
1202 if ((GET_CODE (op) == FLOAT_TRUNCATE
1203 && flag_unsafe_math_optimizations)
1204 || GET_CODE (op) == FLOAT_EXTEND)
1205 return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op,
1206 0)))
1207 > GET_MODE_SIZE (mode)
1208 ? FLOAT_TRUNCATE : FLOAT_EXTEND,
1209 mode,
1210 XEXP (op, 0), mode);
1212 /* (float_truncate (float x)) is (float x) */
1213 if ((GET_CODE (op) == FLOAT || GET_CODE (op) == UNSIGNED_FLOAT)
1214 && (flag_unsafe_math_optimizations
1215 || exact_int_to_float_conversion_p (op)))
1216 return simplify_gen_unary (GET_CODE (op), mode,
1217 XEXP (op, 0),
1218 GET_MODE (XEXP (op, 0)));
1220 /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
1221 (OP:SF foo:SF) if OP is NEG or ABS. */
1222 if ((GET_CODE (op) == ABS
1223 || GET_CODE (op) == NEG)
1224 && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
1225 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
1226 return simplify_gen_unary (GET_CODE (op), mode,
1227 XEXP (XEXP (op, 0), 0), mode);
1229 /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
1230 is (float_truncate:SF x). */
1231 if (GET_CODE (op) == SUBREG
1232 && subreg_lowpart_p (op)
1233 && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
1234 return SUBREG_REG (op);
1235 break;
1237 case FLOAT_EXTEND:
1238 if (DECIMAL_FLOAT_MODE_P (mode))
1239 break;
1241 /* (float_extend (float_extend x)) is (float_extend x)
1243 (float_extend (float x)) is (float x) assuming that double
1244 rounding can't happen. */
1246 if (GET_CODE (op) == FLOAT_EXTEND
1247 || ((GET_CODE (op) == FLOAT || GET_CODE (op) == UNSIGNED_FLOAT)
1248 && exact_int_to_float_conversion_p (op)))
1249 return simplify_gen_unary (GET_CODE (op), mode,
1250 XEXP (op, 0),
1251 GET_MODE (XEXP (op, 0)));
1253 break;
1255 case ABS:
1256 /* (abs (neg <foo>)) -> (abs <foo>) */
1257 if (GET_CODE (op) == NEG)
1258 return simplify_gen_unary (ABS, mode, XEXP (op, 0),
1259 GET_MODE (XEXP (op, 0)));
1261 /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
1262 do nothing. */
1263 if (GET_MODE (op) == VOIDmode)
1264 break;
1266 /* If operand is something known to be positive, ignore the ABS. */
1267 if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
1268 || val_signbit_known_clear_p (GET_MODE (op),
1269 nonzero_bits (op, GET_MODE (op))))
1270 return op;
1272 /* If operand is known to be only -1 or 0, convert ABS to NEG. */
1273 if (num_sign_bit_copies (op, mode) == GET_MODE_PRECISION (mode))
1274 return gen_rtx_NEG (mode, op);
1276 break;
1278 case FFS:
1279 /* (ffs (*_extend <X>)) = (ffs <X>) */
1280 if (GET_CODE (op) == SIGN_EXTEND
1281 || GET_CODE (op) == ZERO_EXTEND)
1282 return simplify_gen_unary (FFS, mode, XEXP (op, 0),
1283 GET_MODE (XEXP (op, 0)));
1284 break;
1286 case POPCOUNT:
1287 switch (GET_CODE (op))
1289 case BSWAP:
1290 case ZERO_EXTEND:
1291 /* (popcount (zero_extend <X>)) = (popcount <X>) */
1292 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1293 GET_MODE (XEXP (op, 0)));
1295 case ROTATE:
1296 case ROTATERT:
1297 /* Rotations don't affect popcount. */
1298 if (!side_effects_p (XEXP (op, 1)))
1299 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1300 GET_MODE (XEXP (op, 0)));
1301 break;
1303 default:
1304 break;
1306 break;
1308 case PARITY:
1309 switch (GET_CODE (op))
1311 case NOT:
1312 case BSWAP:
1313 case ZERO_EXTEND:
1314 case SIGN_EXTEND:
1315 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1316 GET_MODE (XEXP (op, 0)));
1318 case ROTATE:
1319 case ROTATERT:
1320 /* Rotations don't affect parity. */
1321 if (!side_effects_p (XEXP (op, 1)))
1322 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1323 GET_MODE (XEXP (op, 0)));
1324 break;
1326 default:
1327 break;
1329 break;
1331 case BSWAP:
1332 /* (bswap (bswap x)) -> x. */
1333 if (GET_CODE (op) == BSWAP)
1334 return XEXP (op, 0);
1335 break;
1337 case FLOAT:
1338 /* (float (sign_extend <X>)) = (float <X>). */
1339 if (GET_CODE (op) == SIGN_EXTEND)
1340 return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
1341 GET_MODE (XEXP (op, 0)));
1342 break;
1344 case SIGN_EXTEND:
1345 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
1346 becomes just the MINUS if its mode is MODE. This allows
1347 folding switch statements on machines using casesi (such as
1348 the VAX). */
1349 if (GET_CODE (op) == TRUNCATE
1350 && GET_MODE (XEXP (op, 0)) == mode
1351 && GET_CODE (XEXP (op, 0)) == MINUS
1352 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
1353 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
1354 return XEXP (op, 0);
1356 /* Extending a widening multiplication should be canonicalized to
1357 a wider widening multiplication. */
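/* E.g. (sign_extend:DI (mult:SI (sign_extend:SI (reg:HI 1))
   (sign_extend:SI (reg:HI 2)))) becomes
   (mult:DI (sign_extend:DI (reg:HI 1)) (sign_extend:DI (reg:HI 2))),
   since a 16-bit by 16-bit product cannot overflow SImode.  */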
1358 if (GET_CODE (op) == MULT)
1360 rtx lhs = XEXP (op, 0);
1361 rtx rhs = XEXP (op, 1);
1362 enum rtx_code lcode = GET_CODE (lhs);
1363 enum rtx_code rcode = GET_CODE (rhs);
1365 /* Widening multiplies usually extend both operands, but sometimes
1366 they use a shift to extract a portion of a register. */
1367 if ((lcode == SIGN_EXTEND
1368 || (lcode == ASHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1369 && (rcode == SIGN_EXTEND
1370 || (rcode == ASHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1372 machine_mode lmode = GET_MODE (lhs);
1373 machine_mode rmode = GET_MODE (rhs);
1374 int bits;
1376 if (lcode == ASHIFTRT)
1377 /* Number of bits not shifted off the end. */
1378 bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
1379 else /* lcode == SIGN_EXTEND */
1380 /* Size of inner mode. */
1381 bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
1383 if (rcode == ASHIFTRT)
1384 bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
1385 else /* rcode == SIGN_EXTEND */
1386 bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
1388 /* We can only widen multiplies if the result is mathematically
1389 equivalent, i.e. if overflow was impossible. */
1390 if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
1391 return simplify_gen_binary
1392 (MULT, mode,
1393 simplify_gen_unary (SIGN_EXTEND, mode, lhs, lmode),
1394 simplify_gen_unary (SIGN_EXTEND, mode, rhs, rmode));
1398 /* Check for a sign extension of a subreg of a promoted
1399 variable, where the promotion is sign-extended, and the
1400 target mode is the same as the variable's promotion. */
1401 if (GET_CODE (op) == SUBREG
1402 && SUBREG_PROMOTED_VAR_P (op)
1403 && SUBREG_PROMOTED_SIGNED_P (op)
1404 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
1406 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1407 if (temp)
1408 return temp;
1411 /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
1412 (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1413 if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
1415 gcc_assert (GET_MODE_PRECISION (mode)
1416 > GET_MODE_PRECISION (GET_MODE (op)));
1417 return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
1418 GET_MODE (XEXP (op, 0)));
1421 /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
1422 is (sign_extend:M (subreg:O <X>)) if there is mode with
1423 GET_MODE_BITSIZE (N) - I bits.
1424 (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1425 is similarly (zero_extend:M (subreg:O <X>)). */
1426 if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT)
1427 && GET_CODE (XEXP (op, 0)) == ASHIFT
1428 && CONST_INT_P (XEXP (op, 1))
1429 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1430 && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
1432 machine_mode tmode
1433 = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
1434 - INTVAL (XEXP (op, 1)), MODE_INT, 1);
1435 gcc_assert (GET_MODE_BITSIZE (mode)
1436 > GET_MODE_BITSIZE (GET_MODE (op)));
1437 if (tmode != BLKmode)
1439 rtx inner =
1440 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1441 if (inner)
1442 return simplify_gen_unary (GET_CODE (op) == ASHIFTRT
1443 ? SIGN_EXTEND : ZERO_EXTEND,
1444 mode, inner, tmode);
1448 #if defined(POINTERS_EXTEND_UNSIGNED)
1449 /* As we do not know which address space the pointer is referring to,
1450 we can do this only if the target does not support different pointer
1451 or address modes depending on the address space. */
1452 if (target_default_pointer_address_modes_p ()
1453 && ! POINTERS_EXTEND_UNSIGNED
1454 && mode == Pmode && GET_MODE (op) == ptr_mode
1455 && (CONSTANT_P (op)
1456 || (GET_CODE (op) == SUBREG
1457 && REG_P (SUBREG_REG (op))
1458 && REG_POINTER (SUBREG_REG (op))
1459 && GET_MODE (SUBREG_REG (op)) == Pmode))
1460 && !targetm.have_ptr_extend ())
1461 return convert_memory_address (Pmode, op);
1462 #endif
1463 break;
1465 case ZERO_EXTEND:
1466 /* Check for a zero extension of a subreg of a promoted
1467 variable, where the promotion is zero-extended, and the
1468 target mode is the same as the variable's promotion. */
1469 if (GET_CODE (op) == SUBREG
1470 && SUBREG_PROMOTED_VAR_P (op)
1471 && SUBREG_PROMOTED_UNSIGNED_P (op)
1472 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
1474 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1475 if (temp)
1476 return temp;
1479 /* Extending a widening multiplication should be canonicalized to
1480 a wider widening multiplication. */
1481 if (GET_CODE (op) == MULT)
1483 rtx lhs = XEXP (op, 0);
1484 rtx rhs = XEXP (op, 1);
1485 enum rtx_code lcode = GET_CODE (lhs);
1486 enum rtx_code rcode = GET_CODE (rhs);
1488 /* Widening multiplies usually extend both operands, but sometimes
1489 they use a shift to extract a portion of a register. */
1490 if ((lcode == ZERO_EXTEND
1491 || (lcode == LSHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1492 && (rcode == ZERO_EXTEND
1493 || (rcode == LSHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1495 machine_mode lmode = GET_MODE (lhs);
1496 machine_mode rmode = GET_MODE (rhs);
1497 int bits;
1499 if (lcode == LSHIFTRT)
1500 /* Number of bits not shifted off the end. */
1501 bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
1502 else /* lcode == ZERO_EXTEND */
1503 /* Size of inner mode. */
1504 bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
1506 if (rcode == LSHIFTRT)
1507 bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
1508 else /* rcode == ZERO_EXTEND */
1509 bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
1511 /* We can only widen multiplies if the result is mathematically
1512 equivalent, i.e. if overflow was impossible. */
1513 if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
1514 return simplify_gen_binary
1515 (MULT, mode,
1516 simplify_gen_unary (ZERO_EXTEND, mode, lhs, lmode),
1517 simplify_gen_unary (ZERO_EXTEND, mode, rhs, rmode));
1521 /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1522 if (GET_CODE (op) == ZERO_EXTEND)
1523 return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0),
1524 GET_MODE (XEXP (op, 0)));
1526 /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1527 is (zero_extend:M (subreg:O <X>)) if there is mode with
1528 GET_MODE_PRECISION (N) - I bits. */
1529 if (GET_CODE (op) == LSHIFTRT
1530 && GET_CODE (XEXP (op, 0)) == ASHIFT
1531 && CONST_INT_P (XEXP (op, 1))
1532 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1533 && GET_MODE_PRECISION (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
1535 machine_mode tmode
1536 = mode_for_size (GET_MODE_PRECISION (GET_MODE (op))
1537 - INTVAL (XEXP (op, 1)), MODE_INT, 1);
1538 if (tmode != BLKmode)
1540 rtx inner =
1541 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1542 if (inner)
1543 return simplify_gen_unary (ZERO_EXTEND, mode, inner, tmode);
1547 /* (zero_extend:M (subreg:N <X:O>)) is <X:O> (for M == O) or
1548 (zero_extend:M <X:O>), if X doesn't have any non-zero bits outside
1549 of mode N. E.g.
1550 (zero_extend:SI (subreg:QI (and:SI (reg:SI) (const_int 63)) 0)) is
1551 (and:SI (reg:SI) (const_int 63)). */
1552 if (GET_CODE (op) == SUBREG
1553 && GET_MODE_PRECISION (GET_MODE (op))
1554 < GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
1555 && GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
1556 <= HOST_BITS_PER_WIDE_INT
1557 && GET_MODE_PRECISION (mode)
1558 >= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
1559 && subreg_lowpart_p (op)
1560 && (nonzero_bits (SUBREG_REG (op), GET_MODE (SUBREG_REG (op)))
1561 & ~GET_MODE_MASK (GET_MODE (op))) == 0)
1563 if (GET_MODE_PRECISION (mode)
1564 == GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op))))
1565 return SUBREG_REG (op);
1566 return simplify_gen_unary (ZERO_EXTEND, mode, SUBREG_REG (op),
1567 GET_MODE (SUBREG_REG (op)));
1570 #if defined(POINTERS_EXTEND_UNSIGNED)
1571 /* As we do not know which address space the pointer is referring to,
1572 we can do this only if the target does not support different pointer
1573 or address modes depending on the address space. */
1574 if (target_default_pointer_address_modes_p ()
1575 && POINTERS_EXTEND_UNSIGNED > 0
1576 && mode == Pmode && GET_MODE (op) == ptr_mode
1577 && (CONSTANT_P (op)
1578 || (GET_CODE (op) == SUBREG
1579 && REG_P (SUBREG_REG (op))
1580 && REG_POINTER (SUBREG_REG (op))
1581 && GET_MODE (SUBREG_REG (op)) == Pmode))
1582 && !targetm.have_ptr_extend ())
1583 return convert_memory_address (Pmode, op);
1584 #endif
1585 break;
1587 default:
1588 break;
1591 return 0;
1594 /* Try to compute the value of a unary operation CODE whose output mode is to
1595 be MODE with input operand OP whose mode was originally OP_MODE.
1596 Return zero if the value cannot be computed. */
1598 simplify_const_unary_operation (enum rtx_code code, machine_mode mode,
1599 rtx op, machine_mode op_mode)
1601 unsigned int width = GET_MODE_PRECISION (mode);
1603 if (code == VEC_DUPLICATE)
1605 gcc_assert (VECTOR_MODE_P (mode));
1606 if (GET_MODE (op) != VOIDmode)
1608 if (!VECTOR_MODE_P (GET_MODE (op)))
1609 gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
1610 else
1611 gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
1612 (GET_MODE (op)));
1614 if (CONST_SCALAR_INT_P (op) || CONST_DOUBLE_AS_FLOAT_P (op)
1615 || GET_CODE (op) == CONST_VECTOR)
1617 int elt_size = GET_MODE_UNIT_SIZE (mode);
1618 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1619 rtvec v = rtvec_alloc (n_elts);
1620 unsigned int i;
1622 if (GET_CODE (op) != CONST_VECTOR)
1623 for (i = 0; i < n_elts; i++)
1624 RTVEC_ELT (v, i) = op;
1625 else
1627 machine_mode inmode = GET_MODE (op);
1628 int in_elt_size = GET_MODE_UNIT_SIZE (inmode);
1629 unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
1631 gcc_assert (in_n_elts < n_elts);
1632 gcc_assert ((n_elts % in_n_elts) == 0);
1633 for (i = 0; i < n_elts; i++)
1634 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
1636 return gen_rtx_CONST_VECTOR (mode, v);
1640 if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
1642 int elt_size = GET_MODE_UNIT_SIZE (mode);
1643 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1644 machine_mode opmode = GET_MODE (op);
1645 int op_elt_size = GET_MODE_UNIT_SIZE (opmode);
1646 unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
1647 rtvec v = rtvec_alloc (n_elts);
1648 unsigned int i;
1650 gcc_assert (op_n_elts == n_elts);
1651 for (i = 0; i < n_elts; i++)
1653 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
1654 CONST_VECTOR_ELT (op, i),
1655 GET_MODE_INNER (opmode));
1656 if (!x)
1657 return 0;
1658 RTVEC_ELT (v, i) = x;
1660 return gen_rtx_CONST_VECTOR (mode, v);
1663 /* The order of these tests is critical so that, for example, we don't
1664 check the wrong mode (input vs. output) for a conversion operation,
1665 such as FIX. At some point, this should be simplified. */
1667 if (code == FLOAT && CONST_SCALAR_INT_P (op))
1669 REAL_VALUE_TYPE d;
1671 if (op_mode == VOIDmode)
1673 /* CONST_INTs have VOIDmode as their mode. We assume that all
1674 the bits of the constant are significant, though this is
1675 a dangerous assumption: CONST_INTs are often
1676 created and used with garbage in the bits outside the
1677 precision of their implied mode. */
1678 op_mode = MAX_MODE_INT;
1681 real_from_integer (&d, mode, std::make_pair (op, op_mode), SIGNED);
1682 d = real_value_truncate (mode, d);
1683 return const_double_from_real_value (d, mode);
1685 else if (code == UNSIGNED_FLOAT && CONST_SCALAR_INT_P (op))
1687 REAL_VALUE_TYPE d;
1689 if (op_mode == VOIDmode)
1691 /* CONST_INTs have VOIDmode as their mode. We assume that all
1692 the bits of the constant are significant, though this is
1693 a dangerous assumption: CONST_INTs are often
1694 created and used with garbage in the bits outside the
1695 precision of their implied mode. */
1696 op_mode = MAX_MODE_INT;
1699 real_from_integer (&d, mode, std::make_pair (op, op_mode), UNSIGNED);
1700 d = real_value_truncate (mode, d);
1701 return const_double_from_real_value (d, mode);
1704 if (CONST_SCALAR_INT_P (op) && width > 0)
1706 wide_int result;
1707 machine_mode imode = op_mode == VOIDmode ? mode : op_mode;
1708 rtx_mode_t op0 = std::make_pair (op, imode);
1709 int int_value;
1711 #if TARGET_SUPPORTS_WIDE_INT == 0
1712 /* This assert keeps the simplification from producing a result
1713 that cannot be represented in a CONST_DOUBLE, but a lot of
1714 upstream callers expect that this function never fails to
1715 simplify something, so if this check were folded into the test
1716 above the code would just die later anyway. If this assert
1717 fires, you need to make the port support wide ints. */
1718 gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT);
1719 #endif
1721 switch (code)
1723 case NOT:
1724 result = wi::bit_not (op0);
1725 break;
1727 case NEG:
1728 result = wi::neg (op0);
1729 break;
1731 case ABS:
1732 result = wi::abs (op0);
1733 break;
1735 case FFS:
1736 result = wi::shwi (wi::ffs (op0), mode);
1737 break;
1739 case CLZ:
1740 if (wi::ne_p (op0, 0))
1741 int_value = wi::clz (op0);
1742 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, int_value))
1743 int_value = GET_MODE_PRECISION (mode);
1744 result = wi::shwi (int_value, mode);
1745 break;
1747 case CLRSB:
1748 result = wi::shwi (wi::clrsb (op0), mode);
1749 break;
1751 case CTZ:
1752 if (wi::ne_p (op0, 0))
1753 int_value = wi::ctz (op0);
1754 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, int_value))
1755 int_value = GET_MODE_PRECISION (mode);
1756 result = wi::shwi (int_value, mode);
1757 break;
1759 case POPCOUNT:
1760 result = wi::shwi (wi::popcount (op0), mode);
1761 break;
1763 case PARITY:
1764 result = wi::shwi (wi::parity (op0), mode);
1765 break;
1767 case BSWAP:
1768 result = wide_int (op0).bswap ();
1769 break;
1771 case TRUNCATE:
1772 case ZERO_EXTEND:
1773 result = wide_int::from (op0, width, UNSIGNED);
1774 break;
1776 case SIGN_EXTEND:
1777 result = wide_int::from (op0, width, SIGNED);
1778 break;
1780 case SQRT:
1781 default:
1782 return 0;
1785 return immed_wide_int_const (result, mode);
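/* E.g. (popcount:SI (const_int 255)) is folded here to (const_int 8),
   and (bswap:SI (const_int 0x12345678)) to (const_int 0x78563412).  */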
1788 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1789 && SCALAR_FLOAT_MODE_P (mode)
1790 && SCALAR_FLOAT_MODE_P (GET_MODE (op)))
1792 REAL_VALUE_TYPE d = *CONST_DOUBLE_REAL_VALUE (op);
1793 switch (code)
1795 case SQRT:
1796 return 0;
1797 case ABS:
1798 d = real_value_abs (&d);
1799 break;
1800 case NEG:
1801 d = real_value_negate (&d);
1802 break;
1803 case FLOAT_TRUNCATE:
1804 d = real_value_truncate (mode, d);
1805 break;
1806 case FLOAT_EXTEND:
1807 /* All this does is change the mode, unless we are changing
1808 the mode class, in which case a real conversion is needed. */
1809 if (GET_MODE_CLASS (mode) != GET_MODE_CLASS (GET_MODE (op)))
1810 real_convert (&d, mode, &d);
1811 break;
1812 case FIX:
1813 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
1814 break;
1815 case NOT:
1817 long tmp[4];
1818 int i;
1820 real_to_target (tmp, &d, GET_MODE (op));
1821 for (i = 0; i < 4; i++)
1822 tmp[i] = ~tmp[i];
1823 real_from_target (&d, tmp, mode);
1824 break;
1826 default:
1827 gcc_unreachable ();
1829 return const_double_from_real_value (d, mode);
1831 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1832 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1833 && GET_MODE_CLASS (mode) == MODE_INT
1834 && width > 0)
1836 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1837 operators are intentionally left unspecified (to ease implementation
1838 by target backends), for consistency, this routine implements the
1839 same semantics for constant folding as used by the middle-end. */
1841 /* This was formerly used only for non-IEEE float.
1842 eggert@twinsun.com says it is safe for IEEE also. */
1843 REAL_VALUE_TYPE t;
1844 const REAL_VALUE_TYPE *x = CONST_DOUBLE_REAL_VALUE (op);
1845 wide_int wmax, wmin;
1846 /* This is part of the ABI of real_to_integer, but we check
1847 things before making this call. */
1848 bool fail;
1850 switch (code)
1852 case FIX:
1853 if (REAL_VALUE_ISNAN (*x))
1854 return const0_rtx;
1856 /* Test against the signed upper bound. */
1857 wmax = wi::max_value (width, SIGNED);
1858 real_from_integer (&t, VOIDmode, wmax, SIGNED);
1859 if (real_less (&t, x))
1860 return immed_wide_int_const (wmax, mode);
1862 /* Test against the signed lower bound. */
1863 wmin = wi::min_value (width, SIGNED);
1864 real_from_integer (&t, VOIDmode, wmin, SIGNED);
1865 if (real_less (x, &t))
1866 return immed_wide_int_const (wmin, mode);
1868 return immed_wide_int_const (real_to_integer (x, &fail, width),
1869 mode);
1871 case UNSIGNED_FIX:
1872 if (REAL_VALUE_ISNAN (*x) || REAL_VALUE_NEGATIVE (*x))
1873 return const0_rtx;
1875 /* Test against the unsigned upper bound. */
1876 wmax = wi::max_value (width, UNSIGNED);
1877 real_from_integer (&t, VOIDmode, wmax, UNSIGNED);
1878 if (real_less (&t, x))
1879 return immed_wide_int_const (wmax, mode);
1881 return immed_wide_int_const (real_to_integer (x, &fail, width),
1882 mode);
1884 default:
1885 gcc_unreachable ();
1889 return NULL_RTX;
1892 /* Subroutine of simplify_binary_operation to simplify a binary operation
1893 CODE that can commute with byte swapping, with result mode MODE and
1894 operating on OP0 and OP1. CODE is currently one of AND, IOR or XOR.
1895 Return zero if no simplification or canonicalization is possible. */
1897 static rtx
1898 simplify_byte_swapping_operation (enum rtx_code code, machine_mode mode,
1899 rtx op0, rtx op1)
1901 rtx tem;
1903 /* (op (bswap x) C1) -> (bswap (op x C2)) with C2 swapped. */
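/* For example, in SImode (and (bswap x) (const_int 0xff)) becomes
(bswap (and x (const_int 0xff000000))). */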
1904 if (GET_CODE (op0) == BSWAP && CONST_SCALAR_INT_P (op1))
1906 tem = simplify_gen_binary (code, mode, XEXP (op0, 0),
1907 simplify_gen_unary (BSWAP, mode, op1, mode));
1908 return simplify_gen_unary (BSWAP, mode, tem, mode);
1911 /* (op (bswap x) (bswap y)) -> (bswap (op x y)). */
1912 if (GET_CODE (op0) == BSWAP && GET_CODE (op1) == BSWAP)
1914 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), XEXP (op1, 0));
1915 return simplify_gen_unary (BSWAP, mode, tem, mode);
1918 return NULL_RTX;
1921 /* Subroutine of simplify_binary_operation to simplify a commutative,
1922 associative binary operation CODE with result mode MODE, operating
1923 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
1924 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
1925 canonicalization is possible. */
1927 static rtx
1928 simplify_associative_operation (enum rtx_code code, machine_mode mode,
1929 rtx op0, rtx op1)
1931 rtx tem;
1933 /* Linearize the operator to the left. */
1934 if (GET_CODE (op1) == code)
1936 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)". */
1937 if (GET_CODE (op0) == code)
1939 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
1940 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
1943 /* "a op (b op c)" becomes "(b op c) op a". */
1944 if (! swap_commutative_operands_p (op1, op0))
1945 return simplify_gen_binary (code, mode, op1, op0);
1947 std::swap (op0, op1);
1950 if (GET_CODE (op0) == code)
1952 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
1953 if (swap_commutative_operands_p (XEXP (op0, 1), op1))
1955 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
1956 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1959 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
1960 tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
1961 if (tem != 0)
1962 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
1964 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
1965 tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
1966 if (tem != 0)
1967 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1970 return 0;
1974 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
1975 and OP1. Return 0 if no simplification is possible.
1977 Don't use this for relational operations such as EQ or LT.
1978 Use simplify_relational_operation instead. */
1979 rtx
1980 simplify_binary_operation (enum rtx_code code, machine_mode mode,
1981 rtx op0, rtx op1)
1983 rtx trueop0, trueop1;
1984 rtx tem;
1986 /* Relational operations don't work here. We must know the mode
1987 of the operands in order to do the comparison correctly.
1988 Assuming a full word can give incorrect results.
1989 Consider comparing 128 with -128 in QImode. */
1990 gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
1991 gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
1993 /* Make sure the constant is second. */
1994 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
1995 && swap_commutative_operands_p (op0, op1))
1996 std::swap (op0, op1);
1998 trueop0 = avoid_constant_pool_reference (op0);
1999 trueop1 = avoid_constant_pool_reference (op1);
2001 tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
2002 if (tem)
2003 return tem;
2004 return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
2007 /* Subroutine of simplify_binary_operation. Simplify a binary operation
2008 CODE with result mode MODE, operating on OP0 and OP1. If OP0 and/or
2009 OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
2010 actual constants. */
2012 static rtx
2013 simplify_binary_operation_1 (enum rtx_code code, machine_mode mode,
2014 rtx op0, rtx op1, rtx trueop0, rtx trueop1)
2016 rtx tem, reversed, opleft, opright;
2017 HOST_WIDE_INT val;
2018 unsigned int width = GET_MODE_PRECISION (mode);
2020 /* Even if we can't compute a constant result,
2021 there are some cases worth simplifying. */
2023 switch (code)
2025 case PLUS:
2026 /* Maybe simplify x + 0 to x. The two expressions are equivalent
2027 when x is NaN, infinite, or finite and nonzero. They aren't
2028 when x is -0 and the rounding mode is not towards -infinity,
2029 since (-0) + 0 is then 0. */
2030 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
2031 return op0;
2033 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
2034 transformations are safe even for IEEE. */
2035 if (GET_CODE (op0) == NEG)
2036 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
2037 else if (GET_CODE (op1) == NEG)
2038 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
2040 /* (~a) + 1 -> -a */
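/* This is the two's complement identity: -a == ~a + 1. */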
2041 if (INTEGRAL_MODE_P (mode)
2042 && GET_CODE (op0) == NOT
2043 && trueop1 == const1_rtx)
2044 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
2046 /* Handle both-operands-constant cases. We can only add
2047 CONST_INTs to constants since the sum of relocatable symbols
2048 can't be handled by most assemblers. Don't add CONST_INT
2049 to CONST_INT since overflow won't be computed properly if wider
2050 than HOST_BITS_PER_WIDE_INT. */
2052 if ((GET_CODE (op0) == CONST
2053 || GET_CODE (op0) == SYMBOL_REF
2054 || GET_CODE (op0) == LABEL_REF)
2055 && CONST_INT_P (op1))
2056 return plus_constant (mode, op0, INTVAL (op1));
2057 else if ((GET_CODE (op1) == CONST
2058 || GET_CODE (op1) == SYMBOL_REF
2059 || GET_CODE (op1) == LABEL_REF)
2060 && CONST_INT_P (op0))
2061 return plus_constant (mode, op1, INTVAL (op0));
2063 /* See if this is something like X * C - X or vice versa or
2064 if the multiplication is written as a shift. If so, we can
2065 distribute and make a new multiply, shift, or maybe just
2066 have X (if C is 2 in the example above). But don't make
2067 something more expensive than we had before. */
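/* For example, (plus (mult X 3) (ashift X 2)) can become (mult X 7)
when that is not more expensive. */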
2069 if (SCALAR_INT_MODE_P (mode))
2071 rtx lhs = op0, rhs = op1;
2073 wide_int coeff0 = wi::one (GET_MODE_PRECISION (mode));
2074 wide_int coeff1 = wi::one (GET_MODE_PRECISION (mode));
2076 if (GET_CODE (lhs) == NEG)
2078 coeff0 = wi::minus_one (GET_MODE_PRECISION (mode));
2079 lhs = XEXP (lhs, 0);
2081 else if (GET_CODE (lhs) == MULT
2082 && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
2084 coeff0 = std::make_pair (XEXP (lhs, 1), mode);
2085 lhs = XEXP (lhs, 0);
2087 else if (GET_CODE (lhs) == ASHIFT
2088 && CONST_INT_P (XEXP (lhs, 1))
2089 && INTVAL (XEXP (lhs, 1)) >= 0
2090 && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (mode))
2092 coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
2093 GET_MODE_PRECISION (mode));
2094 lhs = XEXP (lhs, 0);
2097 if (GET_CODE (rhs) == NEG)
2099 coeff1 = wi::minus_one (GET_MODE_PRECISION (mode));
2100 rhs = XEXP (rhs, 0);
2102 else if (GET_CODE (rhs) == MULT
2103 && CONST_INT_P (XEXP (rhs, 1)))
2105 coeff1 = std::make_pair (XEXP (rhs, 1), mode);
2106 rhs = XEXP (rhs, 0);
2108 else if (GET_CODE (rhs) == ASHIFT
2109 && CONST_INT_P (XEXP (rhs, 1))
2110 && INTVAL (XEXP (rhs, 1)) >= 0
2111 && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (mode))
2113 coeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
2114 GET_MODE_PRECISION (mode));
2115 rhs = XEXP (rhs, 0);
2118 if (rtx_equal_p (lhs, rhs))
2120 rtx orig = gen_rtx_PLUS (mode, op0, op1);
2121 rtx coeff;
2122 bool speed = optimize_function_for_speed_p (cfun);
2124 coeff = immed_wide_int_const (coeff0 + coeff1, mode);
2126 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2127 return (set_src_cost (tem, mode, speed)
2128 <= set_src_cost (orig, mode, speed) ? tem : 0);
2132 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
2133 if (CONST_SCALAR_INT_P (op1)
2134 && GET_CODE (op0) == XOR
2135 && CONST_SCALAR_INT_P (XEXP (op0, 1))
2136 && mode_signbit_p (mode, op1))
2137 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2138 simplify_gen_binary (XOR, mode, op1,
2139 XEXP (op0, 1)));
2141 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
2142 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2143 && GET_CODE (op0) == MULT
2144 && GET_CODE (XEXP (op0, 0)) == NEG)
2146 rtx in1, in2;
2148 in1 = XEXP (XEXP (op0, 0), 0);
2149 in2 = XEXP (op0, 1);
2150 return simplify_gen_binary (MINUS, mode, op1,
2151 simplify_gen_binary (MULT, mode,
2152 in1, in2));
2155 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
2156 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
2157 is 1. */
2158 if (COMPARISON_P (op0)
2159 && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
2160 || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
2161 && (reversed = reversed_comparison (op0, mode)))
2162 return
2163 simplify_gen_unary (NEG, mode, reversed, mode);
2165 /* If one of the operands is a PLUS or a MINUS, see if we can
2166 simplify this by the associative law.
2167 Don't use the associative law for floating point.
2168 The inaccuracy makes it nonassociative,
2169 and subtle programs can break if operations are associated. */
2171 if (INTEGRAL_MODE_P (mode)
2172 && (plus_minus_operand_p (op0)
2173 || plus_minus_operand_p (op1))
2174 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2175 return tem;
2177 /* Reassociate floating point addition only when the user
2178 specifies associative math operations. */
2179 if (FLOAT_MODE_P (mode)
2180 && flag_associative_math)
2182 tem = simplify_associative_operation (code, mode, op0, op1);
2183 if (tem)
2184 return tem;
2186 break;
2188 case COMPARE:
2189 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
2190 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
2191 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
2192 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
2194 rtx xop00 = XEXP (op0, 0);
2195 rtx xop10 = XEXP (op1, 0);
2197 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
2198 return xop00;
2200 if (REG_P (xop00) && REG_P (xop10)
2201 && GET_MODE (xop00) == GET_MODE (xop10)
2202 && REGNO (xop00) == REGNO (xop10)
2203 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
2204 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
2205 return xop00;
2207 break;
2209 case MINUS:
2210 /* We can't assume x-x is 0 even with non-IEEE floating point,
2211 but since it is zero except in very strange circumstances, we
2212 will treat it as zero with -ffinite-math-only. */
2213 if (rtx_equal_p (trueop0, trueop1)
2214 && ! side_effects_p (op0)
2215 && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
2216 return CONST0_RTX (mode);
2218 /* Change subtraction from zero into negation. (0 - x) is the
2219 same as -x when x is NaN, infinite, or finite and nonzero.
2220 But if the mode has signed zeros, and does not round towards
2221 -infinity, then 0 - 0 is 0, not -0. */
2222 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
2223 return simplify_gen_unary (NEG, mode, op1, mode);
2225 /* (-1 - a) is ~a. */
2226 if (trueop0 == constm1_rtx)
2227 return simplify_gen_unary (NOT, mode, op1, mode);
2229 /* Subtracting 0 has no effect unless the mode has signed zeros
2230 and supports rounding towards -infinity. In such a case,
2231 0 - 0 is -0. */
2232 if (!(HONOR_SIGNED_ZEROS (mode)
2233 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
2234 && trueop1 == CONST0_RTX (mode))
2235 return op0;
2237 /* See if this is something like X * C - X or vice versa or
2238 if the multiplication is written as a shift. If so, we can
2239 distribute and make a new multiply, shift, or maybe just
2240 have X (if C is 2 in the example above). But don't make
2241 something more expensive than we had before. */
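/* For example, (minus (mult X 3) X) can become (mult X 2)
when that is not more expensive. */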
2243 if (SCALAR_INT_MODE_P (mode))
2245 rtx lhs = op0, rhs = op1;
2247 wide_int coeff0 = wi::one (GET_MODE_PRECISION (mode));
2248 wide_int negcoeff1 = wi::minus_one (GET_MODE_PRECISION (mode));
2250 if (GET_CODE (lhs) == NEG)
2252 coeff0 = wi::minus_one (GET_MODE_PRECISION (mode));
2253 lhs = XEXP (lhs, 0);
2255 else if (GET_CODE (lhs) == MULT
2256 && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
2258 coeff0 = std::make_pair (XEXP (lhs, 1), mode);
2259 lhs = XEXP (lhs, 0);
2261 else if (GET_CODE (lhs) == ASHIFT
2262 && CONST_INT_P (XEXP (lhs, 1))
2263 && INTVAL (XEXP (lhs, 1)) >= 0
2264 && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (mode))
2266 coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
2267 GET_MODE_PRECISION (mode));
2268 lhs = XEXP (lhs, 0);
2271 if (GET_CODE (rhs) == NEG)
2273 negcoeff1 = wi::one (GET_MODE_PRECISION (mode));
2274 rhs = XEXP (rhs, 0);
2276 else if (GET_CODE (rhs) == MULT
2277 && CONST_INT_P (XEXP (rhs, 1)))
2279 negcoeff1 = wi::neg (std::make_pair (XEXP (rhs, 1), mode));
2280 rhs = XEXP (rhs, 0);
2282 else if (GET_CODE (rhs) == ASHIFT
2283 && CONST_INT_P (XEXP (rhs, 1))
2284 && INTVAL (XEXP (rhs, 1)) >= 0
2285 && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (mode))
2287 negcoeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
2288 GET_MODE_PRECISION (mode));
2289 negcoeff1 = -negcoeff1;
2290 rhs = XEXP (rhs, 0);
2293 if (rtx_equal_p (lhs, rhs))
2295 rtx orig = gen_rtx_MINUS (mode, op0, op1);
2296 rtx coeff;
2297 bool speed = optimize_function_for_speed_p (cfun);
2299 coeff = immed_wide_int_const (coeff0 + negcoeff1, mode);
2301 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2302 return (set_src_cost (tem, mode, speed)
2303 <= set_src_cost (orig, mode, speed) ? tem : 0);
2307 /* (a - (-b)) -> (a + b). True even for IEEE. */
2308 if (GET_CODE (op1) == NEG)
2309 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
2311 /* (-x - c) may be simplified as (-c - x). */
2312 if (GET_CODE (op0) == NEG
2313 && (CONST_SCALAR_INT_P (op1) || CONST_DOUBLE_AS_FLOAT_P (op1)))
2315 tem = simplify_unary_operation (NEG, mode, op1, mode);
2316 if (tem)
2317 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
2320 /* Don't let a relocatable value get a negative coeff. */
2321 if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode)
2322 return simplify_gen_binary (PLUS, mode,
2323 op0,
2324 neg_const_int (mode, op1));
2326 /* (x - (x & y)) -> (x & ~y) */
2327 if (INTEGRAL_MODE_P (mode) && GET_CODE (op1) == AND)
2329 if (rtx_equal_p (op0, XEXP (op1, 0)))
2331 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
2332 GET_MODE (XEXP (op1, 1)));
2333 return simplify_gen_binary (AND, mode, op0, tem);
2335 if (rtx_equal_p (op0, XEXP (op1, 1)))
2337 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
2338 GET_MODE (XEXP (op1, 0)));
2339 return simplify_gen_binary (AND, mode, op0, tem);
2343 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
2344 by reversing the comparison code if valid. */
2345 if (STORE_FLAG_VALUE == 1
2346 && trueop0 == const1_rtx
2347 && COMPARISON_P (op1)
2348 && (reversed = reversed_comparison (op1, mode)))
2349 return reversed;
2351 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
2352 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2353 && GET_CODE (op1) == MULT
2354 && GET_CODE (XEXP (op1, 0)) == NEG)
2356 rtx in1, in2;
2358 in1 = XEXP (XEXP (op1, 0), 0);
2359 in2 = XEXP (op1, 1);
2360 return simplify_gen_binary (PLUS, mode,
2361 simplify_gen_binary (MULT, mode,
2362 in1, in2),
2363 op0);
2366 /* Canonicalize (minus (neg A) (mult B C)) to
2367 (minus (mult (neg B) C) A). */
2368 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2369 && GET_CODE (op1) == MULT
2370 && GET_CODE (op0) == NEG)
2372 rtx in1, in2;
2374 in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
2375 in2 = XEXP (op1, 1);
2376 return simplify_gen_binary (MINUS, mode,
2377 simplify_gen_binary (MULT, mode,
2378 in1, in2),
2379 XEXP (op0, 0));
2382 /* If one of the operands is a PLUS or a MINUS, see if we can
2383 simplify this by the associative law. This will, for example,
2384 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
2385 Don't use the associative law for floating point.
2386 The inaccuracy makes it nonassociative,
2387 and subtle programs can break if operations are associated. */
2389 if (INTEGRAL_MODE_P (mode)
2390 && (plus_minus_operand_p (op0)
2391 || plus_minus_operand_p (op1))
2392 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2393 return tem;
2394 break;
2396 case MULT:
2397 if (trueop1 == constm1_rtx)
2398 return simplify_gen_unary (NEG, mode, op0, mode);
2400 if (GET_CODE (op0) == NEG)
2402 rtx temp = simplify_unary_operation (NEG, mode, op1, mode);
2403 /* If op1 is a MULT as well and simplify_unary_operation
2404 just moved the NEG to the second operand, simplify_gen_binary
2405 below could, through simplify_associative_operation, move
2406 the NEG around again and recurse endlessly. */
2407 if (temp
2408 && GET_CODE (op1) == MULT
2409 && GET_CODE (temp) == MULT
2410 && XEXP (op1, 0) == XEXP (temp, 0)
2411 && GET_CODE (XEXP (temp, 1)) == NEG
2412 && XEXP (op1, 1) == XEXP (XEXP (temp, 1), 0))
2413 temp = NULL_RTX;
2414 if (temp)
2415 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), temp);
2417 if (GET_CODE (op1) == NEG)
2419 rtx temp = simplify_unary_operation (NEG, mode, op0, mode);
2420 /* If op0 is a MULT as well and simplify_unary_operation
2421 just moved the NEG to the second operand, simplify_gen_binary
2422 below could, through simplify_associative_operation, move
2423 the NEG around again and recurse endlessly. */
2424 if (temp
2425 && GET_CODE (op0) == MULT
2426 && GET_CODE (temp) == MULT
2427 && XEXP (op0, 0) == XEXP (temp, 0)
2428 && GET_CODE (XEXP (temp, 1)) == NEG
2429 && XEXP (op0, 1) == XEXP (XEXP (temp, 1), 0))
2430 temp = NULL_RTX;
2431 if (temp)
2432 return simplify_gen_binary (MULT, mode, temp, XEXP (op1, 0));
2435 /* Maybe simplify x * 0 to 0. The reduction is not valid if
2436 x is NaN, since x * 0 is then also NaN. Nor is it valid
2437 when the mode has signed zeros, since multiplying a negative
2438 number by 0 will give -0, not 0. */
2439 if (!HONOR_NANS (mode)
2440 && !HONOR_SIGNED_ZEROS (mode)
2441 && trueop1 == CONST0_RTX (mode)
2442 && ! side_effects_p (op0))
2443 return op1;
2445 /* In IEEE floating point, x*1 is not equivalent to x for
2446 signalling NaNs. */
2447 if (!HONOR_SNANS (mode)
2448 && trueop1 == CONST1_RTX (mode))
2449 return op0;
2451 /* Convert multiply by constant power of two into shift. */
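/* For example, (mult X (const_int 8)) becomes (ashift X (const_int 3)). */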
2452 if (CONST_SCALAR_INT_P (trueop1))
2454 val = wi::exact_log2 (std::make_pair (trueop1, mode));
2455 if (val >= 0)
2456 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
2459 /* x*2 is x+x and x*(-1) is -x */
2460 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
2461 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
2462 && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
2463 && GET_MODE (op0) == mode)
2465 const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);
2467 if (real_equal (d1, &dconst2))
2468 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
2470 if (!HONOR_SNANS (mode)
2471 && real_equal (d1, &dconstm1))
2472 return simplify_gen_unary (NEG, mode, op0, mode);
2475 /* Optimize -x * -x as x * x. */
2476 if (FLOAT_MODE_P (mode)
2477 && GET_CODE (op0) == NEG
2478 && GET_CODE (op1) == NEG
2479 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2480 && !side_effects_p (XEXP (op0, 0)))
2481 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2483 /* Likewise, optimize abs(x) * abs(x) as x * x. */
2484 if (SCALAR_FLOAT_MODE_P (mode)
2485 && GET_CODE (op0) == ABS
2486 && GET_CODE (op1) == ABS
2487 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2488 && !side_effects_p (XEXP (op0, 0)))
2489 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2491 /* Reassociate multiplication, but for floating point MULTs
2492 only when the user specifies unsafe math optimizations. */
2493 if (! FLOAT_MODE_P (mode)
2494 || flag_unsafe_math_optimizations)
2496 tem = simplify_associative_operation (code, mode, op0, op1);
2497 if (tem)
2498 return tem;
2500 break;
2502 case IOR:
2503 if (trueop1 == CONST0_RTX (mode))
2504 return op0;
2505 if (INTEGRAL_MODE_P (mode)
2506 && trueop1 == CONSTM1_RTX (mode)
2507 && !side_effects_p (op0))
2508 return op1;
2509 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2510 return op0;
2511 /* A | (~A) -> -1 */
2512 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2513 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2514 && ! side_effects_p (op0)
2515 && SCALAR_INT_MODE_P (mode))
2516 return constm1_rtx;
2518 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
2519 if (CONST_INT_P (op1)
2520 && HWI_COMPUTABLE_MODE_P (mode)
2521 && (nonzero_bits (op0, mode) & ~UINTVAL (op1)) == 0
2522 && !side_effects_p (op0))
2523 return op1;
2525 /* Canonicalize (X & C1) | C2. */
2526 if (GET_CODE (op0) == AND
2527 && CONST_INT_P (trueop1)
2528 && CONST_INT_P (XEXP (op0, 1)))
2530 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
2531 HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
2532 HOST_WIDE_INT c2 = INTVAL (trueop1);
2534 /* If (C1&C2) == C1, then (X&C1)|C2 becomes X. */
2535 if ((c1 & c2) == c1
2536 && !side_effects_p (XEXP (op0, 0)))
2537 return trueop1;
2539 /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
2540 if (((c1|c2) & mask) == mask)
2541 return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);
2543 /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2. */
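/* For example, in SImode (ior (and X (const_int 0xff)) (const_int 0x0f))
becomes (ior (and X (const_int 0xf0)) (const_int 0x0f)). */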
2544 if (((c1 & ~c2) & mask) != (c1 & mask))
2546 tem = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2547 gen_int_mode (c1 & ~c2, mode));
2548 return simplify_gen_binary (IOR, mode, tem, op1);
2552 /* Convert (A & B) | A to A. */
2553 if (GET_CODE (op0) == AND
2554 && (rtx_equal_p (XEXP (op0, 0), op1)
2555 || rtx_equal_p (XEXP (op0, 1), op1))
2556 && ! side_effects_p (XEXP (op0, 0))
2557 && ! side_effects_p (XEXP (op0, 1)))
2558 return op1;
2560 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2561 mode size to (rotate A CX). */
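/* For example, in SImode (ior (ashift A (const_int 24))
(lshiftrt A (const_int 8))) becomes (rotate A (const_int 24)). */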
2563 if (GET_CODE (op1) == ASHIFT
2564 || GET_CODE (op1) == SUBREG)
2566 opleft = op1;
2567 opright = op0;
2569 else
2571 opright = op1;
2572 opleft = op0;
2575 if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
2576 && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
2577 && CONST_INT_P (XEXP (opleft, 1))
2578 && CONST_INT_P (XEXP (opright, 1))
2579 && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
2580 == GET_MODE_PRECISION (mode)))
2581 return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
2583 /* Same, but for ashift that has been "simplified" to a wider mode
2584 by simplify_shift_const. */
2586 if (GET_CODE (opleft) == SUBREG
2587 && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
2588 && GET_CODE (opright) == LSHIFTRT
2589 && GET_CODE (XEXP (opright, 0)) == SUBREG
2590 && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
2591 && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
2592 && (GET_MODE_SIZE (GET_MODE (opleft))
2593 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
2594 && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
2595 SUBREG_REG (XEXP (opright, 0)))
2596 && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
2597 && CONST_INT_P (XEXP (opright, 1))
2598 && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
2599 == GET_MODE_PRECISION (mode)))
2600 return gen_rtx_ROTATE (mode, XEXP (opright, 0),
2601 XEXP (SUBREG_REG (opleft), 1));
2603 /* If we have (ior (and X C1) C2), simplify this by making
2604 C1 as small as possible if C1 actually changes. */
2605 if (CONST_INT_P (op1)
2606 && (HWI_COMPUTABLE_MODE_P (mode)
2607 || INTVAL (op1) > 0)
2608 && GET_CODE (op0) == AND
2609 && CONST_INT_P (XEXP (op0, 1))
2610 && CONST_INT_P (op1)
2611 && (UINTVAL (XEXP (op0, 1)) & UINTVAL (op1)) != 0)
2613 rtx tmp = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2614 gen_int_mode (UINTVAL (XEXP (op0, 1))
2615 & ~UINTVAL (op1),
2616 mode));
2617 return simplify_gen_binary (IOR, mode, tmp, op1);
2620 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2621 a (sign_extend (plus ...)). Then check if OP1 is a CONST_INT and
2622 the PLUS does not affect any of the bits in OP1: then we can do
2623 the IOR as a PLUS and we can associate. This is valid if OP1
2624 can be safely shifted left C bits. */
2625 if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
2626 && GET_CODE (XEXP (op0, 0)) == PLUS
2627 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
2628 && CONST_INT_P (XEXP (op0, 1))
2629 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
2631 int count = INTVAL (XEXP (op0, 1));
2632 HOST_WIDE_INT mask = INTVAL (trueop1) << count;
2634 if (mask >> count == INTVAL (trueop1)
2635 && trunc_int_for_mode (mask, mode) == mask
2636 && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
2637 return simplify_gen_binary (ASHIFTRT, mode,
2638 plus_constant (mode, XEXP (op0, 0),
2639 mask),
2640 XEXP (op0, 1));
2643 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
2644 if (tem)
2645 return tem;
2647 tem = simplify_associative_operation (code, mode, op0, op1);
2648 if (tem)
2649 return tem;
2650 break;
2652 case XOR:
2653 if (trueop1 == CONST0_RTX (mode))
2654 return op0;
2655 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2656 return simplify_gen_unary (NOT, mode, op0, mode);
2657 if (rtx_equal_p (trueop0, trueop1)
2658 && ! side_effects_p (op0)
2659 && GET_MODE_CLASS (mode) != MODE_CC)
2660 return CONST0_RTX (mode);
2662 /* Canonicalize XOR of the most significant bit to PLUS. */
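/* Adding the sign bit changes only the top bit (any carry out of it is
discarded), so it is equivalent to XOR with the sign bit; PLUS is the
canonical form. */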
2663 if (CONST_SCALAR_INT_P (op1)
2664 && mode_signbit_p (mode, op1))
2665 return simplify_gen_binary (PLUS, mode, op0, op1);
2666 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2667 if (CONST_SCALAR_INT_P (op1)
2668 && GET_CODE (op0) == PLUS
2669 && CONST_SCALAR_INT_P (XEXP (op0, 1))
2670 && mode_signbit_p (mode, XEXP (op0, 1)))
2671 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2672 simplify_gen_binary (XOR, mode, op1,
2673 XEXP (op0, 1)));
2675 /* If we are XORing two things that have no bits in common,
2676 convert them into an IOR. This helps to detect rotation encoded
2677 using those methods and possibly other simplifications. */
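/* For example, (xor (and X (const_int 15)) (and Y (const_int 240)))
has no nonzero bits in common and becomes the corresponding IOR. */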
2679 if (HWI_COMPUTABLE_MODE_P (mode)
2680 && (nonzero_bits (op0, mode)
2681 & nonzero_bits (op1, mode)) == 0)
2682 return (simplify_gen_binary (IOR, mode, op0, op1));
2684 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2685 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2686 (NOT y). */
2688 int num_negated = 0;
2690 if (GET_CODE (op0) == NOT)
2691 num_negated++, op0 = XEXP (op0, 0);
2692 if (GET_CODE (op1) == NOT)
2693 num_negated++, op1 = XEXP (op1, 0);
2695 if (num_negated == 2)
2696 return simplify_gen_binary (XOR, mode, op0, op1);
2697 else if (num_negated == 1)
2698 return simplify_gen_unary (NOT, mode,
2699 simplify_gen_binary (XOR, mode, op0, op1),
2700 mode);
2703 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2704 correspond to a machine insn or result in further simplifications
2705 if B is a constant. */
2707 if (GET_CODE (op0) == AND
2708 && rtx_equal_p (XEXP (op0, 1), op1)
2709 && ! side_effects_p (op1))
2710 return simplify_gen_binary (AND, mode,
2711 simplify_gen_unary (NOT, mode,
2712 XEXP (op0, 0), mode),
2713 op1);
2715 else if (GET_CODE (op0) == AND
2716 && rtx_equal_p (XEXP (op0, 0), op1)
2717 && ! side_effects_p (op1))
2718 return simplify_gen_binary (AND, mode,
2719 simplify_gen_unary (NOT, mode,
2720 XEXP (op0, 1), mode),
2721 op1);
2723 /* Given (xor (ior (xor A B) C) D), where B, C and D are
2724 constants, simplify to (xor (ior A C) (B&~C)^D), canceling
2725 out bits inverted twice and not set by C. Similarly, given
2726 (xor (and (xor A B) C) D), simplify without inverting C in
2727 the xor operand: (xor (and A C) (B&C)^D).
2728 */
2729 else if ((GET_CODE (op0) == IOR || GET_CODE (op0) == AND)
2730 && GET_CODE (XEXP (op0, 0)) == XOR
2731 && CONST_INT_P (op1)
2732 && CONST_INT_P (XEXP (op0, 1))
2733 && CONST_INT_P (XEXP (XEXP (op0, 0), 1)))
2735 enum rtx_code op = GET_CODE (op0);
2736 rtx a = XEXP (XEXP (op0, 0), 0);
2737 rtx b = XEXP (XEXP (op0, 0), 1);
2738 rtx c = XEXP (op0, 1);
2739 rtx d = op1;
2740 HOST_WIDE_INT bval = INTVAL (b);
2741 HOST_WIDE_INT cval = INTVAL (c);
2742 HOST_WIDE_INT dval = INTVAL (d);
2743 HOST_WIDE_INT xcval;
2745 if (op == IOR)
2746 xcval = ~cval;
2747 else
2748 xcval = cval;
2750 return simplify_gen_binary (XOR, mode,
2751 simplify_gen_binary (op, mode, a, c),
2752 gen_int_mode ((bval & xcval) ^ dval,
2753 mode));
2756 /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
2757 we can transform like this:
2758 (A&B)^C == ~(A&B)&C | ~C&(A&B)
2759 == (~A|~B)&C | ~C&(A&B) * DeMorgan's Law
2760 == ~A&C | ~B&C | A&(~C&B) * Distribute and re-order
2761 Attempt a few simplifications when B and C are both constants. */
2762 if (GET_CODE (op0) == AND
2763 && CONST_INT_P (op1)
2764 && CONST_INT_P (XEXP (op0, 1)))
2766 rtx a = XEXP (op0, 0);
2767 rtx b = XEXP (op0, 1);
2768 rtx c = op1;
2769 HOST_WIDE_INT bval = INTVAL (b);
2770 HOST_WIDE_INT cval = INTVAL (c);
2772 /* Instead of computing ~A&C, we compute its negated value,
2773 ~(A|~C). If it yields -1, ~A&C is zero, so we can
2774 optimize for sure. If it does not simplify, we still try
2775 to compute ~A&C below, but since that always allocates
2776 RTL, we don't try that before committing to returning a
2777 simplified expression. */
2778 rtx n_na_c = simplify_binary_operation (IOR, mode, a,
2779 GEN_INT (~cval));
2781 if ((~cval & bval) == 0)
2783 rtx na_c = NULL_RTX;
2784 if (n_na_c)
2785 na_c = simplify_gen_unary (NOT, mode, n_na_c, mode);
2786 else
2788 /* If ~A does not simplify, don't bother: we don't
2789 want to simplify 2 operations into 3, and if na_c
2790 were to simplify with na, n_na_c would have
2791 simplified as well. */
2792 rtx na = simplify_unary_operation (NOT, mode, a, mode);
2793 if (na)
2794 na_c = simplify_gen_binary (AND, mode, na, c);
2797 /* Try to simplify ~A&C | ~B&C. */
2798 if (na_c != NULL_RTX)
2799 return simplify_gen_binary (IOR, mode, na_c,
2800 gen_int_mode (~bval & cval, mode));
2802 else
2804 /* If ~A&C is zero, simplify A&(~C&B) | ~B&C. */
2805 if (n_na_c == CONSTM1_RTX (mode))
2807 rtx a_nc_b = simplify_gen_binary (AND, mode, a,
2808 gen_int_mode (~cval & bval,
2809 mode));
2810 return simplify_gen_binary (IOR, mode, a_nc_b,
2811 gen_int_mode (~bval & cval,
2812 mode));
2817 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2818 comparison if STORE_FLAG_VALUE is 1. */
2819 if (STORE_FLAG_VALUE == 1
2820 && trueop1 == const1_rtx
2821 && COMPARISON_P (op0)
2822 && (reversed = reversed_comparison (op0, mode)))
2823 return reversed;
2825 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2826 is (lt foo (const_int 0)), so we can perform the above
2827 simplification if STORE_FLAG_VALUE is 1. */
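/* For example, in SImode with STORE_FLAG_VALUE == 1,
(xor (lshiftrt X (const_int 31)) (const_int 1)) becomes
(ge X (const_int 0)). */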
2829 if (STORE_FLAG_VALUE == 1
2830 && trueop1 == const1_rtx
2831 && GET_CODE (op0) == LSHIFTRT
2832 && CONST_INT_P (XEXP (op0, 1))
2833 && INTVAL (XEXP (op0, 1)) == GET_MODE_PRECISION (mode) - 1)
2834 return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);
2836 /* (xor (comparison foo bar) (const_int sign-bit))
2837 when STORE_FLAG_VALUE is the sign bit. */
2838 if (val_signbit_p (mode, STORE_FLAG_VALUE)
2839 && trueop1 == const_true_rtx
2840 && COMPARISON_P (op0)
2841 && (reversed = reversed_comparison (op0, mode)))
2842 return reversed;
2844 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
2845 if (tem)
2846 return tem;
2848 tem = simplify_associative_operation (code, mode, op0, op1);
2849 if (tem)
2850 return tem;
2851 break;
2853 case AND:
2854 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2855 return trueop1;
2856 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2857 return op0;
2858 if (HWI_COMPUTABLE_MODE_P (mode))
2860 HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
2861 HOST_WIDE_INT nzop1;
2862 if (CONST_INT_P (trueop1))
2864 HOST_WIDE_INT val1 = INTVAL (trueop1);
2865 /* If we are turning off bits already known off in OP0, we need
2866 not do an AND. */
2867 if ((nzop0 & ~val1) == 0)
2868 return op0;
2870 nzop1 = nonzero_bits (trueop1, mode);
2871 /* If we are clearing all the nonzero bits, the result is zero. */
2872 if ((nzop1 & nzop0) == 0
2873 && !side_effects_p (op0) && !side_effects_p (op1))
2874 return CONST0_RTX (mode);
2876 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
2877 && GET_MODE_CLASS (mode) != MODE_CC)
2878 return op0;
2879 /* A & (~A) -> 0 */
2880 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2881 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2882 && ! side_effects_p (op0)
2883 && GET_MODE_CLASS (mode) != MODE_CC)
2884 return CONST0_RTX (mode);
2886 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
2887 there are no nonzero bits of C outside of X's mode. */
2888 if ((GET_CODE (op0) == SIGN_EXTEND
2889 || GET_CODE (op0) == ZERO_EXTEND)
2890 && CONST_INT_P (trueop1)
2891 && HWI_COMPUTABLE_MODE_P (mode)
2892 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
2893 & UINTVAL (trueop1)) == 0)
2895 machine_mode imode = GET_MODE (XEXP (op0, 0));
2896 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
2897 gen_int_mode (INTVAL (trueop1),
2898 imode));
2899 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
2902 /* Transform (and (truncate X) C) into (truncate (and X C)). This way
2903 we might be able to further simplify the AND with X and potentially
2904 remove the truncation altogether. */
2905 if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
2907 rtx x = XEXP (op0, 0);
2908 machine_mode xmode = GET_MODE (x);
2909 tem = simplify_gen_binary (AND, xmode, x,
2910 gen_int_mode (INTVAL (trueop1), xmode));
2911 return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
2914 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
2915 if (GET_CODE (op0) == IOR
2916 && CONST_INT_P (trueop1)
2917 && CONST_INT_P (XEXP (op0, 1)))
2919 HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
2920 return simplify_gen_binary (IOR, mode,
2921 simplify_gen_binary (AND, mode,
2922 XEXP (op0, 0), op1),
2923 gen_int_mode (tmp, mode));
2926 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
2927 insn (and may simplify more). */
2928 if (GET_CODE (op0) == XOR
2929 && rtx_equal_p (XEXP (op0, 0), op1)
2930 && ! side_effects_p (op1))
2931 return simplify_gen_binary (AND, mode,
2932 simplify_gen_unary (NOT, mode,
2933 XEXP (op0, 1), mode),
2934 op1);
2936 if (GET_CODE (op0) == XOR
2937 && rtx_equal_p (XEXP (op0, 1), op1)
2938 && ! side_effects_p (op1))
2939 return simplify_gen_binary (AND, mode,
2940 simplify_gen_unary (NOT, mode,
2941 XEXP (op0, 0), mode),
2942 op1);
2944 /* Similarly for (~(A ^ B)) & A. */
2945 if (GET_CODE (op0) == NOT
2946 && GET_CODE (XEXP (op0, 0)) == XOR
2947 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
2948 && ! side_effects_p (op1))
2949 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
2951 if (GET_CODE (op0) == NOT
2952 && GET_CODE (XEXP (op0, 0)) == XOR
2953 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
2954 && ! side_effects_p (op1))
2955 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
2957 /* Convert (A | B) & A to A. */
2958 if (GET_CODE (op0) == IOR
2959 && (rtx_equal_p (XEXP (op0, 0), op1)
2960 || rtx_equal_p (XEXP (op0, 1), op1))
2961 && ! side_effects_p (XEXP (op0, 0))
2962 && ! side_effects_p (XEXP (op0, 1)))
2963 return op1;
2965 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
2966 ((A & N) + B) & M -> (A + B) & M
2967 Similarly if (N & M) == 0,
2968 ((A | N) + B) & M -> (A + B) & M
2969 and for - instead of + and/or ^ instead of |.
2970 Also, if (N & M) == 0, then
2971 (A +- N) & M -> A & M. */
2972 if (CONST_INT_P (trueop1)
2973 && HWI_COMPUTABLE_MODE_P (mode)
2974 && ~UINTVAL (trueop1)
2975 && (UINTVAL (trueop1) & (UINTVAL (trueop1) + 1)) == 0
2976 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
2978 rtx pmop[2];
2979 int which;
2981 pmop[0] = XEXP (op0, 0);
2982 pmop[1] = XEXP (op0, 1);
2984 if (CONST_INT_P (pmop[1])
2985 && (UINTVAL (pmop[1]) & UINTVAL (trueop1)) == 0)
2986 return simplify_gen_binary (AND, mode, pmop[0], op1);
2988 for (which = 0; which < 2; which++)
2990 tem = pmop[which];
2991 switch (GET_CODE (tem))
2993 case AND:
2994 if (CONST_INT_P (XEXP (tem, 1))
2995 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1))
2996 == UINTVAL (trueop1))
2997 pmop[which] = XEXP (tem, 0);
2998 break;
2999 case IOR:
3000 case XOR:
3001 if (CONST_INT_P (XEXP (tem, 1))
3002 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1)) == 0)
3003 pmop[which] = XEXP (tem, 0);
3004 break;
3005 default:
3006 break;
3010 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
3012 tem = simplify_gen_binary (GET_CODE (op0), mode,
3013 pmop[0], pmop[1]);
3014 return simplify_gen_binary (code, mode, tem, op1);
3018 /* (and X (ior (not X) Y)) -> (and X Y) */
3019 if (GET_CODE (op1) == IOR
3020 && GET_CODE (XEXP (op1, 0)) == NOT
3021 && rtx_equal_p (op0, XEXP (XEXP (op1, 0), 0)))
3022 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));
3024 /* (and (ior (not X) Y) X) -> (and X Y) */
3025 if (GET_CODE (op0) == IOR
3026 && GET_CODE (XEXP (op0, 0)) == NOT
3027 && rtx_equal_p (op1, XEXP (XEXP (op0, 0), 0)))
3028 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));
3030 /* (and X (ior Y (not X))) -> (and X Y) */
3031 if (GET_CODE (op1) == IOR
3032 && GET_CODE (XEXP (op1, 1)) == NOT
3033 && rtx_equal_p (op0, XEXP (XEXP (op1, 1), 0)))
3034 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 0));
3036 /* (and (ior Y (not X)) X) -> (and X Y) */
3037 if (GET_CODE (op0) == IOR
3038 && GET_CODE (XEXP (op0, 1)) == NOT
3039 && rtx_equal_p (op1, XEXP (XEXP (op0, 1), 0)))
3040 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 0));
3042 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
3043 if (tem)
3044 return tem;
3046 tem = simplify_associative_operation (code, mode, op0, op1);
3047 if (tem)
3048 return tem;
3049 break;
3051 case UDIV:
3052 /* 0/x is 0 (or x&0 if x has side-effects). */
3053 if (trueop0 == CONST0_RTX (mode))
3055 if (side_effects_p (op1))
3056 return simplify_gen_binary (AND, mode, op1, trueop0);
3057 return trueop0;
3059 /* x/1 is x. */
3060 if (trueop1 == CONST1_RTX (mode))
3062 tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3063 if (tem)
3064 return tem;
3066 /* Convert divide by power of two into shift. */
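/* For example, (udiv X (const_int 8)) becomes (lshiftrt X (const_int 3)). */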
3067 if (CONST_INT_P (trueop1)
3068 && (val = exact_log2 (UINTVAL (trueop1))) > 0)
3069 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
3070 break;
3072 case DIV:
3073 /* Handle floating point and integers separately. */
3074 if (SCALAR_FLOAT_MODE_P (mode))
3076 /* Maybe change 0.0 / x to 0.0. This transformation isn't
3077 safe for modes with NaNs, since 0.0 / 0.0 will then be
3078 NaN rather than 0.0. Nor is it safe for modes with signed
3079 zeros, since dividing 0 by a negative number gives -0.0 */
3080 if (trueop0 == CONST0_RTX (mode)
3081 && !HONOR_NANS (mode)
3082 && !HONOR_SIGNED_ZEROS (mode)
3083 && ! side_effects_p (op1))
3084 return op0;
3085 /* x/1.0 is x. */
3086 if (trueop1 == CONST1_RTX (mode)
3087 && !HONOR_SNANS (mode))
3088 return op0;
3090 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
3091 && trueop1 != CONST0_RTX (mode))
3093 const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);
3095 /* x/-1.0 is -x. */
3096 if (real_equal (d1, &dconstm1)
3097 && !HONOR_SNANS (mode))
3098 return simplify_gen_unary (NEG, mode, op0, mode);
3100 /* Change FP division by a constant into multiplication.
3101 Only do this with -freciprocal-math. */
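/* For example, with -freciprocal-math X / 4.0 becomes X * 0.25. */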
3102 if (flag_reciprocal_math
3103 && !real_equal (d1, &dconst0))
3105 REAL_VALUE_TYPE d;
3106 real_arithmetic (&d, RDIV_EXPR, &dconst1, d1);
3107 tem = const_double_from_real_value (d, mode);
3108 return simplify_gen_binary (MULT, mode, op0, tem);
3112 else if (SCALAR_INT_MODE_P (mode))
3114 /* 0/x is 0 (or x&0 if x has side-effects). */
3115 if (trueop0 == CONST0_RTX (mode)
3116 && !cfun->can_throw_non_call_exceptions)
3118 if (side_effects_p (op1))
3119 return simplify_gen_binary (AND, mode, op1, trueop0);
3120 return trueop0;
3122 /* x/1 is x. */
3123 if (trueop1 == CONST1_RTX (mode))
3125 tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3126 if (tem)
3127 return tem;
3129 /* x/-1 is -x. */
3130 if (trueop1 == constm1_rtx)
3132 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3133 if (x)
3134 return simplify_gen_unary (NEG, mode, x, mode);
3137 break;
3139 case UMOD:
3140 /* 0%x is 0 (or x&0 if x has side-effects). */
3141 if (trueop0 == CONST0_RTX (mode))
3143 if (side_effects_p (op1))
3144 return simplify_gen_binary (AND, mode, op1, trueop0);
3145 return trueop0;
3147 /* x%1 is 0 (or x&0 if x has side-effects). */
3148 if (trueop1 == CONST1_RTX (mode))
3150 if (side_effects_p (op0))
3151 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3152 return CONST0_RTX (mode);
3154 /* Implement modulus by power of two as AND. */
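/* For example, (umod X (const_int 16)) becomes (and X (const_int 15)). */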
3155 if (CONST_INT_P (trueop1)
3156 && exact_log2 (UINTVAL (trueop1)) > 0)
3157 return simplify_gen_binary (AND, mode, op0,
3158 gen_int_mode (INTVAL (op1) - 1, mode));
3159 break;
3161 case MOD:
3162 /* 0%x is 0 (or x&0 if x has side-effects). */
3163 if (trueop0 == CONST0_RTX (mode))
3165 if (side_effects_p (op1))
3166 return simplify_gen_binary (AND, mode, op1, trueop0);
3167 return trueop0;
3169 /* x%1 and x%-1 are 0 (or x&0 if x has side-effects). */
3170 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
3172 if (side_effects_p (op0))
3173 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3174 return CONST0_RTX (mode);
3176 break;
3178 case ROTATERT:
3179 case ROTATE:
3180 /* Canonicalize rotates by constant amount. If op1 is bitsize / 2,
3181 prefer left rotation; if op1 is from bitsize / 2 + 1 to
3182 bitsize - 1, use the other direction of rotate with
3183 a 1 .. bitsize / 2 - 1 amount instead. */
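/* For example, in SImode (rotate X (const_int 30)) becomes
(rotatert X (const_int 2)) when the target has both rotate patterns. */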
3184 #if defined(HAVE_rotate) && defined(HAVE_rotatert)
3185 if (CONST_INT_P (trueop1)
3186 && IN_RANGE (INTVAL (trueop1),
3187 GET_MODE_PRECISION (mode) / 2 + (code == ROTATE),
3188 GET_MODE_PRECISION (mode) - 1))
3189 return simplify_gen_binary (code == ROTATE ? ROTATERT : ROTATE,
3190 mode, op0, GEN_INT (GET_MODE_PRECISION (mode)
3191 - INTVAL (trueop1)));
3192 #endif
3193 /* FALLTHRU */
3194 case ASHIFTRT:
3195 if (trueop1 == CONST0_RTX (mode))
3196 return op0;
3197 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3198 return op0;
3199 /* Rotating ~0 always results in ~0; an arithmetic right shift of ~0 is also ~0. */
3200 if (CONST_INT_P (trueop0) && width <= HOST_BITS_PER_WIDE_INT
3201 && UINTVAL (trueop0) == GET_MODE_MASK (mode)
3202 && ! side_effects_p (op1))
3203 return op0;
3204 /* Given:
3205 scalar modes M1, M2
3206 scalar constants c1, c2
3207 size (M2) > size (M1)
3208 c1 == size (M2) - size (M1)
3209 optimize:
3210 (ashiftrt:M1 (subreg:M1 (lshiftrt:M2 (reg:M2) (const_int <c1>))
3211 <low_part>)
3212 (const_int <c2>))
3213 to:
3214 (subreg:M1 (ashiftrt:M2 (reg:M2) (const_int <c1 + c2>))
3215 <low_part>). */
3216 if (code == ASHIFTRT
3217 && !VECTOR_MODE_P (mode)
3218 && SUBREG_P (op0)
3219 && CONST_INT_P (op1)
3220 && GET_CODE (SUBREG_REG (op0)) == LSHIFTRT
3221 && !VECTOR_MODE_P (GET_MODE (SUBREG_REG (op0)))
3222 && CONST_INT_P (XEXP (SUBREG_REG (op0), 1))
3223 && (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0)))
3224 > GET_MODE_BITSIZE (mode))
3225 && (INTVAL (XEXP (SUBREG_REG (op0), 1))
3226 == (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0)))
3227 - GET_MODE_BITSIZE (mode)))
3228 && subreg_lowpart_p (op0))
3230 rtx tmp = GEN_INT (INTVAL (XEXP (SUBREG_REG (op0), 1))
3231 + INTVAL (op1));
3232 machine_mode inner_mode = GET_MODE (SUBREG_REG (op0));
3233 tmp = simplify_gen_binary (ASHIFTRT,
3234 GET_MODE (SUBREG_REG (op0)),
3235 XEXP (SUBREG_REG (op0), 0),
3236 tmp);
3237 return lowpart_subreg (mode, tmp, inner_mode);
3239 canonicalize_shift:
3240 if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
3242 val = INTVAL (op1) & (GET_MODE_PRECISION (mode) - 1);
3243 if (val != INTVAL (op1))
3244 return simplify_gen_binary (code, mode, op0, GEN_INT (val));
3246 break;
3248 case ASHIFT:
3249 case SS_ASHIFT:
3250 case US_ASHIFT:
3251 if (trueop1 == CONST0_RTX (mode))
3252 return op0;
3253 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3254 return op0;
3255 goto canonicalize_shift;
3257 case LSHIFTRT:
3258 if (trueop1 == CONST0_RTX (mode))
3259 return op0;
3260 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3261 return op0;
3262 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
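/* When CLZ of zero is defined to be the mode precision and C is log2 of
that precision, the shifted value is 1 exactly when X is zero, which
matches (eq X 0) for STORE_FLAG_VALUE == 1. */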
3263 if (GET_CODE (op0) == CLZ
3264 && CONST_INT_P (trueop1)
3265 && STORE_FLAG_VALUE == 1
3266 && INTVAL (trueop1) < (HOST_WIDE_INT)width)
3268 machine_mode imode = GET_MODE (XEXP (op0, 0));
3269 unsigned HOST_WIDE_INT zero_val = 0;
3271 if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
3272 && zero_val == GET_MODE_PRECISION (imode)
3273 && INTVAL (trueop1) == exact_log2 (zero_val))
3274 return simplify_gen_relational (EQ, mode, imode,
3275 XEXP (op0, 0), const0_rtx);
3277 goto canonicalize_shift;
3279 case SMIN:
3280 if (width <= HOST_BITS_PER_WIDE_INT
3281 && mode_signbit_p (mode, trueop1)
3282 && ! side_effects_p (op0))
3283 return op1;
3284 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3285 return op0;
3286 tem = simplify_associative_operation (code, mode, op0, op1);
3287 if (tem)
3288 return tem;
3289 break;
3291 case SMAX:
3292 if (width <= HOST_BITS_PER_WIDE_INT
3293 && CONST_INT_P (trueop1)
3294 && (UINTVAL (trueop1) == GET_MODE_MASK (mode) >> 1)
3295 && ! side_effects_p (op0))
3296 return op1;
3297 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3298 return op0;
3299 tem = simplify_associative_operation (code, mode, op0, op1);
3300 if (tem)
3301 return tem;
3302 break;
3304 case UMIN:
3305 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
3306 return op1;
3307 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3308 return op0;
3309 tem = simplify_associative_operation (code, mode, op0, op1);
3310 if (tem)
3311 return tem;
3312 break;
3314 case UMAX:
3315 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
3316 return op1;
3317 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3318 return op0;
3319 tem = simplify_associative_operation (code, mode, op0, op1);
3320 if (tem)
3321 return tem;
3322 break;
3324 case SS_PLUS:
3325 case US_PLUS:
3326 case SS_MINUS:
3327 case US_MINUS:
3328 case SS_MULT:
3329 case US_MULT:
3330 case SS_DIV:
3331 case US_DIV:
3332 /* ??? There are simplifications that can be done. */
3333 return 0;
3335 case VEC_SELECT:
3336 if (!VECTOR_MODE_P (mode))
3338 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3339 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
3340 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3341 gcc_assert (XVECLEN (trueop1, 0) == 1);
3342 gcc_assert (CONST_INT_P (XVECEXP (trueop1, 0, 0)));
3344 if (GET_CODE (trueop0) == CONST_VECTOR)
3345 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
3346 (trueop1, 0, 0)));
3348 /* Extract a scalar element from a nested VEC_SELECT expression
3349 (with optional nested VEC_CONCAT expression). Some targets
3350 (i386) extract a scalar element from a vector using a chain of
3351 nested VEC_SELECT expressions.  When the input operand is a memory
3352 operand, this operation can be simplified to a simple scalar
3353 load from an offset memory address. */
3354 if (GET_CODE (trueop0) == VEC_SELECT)
3356 rtx op0 = XEXP (trueop0, 0);
3357 rtx op1 = XEXP (trueop0, 1);
3359 machine_mode opmode = GET_MODE (op0);
3360 int elt_size = GET_MODE_UNIT_SIZE (opmode);
3361 int n_elts = GET_MODE_SIZE (opmode) / elt_size;
3363 int i = INTVAL (XVECEXP (trueop1, 0, 0));
3364 int elem;
3366 rtvec vec;
3367 rtx tmp_op, tmp;
3369 gcc_assert (GET_CODE (op1) == PARALLEL);
3370 gcc_assert (i < n_elts);
3372 /* Select the element pointed to by the nested selector. */
3373 elem = INTVAL (XVECEXP (op1, 0, i));
3375 /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */
3376 if (GET_CODE (op0) == VEC_CONCAT)
3378 rtx op00 = XEXP (op0, 0);
3379 rtx op01 = XEXP (op0, 1);
3381 machine_mode mode00, mode01;
3382 int n_elts00, n_elts01;
3384 mode00 = GET_MODE (op00);
3385 mode01 = GET_MODE (op01);
3387 /* Find out number of elements of each operand. */
3388 if (VECTOR_MODE_P (mode00))
3390 elt_size = GET_MODE_UNIT_SIZE (mode00);
3391 n_elts00 = GET_MODE_SIZE (mode00) / elt_size;
3393 else
3394 n_elts00 = 1;
3396 if (VECTOR_MODE_P (mode01))
3398 elt_size = GET_MODE_UNIT_SIZE (mode01);
3399 n_elts01 = GET_MODE_SIZE (mode01) / elt_size;
3401 else
3402 n_elts01 = 1;
3404 gcc_assert (n_elts == n_elts00 + n_elts01);
3406 /* Select correct operand of VEC_CONCAT
3407 and adjust selector. */
3408 if (elem < n_elts00)
3409 tmp_op = op00;
3410 else
3412 tmp_op = op01;
3413 elem -= n_elts00;
3416 else
3417 tmp_op = op0;
3419 vec = rtvec_alloc (1);
3420 RTVEC_ELT (vec, 0) = GEN_INT (elem);
3422 tmp = gen_rtx_fmt_ee (code, mode,
3423 tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
3424 return tmp;
3426 if (GET_CODE (trueop0) == VEC_DUPLICATE
3427 && GET_MODE (XEXP (trueop0, 0)) == mode)
3428 return XEXP (trueop0, 0);
3430 else
3432 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3433 gcc_assert (GET_MODE_INNER (mode)
3434 == GET_MODE_INNER (GET_MODE (trueop0)));
3435 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3437 if (GET_CODE (trueop0) == CONST_VECTOR)
3439 int elt_size = GET_MODE_UNIT_SIZE (mode);
3440 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3441 rtvec v = rtvec_alloc (n_elts);
3442 unsigned int i;
3444 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
3445 for (i = 0; i < n_elts; i++)
3447 rtx x = XVECEXP (trueop1, 0, i);
3449 gcc_assert (CONST_INT_P (x));
3450 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
3451 INTVAL (x));
3454 return gen_rtx_CONST_VECTOR (mode, v);
3457 /* Recognize the identity. */
3458 if (GET_MODE (trueop0) == mode)
3460 bool maybe_ident = true;
3461 for (int i = 0; i < XVECLEN (trueop1, 0); i++)
3463 rtx j = XVECEXP (trueop1, 0, i);
3464 if (!CONST_INT_P (j) || INTVAL (j) != i)
3466 maybe_ident = false;
3467 break;
3470 if (maybe_ident)
3471 return trueop0;
3474 /* If we build {a,b} then permute it, build the result directly. */
3475 if (XVECLEN (trueop1, 0) == 2
3476 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3477 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3478 && GET_CODE (trueop0) == VEC_CONCAT
3479 && GET_CODE (XEXP (trueop0, 0)) == VEC_CONCAT
3480 && GET_MODE (XEXP (trueop0, 0)) == mode
3481 && GET_CODE (XEXP (trueop0, 1)) == VEC_CONCAT
3482 && GET_MODE (XEXP (trueop0, 1)) == mode)
3484 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3485 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3486 rtx subop0, subop1;
3488 gcc_assert (i0 < 4 && i1 < 4);
3489 subop0 = XEXP (XEXP (trueop0, i0 / 2), i0 % 2);
3490 subop1 = XEXP (XEXP (trueop0, i1 / 2), i1 % 2);
3492 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3495 if (XVECLEN (trueop1, 0) == 2
3496 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3497 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3498 && GET_CODE (trueop0) == VEC_CONCAT
3499 && GET_MODE (trueop0) == mode)
3501 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3502 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3503 rtx subop0, subop1;
3505 gcc_assert (i0 < 2 && i1 < 2);
3506 subop0 = XEXP (trueop0, i0);
3507 subop1 = XEXP (trueop0, i1);
3509 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3512 /* If we select one half of a vec_concat, return that. */
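/* For example, (vec_select:V2SI (vec_concat:V4SI A B)
(parallel [(const_int 0) (const_int 1)])) is simply A
when A has mode V2SI. */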
3513 if (GET_CODE (trueop0) == VEC_CONCAT
3514 && CONST_INT_P (XVECEXP (trueop1, 0, 0)))
3516 rtx subop0 = XEXP (trueop0, 0);
3517 rtx subop1 = XEXP (trueop0, 1);
3518 machine_mode mode0 = GET_MODE (subop0);
3519 machine_mode mode1 = GET_MODE (subop1);
3520 int li = GET_MODE_UNIT_SIZE (mode0);
3521 int l0 = GET_MODE_SIZE (mode0) / li;
3522 int l1 = GET_MODE_SIZE (mode1) / li;
3523 int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3524 if (i0 == 0 && !side_effects_p (op1) && mode == mode0)
3526 bool success = true;
3527 for (int i = 1; i < l0; ++i)
3529 rtx j = XVECEXP (trueop1, 0, i);
3530 if (!CONST_INT_P (j) || INTVAL (j) != i)
3532 success = false;
3533 break;
3536 if (success)
3537 return subop0;
3539 if (i0 == l0 && !side_effects_p (op0) && mode == mode1)
3541 bool success = true;
3542 for (int i = 1; i < l1; ++i)
3544 rtx j = XVECEXP (trueop1, 0, i);
3545 if (!CONST_INT_P (j) || INTVAL (j) != i0 + i)
3547 success = false;
3548 break;
3551 if (success)
3552 return subop1;
3557 if (XVECLEN (trueop1, 0) == 1
3558 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3559 && GET_CODE (trueop0) == VEC_CONCAT)
3561 rtx vec = trueop0;
3562 int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
3564 /* Try to find the element in the VEC_CONCAT. */
3565 while (GET_MODE (vec) != mode
3566 && GET_CODE (vec) == VEC_CONCAT)
3568 HOST_WIDE_INT vec_size;
3570 if (CONST_INT_P (XEXP (vec, 0)))
3572 /* vec_concat of two const_ints doesn't make sense with
3573 respect to modes. */
3574 if (CONST_INT_P (XEXP (vec, 1)))
3575 return 0;
3577 vec_size = GET_MODE_SIZE (GET_MODE (trueop0))
3578 - GET_MODE_SIZE (GET_MODE (XEXP (vec, 1)));
3580 else
3581 vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
3583 if (offset < vec_size)
3584 vec = XEXP (vec, 0);
3585 else
3587 offset -= vec_size;
3588 vec = XEXP (vec, 1);
3590 vec = avoid_constant_pool_reference (vec);
3593 if (GET_MODE (vec) == mode)
3594 return vec;
3597 /* If we select elements in a vec_merge that all come from the same
3598 operand, select from that operand directly. */
3599 if (GET_CODE (op0) == VEC_MERGE)
3601 rtx trueop02 = avoid_constant_pool_reference (XEXP (op0, 2));
3602 if (CONST_INT_P (trueop02))
3604 unsigned HOST_WIDE_INT sel = UINTVAL (trueop02);
3605 bool all_operand0 = true;
3606 bool all_operand1 = true;
3607 for (int i = 0; i < XVECLEN (trueop1, 0); i++)
3609 rtx j = XVECEXP (trueop1, 0, i);
3610 if (sel & (1 << UINTVAL (j)))
3611 all_operand1 = false;
3612 else
3613 all_operand0 = false;
3615 if (all_operand0 && !side_effects_p (XEXP (op0, 1)))
3616 return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 0), op1);
3617 if (all_operand1 && !side_effects_p (XEXP (op0, 0)))
3618 return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 1), op1);
3622 /* If we have two nested selects that are inverses of each
3623 other, replace them with the source operand. */
3624 if (GET_CODE (trueop0) == VEC_SELECT
3625 && GET_MODE (XEXP (trueop0, 0)) == mode)
3627 rtx op0_subop1 = XEXP (trueop0, 1);
3628 gcc_assert (GET_CODE (op0_subop1) == PARALLEL);
3629 gcc_assert (XVECLEN (trueop1, 0) == GET_MODE_NUNITS (mode));
3631 /* Apply the outer ordering vector to the inner one. (The inner
3632 ordering vector is expressly permitted to be of a different
3633 length than the outer one.) If the result is { 0, 1, ..., n-1 }
3634 then the two VEC_SELECTs cancel. */
3635 for (int i = 0; i < XVECLEN (trueop1, 0); ++i)
3637 rtx x = XVECEXP (trueop1, 0, i);
3638 if (!CONST_INT_P (x))
3639 return 0;
3640 rtx y = XVECEXP (op0_subop1, 0, INTVAL (x));
3641 if (!CONST_INT_P (y) || i != INTVAL (y))
3642 return 0;
3644 return XEXP (trueop0, 0);
3647 return 0;
3648 case VEC_CONCAT:
3650 machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
3651 ? GET_MODE (trueop0)
3652 : GET_MODE_INNER (mode));
3653 machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
3654 ? GET_MODE (trueop1)
3655 : GET_MODE_INNER (mode));
3657 gcc_assert (VECTOR_MODE_P (mode));
3658 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
3659 == GET_MODE_SIZE (mode));
3661 if (VECTOR_MODE_P (op0_mode))
3662 gcc_assert (GET_MODE_INNER (mode)
3663 == GET_MODE_INNER (op0_mode));
3664 else
3665 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
3667 if (VECTOR_MODE_P (op1_mode))
3668 gcc_assert (GET_MODE_INNER (mode)
3669 == GET_MODE_INNER (op1_mode));
3670 else
3671 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
3673 if ((GET_CODE (trueop0) == CONST_VECTOR
3674 || CONST_SCALAR_INT_P (trueop0)
3675 || CONST_DOUBLE_AS_FLOAT_P (trueop0))
3676 && (GET_CODE (trueop1) == CONST_VECTOR
3677 || CONST_SCALAR_INT_P (trueop1)
3678 || CONST_DOUBLE_AS_FLOAT_P (trueop1)))
3680 int elt_size = GET_MODE_UNIT_SIZE (mode);
3681 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3682 rtvec v = rtvec_alloc (n_elts);
3683 unsigned int i;
3684 unsigned in_n_elts = 1;
3686 if (VECTOR_MODE_P (op0_mode))
3687 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
3688 for (i = 0; i < n_elts; i++)
3690 if (i < in_n_elts)
3692 if (!VECTOR_MODE_P (op0_mode))
3693 RTVEC_ELT (v, i) = trueop0;
3694 else
3695 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
3697 else
3699 if (!VECTOR_MODE_P (op1_mode))
3700 RTVEC_ELT (v, i) = trueop1;
3701 else
3702 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
3703 i - in_n_elts);
3707 return gen_rtx_CONST_VECTOR (mode, v);
3710 /* Try to merge two VEC_SELECTs from the same vector into a single one.
3711 Restrict the transformation to avoid generating a VEC_SELECT with a
3712 mode unrelated to its operand. */
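/* E.g. (vec_concat (vec_select X (parallel [0 2]))
                    (vec_select X (parallel [1 3])))
   becomes (vec_select X (parallel [0 2 1 3])): the two index vectors
   are simply concatenated. */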
3713 if (GET_CODE (trueop0) == VEC_SELECT
3714 && GET_CODE (trueop1) == VEC_SELECT
3715 && rtx_equal_p (XEXP (trueop0, 0), XEXP (trueop1, 0))
3716 && GET_MODE (XEXP (trueop0, 0)) == mode)
3718 rtx par0 = XEXP (trueop0, 1);
3719 rtx par1 = XEXP (trueop1, 1);
3720 int len0 = XVECLEN (par0, 0);
3721 int len1 = XVECLEN (par1, 0);
3722 rtvec vec = rtvec_alloc (len0 + len1);
3723 for (int i = 0; i < len0; i++)
3724 RTVEC_ELT (vec, i) = XVECEXP (par0, 0, i);
3725 for (int i = 0; i < len1; i++)
3726 RTVEC_ELT (vec, len0 + i) = XVECEXP (par1, 0, i);
3727 return simplify_gen_binary (VEC_SELECT, mode, XEXP (trueop0, 0),
3728 gen_rtx_PARALLEL (VOIDmode, vec));
3731 return 0;
3733 default:
3734 gcc_unreachable ();
3737 return 0;
3741 simplify_const_binary_operation (enum rtx_code code, machine_mode mode,
3742 rtx op0, rtx op1)
3744 unsigned int width = GET_MODE_PRECISION (mode);
3746 if (VECTOR_MODE_P (mode)
3747 && code != VEC_CONCAT
3748 && GET_CODE (op0) == CONST_VECTOR
3749 && GET_CODE (op1) == CONST_VECTOR)
3751 unsigned n_elts = GET_MODE_NUNITS (mode);
3752 machine_mode op0mode = GET_MODE (op0);
3753 unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
3754 machine_mode op1mode = GET_MODE (op1);
3755 unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
3756 rtvec v = rtvec_alloc (n_elts);
3757 unsigned int i;
3759 gcc_assert (op0_n_elts == n_elts);
3760 gcc_assert (op1_n_elts == n_elts);
3761 for (i = 0; i < n_elts; i++)
3763 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
3764 CONST_VECTOR_ELT (op0, i),
3765 CONST_VECTOR_ELT (op1, i));
3766 if (!x)
3767 return 0;
3768 RTVEC_ELT (v, i) = x;
3771 return gen_rtx_CONST_VECTOR (mode, v);
3774 if (VECTOR_MODE_P (mode)
3775 && code == VEC_CONCAT
3776 && (CONST_SCALAR_INT_P (op0)
3777 || GET_CODE (op0) == CONST_FIXED
3778 || CONST_DOUBLE_AS_FLOAT_P (op0))
3779 && (CONST_SCALAR_INT_P (op1)
3780 || CONST_DOUBLE_AS_FLOAT_P (op1)
3781 || GET_CODE (op1) == CONST_FIXED))
3783 unsigned n_elts = GET_MODE_NUNITS (mode);
3784 rtvec v = rtvec_alloc (n_elts);
3786 gcc_assert (n_elts >= 2);
3787 if (n_elts == 2)
3789 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
3790 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
3792 RTVEC_ELT (v, 0) = op0;
3793 RTVEC_ELT (v, 1) = op1;
3795 else
3797 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
3798 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
3799 unsigned i;
3801 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
3802 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
3803 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
3805 for (i = 0; i < op0_n_elts; ++i)
3806 RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
3807 for (i = 0; i < op1_n_elts; ++i)
3808 RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
3811 return gen_rtx_CONST_VECTOR (mode, v);
3814 if (SCALAR_FLOAT_MODE_P (mode)
3815 && CONST_DOUBLE_AS_FLOAT_P (op0)
3816 && CONST_DOUBLE_AS_FLOAT_P (op1)
3817 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
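/* Logical operations on floating-point constants are folded on the
   target's byte image: convert each value to the target format, combine
   the raw words, and convert the result back.  This is what makes
   sign-bit manipulations such as abs/neg via AND/XOR foldable. */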
3819 if (code == AND
3820 || code == IOR
3821 || code == XOR)
3823 long tmp0[4];
3824 long tmp1[4];
3825 REAL_VALUE_TYPE r;
3826 int i;
3828 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
3829 GET_MODE (op0));
3830 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
3831 GET_MODE (op1));
3832 for (i = 0; i < 4; i++)
3834 switch (code)
3836 case AND:
3837 tmp0[i] &= tmp1[i];
3838 break;
3839 case IOR:
3840 tmp0[i] |= tmp1[i];
3841 break;
3842 case XOR:
3843 tmp0[i] ^= tmp1[i];
3844 break;
3845 default:
3846 gcc_unreachable ();
3849 real_from_target (&r, tmp0, mode);
3850 return const_double_from_real_value (r, mode);
3852 else
3854 REAL_VALUE_TYPE f0, f1, value, result;
3855 bool inexact;
3857 real_convert (&f0, mode, CONST_DOUBLE_REAL_VALUE (op0));
3858 real_convert (&f1, mode, CONST_DOUBLE_REAL_VALUE (op1));
3860 if (HONOR_SNANS (mode)
3861 && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
3862 return 0;
3864 if (code == DIV
3865 && real_equal (&f1, &dconst0)
3866 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
3867 return 0;
3869 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3870 && flag_trapping_math
3871 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
3873 int s0 = REAL_VALUE_NEGATIVE (f0);
3874 int s1 = REAL_VALUE_NEGATIVE (f1);
3876 switch (code)
3878 case PLUS:
3879 /* Inf + -Inf = NaN plus exception. */
3880 if (s0 != s1)
3881 return 0;
3882 break;
3883 case MINUS:
3884 /* Inf - Inf = NaN plus exception. */
3885 if (s0 == s1)
3886 return 0;
3887 break;
3888 case DIV:
3889 /* Inf / Inf = NaN plus exception. */
3890 return 0;
3891 default:
3892 break;
3896 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3897 && flag_trapping_math
3898 && ((REAL_VALUE_ISINF (f0) && real_equal (&f1, &dconst0))
3899 || (REAL_VALUE_ISINF (f1)
3900 && real_equal (&f0, &dconst0))))
3901 /* Inf * 0 = NaN plus exception. */
3902 return 0;
3904 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
3905 &f0, &f1);
3906 real_convert (&result, mode, &value);
3908 /* Don't constant fold this floating point operation if
3909 the result has overflowed and flag_trapping_math is set. */
3911 if (flag_trapping_math
3912 && MODE_HAS_INFINITIES (mode)
3913 && REAL_VALUE_ISINF (result)
3914 && !REAL_VALUE_ISINF (f0)
3915 && !REAL_VALUE_ISINF (f1))
3916 /* Overflow plus exception. */
3917 return 0;
3919 /* Don't constant fold this floating point operation if the
3920 result may depend upon the run-time rounding mode and
3921 flag_rounding_math is set, or if GCC's software emulation
3922 is unable to accurately represent the result. */
3924 if ((flag_rounding_math
3925 || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
3926 && (inexact || !real_identical (&result, &value)))
3927 return NULL_RTX;
3929 return const_double_from_real_value (result, mode);
3933 /* We can fold some multi-word operations. */
3934 if ((GET_MODE_CLASS (mode) == MODE_INT
3935 || GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
3936 && CONST_SCALAR_INT_P (op0)
3937 && CONST_SCALAR_INT_P (op1))
3939 wide_int result;
3940 bool overflow;
3941 rtx_mode_t pop0 = std::make_pair (op0, mode);
3942 rtx_mode_t pop1 = std::make_pair (op1, mode);
3944 #if TARGET_SUPPORTS_WIDE_INT == 0
3945 /* This assert keeps the simplification from producing a result
3946 that cannot be represented in a CONST_DOUBLE but a lot of
3947 upstream callers expect that this function never fails to
3948 simplify something, and so if you added this to the test
3949 above, the code would die later anyway. If this assert
3950 happens, you just need to make the port support wide int. */
3951 gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT);
3952 #endif
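/* Each constant is viewed together with MODE as a wide_int of MODE's
   precision, so the arithmetic below is exact even when the value does
   not fit in a single HOST_WIDE_INT. */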
3953 switch (code)
3955 case MINUS:
3956 result = wi::sub (pop0, pop1);
3957 break;
3959 case PLUS:
3960 result = wi::add (pop0, pop1);
3961 break;
3963 case MULT:
3964 result = wi::mul (pop0, pop1);
3965 break;
3967 case DIV:
3968 result = wi::div_trunc (pop0, pop1, SIGNED, &overflow);
3969 if (overflow)
3970 return NULL_RTX;
3971 break;
3973 case MOD:
3974 result = wi::mod_trunc (pop0, pop1, SIGNED, &overflow);
3975 if (overflow)
3976 return NULL_RTX;
3977 break;
3979 case UDIV:
3980 result = wi::div_trunc (pop0, pop1, UNSIGNED, &overflow);
3981 if (overflow)
3982 return NULL_RTX;
3983 break;
3985 case UMOD:
3986 result = wi::mod_trunc (pop0, pop1, UNSIGNED, &overflow);
3987 if (overflow)
3988 return NULL_RTX;
3989 break;
3991 case AND:
3992 result = wi::bit_and (pop0, pop1);
3993 break;
3995 case IOR:
3996 result = wi::bit_or (pop0, pop1);
3997 break;
3999 case XOR:
4000 result = wi::bit_xor (pop0, pop1);
4001 break;
4003 case SMIN:
4004 result = wi::smin (pop0, pop1);
4005 break;
4007 case SMAX:
4008 result = wi::smax (pop0, pop1);
4009 break;
4011 case UMIN:
4012 result = wi::umin (pop0, pop1);
4013 break;
4015 case UMAX:
4016 result = wi::umax (pop0, pop1);
4017 break;
4019 case LSHIFTRT:
4020 case ASHIFTRT:
4021 case ASHIFT:
4023 wide_int wop1 = pop1;
4024 if (SHIFT_COUNT_TRUNCATED)
4025 wop1 = wi::umod_trunc (wop1, width);
4026 else if (wi::geu_p (wop1, width))
4027 return NULL_RTX;
4029 switch (code)
4031 case LSHIFTRT:
4032 result = wi::lrshift (pop0, wop1);
4033 break;
4035 case ASHIFTRT:
4036 result = wi::arshift (pop0, wop1);
4037 break;
4039 case ASHIFT:
4040 result = wi::lshift (pop0, wop1);
4041 break;
4043 default:
4044 gcc_unreachable ();
4046 break;
4048 case ROTATE:
4049 case ROTATERT:
4051 if (wi::neg_p (pop1))
4052 return NULL_RTX;
4054 switch (code)
4056 case ROTATE:
4057 result = wi::lrotate (pop0, pop1);
4058 break;
4060 case ROTATERT:
4061 result = wi::rrotate (pop0, pop1);
4062 break;
4064 default:
4065 gcc_unreachable ();
4067 break;
4069 default:
4070 return NULL_RTX;
4072 return immed_wide_int_const (result, mode);
4075 return NULL_RTX;
4080 /* Return a positive integer if X should sort after Y. The value
4081 returned is 1 if and only if X and Y are both regs. */
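/* The precedence difference is doubled so that a plain swap of two
   registers, which is reported as exactly 1 below, can be told apart
   from a real reordering by the caller (see the cmp != 1 test in
   simplify_plus_minus). */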
4083 static int
4084 simplify_plus_minus_op_data_cmp (rtx x, rtx y)
4086 int result;
4088 result = (commutative_operand_precedence (y)
4089 - commutative_operand_precedence (x));
4090 if (result)
4091 return result + result;
4093 /* Group together equal REGs to do more simplification. */
4094 if (REG_P (x) && REG_P (y))
4095 return REGNO (x) > REGNO (y);
4097 return 0;
4100 /* Simplify and canonicalize a PLUS or MINUS, at least one of whose
4101 operands may be another PLUS or MINUS.
4103 Rather than test for specific cases, we do this by a brute-force method
4104 and do all possible simplifications until no more changes occur. Then
4105 we rebuild the operation.
4107 May return NULL_RTX when no changes were made. */
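/* For example, (minus (plus A B) (minus C D)) is first flattened into
   the ops array as { +A, +B, -C, +D }; the pairwise loop then folds
   whatever it can, and the surviving entries are rebuilt into a chain
   of PLUS/MINUS expressions. */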
4109 static rtx
4110 simplify_plus_minus (enum rtx_code code, machine_mode mode, rtx op0,
4111 rtx op1)
4113 struct simplify_plus_minus_op_data
4115 rtx op;
4116 short neg;
4117 } ops[16];
4118 rtx result, tem;
4119 int n_ops = 2;
4120 int changed, n_constants, canonicalized = 0;
4121 int i, j;
4123 memset (ops, 0, sizeof ops);
4125 /* Set up the two operands and then expand them until nothing has been
4126 changed. If we run out of room in our array, give up; this should
4127 almost never happen. */
4129 ops[0].op = op0;
4130 ops[0].neg = 0;
4131 ops[1].op = op1;
4132 ops[1].neg = (code == MINUS);
4136 changed = 0;
4137 n_constants = 0;
4139 for (i = 0; i < n_ops; i++)
4141 rtx this_op = ops[i].op;
4142 int this_neg = ops[i].neg;
4143 enum rtx_code this_code = GET_CODE (this_op);
4145 switch (this_code)
4147 case PLUS:
4148 case MINUS:
4149 if (n_ops == ARRAY_SIZE (ops))
4150 return NULL_RTX;
4152 ops[n_ops].op = XEXP (this_op, 1);
4153 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
4154 n_ops++;
4156 ops[i].op = XEXP (this_op, 0);
4157 changed = 1;
4158 /* If this operand was negated then we will potentially
4159 canonicalize the expression. Similarly if we don't
4160 place the operands adjacent we're re-ordering the
4161 expression and thus might be performing a
4162 canonicalization. Ignore register re-ordering.
4163 ??? It might be better to shuffle the ops array here,
4164 but then (plus (plus (A, B), plus (C, D))) wouldn't
4165 be seen as non-canonical. */
4166 if (this_neg
4167 || (i != n_ops - 2
4168 && !(REG_P (ops[i].op) && REG_P (ops[n_ops - 1].op))))
4169 canonicalized = 1;
4170 break;
4172 case NEG:
4173 ops[i].op = XEXP (this_op, 0);
4174 ops[i].neg = ! this_neg;
4175 changed = 1;
4176 canonicalized = 1;
4177 break;
4179 case CONST:
4180 if (n_ops != ARRAY_SIZE (ops)
4181 && GET_CODE (XEXP (this_op, 0)) == PLUS
4182 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
4183 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
4185 ops[i].op = XEXP (XEXP (this_op, 0), 0);
4186 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
4187 ops[n_ops].neg = this_neg;
4188 n_ops++;
4189 changed = 1;
4190 canonicalized = 1;
4192 break;
4194 case NOT:
4195 /* ~a -> (-a - 1) */
4196 if (n_ops != ARRAY_SIZE (ops))
4198 ops[n_ops].op = CONSTM1_RTX (mode);
4199 ops[n_ops++].neg = this_neg;
4200 ops[i].op = XEXP (this_op, 0);
4201 ops[i].neg = !this_neg;
4202 changed = 1;
4203 canonicalized = 1;
4205 break;
4207 case CONST_INT:
4208 n_constants++;
4209 if (this_neg)
4211 ops[i].op = neg_const_int (mode, this_op);
4212 ops[i].neg = 0;
4213 changed = 1;
4214 canonicalized = 1;
4216 break;
4218 default:
4219 break;
4223 while (changed);
4225 if (n_constants > 1)
4226 canonicalized = 1;
4228 gcc_assert (n_ops >= 2);
4230 /* If we only have two operands, we can avoid the loops. */
4231 if (n_ops == 2)
4233 enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
4234 rtx lhs, rhs;
4236 /* Get the two operands. Be careful with the order, especially for
4237 the cases where code == MINUS. */
4238 if (ops[0].neg && ops[1].neg)
4240 lhs = gen_rtx_NEG (mode, ops[0].op);
4241 rhs = ops[1].op;
4243 else if (ops[0].neg)
4245 lhs = ops[1].op;
4246 rhs = ops[0].op;
4248 else
4250 lhs = ops[0].op;
4251 rhs = ops[1].op;
4254 return simplify_const_binary_operation (code, mode, lhs, rhs);
4257 /* Now simplify each pair of operands until nothing changes. */
4258 while (1)
4260 /* Insertion sort is good enough for a small array. */
4261 for (i = 1; i < n_ops; i++)
4263 struct simplify_plus_minus_op_data save;
4264 int cmp;
4266 j = i - 1;
4267 cmp = simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op);
4268 if (cmp <= 0)
4269 continue;
4270 /* Just swapping registers doesn't count as canonicalization. */
4271 if (cmp != 1)
4272 canonicalized = 1;
4274 save = ops[i];
4276 ops[j + 1] = ops[j];
4277 while (j--
4278 && simplify_plus_minus_op_data_cmp (ops[j].op, save.op) > 0);
4279 ops[j + 1] = save;
4282 changed = 0;
4283 for (i = n_ops - 1; i > 0; i--)
4284 for (j = i - 1; j >= 0; j--)
4286 rtx lhs = ops[j].op, rhs = ops[i].op;
4287 int lneg = ops[j].neg, rneg = ops[i].neg;
4289 if (lhs != 0 && rhs != 0)
4291 enum rtx_code ncode = PLUS;
4293 if (lneg != rneg)
4295 ncode = MINUS;
4296 if (lneg)
4297 std::swap (lhs, rhs);
4299 else if (swap_commutative_operands_p (lhs, rhs))
4300 std::swap (lhs, rhs);
4302 if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
4303 && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
4305 rtx tem_lhs, tem_rhs;
4307 tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
4308 tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
4309 tem = simplify_binary_operation (ncode, mode, tem_lhs,
4310 tem_rhs);
4312 if (tem && !CONSTANT_P (tem))
4313 tem = gen_rtx_CONST (GET_MODE (tem), tem);
4315 else
4316 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
4318 if (tem)
4320 /* Reject "simplifications" that just wrap the two
4321 arguments in a CONST. Failure to do so can result
4322 in infinite recursion with simplify_binary_operation
4323 when it calls us to simplify CONST operations.
4324 Also, if we find such a simplification, don't try
4325 any more combinations with this rhs: We must have
4326 something like symbol+offset, i.e. one of the
4327 trivial CONST expressions we handle later. */
4328 if (GET_CODE (tem) == CONST
4329 && GET_CODE (XEXP (tem, 0)) == ncode
4330 && XEXP (XEXP (tem, 0), 0) == lhs
4331 && XEXP (XEXP (tem, 0), 1) == rhs)
4332 break;
4333 lneg &= rneg;
4334 if (GET_CODE (tem) == NEG)
4335 tem = XEXP (tem, 0), lneg = !lneg;
4336 if (CONST_INT_P (tem) && lneg)
4337 tem = neg_const_int (mode, tem), lneg = 0;
4339 ops[i].op = tem;
4340 ops[i].neg = lneg;
4341 ops[j].op = NULL_RTX;
4342 changed = 1;
4343 canonicalized = 1;
4348 if (!changed)
4349 break;
4351 /* Pack all the operands to the lower-numbered entries. */
4352 for (i = 0, j = 0; j < n_ops; j++)
4353 if (ops[j].op)
4355 ops[i] = ops[j];
4356 i++;
4358 n_ops = i;
4361 /* If nothing changed, fail. */
4362 if (!canonicalized)
4363 return NULL_RTX;
4365 /* Create (minus -C X) instead of (neg (const (plus X C))). */
4366 if (n_ops == 2
4367 && CONST_INT_P (ops[1].op)
4368 && CONSTANT_P (ops[0].op)
4369 && ops[0].neg)
4370 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
4372 /* We suppressed creation of trivial CONST expressions in the
4373 combination loop to avoid recursion. Create one manually now.
4374 The combination loop should have ensured that there is exactly
4375 one CONST_INT, and the sort will have ensured that it is last
4376 in the array and that any other constant will be next-to-last. */
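/* E.g. a surviving (symbol_ref S) and (const_int 12) are combined here
   by plus_constant into (const (plus (symbol_ref S) (const_int 12))). */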
4378 if (n_ops > 1
4379 && CONST_INT_P (ops[n_ops - 1].op)
4380 && CONSTANT_P (ops[n_ops - 2].op))
4382 rtx value = ops[n_ops - 1].op;
4383 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
4384 value = neg_const_int (mode, value);
4385 ops[n_ops - 2].op = plus_constant (mode, ops[n_ops - 2].op,
4386 INTVAL (value));
4387 n_ops--;
4390 /* Put a non-negated operand first, if possible. */
4392 for (i = 0; i < n_ops && ops[i].neg; i++)
4393 continue;
4394 if (i == n_ops)
4395 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
4396 else if (i != 0)
4398 tem = ops[0].op;
4399 ops[0] = ops[i];
4400 ops[i].op = tem;
4401 ops[i].neg = 1;
4404 /* Now make the result by performing the requested operations. */
4405 result = ops[0].op;
4406 for (i = 1; i < n_ops; i++)
4407 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
4408 mode, result, ops[i].op);
4410 return result;
4413 /* Check whether an operand is suitable for calling simplify_plus_minus. */
4414 static bool
4415 plus_minus_operand_p (const_rtx x)
4417 return GET_CODE (x) == PLUS
4418 || GET_CODE (x) == MINUS
4419 || (GET_CODE (x) == CONST
4420 && GET_CODE (XEXP (x, 0)) == PLUS
4421 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
4422 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
4425 /* Like simplify_binary_operation except used for relational operators.
4426 MODE is the mode of the result. If MODE is VOIDmode, the operands
4427 must not both be VOIDmode as well.
4429 CMP_MODE specifies the mode in which the comparison is done, so it is
4430 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
4431 the operands or, if both are VOIDmode, the operands are compared in
4432 "infinite precision". */
4434 simplify_relational_operation (enum rtx_code code, machine_mode mode,
4435 machine_mode cmp_mode, rtx op0, rtx op1)
4437 rtx tem, trueop0, trueop1;
4439 if (cmp_mode == VOIDmode)
4440 cmp_mode = GET_MODE (op0);
4441 if (cmp_mode == VOIDmode)
4442 cmp_mode = GET_MODE (op1);
4444 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
4445 if (tem)
4447 if (SCALAR_FLOAT_MODE_P (mode))
4449 if (tem == const0_rtx)
4450 return CONST0_RTX (mode);
4451 #ifdef FLOAT_STORE_FLAG_VALUE
4453 REAL_VALUE_TYPE val;
4454 val = FLOAT_STORE_FLAG_VALUE (mode);
4455 return const_double_from_real_value (val, mode);
4457 #else
4458 return NULL_RTX;
4459 #endif
4461 if (VECTOR_MODE_P (mode))
4463 if (tem == const0_rtx)
4464 return CONST0_RTX (mode);
4465 #ifdef VECTOR_STORE_FLAG_VALUE
4467 int i, units;
4468 rtvec v;
4470 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
4471 if (val == NULL_RTX)
4472 return NULL_RTX;
4473 if (val == const1_rtx)
4474 return CONST1_RTX (mode);
4476 units = GET_MODE_NUNITS (mode);
4477 v = rtvec_alloc (units);
4478 for (i = 0; i < units; i++)
4479 RTVEC_ELT (v, i) = val;
4480 return gen_rtx_raw_CONST_VECTOR (mode, v);
4482 #else
4483 return NULL_RTX;
4484 #endif
4487 return tem;
4490 /* For the following tests, ensure const0_rtx is op1. */
4491 if (swap_commutative_operands_p (op0, op1)
4492 || (op0 == const0_rtx && op1 != const0_rtx))
4493 std::swap (op0, op1), code = swap_condition (code);
4495 /* If op0 is a compare, extract the comparison arguments from it. */
4496 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4497 return simplify_gen_relational (code, mode, VOIDmode,
4498 XEXP (op0, 0), XEXP (op0, 1));
4500 if (GET_MODE_CLASS (cmp_mode) == MODE_CC
4501 || CC0_P (op0))
4502 return NULL_RTX;
4504 trueop0 = avoid_constant_pool_reference (op0);
4505 trueop1 = avoid_constant_pool_reference (op1);
4506 return simplify_relational_operation_1 (code, mode, cmp_mode,
4507 trueop0, trueop1);
4510 /* This part of simplify_relational_operation is only used when CMP_MODE
4511 is not in class MODE_CC (i.e. it is a real comparison).
4513 MODE is the mode of the result, while CMP_MODE specifies the mode
4514 in which the comparison is done, so it is the mode of the operands. */
4516 static rtx
4517 simplify_relational_operation_1 (enum rtx_code code, machine_mode mode,
4518 machine_mode cmp_mode, rtx op0, rtx op1)
4520 enum rtx_code op0code = GET_CODE (op0);
4522 if (op1 == const0_rtx && COMPARISON_P (op0))
4524 /* If op0 is a comparison, extract the comparison arguments
4525 from it. */
4526 if (code == NE)
4528 if (GET_MODE (op0) == mode)
4529 return simplify_rtx (op0);
4530 else
4531 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
4532 XEXP (op0, 0), XEXP (op0, 1));
4534 else if (code == EQ)
4536 enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
4537 if (new_code != UNKNOWN)
4538 return simplify_gen_relational (new_code, mode, VOIDmode,
4539 XEXP (op0, 0), XEXP (op0, 1));
4543 /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
4544 (GEU/LTU a -C). Likewise for (LTU/GEU (PLUS a C) a). */
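/* The unsigned form is the usual overflow check: (a + C) <u C holds
   exactly when the addition wrapped around, i.e. when a >=u -C. */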
4545 if ((code == LTU || code == GEU)
4546 && GET_CODE (op0) == PLUS
4547 && CONST_INT_P (XEXP (op0, 1))
4548 && (rtx_equal_p (op1, XEXP (op0, 0))
4549 || rtx_equal_p (op1, XEXP (op0, 1)))
4550 /* (LTU/GEU (PLUS a 0) 0) is not the same as (GEU/LTU a 0). */
4551 && XEXP (op0, 1) != const0_rtx)
4553 rtx new_cmp
4554 = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
4555 return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
4556 cmp_mode, XEXP (op0, 0), new_cmp);
4559 /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a). */
4560 if ((code == LTU || code == GEU)
4561 && GET_CODE (op0) == PLUS
4562 && rtx_equal_p (op1, XEXP (op0, 1))
4563 /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b). */
4564 && !rtx_equal_p (op1, XEXP (op0, 0)))
4565 return simplify_gen_relational (code, mode, cmp_mode, op0,
4566 copy_rtx (XEXP (op0, 0)));
4568 if (op1 == const0_rtx)
4570 /* Canonicalize (GTU x 0) as (NE x 0). */
4571 if (code == GTU)
4572 return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
4573 /* Canonicalize (LEU x 0) as (EQ x 0). */
4574 if (code == LEU)
4575 return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
4577 else if (op1 == const1_rtx)
4579 switch (code)
4581 case GE:
4582 /* Canonicalize (GE x 1) as (GT x 0). */
4583 return simplify_gen_relational (GT, mode, cmp_mode,
4584 op0, const0_rtx);
4585 case GEU:
4586 /* Canonicalize (GEU x 1) as (NE x 0). */
4587 return simplify_gen_relational (NE, mode, cmp_mode,
4588 op0, const0_rtx);
4589 case LT:
4590 /* Canonicalize (LT x 1) as (LE x 0). */
4591 return simplify_gen_relational (LE, mode, cmp_mode,
4592 op0, const0_rtx);
4593 case LTU:
4594 /* Canonicalize (LTU x 1) as (EQ x 0). */
4595 return simplify_gen_relational (EQ, mode, cmp_mode,
4596 op0, const0_rtx);
4597 default:
4598 break;
4601 else if (op1 == constm1_rtx)
4603 /* Canonicalize (LE x -1) as (LT x 0). */
4604 if (code == LE)
4605 return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
4606 /* Canonicalize (GT x -1) as (GE x 0). */
4607 if (code == GT)
4608 return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
4611 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
4612 if ((code == EQ || code == NE)
4613 && (op0code == PLUS || op0code == MINUS)
4614 && CONSTANT_P (op1)
4615 && CONSTANT_P (XEXP (op0, 1))
4616 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
4618 rtx x = XEXP (op0, 0);
4619 rtx c = XEXP (op0, 1);
4620 enum rtx_code invcode = op0code == PLUS ? MINUS : PLUS;
4621 rtx tem = simplify_gen_binary (invcode, cmp_mode, op1, c);
4623 /* Detect an infinite recursive condition, where we oscillate at this
4624 simplification case between:
4625 A + B == C <---> C - B == A,
4626 where A, B, and C are all non-simplifiable constant expressions,
4627 usually SYMBOL_REFs. */
4628 if (GET_CODE (tem) == invcode
4629 && CONSTANT_P (x)
4630 && rtx_equal_p (c, XEXP (tem, 1)))
4631 return NULL_RTX;
4633 return simplify_gen_relational (code, mode, cmp_mode, x, tem);
4636 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0)) is
4637 the same as (zero_extract:SI FOO (const_int 1) BAR). */
4638 if (code == NE
4639 && op1 == const0_rtx
4640 && GET_MODE_CLASS (mode) == MODE_INT
4641 && cmp_mode != VOIDmode
4642 /* ??? Work-around BImode bugs in the ia64 backend. */
4643 && mode != BImode
4644 && cmp_mode != BImode
4645 && nonzero_bits (op0, cmp_mode) == 1
4646 && STORE_FLAG_VALUE == 1)
4647 return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
4648 ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
4649 : lowpart_subreg (mode, op0, cmp_mode);
4651 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
4652 if ((code == EQ || code == NE)
4653 && op1 == const0_rtx
4654 && op0code == XOR)
4655 return simplify_gen_relational (code, mode, cmp_mode,
4656 XEXP (op0, 0), XEXP (op0, 1));
4658 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
4659 if ((code == EQ || code == NE)
4660 && op0code == XOR
4661 && rtx_equal_p (XEXP (op0, 0), op1)
4662 && !side_effects_p (XEXP (op0, 0)))
4663 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 1),
4664 CONST0_RTX (mode));
4666 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
4667 if ((code == EQ || code == NE)
4668 && op0code == XOR
4669 && rtx_equal_p (XEXP (op0, 1), op1)
4670 && !side_effects_p (XEXP (op0, 1)))
4671 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4672 CONST0_RTX (mode));
4674 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
4675 if ((code == EQ || code == NE)
4676 && op0code == XOR
4677 && CONST_SCALAR_INT_P (op1)
4678 && CONST_SCALAR_INT_P (XEXP (op0, 1)))
4679 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4680 simplify_gen_binary (XOR, cmp_mode,
4681 XEXP (op0, 1), op1));
4683 /* (eq/ne (and x y) x) simplifies to (eq/ne (and (not y) x) 0), which
4684 can be implemented with a BICS instruction on some targets, or
4685 constant-folded if y is a constant. */
4686 if ((code == EQ || code == NE)
4687 && op0code == AND
4688 && rtx_equal_p (XEXP (op0, 0), op1)
4689 && !side_effects_p (op1)
4690 && op1 != CONST0_RTX (cmp_mode))
4692 rtx not_y = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 1), cmp_mode);
4693 rtx lhs = simplify_gen_binary (AND, cmp_mode, not_y, XEXP (op0, 0));
4695 return simplify_gen_relational (code, mode, cmp_mode, lhs,
4696 CONST0_RTX (cmp_mode));
4699 /* Likewise for (eq/ne (and x y) y). */
4700 if ((code == EQ || code == NE)
4701 && op0code == AND
4702 && rtx_equal_p (XEXP (op0, 1), op1)
4703 && !side_effects_p (op1)
4704 && op1 != CONST0_RTX (cmp_mode))
4706 rtx not_x = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 0), cmp_mode);
4707 rtx lhs = simplify_gen_binary (AND, cmp_mode, not_x, XEXP (op0, 1));
4709 return simplify_gen_relational (code, mode, cmp_mode, lhs,
4710 CONST0_RTX (cmp_mode));
4713 /* (eq/ne (bswap x) C1) simplifies to (eq/ne x C2) with C2 swapped. */
4714 if ((code == EQ || code == NE)
4715 && GET_CODE (op0) == BSWAP
4716 && CONST_SCALAR_INT_P (op1))
4717 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4718 simplify_gen_unary (BSWAP, cmp_mode,
4719 op1, cmp_mode));
4721 /* (eq/ne (bswap x) (bswap y)) simplifies to (eq/ne x y). */
4722 if ((code == EQ || code == NE)
4723 && GET_CODE (op0) == BSWAP
4724 && GET_CODE (op1) == BSWAP)
4725 return simplify_gen_relational (code, mode, cmp_mode,
4726 XEXP (op0, 0), XEXP (op1, 0));
4728 if (op0code == POPCOUNT && op1 == const0_rtx)
4729 switch (code)
4731 case EQ:
4732 case LE:
4733 case LEU:
4734 /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */
4735 return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
4736 XEXP (op0, 0), const0_rtx);
4738 case NE:
4739 case GT:
4740 case GTU:
4741 /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */
4742 return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
4743 XEXP (op0, 0), const0_rtx);
4745 default:
4746 break;
4749 return NULL_RTX;
4752 enum
4754 CMP_EQ = 1,
4755 CMP_LT = 2,
4756 CMP_GT = 4,
4757 CMP_LTU = 8,
4758 CMP_GTU = 16
4762 /* Convert the known results for EQ, LT, GT, LTU, GTU contained in
4763 KNOWN_RESULT to a CONST_INT, based on the requested comparison CODE
4764 For KNOWN_RESULT to make sense it should be either CMP_EQ, or the
4765 logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
4766 For floating-point comparisons, assume that the operands were ordered. */
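/* E.g. with KNOWN_RESULT == CMP_EQ, EQ and UNEQ fold to const_true_rtx
   while NE and LTGT fold to const0_rtx. */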
4768 static rtx
4769 comparison_result (enum rtx_code code, int known_results)
4771 switch (code)
4773 case EQ:
4774 case UNEQ:
4775 return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
4776 case NE:
4777 case LTGT:
4778 return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;
4780 case LT:
4781 case UNLT:
4782 return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
4783 case GE:
4784 case UNGE:
4785 return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;
4787 case GT:
4788 case UNGT:
4789 return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
4790 case LE:
4791 case UNLE:
4792 return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;
4794 case LTU:
4795 return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
4796 case GEU:
4797 return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;
4799 case GTU:
4800 return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
4801 case LEU:
4802 return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;
4804 case ORDERED:
4805 return const_true_rtx;
4806 case UNORDERED:
4807 return const0_rtx;
4808 default:
4809 gcc_unreachable ();
4813 /* Check if the given comparison (done in the given MODE) is actually
4814 a tautology or a contradiction. If the mode is VOIDmode, the
4815 comparison is done in "infinite precision". If no simplification
4816 is possible, this function returns zero. Otherwise, it returns
4817 either const_true_rtx or const0_rtx. */
4820 simplify_const_relational_operation (enum rtx_code code,
4821 machine_mode mode,
4822 rtx op0, rtx op1)
4824 rtx tem;
4825 rtx trueop0;
4826 rtx trueop1;
4828 gcc_assert (mode != VOIDmode
4829 || (GET_MODE (op0) == VOIDmode
4830 && GET_MODE (op1) == VOIDmode));
4832 /* If op0 is a compare, extract the comparison arguments from it. */
4833 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4835 op1 = XEXP (op0, 1);
4836 op0 = XEXP (op0, 0);
4838 if (GET_MODE (op0) != VOIDmode)
4839 mode = GET_MODE (op0);
4840 else if (GET_MODE (op1) != VOIDmode)
4841 mode = GET_MODE (op1);
4842 else
4843 return 0;
4846 /* We can't simplify MODE_CC values since we don't know what the
4847 actual comparison is. */
4848 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
4849 return 0;
4851 /* Make sure the constant is second. */
4852 if (swap_commutative_operands_p (op0, op1))
4854 std::swap (op0, op1);
4855 code = swap_condition (code);
4858 trueop0 = avoid_constant_pool_reference (op0);
4859 trueop1 = avoid_constant_pool_reference (op1);
4861 /* For integer comparisons of A and B maybe we can simplify A - B and can
4862 then simplify a comparison of that with zero. If A and B are both either
4863 a register or a CONST_INT, this can't help; testing for these cases will
4864 prevent infinite recursion here and speed things up.
4866 We can only do this for EQ and NE comparisons, as otherwise we may
4867 lose or introduce overflow, which we cannot disregard as undefined
4868 because we do not know the signedness of the operation on either the
4869 left or the right-hand side of the comparison. */
4871 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
4872 && (code == EQ || code == NE)
4873 && ! ((REG_P (op0) || CONST_INT_P (trueop0))
4874 && (REG_P (op1) || CONST_INT_P (trueop1)))
4875 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
4876 /* We cannot do this if tem is a nonzero address. */
4877 && ! nonzero_address_p (tem))
4878 return simplify_const_relational_operation (signed_condition (code),
4879 mode, tem, const0_rtx);
4881 if (! HONOR_NANS (mode) && code == ORDERED)
4882 return const_true_rtx;
4884 if (! HONOR_NANS (mode) && code == UNORDERED)
4885 return const0_rtx;
4887 /* For modes without NaNs, if the two operands are equal, we know the
4888 result except if they have side-effects. Even with NaNs we know
4889 the result of unordered comparisons and, if signaling NaNs are
4890 irrelevant, also the result of LT/GT/LTGT. */
4891 if ((! HONOR_NANS (trueop0)
4892 || code == UNEQ || code == UNLE || code == UNGE
4893 || ((code == LT || code == GT || code == LTGT)
4894 && ! HONOR_SNANS (trueop0)))
4895 && rtx_equal_p (trueop0, trueop1)
4896 && ! side_effects_p (trueop0))
4897 return comparison_result (code, CMP_EQ);
4899 /* If the operands are floating-point constants, see if we can fold
4900 the result. */
4901 if (CONST_DOUBLE_AS_FLOAT_P (trueop0)
4902 && CONST_DOUBLE_AS_FLOAT_P (trueop1)
4903 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
4905 const REAL_VALUE_TYPE *d0 = CONST_DOUBLE_REAL_VALUE (trueop0);
4906 const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);
4908 /* Comparisons are unordered iff at least one of the values is NaN. */
4909 if (REAL_VALUE_ISNAN (*d0) || REAL_VALUE_ISNAN (*d1))
4910 switch (code)
4912 case UNEQ:
4913 case UNLT:
4914 case UNGT:
4915 case UNLE:
4916 case UNGE:
4917 case NE:
4918 case UNORDERED:
4919 return const_true_rtx;
4920 case EQ:
4921 case LT:
4922 case GT:
4923 case LE:
4924 case GE:
4925 case LTGT:
4926 case ORDERED:
4927 return const0_rtx;
4928 default:
4929 return 0;
4932 return comparison_result (code,
4933 (real_equal (d0, d1) ? CMP_EQ :
4934 real_less (d0, d1) ? CMP_LT : CMP_GT));
4937 /* Otherwise, see if the operands are both integers. */
4938 if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
4939 && CONST_SCALAR_INT_P (trueop0) && CONST_SCALAR_INT_P (trueop1))
4941 /* It would be nice if we really had a mode here. However, the
4942 largest int representable on the target is as good as
4943 infinite. */
4944 machine_mode cmode = (mode == VOIDmode) ? MAX_MODE_INT : mode;
4945 rtx_mode_t ptrueop0 = std::make_pair (trueop0, cmode);
4946 rtx_mode_t ptrueop1 = std::make_pair (trueop1, cmode);
4948 if (wi::eq_p (ptrueop0, ptrueop1))
4949 return comparison_result (code, CMP_EQ);
4950 else
4952 int cr = wi::lts_p (ptrueop0, ptrueop1) ? CMP_LT : CMP_GT;
4953 cr |= wi::ltu_p (ptrueop0, ptrueop1) ? CMP_LTU : CMP_GTU;
4954 return comparison_result (code, cr);
4958 /* Optimize comparisons with upper and lower bounds. */
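/* E.g. if nonzero_bits shows that TRUEOP0 fits in [0, 15], then
   (leu x 15) folds to const_true_rtx and (gtu x 20) to const0_rtx. */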
4959 if (HWI_COMPUTABLE_MODE_P (mode)
4960 && CONST_INT_P (trueop1)
4961 && !side_effects_p (trueop0))
4963 int sign;
4964 unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, mode);
4965 HOST_WIDE_INT val = INTVAL (trueop1);
4966 HOST_WIDE_INT mmin, mmax;
4968 if (code == GEU
4969 || code == LEU
4970 || code == GTU
4971 || code == LTU)
4972 sign = 0;
4973 else
4974 sign = 1;
4976 /* Get a reduced range if the sign bit is zero. */
4977 if (nonzero <= (GET_MODE_MASK (mode) >> 1))
4979 mmin = 0;
4980 mmax = nonzero;
4982 else
4984 rtx mmin_rtx, mmax_rtx;
4985 get_mode_bounds (mode, sign, mode, &mmin_rtx, &mmax_rtx);
4987 mmin = INTVAL (mmin_rtx);
4988 mmax = INTVAL (mmax_rtx);
4989 if (sign)
4991 unsigned int sign_copies = num_sign_bit_copies (trueop0, mode);
4993 mmin >>= (sign_copies - 1);
4994 mmax >>= (sign_copies - 1);
4998 switch (code)
5000 /* x >= y is always true for y <= mmin, always false for y > mmax. */
5001 case GEU:
5002 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
5003 return const_true_rtx;
5004 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
5005 return const0_rtx;
5006 break;
5007 case GE:
5008 if (val <= mmin)
5009 return const_true_rtx;
5010 if (val > mmax)
5011 return const0_rtx;
5012 break;
5014 /* x <= y is always true for y >= mmax, always false for y < mmin. */
5015 case LEU:
5016 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
5017 return const_true_rtx;
5018 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
5019 return const0_rtx;
5020 break;
5021 case LE:
5022 if (val >= mmax)
5023 return const_true_rtx;
5024 if (val < mmin)
5025 return const0_rtx;
5026 break;
5028 case EQ:
5029 /* x == y is always false for y out of range. */
5030 if (val < mmin || val > mmax)
5031 return const0_rtx;
5032 break;
5034 /* x > y is always false for y >= mmax, always true for y < mmin. */
5035 case GTU:
5036 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
5037 return const0_rtx;
5038 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
5039 return const_true_rtx;
5040 break;
5041 case GT:
5042 if (val >= mmax)
5043 return const0_rtx;
5044 if (val < mmin)
5045 return const_true_rtx;
5046 break;
5048 /* x < y is always false for y <= mmin, always true for y > mmax. */
5049 case LTU:
5050 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
5051 return const0_rtx;
5052 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
5053 return const_true_rtx;
5054 break;
5055 case LT:
5056 if (val <= mmin)
5057 return const0_rtx;
5058 if (val > mmax)
5059 return const_true_rtx;
5060 break;
5062 case NE:
5063 /* x != y is always true for y out of range. */
5064 if (val < mmin || val > mmax)
5065 return const_true_rtx;
5066 break;
5068 default:
5069 break;
5073 /* Optimize integer comparisons with zero. */
5074 if (trueop1 == const0_rtx && !side_effects_p (trueop0))
5076 /* Some addresses are known to be nonzero. We don't know
5077 their sign, but equality comparisons are known. */
5078 if (nonzero_address_p (trueop0))
5080 if (code == EQ || code == LEU)
5081 return const0_rtx;
5082 if (code == NE || code == GTU)
5083 return const_true_rtx;
5086 /* See if the first operand is an IOR with a constant. If so, we
5087 may be able to determine the result of this comparison. */
5088 if (GET_CODE (op0) == IOR)
5090 rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
5091 if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
5093 int sign_bitnum = GET_MODE_PRECISION (mode) - 1;
5094 int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
5095 && (UINTVAL (inner_const)
5096 & ((unsigned HOST_WIDE_INT) 1
5097 << sign_bitnum)));
5099 switch (code)
5101 case EQ:
5102 case LEU:
5103 return const0_rtx;
5104 case NE:
5105 case GTU:
5106 return const_true_rtx;
5107 case LT:
5108 case LE:
5109 if (has_sign)
5110 return const_true_rtx;
5111 break;
5112 case GT:
5113 case GE:
5114 if (has_sign)
5115 return const0_rtx;
5116 break;
5117 default:
5118 break;
5124 /* Optimize comparison of ABS with zero. */
5125 if (trueop1 == CONST0_RTX (mode) && !side_effects_p (trueop0)
5126 && (GET_CODE (trueop0) == ABS
5127 || (GET_CODE (trueop0) == FLOAT_EXTEND
5128 && GET_CODE (XEXP (trueop0, 0)) == ABS)))
5130 switch (code)
5132 case LT:
5133 /* Optimize abs(x) < 0.0. */
5134 if (!HONOR_SNANS (mode)
5135 && (!INTEGRAL_MODE_P (mode)
5136 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
5138 if (INTEGRAL_MODE_P (mode)
5139 && (issue_strict_overflow_warning
5140 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
5141 warning (OPT_Wstrict_overflow,
5142 ("assuming signed overflow does not occur when "
5143 "assuming abs (x) < 0 is false"));
5144 return const0_rtx;
5146 break;
5148 case GE:
5149 /* Optimize abs(x) >= 0.0. */
5150 if (!HONOR_NANS (mode)
5151 && (!INTEGRAL_MODE_P (mode)
5152 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
5154 if (INTEGRAL_MODE_P (mode)
5155 && (issue_strict_overflow_warning
5156 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
5157 warning (OPT_Wstrict_overflow,
5158 ("assuming signed overflow does not occur when "
5159 "assuming abs (x) >= 0 is true"));
5160 return const_true_rtx;
5162 break;
5164 case UNGE:
5165 /* Optimize ! (abs(x) < 0.0). */
5166 return const_true_rtx;
5168 default:
5169 break;
5173 return 0;
5176 /* Simplify CODE, an operation with result mode MODE and three operands,
5177 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
5178 a constant. Return 0 if no simplification is possible. */
5181 simplify_ternary_operation (enum rtx_code code, machine_mode mode,
5182 machine_mode op0_mode, rtx op0, rtx op1,
5183 rtx op2)
5185 unsigned int width = GET_MODE_PRECISION (mode);
5186 bool any_change = false;
5187 rtx tem, trueop2;
5189 /* VOIDmode means "infinite" precision. */
5190 if (width == 0)
5191 width = HOST_BITS_PER_WIDE_INT;
5193 switch (code)
5195 case FMA:
5196 /* Simplify negations around the multiplication. */
5197 /* -a * -b + c => a * b + c. */
5198 if (GET_CODE (op0) == NEG)
5200 tem = simplify_unary_operation (NEG, mode, op1, mode);
5201 if (tem)
5202 op1 = tem, op0 = XEXP (op0, 0), any_change = true;
5204 else if (GET_CODE (op1) == NEG)
5206 tem = simplify_unary_operation (NEG, mode, op0, mode);
5207 if (tem)
5208 op0 = tem, op1 = XEXP (op1, 0), any_change = true;
5211 /* Canonicalize the two multiplication operands. */
5212 /* a * -b + c => -b * a + c. */
5213 if (swap_commutative_operands_p (op0, op1))
5214 std::swap (op0, op1), any_change = true;
5216 if (any_change)
5217 return gen_rtx_FMA (mode, op0, op1, op2);
5218 return NULL_RTX;
5220 case SIGN_EXTRACT:
5221 case ZERO_EXTRACT:
5222 if (CONST_INT_P (op0)
5223 && CONST_INT_P (op1)
5224 && CONST_INT_P (op2)
5225 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
5226 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
5228 /* Extracting a bit-field from a constant. */
5229 unsigned HOST_WIDE_INT val = UINTVAL (op0);
5230 HOST_WIDE_INT op1val = INTVAL (op1);
5231 HOST_WIDE_INT op2val = INTVAL (op2);
5232 if (BITS_BIG_ENDIAN)
5233 val >>= GET_MODE_PRECISION (op0_mode) - op2val - op1val;
5234 else
5235 val >>= op2val;
5237 if (HOST_BITS_PER_WIDE_INT != op1val)
5239 /* First zero-extend. */
5240 val &= ((unsigned HOST_WIDE_INT) 1 << op1val) - 1;
5241 /* If desired, propagate sign bit. */
5242 if (code == SIGN_EXTRACT
5243 && (val & ((unsigned HOST_WIDE_INT) 1 << (op1val - 1)))
5244 != 0)
5245 val |= ~ (((unsigned HOST_WIDE_INT) 1 << op1val) - 1);
5248 return gen_int_mode (val, mode);
5250 break;
5252 case IF_THEN_ELSE:
5253 if (CONST_INT_P (op0))
5254 return op0 != const0_rtx ? op1 : op2;
5256 /* Convert c ? a : a into "a". */
5257 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
5258 return op1;
5260 /* Convert a != b ? a : b into "a". */
5261 if (GET_CODE (op0) == NE
5262 && ! side_effects_p (op0)
5263 && ! HONOR_NANS (mode)
5264 && ! HONOR_SIGNED_ZEROS (mode)
5265 && ((rtx_equal_p (XEXP (op0, 0), op1)
5266 && rtx_equal_p (XEXP (op0, 1), op2))
5267 || (rtx_equal_p (XEXP (op0, 0), op2)
5268 && rtx_equal_p (XEXP (op0, 1), op1))))
5269 return op1;
5271 /* Convert a == b ? a : b into "b". */
5272 if (GET_CODE (op0) == EQ
5273 && ! side_effects_p (op0)
5274 && ! HONOR_NANS (mode)
5275 && ! HONOR_SIGNED_ZEROS (mode)
5276 && ((rtx_equal_p (XEXP (op0, 0), op1)
5277 && rtx_equal_p (XEXP (op0, 1), op2))
5278 || (rtx_equal_p (XEXP (op0, 0), op2)
5279 && rtx_equal_p (XEXP (op0, 1), op1))))
5280 return op2;
5282 /* Convert (!c) != {0,...,0} ? a : b into
5283 c != {0,...,0} ? b : a for vector modes. */
5284 if (VECTOR_MODE_P (GET_MODE (op1))
5285 && GET_CODE (op0) == NE
5286 && GET_CODE (XEXP (op0, 0)) == NOT
5287 && GET_CODE (XEXP (op0, 1)) == CONST_VECTOR)
5289 rtx cv = XEXP (op0, 1);
5290 int nunits = CONST_VECTOR_NUNITS (cv);
5291 bool ok = true;
5292 for (int i = 0; i < nunits; ++i)
5293 if (CONST_VECTOR_ELT (cv, i) != const0_rtx)
5295 ok = false;
5296 break;
5298 if (ok)
5300 rtx new_op0 = gen_rtx_NE (GET_MODE (op0),
5301 XEXP (XEXP (op0, 0), 0),
5302 XEXP (op0, 1));
5303 rtx retval = gen_rtx_IF_THEN_ELSE (mode, new_op0, op2, op1);
5304 return retval;
5308 if (COMPARISON_P (op0) && ! side_effects_p (op0))
5310 machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
5311 ? GET_MODE (XEXP (op0, 1))
5312 : GET_MODE (XEXP (op0, 0)));
5313 rtx temp;
5315 /* Look for happy constants in op1 and op2. */
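/* If (op1, op2) is (STORE_FLAG_VALUE, 0), the IF_THEN_ELSE is just the
   comparison itself; if it is (0, STORE_FLAG_VALUE), it is the reversed
   comparison, provided the reversal is known. */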
5316 if (CONST_INT_P (op1) && CONST_INT_P (op2))
5318 HOST_WIDE_INT t = INTVAL (op1);
5319 HOST_WIDE_INT f = INTVAL (op2);
5321 if (t == STORE_FLAG_VALUE && f == 0)
5322 code = GET_CODE (op0);
5323 else if (t == 0 && f == STORE_FLAG_VALUE)
5325 enum rtx_code tmp;
5326 tmp = reversed_comparison_code (op0, NULL_RTX);
5327 if (tmp == UNKNOWN)
5328 break;
5329 code = tmp;
5331 else
5332 break;
5334 return simplify_gen_relational (code, mode, cmp_mode,
5335 XEXP (op0, 0), XEXP (op0, 1));
5338 if (cmp_mode == VOIDmode)
5339 cmp_mode = op0_mode;
5340 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
5341 cmp_mode, XEXP (op0, 0),
5342 XEXP (op0, 1));
5344 /* See if any simplifications were possible. */
5345 if (temp)
5347 if (CONST_INT_P (temp))
5348 return temp == const0_rtx ? op2 : op1;
5349 else if (temp)
5350 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
5353 break;
5355 case VEC_MERGE:
5356 gcc_assert (GET_MODE (op0) == mode);
5357 gcc_assert (GET_MODE (op1) == mode);
5358 gcc_assert (VECTOR_MODE_P (mode));
5359 trueop2 = avoid_constant_pool_reference (op2);
5360 if (CONST_INT_P (trueop2))
5362 int elt_size = GET_MODE_UNIT_SIZE (mode);
5363 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
5364 unsigned HOST_WIDE_INT sel = UINTVAL (trueop2);
5365 unsigned HOST_WIDE_INT mask;
5366 if (n_elts == HOST_BITS_PER_WIDE_INT)
5367 mask = -1;
5368 else
5369 mask = ((unsigned HOST_WIDE_INT) 1 << n_elts) - 1;
5371 if (!(sel & mask) && !side_effects_p (op0))
5372 return op1;
5373 if ((sel & mask) == mask && !side_effects_p (op1))
5374 return op0;
5376 rtx trueop0 = avoid_constant_pool_reference (op0);
5377 rtx trueop1 = avoid_constant_pool_reference (op1);
5378 if (GET_CODE (trueop0) == CONST_VECTOR
5379 && GET_CODE (trueop1) == CONST_VECTOR)
5381 rtvec v = rtvec_alloc (n_elts);
5382 unsigned int i;
5384 for (i = 0; i < n_elts; i++)
5385 RTVEC_ELT (v, i) = ((sel & ((unsigned HOST_WIDE_INT) 1 << i))
5386 ? CONST_VECTOR_ELT (trueop0, i)
5387 : CONST_VECTOR_ELT (trueop1, i));
5388 return gen_rtx_CONST_VECTOR (mode, v);
5391 /* Replace (vec_merge (vec_merge a b m) c n) with (vec_merge b c n)
5392 if no element from a appears in the result. */
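/* sel0 below is the mask of the inner VEC_MERGE. If (sel & sel0) is
   empty, no element of a can reach the result, so b stands in for the
   inner merge; symmetrically for (sel & ~sel0). */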
5393 if (GET_CODE (op0) == VEC_MERGE)
5395 tem = avoid_constant_pool_reference (XEXP (op0, 2));
5396 if (CONST_INT_P (tem))
5398 unsigned HOST_WIDE_INT sel0 = UINTVAL (tem);
5399 if (!(sel & sel0 & mask) && !side_effects_p (XEXP (op0, 0)))
5400 return simplify_gen_ternary (code, mode, mode,
5401 XEXP (op0, 1), op1, op2);
5402 if (!(sel & ~sel0 & mask) && !side_effects_p (XEXP (op0, 1)))
5403 return simplify_gen_ternary (code, mode, mode,
5404 XEXP (op0, 0), op1, op2);
5407 if (GET_CODE (op1) == VEC_MERGE)
5409 tem = avoid_constant_pool_reference (XEXP (op1, 2));
5410 if (CONST_INT_P (tem))
5412 unsigned HOST_WIDE_INT sel1 = UINTVAL (tem);
5413 if (!(~sel & sel1 & mask) && !side_effects_p (XEXP (op1, 0)))
5414 return simplify_gen_ternary (code, mode, mode,
5415 op0, XEXP (op1, 1), op2);
5416 if (!(~sel & ~sel1 & mask) && !side_effects_p (XEXP (op1, 1)))
5417 return simplify_gen_ternary (code, mode, mode,
5418 op0, XEXP (op1, 0), op2);
5422 /* Replace (vec_merge (vec_duplicate (vec_select a parallel (i))) a 1 << i)
5423 with a. */
5424 if (GET_CODE (op0) == VEC_DUPLICATE
5425 && GET_CODE (XEXP (op0, 0)) == VEC_SELECT
5426 && GET_CODE (XEXP (XEXP (op0, 0), 1)) == PARALLEL
5427 && mode_nunits[GET_MODE (XEXP (op0, 0))] == 1)
5429 tem = XVECEXP ((XEXP (XEXP (op0, 0), 1)), 0, 0);
5430 if (CONST_INT_P (tem) && CONST_INT_P (op2))
5432 if (XEXP (XEXP (op0, 0), 0) == op1
5433 && UINTVAL (op2) == HOST_WIDE_INT_1U << UINTVAL (tem))
5434 return op1;
5439 if (rtx_equal_p (op0, op1)
5440 && !side_effects_p (op2) && !side_effects_p (op1))
5441 return op0;
5443 break;
5445 default:
5446 gcc_unreachable ();
5449 return 0;
5452 /* Evaluate a SUBREG of a CONST_INT or CONST_WIDE_INT or CONST_DOUBLE
5453 or CONST_FIXED or CONST_VECTOR, returning another CONST_INT or
5454 CONST_WIDE_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.
5456 Works by unpacking OP into a collection of 8-bit values
5457 represented as a little-endian array of 'unsigned char', selecting by BYTE,
5458 and then repacking them again for OUTERMODE. */
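/* For example, an SImode SUBREG at byte 0 of a DImode constant unpacks
   the eight value bytes, selects the four at the endian-adjusted offset,
   and repacks them as a CONST_INT. */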
5460 static rtx
5461 simplify_immed_subreg (machine_mode outermode, rtx op,
5462 machine_mode innermode, unsigned int byte)
5464 enum {
5465 value_bit = 8,
5466 value_mask = (1 << value_bit) - 1
5468 unsigned char value[MAX_BITSIZE_MODE_ANY_MODE / value_bit];
5469 int value_start;
5470 int i;
5471 int elem;
5473 int num_elem;
5474 rtx * elems;
5475 int elem_bitsize;
5476 rtx result_s;
5477 rtvec result_v = NULL;
5478 enum mode_class outer_class;
5479 machine_mode outer_submode;
5480 int max_bitsize;
5482 /* Some ports misuse CCmode. */
5483 if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
5484 return op;
5486 /* We have no way to represent a complex constant at the rtl level. */
5487 if (COMPLEX_MODE_P (outermode))
5488 return NULL_RTX;
5490 /* We support any size mode. */
5491 max_bitsize = MAX (GET_MODE_BITSIZE (outermode),
5492 GET_MODE_BITSIZE (innermode));
5494 /* Unpack the value. */
5496 if (GET_CODE (op) == CONST_VECTOR)
5498 num_elem = CONST_VECTOR_NUNITS (op);
5499 elems = &CONST_VECTOR_ELT (op, 0);
5500 elem_bitsize = GET_MODE_UNIT_BITSIZE (innermode);
5502 else
5504 num_elem = 1;
5505 elems = &op;
5506 elem_bitsize = max_bitsize;
5508 /* If this asserts, it is too complicated; reducing value_bit may help. */
5509 gcc_assert (BITS_PER_UNIT % value_bit == 0);
5510 /* I don't know how to handle endianness of sub-units. */
5511 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
5513 for (elem = 0; elem < num_elem; elem++)
5515 unsigned char * vp;
5516 rtx el = elems[elem];
5518 /* Vectors are kept in target memory order. (This is probably
5519 a mistake.) */
5521 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5522 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5523 / BITS_PER_UNIT);
5524 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5525 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5526 unsigned bytele = (subword_byte % UNITS_PER_WORD
5527 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5528 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
5531 switch (GET_CODE (el))
5533 case CONST_INT:
5534 for (i = 0;
5535 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5536 i += value_bit)
5537 *vp++ = INTVAL (el) >> i;
5538 /* CONST_INTs are always logically sign-extended. */
5539 for (; i < elem_bitsize; i += value_bit)
5540 *vp++ = INTVAL (el) < 0 ? -1 : 0;
5541 break;
5543 case CONST_WIDE_INT:
5545 rtx_mode_t val = std::make_pair (el, innermode);
5546 unsigned char extend = wi::sign_mask (val);
5548 for (i = 0; i < elem_bitsize; i += value_bit)
5549 *vp++ = wi::extract_uhwi (val, i, value_bit);
5550 for (; i < elem_bitsize; i += value_bit)
5551 *vp++ = extend;
5553 break;
5555 case CONST_DOUBLE:
5556 if (TARGET_SUPPORTS_WIDE_INT == 0 && GET_MODE (el) == VOIDmode)
5558 unsigned char extend = 0;
5559 /* If this triggers, someone should have generated a
5560 CONST_INT instead. */
5561 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
5563 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5564 *vp++ = CONST_DOUBLE_LOW (el) >> i;
5565 while (i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize)
5567 *vp++
5568 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
5569 i += value_bit;
5572 if (CONST_DOUBLE_HIGH (el) >> (HOST_BITS_PER_WIDE_INT - 1))
5573 extend = -1;
5574 for (; i < elem_bitsize; i += value_bit)
5575 *vp++ = extend;
5577 else
5579 /* This is big enough for anything on the platform. */
5580 long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32];
5581 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
5583 gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
5584 gcc_assert (bitsize <= elem_bitsize);
5585 gcc_assert (bitsize % value_bit == 0);
5587 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
5588 GET_MODE (el));
5590 /* real_to_target produces its result in words affected by
5591 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5592 and use WORDS_BIG_ENDIAN instead; see the documentation
5593 of SUBREG in rtl.texi. */
5594 for (i = 0; i < bitsize; i += value_bit)
5596 int ibase;
5597 if (WORDS_BIG_ENDIAN)
5598 ibase = bitsize - 1 - i;
5599 else
5600 ibase = i;
5601 *vp++ = tmp[ibase / 32] >> i % 32;
5604 /* It shouldn't matter what's done here, so fill it with
5605 zero. */
5606 for (; i < elem_bitsize; i += value_bit)
5607 *vp++ = 0;
5609 break;
5611 case CONST_FIXED:
5612 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
5614 for (i = 0; i < elem_bitsize; i += value_bit)
5615 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5617 else
5619 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5620 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5621 for (; i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize;
5622 i += value_bit)
5623 *vp++ = CONST_FIXED_VALUE_HIGH (el)
5624 >> (i - HOST_BITS_PER_WIDE_INT);
5625 for (; i < elem_bitsize; i += value_bit)
5626 *vp++ = 0;
5628 break;
5630 default:
5631 gcc_unreachable ();
5635 /* Now, pick the right byte to start with. */
5636 /* Renumber BYTE so that the least-significant byte is byte 0. A special
5637 case is paradoxical SUBREGs, which shouldn't be adjusted since they
5638 will already have offset 0. */
5639 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
5641 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
5642 - byte);
5643 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5644 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5645 byte = (subword_byte % UNITS_PER_WORD
5646 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5649 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
5650 so if it's become negative it will instead be very large.) */
5651 gcc_assert (byte < GET_MODE_SIZE (innermode));
5653 /* Convert from bytes to chunks of size value_bit. */
5654 value_start = byte * (BITS_PER_UNIT / value_bit);
5656 /* Re-pack the value. */
5657 num_elem = GET_MODE_NUNITS (outermode);
5659 if (VECTOR_MODE_P (outermode))
5661 result_v = rtvec_alloc (num_elem);
5662 elems = &RTVEC_ELT (result_v, 0);
5664 else
5665 elems = &result_s;
5667 outer_submode = GET_MODE_INNER (outermode);
5668 outer_class = GET_MODE_CLASS (outer_submode);
5669 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
5671 gcc_assert (elem_bitsize % value_bit == 0);
5672 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
5674 for (elem = 0; elem < num_elem; elem++)
5676 unsigned char *vp;
5678 /* Vectors are stored in target memory order. (This is probably
5679 a mistake.) */
5681 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5682 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5683 / BITS_PER_UNIT);
5684 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5685 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5686 unsigned bytele = (subword_byte % UNITS_PER_WORD
5687 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5688 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
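/* VP now points at the first VALUE_BIT-sized chunk of this element,
   with the chunks ordered least-significant first. */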
5691 switch (outer_class)
5693 case MODE_INT:
5694 case MODE_PARTIAL_INT:
5696 int u;
5697 int base = 0;
5698 int units
5699 = (GET_MODE_BITSIZE (outer_submode) + HOST_BITS_PER_WIDE_INT - 1)
5700 / HOST_BITS_PER_WIDE_INT;
5701 HOST_WIDE_INT tmp[MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_WIDE_INT];
5702 wide_int r;
5704 if (GET_MODE_PRECISION (outer_submode) > MAX_BITSIZE_MODE_ANY_INT)
5705 return NULL_RTX;
5706 for (u = 0; u < units; u++)
5708 unsigned HOST_WIDE_INT buf = 0;
5709 for (i = 0;
5710 i < HOST_BITS_PER_WIDE_INT && base + i < elem_bitsize;
5711 i += value_bit)
5712 buf |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5714 tmp[u] = buf;
5715 base += HOST_BITS_PER_WIDE_INT;
5717 r = wide_int::from_array (tmp, units,
5718 GET_MODE_PRECISION (outer_submode));
5719 #if TARGET_SUPPORTS_WIDE_INT == 0
5720 /* Make sure r will fit into CONST_INT or CONST_DOUBLE. */
5721 if (wi::min_precision (r, SIGNED) > HOST_BITS_PER_DOUBLE_INT)
5722 return NULL_RTX;
5723 #endif
5724 elems[elem] = immed_wide_int_const (r, outer_submode);
5726 break;
5728 case MODE_FLOAT:
5729 case MODE_DECIMAL_FLOAT:
5731 REAL_VALUE_TYPE r;
5732 long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32];
5734 /* real_from_target wants its input in words affected by
5735 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5736 and use WORDS_BIG_ENDIAN instead; see the documentation
5737 of SUBREG in rtl.texi. */
5738 for (i = 0; i < max_bitsize / 32; i++)
5739 tmp[i] = 0;
5740 for (i = 0; i < elem_bitsize; i += value_bit)
5742 int ibase;
5743 if (WORDS_BIG_ENDIAN)
5744 ibase = elem_bitsize - 1 - i;
5745 else
5746 ibase = i;
5747 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
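/* TMP now holds the element's bit pattern in target format; let
   real_from_target decode it. */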
5750 real_from_target (&r, tmp, outer_submode);
5751 elems[elem] = const_double_from_real_value (r, outer_submode);
5753 break;
5755 case MODE_FRACT:
5756 case MODE_UFRACT:
5757 case MODE_ACCUM:
5758 case MODE_UACCUM:
5760 FIXED_VALUE_TYPE f;
5761 f.data.low = 0;
5762 f.data.high = 0;
5763 f.mode = outer_submode;
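/* Reassemble the fixed-point value: the low chunks go into f.data.low,
   anything beyond HOST_BITS_PER_WIDE_INT into f.data.high. */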
5765 for (i = 0;
5766 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5767 i += value_bit)
5768 f.data.low |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5769 for (; i < elem_bitsize; i += value_bit)
5770 f.data.high |= ((unsigned HOST_WIDE_INT)(*vp++ & value_mask)
5771 << (i - HOST_BITS_PER_WIDE_INT));
5773 elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
5775 break;
5777 default:
5778 gcc_unreachable ();
5781 if (VECTOR_MODE_P (outermode))
5782 return gen_rtx_CONST_VECTOR (outermode, result_v);
5783 else
5784 return result_s;
5787 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
5788 Return 0 if no simplifications are possible. */
5789 rtx
5790 simplify_subreg (machine_mode outermode, rtx op,
5791 machine_mode innermode, unsigned int byte)
5793 /* Little bit of sanity checking. */
5794 gcc_assert (innermode != VOIDmode);
5795 gcc_assert (outermode != VOIDmode);
5796 gcc_assert (innermode != BLKmode);
5797 gcc_assert (outermode != BLKmode);
5799 gcc_assert (GET_MODE (op) == innermode
5800 || GET_MODE (op) == VOIDmode);
5802 if ((byte % GET_MODE_SIZE (outermode)) != 0)
5803 return NULL_RTX;
5805 if (byte >= GET_MODE_SIZE (innermode))
5806 return NULL_RTX;
5808 if (outermode == innermode && !byte)
5809 return op;
5811 if (CONST_SCALAR_INT_P (op)
5812 || CONST_DOUBLE_AS_FLOAT_P (op)
5813 || GET_CODE (op) == CONST_FIXED
5814 || GET_CODE (op) == CONST_VECTOR)
5815 return simplify_immed_subreg (outermode, op, innermode, byte);
5817 /* Changing mode twice with SUBREG => just change it once,
5818 or not at all if changing back to OP's starting mode. */
5819 if (GET_CODE (op) == SUBREG)
5821 machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
5822 int final_offset = byte + SUBREG_BYTE (op);
5823 rtx newx;
5825 if (outermode == innermostmode
5826 && byte == 0 && SUBREG_BYTE (op) == 0)
5827 return SUBREG_REG (op);
5829 /* SUBREG_BYTE represents the offset, as if the value were stored
5830 in memory. The irritating exception is the paradoxical subreg,
5831 where we define SUBREG_BYTE to be 0; on big-endian machines the
5832 value would otherwise be negative. For a moment, undo this exception. */
5833 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5835 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
5836 if (WORDS_BIG_ENDIAN)
5837 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5838 if (BYTES_BIG_ENDIAN)
5839 final_offset += difference % UNITS_PER_WORD;
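/* Illustrative example: for a paradoxical (subreg:DI (subreg:SI ...) 0)
   on a fully big-endian 32-bit target (assuming 4-byte SImode and
   8-byte DImode), DIFFERENCE is -4, so FINAL_OFFSET is decreased by 4,
   recovering the negative offset that the exception hides. */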
5841 if (SUBREG_BYTE (op) == 0
5842 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
5844 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
5845 if (WORDS_BIG_ENDIAN)
5846 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5847 if (BYTES_BIG_ENDIAN)
5848 final_offset += difference % UNITS_PER_WORD;
5851 /* See whether resulting subreg will be paradoxical. */
5852 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
5854 /* In nonparadoxical subregs we can't handle negative offsets. */
5855 if (final_offset < 0)
5856 return NULL_RTX;
5857 /* Bail out in case resulting subreg would be incorrect. */
5858 if (final_offset % GET_MODE_SIZE (outermode)
5859 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
5860 return NULL_RTX;
5862 else
5864 int offset = 0;
5865 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
5867 /* In a paradoxical subreg, see if we are still looking at the lower
5868 part. If so, our SUBREG_BYTE will be 0. */
5869 if (WORDS_BIG_ENDIAN)
5870 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5871 if (BYTES_BIG_ENDIAN)
5872 offset += difference % UNITS_PER_WORD;
5873 if (offset == final_offset)
5874 final_offset = 0;
5875 else
5876 return NULL_RTX;
5879 /* Recurse for further possible simplifications. */
5880 newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
5881 final_offset);
5882 if (newx)
5883 return newx;
5884 if (validate_subreg (outermode, innermostmode,
5885 SUBREG_REG (op), final_offset))
5887 newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
5888 if (SUBREG_PROMOTED_VAR_P (op)
5889 && SUBREG_PROMOTED_SIGN (op) >= 0
5890 && GET_MODE_CLASS (outermode) == MODE_INT
5891 && IN_RANGE (GET_MODE_SIZE (outermode),
5892 GET_MODE_SIZE (innermode),
5893 GET_MODE_SIZE (innermostmode))
5894 && subreg_lowpart_p (newx))
5896 SUBREG_PROMOTED_VAR_P (newx) = 1;
5897 SUBREG_PROMOTED_SET (newx, SUBREG_PROMOTED_GET (op));
5899 return newx;
5901 return NULL_RTX;
5904 /* SUBREG of a hard register => just change the register number
5905 and/or mode. If the hard register is not valid in that mode,
5906 suppress this simplification. If the hard register is the stack,
5907 frame, or argument pointer, leave this as a SUBREG. */
5909 if (REG_P (op) && HARD_REGISTER_P (op))
5911 unsigned int regno, final_regno;
5913 regno = REGNO (op);
5914 final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
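/* simplify_subreg_regno returns the hard register number that holds the
   requested part, or an invalid register number if the subreg cannot be
   expressed as a plain hard register. */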
5915 if (HARD_REGISTER_NUM_P (final_regno))
5917 rtx x;
5918 int final_offset = byte;
5920 /* Adjust offset for paradoxical subregs. */
5921 if (byte == 0
5922 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5924 int difference = (GET_MODE_SIZE (innermode)
5925 - GET_MODE_SIZE (outermode));
5926 if (WORDS_BIG_ENDIAN)
5927 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5928 if (BYTES_BIG_ENDIAN)
5929 final_offset += difference % UNITS_PER_WORD;
5932 x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);
5934 /* Propagate the original regno. We have no way to specify an offset
5935 inside the original regno, so do so only for the lowpart. The
5936 information is used only by alias analysis, which cannot grok
5937 partial registers anyway. */
5939 if (subreg_lowpart_offset (outermode, innermode) == byte)
5940 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
5941 return x;
5945 /* If we have a SUBREG of a register that we are replacing with a MEM,
5946 make a new MEM and try replacing the SUBREG with it. Don't do this
5947 if the MEM has a mode-dependent address or if we would be
5948 widening it. */
5950 if (MEM_P (op)
5951 && ! mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op))
5952 /* Allow splitting of volatile memory references in case we don't
5953 have an instruction to move the whole thing. */
5954 && (! MEM_VOLATILE_P (op)
5955 || ! have_insn_for (SET, innermode))
5956 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
5957 return adjust_address_nv (op, outermode, byte);
5959 /* Handle complex values represented as CONCAT
5960 of real and imaginary part. */
5961 if (GET_CODE (op) == CONCAT)
5963 unsigned int part_size, final_offset;
5964 rtx part, res;
5966 part_size = GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)));
5967 if (byte < part_size)
5969 part = XEXP (op, 0);
5970 final_offset = byte;
5972 else
5974 part = XEXP (op, 1);
5975 final_offset = byte - part_size;
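/* E.g. for a CONCAT of two 8-byte parts, BYTE 8 selects the second
   (imaginary) part at offset 0 within it. */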
5978 if (final_offset + GET_MODE_SIZE (outermode) > part_size)
5979 return NULL_RTX;
5981 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
5982 if (res)
5983 return res;
5984 if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
5985 return gen_rtx_SUBREG (outermode, part, final_offset);
5986 return NULL_RTX;
5989 /* A SUBREG resulting from a zero extension may fold to zero if
5990 it extracts higher bits than the ZERO_EXTEND's source provides. */
5991 if (GET_CODE (op) == ZERO_EXTEND && SCALAR_INT_MODE_P (innermode))
5993 unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
5994 if (bitpos >= GET_MODE_PRECISION (GET_MODE (XEXP (op, 0))))
5995 return CONST0_RTX (outermode);
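/* E.g. (subreg:SI (zero_extend:DI (reg:QI x)) 4) on a little-endian
   target (assuming 4-byte SImode and 8-byte DImode) reads bits 32 and
   up of the extended value, beyond the 8 bits the QImode source
   provides, so the result is zero. */

/* A lowpart SUBREG of a wider scalar integer acts as a truncation;
   try the truncation simplifications. */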
5998 if (SCALAR_INT_MODE_P (outermode)
5999 && SCALAR_INT_MODE_P (innermode)
6000 && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode)
6001 && byte == subreg_lowpart_offset (outermode, innermode))
6003 rtx tem = simplify_truncation (outermode, op, innermode);
6004 if (tem)
6005 return tem;
6008 return NULL_RTX;
6011 /* Make a SUBREG operation or equivalent if it folds. */
6013 rtx
6014 simplify_gen_subreg (machine_mode outermode, rtx op,
6015 machine_mode innermode, unsigned int byte)
6017 rtx newx;
6019 newx = simplify_subreg (outermode, op, innermode, byte);
6020 if (newx)
6021 return newx;
6023 if (GET_CODE (op) == SUBREG
6024 || GET_CODE (op) == CONCAT
6025 || GET_MODE (op) == VOIDmode)
6026 return NULL_RTX;
6028 if (validate_subreg (outermode, innermode, op, byte))
6029 return gen_rtx_SUBREG (outermode, op, byte);
6031 return NULL_RTX;
6034 /* Generate a SUBREG to get the least significant part of EXPR (in mode
6035 INNER_MODE) in OUTER_MODE. */
6037 rtx
6038 lowpart_subreg (machine_mode outer_mode, rtx expr,
6039 machine_mode inner_mode)
6041 return simplify_gen_subreg (outer_mode, expr, inner_mode,
6042 subreg_lowpart_offset (outer_mode, inner_mode));
6045 /* Simplify X, an rtx expression.
6047 Return the simplified expression or NULL if no simplifications
6048 were possible.
6050 This is the preferred entry point into the simplification routines;
6051 however, we still allow passes to call the more specific routines.
6053 Right now GCC has three (yes, three) major bodies of RTL simplification
6054 code that need to be unified.
6056 1. fold_rtx in cse.c. This code uses various CSE specific
6057 information to aid in RTL simplification.
6059 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
6060 it uses combine specific information to aid in RTL
6061 simplification.
6063 3. The routines in this file.
6066 In the long term we want to have only one body of simplification code; to
6067 get to that state I recommend the following steps:
6069 1. Pore over fold_rtx & simplify_rtx and move any simplifications
6070 that do not depend on pass-specific state into these routines.
6072 2. As code is moved by #1, change fold_rtx & simplify_rtx to
6073 use this routine whenever possible.
6075 3. Allow for pass dependent state to be provided to these
6076 routines and add simplifications based on the pass dependent
6077 state. Remove code from cse.c & combine.c that becomes
6078 redundant/dead.
6080 It will take time, but ultimately the compiler will be easier to
6081 maintain and improve. It's totally silly that when we add a
6082 simplification it needs to be added to four places (three for RTL
6083 simplification and one for tree simplification). */
6085 rtx
6086 simplify_rtx (const_rtx x)
6088 const enum rtx_code code = GET_CODE (x);
6089 const machine_mode mode = GET_MODE (x);
6091 switch (GET_RTX_CLASS (code))
6093 case RTX_UNARY:
6094 return simplify_unary_operation (code, mode,
6095 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
6096 case RTX_COMM_ARITH:
6097 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
6098 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
6100 /* Fall through.... */
6102 case RTX_BIN_ARITH:
6103 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
6105 case RTX_TERNARY:
6106 case RTX_BITFIELD_OPS:
6107 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
6108 XEXP (x, 0), XEXP (x, 1),
6109 XEXP (x, 2));
6111 case RTX_COMPARE:
6112 case RTX_COMM_COMPARE:
6113 return simplify_relational_operation (code, mode,
6114 ((GET_MODE (XEXP (x, 0))
6115 != VOIDmode)
6116 ? GET_MODE (XEXP (x, 0))
6117 : GET_MODE (XEXP (x, 1))),
6118 XEXP (x, 0),
6119 XEXP (x, 1));
6121 case RTX_EXTRA:
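/* The only RTX_EXTRA code simplified here is SUBREG, which is
   delegated to simplify_subreg. */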
6122 if (code == SUBREG)
6123 return simplify_subreg (mode, SUBREG_REG (x),
6124 GET_MODE (SUBREG_REG (x)),
6125 SUBREG_BYTE (x));
6126 break;
6128 case RTX_OBJ:
6129 if (code == LO_SUM)
6131 /* Convert (lo_sum (high FOO) FOO) to FOO. */
6132 if (GET_CODE (XEXP (x, 0)) == HIGH
6133 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
6134 return XEXP (x, 1);
6136 break;
6138 default:
6139 break;
6141 return NULL;