Move some comparison simplifications to match.pd
[official-gcc.git] / gcc / simplify-rtx.c
blob 8d86e57bf79f3c48167e60e5d763dec6b4e8b9d3
1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987-2015 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "backend.h"
25 #include "predict.h"
26 #include "rtl.h"
27 #include "alias.h"
28 #include "tree.h"
29 #include "fold-const.h"
30 #include "varasm.h"
31 #include "tm_p.h"
32 #include "regs.h"
33 #include "flags.h"
34 #include "insn-config.h"
35 #include "recog.h"
36 #include "insn-codes.h"
37 #include "optabs.h"
38 #include "expmed.h"
39 #include "dojump.h"
40 #include "explow.h"
41 #include "calls.h"
42 #include "emit-rtl.h"
43 #include "stmt.h"
44 #include "expr.h"
45 #include "diagnostic-core.h"
46 #include "target.h"
48 /* Simplification and canonicalization of RTL. */
50 /* Much code operates on (low, high) pairs; the low value is an
51 unsigned wide int, the high value a signed wide int. We
52 occasionally need to sign extend from low to high as if low were a
53 signed wide int. */
54 #define HWI_SIGN_EXTEND(low) \
55 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
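As a quick illustration of the macro above (a standalone sketch, not part of simplify-rtx.c, and assuming the common case of a 64-bit HOST_WIDE_INT), the value it produces is exactly the word that sign extension would place in the high half of a (low, high) pair:

#include <assert.h>
#include <stdint.h>

typedef int64_t HOST_WIDE_INT;   /* stand-in for GCC's host-dependent typedef */

#define HWI_SIGN_EXTEND(low) \
  ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))

int
main (void)
{
  assert (HWI_SIGN_EXTEND (42) == 0);    /* non-negative low -> all-zero high */
  assert (HWI_SIGN_EXTEND (-42) == -1);  /* negative low -> all-one high */
  return 0;
}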
57 static rtx neg_const_int (machine_mode, const_rtx);
58 static bool plus_minus_operand_p (const_rtx);
59 static rtx simplify_plus_minus (enum rtx_code, machine_mode, rtx, rtx);
60 static rtx simplify_immed_subreg (machine_mode, rtx, machine_mode,
61 unsigned int);
62 static rtx simplify_associative_operation (enum rtx_code, machine_mode,
63 rtx, rtx);
64 static rtx simplify_relational_operation_1 (enum rtx_code, machine_mode,
65 machine_mode, rtx, rtx);
66 static rtx simplify_unary_operation_1 (enum rtx_code, machine_mode, rtx);
67 static rtx simplify_binary_operation_1 (enum rtx_code, machine_mode,
68 rtx, rtx, rtx, rtx);
70 /* Negate a CONST_INT rtx, truncating (because a conversion from a
71 maximally negative number can overflow). */
72 static rtx
73 neg_const_int (machine_mode mode, const_rtx i)
75 return gen_int_mode (-(unsigned HOST_WIDE_INT) INTVAL (i), mode);
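The detour through unsigned arithmetic matters because negating the most negative value overflows in signed arithmetic. A minimal standalone sketch of the same trick, assuming a 64-bit host wide int (illustrative only, not GCC code):

#include <assert.h>
#include <stdint.h>

int
main (void)
{
  /* -INT64_MIN is undefined as a signed operation; done in unsigned
     arithmetic it wraps, and the wrapped value is what gen_int_mode
     would then truncate to the target mode.  */
  uint64_t wrapped = -(uint64_t) INT64_MIN;
  assert (wrapped == (uint64_t) INT64_MIN);
  return 0;
}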
78 /* Test whether expression, X, is an immediate constant that represents
79 the most significant bit of machine mode MODE. */
81 bool
82 mode_signbit_p (machine_mode mode, const_rtx x)
84 unsigned HOST_WIDE_INT val;
85 unsigned int width;
87 if (GET_MODE_CLASS (mode) != MODE_INT)
88 return false;
90 width = GET_MODE_PRECISION (mode);
91 if (width == 0)
92 return false;
94 if (width <= HOST_BITS_PER_WIDE_INT
95 && CONST_INT_P (x))
96 val = INTVAL (x);
97 #if TARGET_SUPPORTS_WIDE_INT
98 else if (CONST_WIDE_INT_P (x))
100 unsigned int i;
101 unsigned int elts = CONST_WIDE_INT_NUNITS (x);
102 if (elts != (width + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT)
103 return false;
104 for (i = 0; i < elts - 1; i++)
105 if (CONST_WIDE_INT_ELT (x, i) != 0)
106 return false;
107 val = CONST_WIDE_INT_ELT (x, elts - 1);
108 width %= HOST_BITS_PER_WIDE_INT;
109 if (width == 0)
110 width = HOST_BITS_PER_WIDE_INT;
112 #else
113 else if (width <= HOST_BITS_PER_DOUBLE_INT
114 && CONST_DOUBLE_AS_INT_P (x)
115 && CONST_DOUBLE_LOW (x) == 0)
117 val = CONST_DOUBLE_HIGH (x);
118 width -= HOST_BITS_PER_WIDE_INT;
120 #endif
121 else
122 /* X is not an integer constant. */
123 return false;
125 if (width < HOST_BITS_PER_WIDE_INT)
126 val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
127 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
130 /* Test whether VAL is equal to the most significant bit of mode MODE
131 (after masking with the mode mask of MODE). Returns false if the
132 precision of MODE is too large to handle. */
134 bool
135 val_signbit_p (machine_mode mode, unsigned HOST_WIDE_INT val)
137 unsigned int width;
139 if (GET_MODE_CLASS (mode) != MODE_INT)
140 return false;
142 width = GET_MODE_PRECISION (mode);
143 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
144 return false;
146 val &= GET_MODE_MASK (mode);
147 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
150 /* Test whether the most significant bit of mode MODE is set in VAL.
151 Returns false if the precision of MODE is too large to handle. */
152 bool
153 val_signbit_known_set_p (machine_mode mode, unsigned HOST_WIDE_INT val)
155 unsigned int width;
157 if (GET_MODE_CLASS (mode) != MODE_INT)
158 return false;
160 width = GET_MODE_PRECISION (mode);
161 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
162 return false;
164 val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
165 return val != 0;
168 /* Test whether the most significant bit of mode MODE is clear in VAL.
169 Returns false if the precision of MODE is too large to handle. */
170 bool
171 val_signbit_known_clear_p (machine_mode mode, unsigned HOST_WIDE_INT val)
173 unsigned int width;
175 if (GET_MODE_CLASS (mode) != MODE_INT)
176 return false;
178 width = GET_MODE_PRECISION (mode);
179 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
180 return false;
182 val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
183 return val == 0;
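A standalone sketch of the mask-and-compare logic shared by the three predicates above, for a hypothetical 32-bit integer mode (illustrative only; GET_MODE_PRECISION and GET_MODE_MASK are mimicked by plain constants):

#include <assert.h>
#include <stdint.h>

int
main (void)
{
  const unsigned int width = 32;               /* GET_MODE_PRECISION analogue */
  const uint64_t mode_mask = 0xffffffffu;      /* GET_MODE_MASK analogue */
  const uint64_t signbit = (uint64_t) 1 << (width - 1);

  uint64_t hi = 0x80000000u, lo = 0x7fffffffu;
  assert ((hi & mode_mask) == signbit);        /* val_signbit_p holds for hi */
  assert ((hi & signbit) != 0);                /* ..._known_set_p */
  assert ((lo & signbit) == 0);                /* ..._known_clear_p */
  return 0;
}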
186 /* Make a binary operation by properly ordering the operands and
187 seeing if the expression folds. */
190 simplify_gen_binary (enum rtx_code code, machine_mode mode, rtx op0,
191 rtx op1)
193 rtx tem;
195 /* If this simplifies, do it. */
196 tem = simplify_binary_operation (code, mode, op0, op1);
197 if (tem)
198 return tem;
200 /* Put complex operands first and constants second if commutative. */
201 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
202 && swap_commutative_operands_p (op0, op1))
203 std::swap (op0, op1);
205 return gen_rtx_fmt_ee (code, mode, op0, op1);
208 /* If X is a MEM referencing the constant pool, return the real value.
209 Otherwise return X. */
211 avoid_constant_pool_reference (rtx x)
213 rtx c, tmp, addr;
214 machine_mode cmode;
215 HOST_WIDE_INT offset = 0;
217 switch (GET_CODE (x))
219 case MEM:
220 break;
222 case FLOAT_EXTEND:
223 /* Handle float extensions of constant pool references. */
224 tmp = XEXP (x, 0);
225 c = avoid_constant_pool_reference (tmp);
226 if (c != tmp && CONST_DOUBLE_AS_FLOAT_P (c))
228 REAL_VALUE_TYPE d;
230 REAL_VALUE_FROM_CONST_DOUBLE (d, c);
231 return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
233 return x;
235 default:
236 return x;
239 if (GET_MODE (x) == BLKmode)
240 return x;
242 addr = XEXP (x, 0);
244 /* Call target hook to avoid the effects of -fpic etc.... */
245 addr = targetm.delegitimize_address (addr);
247 /* Split the address into a base and integer offset. */
248 if (GET_CODE (addr) == CONST
249 && GET_CODE (XEXP (addr, 0)) == PLUS
250 && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
252 offset = INTVAL (XEXP (XEXP (addr, 0), 1));
253 addr = XEXP (XEXP (addr, 0), 0);
256 if (GET_CODE (addr) == LO_SUM)
257 addr = XEXP (addr, 1);
259 /* If this is a constant pool reference, we can turn it into its
260 constant and hope that simplifications happen. */
261 if (GET_CODE (addr) == SYMBOL_REF
262 && CONSTANT_POOL_ADDRESS_P (addr))
264 c = get_pool_constant (addr);
265 cmode = get_pool_mode (addr);
267 /* If we're accessing the constant in a different mode than it was
268 originally stored, attempt to fix that up via subreg simplifications.
269 If that fails we have no choice but to return the original memory. */
270 if ((offset != 0 || cmode != GET_MODE (x))
271 && offset >= 0 && offset < GET_MODE_SIZE (cmode))
273 rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
274 if (tem && CONSTANT_P (tem))
275 return tem;
277 else
278 return c;
281 return x;
284 /* Simplify a MEM based on its attributes. This is the default
285 delegitimize_address target hook, and it's recommended that every
286 overrider call it. */
289 delegitimize_mem_from_attrs (rtx x)
291 /* MEMs without MEM_OFFSETs may have been offset, so we can't just
292 use their base addresses as equivalent. */
293 if (MEM_P (x)
294 && MEM_EXPR (x)
295 && MEM_OFFSET_KNOWN_P (x))
297 tree decl = MEM_EXPR (x);
298 machine_mode mode = GET_MODE (x);
299 HOST_WIDE_INT offset = 0;
301 switch (TREE_CODE (decl))
303 default:
304 decl = NULL;
305 break;
307 case VAR_DECL:
308 break;
310 case ARRAY_REF:
311 case ARRAY_RANGE_REF:
312 case COMPONENT_REF:
313 case BIT_FIELD_REF:
314 case REALPART_EXPR:
315 case IMAGPART_EXPR:
316 case VIEW_CONVERT_EXPR:
318 HOST_WIDE_INT bitsize, bitpos;
319 tree toffset;
320 int unsignedp, volatilep = 0;
322 decl = get_inner_reference (decl, &bitsize, &bitpos, &toffset,
323 &mode, &unsignedp, &volatilep, false);
324 if (bitsize != GET_MODE_BITSIZE (mode)
325 || (bitpos % BITS_PER_UNIT)
326 || (toffset && !tree_fits_shwi_p (toffset)))
327 decl = NULL;
328 else
330 offset += bitpos / BITS_PER_UNIT;
331 if (toffset)
332 offset += tree_to_shwi (toffset);
334 break;
338 if (decl
339 && mode == GET_MODE (x)
340 && TREE_CODE (decl) == VAR_DECL
341 && (TREE_STATIC (decl)
342 || DECL_THREAD_LOCAL_P (decl))
343 && DECL_RTL_SET_P (decl)
344 && MEM_P (DECL_RTL (decl)))
346 rtx newx;
348 offset += MEM_OFFSET (x);
350 newx = DECL_RTL (decl);
352 if (MEM_P (newx))
354 rtx n = XEXP (newx, 0), o = XEXP (x, 0);
356 /* Avoid creating a new MEM needlessly if we already had
357 the same address. We do if there's no OFFSET and the
358 old address X is identical to NEWX, or if X is of the
359 form (plus NEWX OFFSET), or the NEWX is of the form
360 (plus Y (const_int Z)) and X is that with the offset
361 added: (plus Y (const_int Z+OFFSET)). */
362 if (!((offset == 0
363 || (GET_CODE (o) == PLUS
364 && GET_CODE (XEXP (o, 1)) == CONST_INT
365 && (offset == INTVAL (XEXP (o, 1))
366 || (GET_CODE (n) == PLUS
367 && GET_CODE (XEXP (n, 1)) == CONST_INT
368 && (INTVAL (XEXP (n, 1)) + offset
369 == INTVAL (XEXP (o, 1)))
370 && (n = XEXP (n, 0))))
371 && (o = XEXP (o, 0))))
372 && rtx_equal_p (o, n)))
373 x = adjust_address_nv (newx, mode, offset);
375 else if (GET_MODE (x) == GET_MODE (newx)
376 && offset == 0)
377 x = newx;
381 return x;
384 /* Make a unary operation by first seeing if it folds and otherwise making
385 the specified operation. */
388 simplify_gen_unary (enum rtx_code code, machine_mode mode, rtx op,
389 machine_mode op_mode)
391 rtx tem;
393 /* If this simplifies, use it. */
394 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
395 return tem;
397 return gen_rtx_fmt_e (code, mode, op);
400 /* Likewise for ternary operations. */
403 simplify_gen_ternary (enum rtx_code code, machine_mode mode,
404 machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
406 rtx tem;
408 /* If this simplifies, use it. */
409 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
410 op0, op1, op2)))
411 return tem;
413 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
416 /* Likewise, for relational operations.
417 CMP_MODE specifies mode comparison is done in. */
420 simplify_gen_relational (enum rtx_code code, machine_mode mode,
421 machine_mode cmp_mode, rtx op0, rtx op1)
423 rtx tem;
425 if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
426 op0, op1)))
427 return tem;
429 return gen_rtx_fmt_ee (code, mode, op0, op1);
432 /* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
433 and simplify the result. If FN is non-NULL, call this callback on each
 434 X; if it returns non-NULL, replace X with its return value and simplify the
435 result. */
438 simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
439 rtx (*fn) (rtx, const_rtx, void *), void *data)
441 enum rtx_code code = GET_CODE (x);
442 machine_mode mode = GET_MODE (x);
443 machine_mode op_mode;
444 const char *fmt;
445 rtx op0, op1, op2, newx, op;
446 rtvec vec, newvec;
447 int i, j;
449 if (__builtin_expect (fn != NULL, 0))
451 newx = fn (x, old_rtx, data);
452 if (newx)
453 return newx;
455 else if (rtx_equal_p (x, old_rtx))
456 return copy_rtx ((rtx) data);
458 switch (GET_RTX_CLASS (code))
460 case RTX_UNARY:
461 op0 = XEXP (x, 0);
462 op_mode = GET_MODE (op0);
463 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
464 if (op0 == XEXP (x, 0))
465 return x;
466 return simplify_gen_unary (code, mode, op0, op_mode);
468 case RTX_BIN_ARITH:
469 case RTX_COMM_ARITH:
470 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
471 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
472 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
473 return x;
474 return simplify_gen_binary (code, mode, op0, op1);
476 case RTX_COMPARE:
477 case RTX_COMM_COMPARE:
478 op0 = XEXP (x, 0);
479 op1 = XEXP (x, 1);
480 op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
481 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
482 op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
483 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
484 return x;
485 return simplify_gen_relational (code, mode, op_mode, op0, op1);
487 case RTX_TERNARY:
488 case RTX_BITFIELD_OPS:
489 op0 = XEXP (x, 0);
490 op_mode = GET_MODE (op0);
491 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
492 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
493 op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
494 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
495 return x;
496 if (op_mode == VOIDmode)
497 op_mode = GET_MODE (op0);
498 return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
500 case RTX_EXTRA:
501 if (code == SUBREG)
503 op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
504 if (op0 == SUBREG_REG (x))
505 return x;
506 op0 = simplify_gen_subreg (GET_MODE (x), op0,
507 GET_MODE (SUBREG_REG (x)),
508 SUBREG_BYTE (x));
509 return op0 ? op0 : x;
511 break;
513 case RTX_OBJ:
514 if (code == MEM)
516 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
517 if (op0 == XEXP (x, 0))
518 return x;
519 return replace_equiv_address_nv (x, op0);
521 else if (code == LO_SUM)
523 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
524 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
526 /* (lo_sum (high x) y) -> y where x and y have the same base. */
527 if (GET_CODE (op0) == HIGH)
529 rtx base0, base1, offset0, offset1;
530 split_const (XEXP (op0, 0), &base0, &offset0);
531 split_const (op1, &base1, &offset1);
532 if (rtx_equal_p (base0, base1))
533 return op1;
536 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
537 return x;
538 return gen_rtx_LO_SUM (mode, op0, op1);
540 break;
542 default:
543 break;
546 newx = x;
547 fmt = GET_RTX_FORMAT (code);
548 for (i = 0; fmt[i]; i++)
549 switch (fmt[i])
551 case 'E':
552 vec = XVEC (x, i);
553 newvec = XVEC (newx, i);
554 for (j = 0; j < GET_NUM_ELEM (vec); j++)
556 op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
557 old_rtx, fn, data);
558 if (op != RTVEC_ELT (vec, j))
560 if (newvec == vec)
562 newvec = shallow_copy_rtvec (vec);
563 if (x == newx)
564 newx = shallow_copy_rtx (x);
565 XVEC (newx, i) = newvec;
567 RTVEC_ELT (newvec, j) = op;
570 break;
572 case 'e':
573 if (XEXP (x, i))
575 op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
576 if (op != XEXP (x, i))
578 if (x == newx)
579 newx = shallow_copy_rtx (x);
580 XEXP (newx, i) = op;
583 break;
585 return newx;
588 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
589 resulting RTX. Return a new RTX which is as simplified as possible. */
592 simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
594 return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
597 /* Try to simplify a MODE truncation of OP, which has OP_MODE.
598 Only handle cases where the truncated value is inherently an rvalue.
600 RTL provides two ways of truncating a value:
602 1. a lowpart subreg. This form is only a truncation when both
603 the outer and inner modes (here MODE and OP_MODE respectively)
604 are scalar integers, and only then when the subreg is used as
605 an rvalue.
607 It is only valid to form such truncating subregs if the
608 truncation requires no action by the target. The onus for
609 proving this is on the creator of the subreg -- e.g. the
610 caller to simplify_subreg or simplify_gen_subreg -- and typically
611 involves either TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode.
613 2. a TRUNCATE. This form handles both scalar and compound integers.
615 The first form is preferred where valid. However, the TRUNCATE
616 handling in simplify_unary_operation turns the second form into the
617 first form when TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode allow,
618 so it is generally safe to form rvalue truncations using:
620 simplify_gen_unary (TRUNCATE, ...)
622 and leave simplify_unary_operation to work out which representation
623 should be used.
625 Because of the proof requirements on (1), simplify_truncation must
626 also use simplify_gen_unary (TRUNCATE, ...) to truncate parts of OP,
627 regardless of whether the outer truncation came from a SUBREG or a
628 TRUNCATE. For example, if the caller has proven that an SImode
629 truncation of:
631 (and:DI X Y)
633 is a no-op and can be represented as a subreg, it does not follow
634 that SImode truncations of X and Y are also no-ops. On a target
635 like 64-bit MIPS that requires SImode values to be stored in
636 sign-extended form, an SImode truncation of:
638 (and:DI (reg:DI X) (const_int 63))
640 is trivially a no-op because only the lower 6 bits can be set.
641 However, X is still an arbitrary 64-bit number and so we cannot
642 assume that truncating it too is a no-op. */
644 static rtx
645 simplify_truncation (machine_mode mode, rtx op,
646 machine_mode op_mode)
648 unsigned int precision = GET_MODE_UNIT_PRECISION (mode);
649 unsigned int op_precision = GET_MODE_UNIT_PRECISION (op_mode);
650 gcc_assert (precision <= op_precision);
652 /* Optimize truncations of zero and sign extended values. */
653 if (GET_CODE (op) == ZERO_EXTEND
654 || GET_CODE (op) == SIGN_EXTEND)
656 /* There are three possibilities. If MODE is the same as the
657 origmode, we can omit both the extension and the subreg.
658 If MODE is not larger than the origmode, we can apply the
659 truncation without the extension. Finally, if the outermode
660 is larger than the origmode, we can just extend to the appropriate
661 mode. */
662 machine_mode origmode = GET_MODE (XEXP (op, 0));
663 if (mode == origmode)
664 return XEXP (op, 0);
665 else if (precision <= GET_MODE_UNIT_PRECISION (origmode))
666 return simplify_gen_unary (TRUNCATE, mode,
667 XEXP (op, 0), origmode);
668 else
669 return simplify_gen_unary (GET_CODE (op), mode,
670 XEXP (op, 0), origmode);
673 /* If the machine can perform operations in the truncated mode, distribute
674 the truncation, i.e. simplify (truncate:QI (op:SI (x:SI) (y:SI))) into
675 (op:QI (truncate:QI (x:SI)) (truncate:QI (y:SI))). */
676 if (1
677 && (!WORD_REGISTER_OPERATIONS || precision >= BITS_PER_WORD)
678 && (GET_CODE (op) == PLUS
679 || GET_CODE (op) == MINUS
680 || GET_CODE (op) == MULT))
682 rtx op0 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0), op_mode);
683 if (op0)
685 rtx op1 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 1), op_mode);
686 if (op1)
687 return simplify_gen_binary (GET_CODE (op), mode, op0, op1);
691 /* Simplify (truncate:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C)) into
 692 (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
693 the outer subreg is effectively a truncation to the original mode. */
694 if ((GET_CODE (op) == LSHIFTRT
695 || GET_CODE (op) == ASHIFTRT)
696 /* Ensure that OP_MODE is at least twice as wide as MODE
697 to avoid the possibility that an outer LSHIFTRT shifts by more
698 than the sign extension's sign_bit_copies and introduces zeros
699 into the high bits of the result. */
700 && 2 * precision <= op_precision
701 && CONST_INT_P (XEXP (op, 1))
702 && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
703 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
704 && UINTVAL (XEXP (op, 1)) < precision)
705 return simplify_gen_binary (ASHIFTRT, mode,
706 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
708 /* Likewise (truncate:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C)) into
 709 (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
710 the outer subreg is effectively a truncation to the original mode. */
711 if ((GET_CODE (op) == LSHIFTRT
712 || GET_CODE (op) == ASHIFTRT)
713 && CONST_INT_P (XEXP (op, 1))
714 && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
715 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
716 && UINTVAL (XEXP (op, 1)) < precision)
717 return simplify_gen_binary (LSHIFTRT, mode,
718 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
720 /* Likewise (truncate:QI (ashift:SI (zero_extend:SI (x:QI)) C)) into
 721 (ashift:QI (x:QI) C), where C is a suitable small constant and
722 the outer subreg is effectively a truncation to the original mode. */
723 if (GET_CODE (op) == ASHIFT
724 && CONST_INT_P (XEXP (op, 1))
725 && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
726 || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
727 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
728 && UINTVAL (XEXP (op, 1)) < precision)
729 return simplify_gen_binary (ASHIFT, mode,
730 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
732 /* Recognize a word extraction from a multi-word subreg. */
733 if ((GET_CODE (op) == LSHIFTRT
734 || GET_CODE (op) == ASHIFTRT)
735 && SCALAR_INT_MODE_P (mode)
736 && SCALAR_INT_MODE_P (op_mode)
737 && precision >= BITS_PER_WORD
738 && 2 * precision <= op_precision
739 && CONST_INT_P (XEXP (op, 1))
740 && (INTVAL (XEXP (op, 1)) & (precision - 1)) == 0
741 && UINTVAL (XEXP (op, 1)) < op_precision)
743 int byte = subreg_lowpart_offset (mode, op_mode);
744 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
745 return simplify_gen_subreg (mode, XEXP (op, 0), op_mode,
746 (WORDS_BIG_ENDIAN
747 ? byte - shifted_bytes
748 : byte + shifted_bytes));
751 /* If we have a TRUNCATE of a right shift of MEM, make a new MEM
752 and try replacing the TRUNCATE and shift with it. Don't do this
753 if the MEM has a mode-dependent address. */
754 if ((GET_CODE (op) == LSHIFTRT
755 || GET_CODE (op) == ASHIFTRT)
756 && SCALAR_INT_MODE_P (op_mode)
757 && MEM_P (XEXP (op, 0))
758 && CONST_INT_P (XEXP (op, 1))
759 && (INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (mode)) == 0
760 && INTVAL (XEXP (op, 1)) > 0
761 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (op_mode)
762 && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0),
763 MEM_ADDR_SPACE (XEXP (op, 0)))
764 && ! MEM_VOLATILE_P (XEXP (op, 0))
765 && (GET_MODE_SIZE (mode) >= UNITS_PER_WORD
766 || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
768 int byte = subreg_lowpart_offset (mode, op_mode);
769 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
770 return adjust_address_nv (XEXP (op, 0), mode,
771 (WORDS_BIG_ENDIAN
772 ? byte - shifted_bytes
773 : byte + shifted_bytes));
776 /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
777 (OP:SI foo:SI) if OP is NEG or ABS. */
778 if ((GET_CODE (op) == ABS
779 || GET_CODE (op) == NEG)
780 && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
781 || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
782 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
783 return simplify_gen_unary (GET_CODE (op), mode,
784 XEXP (XEXP (op, 0), 0), mode);
786 /* (truncate:A (subreg:B (truncate:C X) 0)) is
787 (truncate:A X). */
788 if (GET_CODE (op) == SUBREG
789 && SCALAR_INT_MODE_P (mode)
790 && SCALAR_INT_MODE_P (op_mode)
791 && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (op)))
792 && GET_CODE (SUBREG_REG (op)) == TRUNCATE
793 && subreg_lowpart_p (op))
795 rtx inner = XEXP (SUBREG_REG (op), 0);
796 if (GET_MODE_PRECISION (mode)
797 <= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op))))
798 return simplify_gen_unary (TRUNCATE, mode, inner, GET_MODE (inner));
799 else
800 /* If subreg above is paradoxical and C is narrower
801 than A, return (subreg:A (truncate:C X) 0). */
802 return simplify_gen_subreg (mode, SUBREG_REG (op),
803 GET_MODE (SUBREG_REG (op)), 0);
806 /* (truncate:A (truncate:B X)) is (truncate:A X). */
807 if (GET_CODE (op) == TRUNCATE)
808 return simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0),
809 GET_MODE (XEXP (op, 0)));
811 return NULL_RTX;
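Two of the truncation rules above can be checked with ordinary C integer conversions. This is only an illustration of the arithmetic identities involved, not of the RTL machinery:

#include <assert.h>
#include <stdint.h>

int
main (void)
{
  int8_t x = -100;
  uint8_t u = 0xd3;

  /* (truncate:QI (sign_extend:SI x:QI)) is x itself.  */
  assert ((int8_t) (int32_t) x == x);

  /* (truncate:QI (lshiftrt:SI (zero_extend:SI u:QI) 3)) is
     (lshiftrt:QI u 3), valid because the shift count is smaller
     than QImode's precision.  */
  assert ((uint8_t) (((uint32_t) u) >> 3) == (uint8_t) (u >> 3));
  return 0;
}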
814 /* Try to simplify a unary operation CODE whose output mode is to be
815 MODE with input operand OP whose mode was originally OP_MODE.
816 Return zero if no simplification can be made. */
818 simplify_unary_operation (enum rtx_code code, machine_mode mode,
819 rtx op, machine_mode op_mode)
821 rtx trueop, tem;
823 trueop = avoid_constant_pool_reference (op);
825 tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
826 if (tem)
827 return tem;
829 return simplify_unary_operation_1 (code, mode, op);
832 /* Return true if FLOAT or UNSIGNED_FLOAT operation OP is known
833 to be exact. */
835 static bool
836 exact_int_to_float_conversion_p (const_rtx op)
838 int out_bits = significand_size (GET_MODE_INNER (GET_MODE (op)));
839 machine_mode op0_mode = GET_MODE (XEXP (op, 0));
840 /* Constants shouldn't reach here. */
841 gcc_assert (op0_mode != VOIDmode);
842 int in_prec = GET_MODE_UNIT_PRECISION (op0_mode);
843 int in_bits = in_prec;
844 if (HWI_COMPUTABLE_MODE_P (op0_mode))
846 unsigned HOST_WIDE_INT nonzero = nonzero_bits (XEXP (op, 0), op0_mode);
847 if (GET_CODE (op) == FLOAT)
848 in_bits -= num_sign_bit_copies (XEXP (op, 0), op0_mode);
849 else if (GET_CODE (op) == UNSIGNED_FLOAT)
850 in_bits = wi::min_precision (wi::uhwi (nonzero, in_prec), UNSIGNED);
851 else
852 gcc_unreachable ();
853 in_bits -= wi::ctz (wi::uhwi (nonzero, in_prec));
855 return in_bits <= out_bits;
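The significand-size test above reflects the familiar floating-point fact that an integer converts exactly only while its significant bits fit in the target's significand. A standalone check, assuming IEEE single precision (24 significand bits) and the default round-to-nearest mode:

#include <assert.h>
#include <stdint.h>

int
main (void)
{
  int32_t exact   = 1 << 24;         /* 16777216: one significant bit */
  int32_t inexact = (1 << 24) + 1;   /* 16777217: needs 25 bits */

  assert ((int32_t) (float) exact == exact);
  assert ((int32_t) (float) inexact != inexact);   /* rounds to 16777216 */
  return 0;
}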
858 /* Perform some simplifications we can do even if the operands
859 aren't constant. */
860 static rtx
861 simplify_unary_operation_1 (enum rtx_code code, machine_mode mode, rtx op)
863 enum rtx_code reversed;
864 rtx temp;
866 switch (code)
868 case NOT:
869 /* (not (not X)) == X. */
870 if (GET_CODE (op) == NOT)
871 return XEXP (op, 0);
873 /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
874 comparison is all ones. */
875 if (COMPARISON_P (op)
876 && (mode == BImode || STORE_FLAG_VALUE == -1)
877 && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
878 return simplify_gen_relational (reversed, mode, VOIDmode,
879 XEXP (op, 0), XEXP (op, 1));
881 /* (not (plus X -1)) can become (neg X). */
882 if (GET_CODE (op) == PLUS
883 && XEXP (op, 1) == constm1_rtx)
884 return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
886 /* Similarly, (not (neg X)) is (plus X -1). */
887 if (GET_CODE (op) == NEG)
888 return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
889 CONSTM1_RTX (mode));
891 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
892 if (GET_CODE (op) == XOR
893 && CONST_INT_P (XEXP (op, 1))
894 && (temp = simplify_unary_operation (NOT, mode,
895 XEXP (op, 1), mode)) != 0)
896 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
898 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
899 if (GET_CODE (op) == PLUS
900 && CONST_INT_P (XEXP (op, 1))
901 && mode_signbit_p (mode, XEXP (op, 1))
902 && (temp = simplify_unary_operation (NOT, mode,
903 XEXP (op, 1), mode)) != 0)
904 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
907 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
908 operands other than 1, but that is not valid. We could do a
909 similar simplification for (not (lshiftrt C X)) where C is
910 just the sign bit, but this doesn't seem common enough to
911 bother with. */
912 if (GET_CODE (op) == ASHIFT
913 && XEXP (op, 0) == const1_rtx)
915 temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
916 return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
919 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
920 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
921 so we can perform the above simplification. */
922 if (STORE_FLAG_VALUE == -1
923 && GET_CODE (op) == ASHIFTRT
924 && CONST_INT_P (XEXP (op, 1))
925 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
926 return simplify_gen_relational (GE, mode, VOIDmode,
927 XEXP (op, 0), const0_rtx);
930 if (GET_CODE (op) == SUBREG
931 && subreg_lowpart_p (op)
932 && (GET_MODE_SIZE (GET_MODE (op))
933 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
934 && GET_CODE (SUBREG_REG (op)) == ASHIFT
935 && XEXP (SUBREG_REG (op), 0) == const1_rtx)
937 machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
938 rtx x;
940 x = gen_rtx_ROTATE (inner_mode,
941 simplify_gen_unary (NOT, inner_mode, const1_rtx,
942 inner_mode),
943 XEXP (SUBREG_REG (op), 1));
944 temp = rtl_hooks.gen_lowpart_no_emit (mode, x);
945 if (temp)
946 return temp;
949 /* Apply De Morgan's laws to reduce number of patterns for machines
950 with negating logical insns (and-not, nand, etc.). If result has
951 only one NOT, put it first, since that is how the patterns are
952 coded. */
953 if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
955 rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
956 machine_mode op_mode;
958 op_mode = GET_MODE (in1);
959 in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);
961 op_mode = GET_MODE (in2);
962 if (op_mode == VOIDmode)
963 op_mode = mode;
964 in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);
966 if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
967 std::swap (in1, in2);
969 return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
970 mode, in1, in2);
973 /* (not (bswap x)) -> (bswap (not x)). */
974 if (GET_CODE (op) == BSWAP)
976 rtx x = simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
977 return simplify_gen_unary (BSWAP, mode, x, mode);
979 break;
981 case NEG:
982 /* (neg (neg X)) == X. */
983 if (GET_CODE (op) == NEG)
984 return XEXP (op, 0);
986 /* (neg (x ? (neg y) : y)) == !x ? (neg y) : y.
987 If comparison is not reversible use
988 x ? y : (neg y). */
989 if (GET_CODE (op) == IF_THEN_ELSE)
991 rtx cond = XEXP (op, 0);
992 rtx true_rtx = XEXP (op, 1);
993 rtx false_rtx = XEXP (op, 2);
995 if ((GET_CODE (true_rtx) == NEG
996 && rtx_equal_p (XEXP (true_rtx, 0), false_rtx))
997 || (GET_CODE (false_rtx) == NEG
998 && rtx_equal_p (XEXP (false_rtx, 0), true_rtx)))
1000 if (reversed_comparison_code (cond, NULL_RTX) != UNKNOWN)
1001 temp = reversed_comparison (cond, mode);
1002 else
1004 temp = cond;
1005 std::swap (true_rtx, false_rtx);
1007 return simplify_gen_ternary (IF_THEN_ELSE, mode,
1008 mode, temp, true_rtx, false_rtx);
1012 /* (neg (plus X 1)) can become (not X). */
1013 if (GET_CODE (op) == PLUS
1014 && XEXP (op, 1) == const1_rtx)
1015 return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
1017 /* Similarly, (neg (not X)) is (plus X 1). */
1018 if (GET_CODE (op) == NOT)
1019 return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
1020 CONST1_RTX (mode));
1022 /* (neg (minus X Y)) can become (minus Y X). This transformation
1023 isn't safe for modes with signed zeros, since if X and Y are
1024 both +0, (minus Y X) is the same as (minus X Y). If the
1025 rounding mode is towards +infinity (or -infinity) then the two
1026 expressions will be rounded differently. */
1027 if (GET_CODE (op) == MINUS
1028 && !HONOR_SIGNED_ZEROS (mode)
1029 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1030 return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
1032 if (GET_CODE (op) == PLUS
1033 && !HONOR_SIGNED_ZEROS (mode)
1034 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1036 /* (neg (plus A C)) is simplified to (minus -C A). */
1037 if (CONST_SCALAR_INT_P (XEXP (op, 1))
1038 || CONST_DOUBLE_AS_FLOAT_P (XEXP (op, 1)))
1040 temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
1041 if (temp)
1042 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
1045 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
1046 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
1047 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
1050 /* (neg (mult A B)) becomes (mult A (neg B)).
1051 This works even for floating-point values. */
1052 if (GET_CODE (op) == MULT
1053 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1055 temp = simplify_gen_unary (NEG, mode, XEXP (op, 1), mode);
1056 return simplify_gen_binary (MULT, mode, XEXP (op, 0), temp);
1059 /* NEG commutes with ASHIFT since it is multiplication. Only do
1060 this if we can then eliminate the NEG (e.g., if the operand
1061 is a constant). */
1062 if (GET_CODE (op) == ASHIFT)
1064 temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
1065 if (temp)
1066 return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
1069 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
1070 C is equal to the width of MODE minus 1. */
1071 if (GET_CODE (op) == ASHIFTRT
1072 && CONST_INT_P (XEXP (op, 1))
1073 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
1074 return simplify_gen_binary (LSHIFTRT, mode,
1075 XEXP (op, 0), XEXP (op, 1));
1077 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
1078 C is equal to the width of MODE minus 1. */
1079 if (GET_CODE (op) == LSHIFTRT
1080 && CONST_INT_P (XEXP (op, 1))
1081 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
1082 return simplify_gen_binary (ASHIFTRT, mode,
1083 XEXP (op, 0), XEXP (op, 1));
1085 /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
1086 if (GET_CODE (op) == XOR
1087 && XEXP (op, 1) == const1_rtx
1088 && nonzero_bits (XEXP (op, 0), mode) == 1)
1089 return plus_constant (mode, XEXP (op, 0), -1);
1091 /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1. */
1092 /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1. */
1093 if (GET_CODE (op) == LT
1094 && XEXP (op, 1) == const0_rtx
1095 && SCALAR_INT_MODE_P (GET_MODE (XEXP (op, 0))))
1097 machine_mode inner = GET_MODE (XEXP (op, 0));
1098 int isize = GET_MODE_PRECISION (inner);
1099 if (STORE_FLAG_VALUE == 1)
1101 temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
1102 GEN_INT (isize - 1));
1103 if (mode == inner)
1104 return temp;
1105 if (GET_MODE_PRECISION (mode) > isize)
1106 return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
1107 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
1109 else if (STORE_FLAG_VALUE == -1)
1111 temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
1112 GEN_INT (isize - 1));
1113 if (mode == inner)
1114 return temp;
1115 if (GET_MODE_PRECISION (mode) > isize)
1116 return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
1117 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
1120 break;
1122 case TRUNCATE:
1123 /* Don't optimize (lshiftrt (mult ...)) as it would interfere
1124 with the umulXi3_highpart patterns. */
1125 if (GET_CODE (op) == LSHIFTRT
1126 && GET_CODE (XEXP (op, 0)) == MULT)
1127 break;
1129 if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
1131 if (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op)))
1133 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1134 if (temp)
1135 return temp;
1137 /* We can't handle truncation to a partial integer mode here
1138 because we don't know the real bitsize of the partial
1139 integer mode. */
1140 break;
1143 if (GET_MODE (op) != VOIDmode)
1145 temp = simplify_truncation (mode, op, GET_MODE (op));
1146 if (temp)
1147 return temp;
1150 /* If we know that the value is already truncated, we can
1151 replace the TRUNCATE with a SUBREG. */
1152 if (GET_MODE_NUNITS (mode) == 1
1153 && (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op))
1154 || truncated_to_mode (mode, op)))
1156 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1157 if (temp)
1158 return temp;
1161 /* A truncate of a comparison can be replaced with a subreg if
1162 STORE_FLAG_VALUE permits. This is like the previous test,
1163 but it works even if the comparison is done in a mode larger
1164 than HOST_BITS_PER_WIDE_INT. */
1165 if (HWI_COMPUTABLE_MODE_P (mode)
1166 && COMPARISON_P (op)
1167 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
1169 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1170 if (temp)
1171 return temp;
1174 /* A truncate of a memory is just loading the low part of the memory
1175 if we are not changing the meaning of the address. */
1176 if (GET_CODE (op) == MEM
1177 && !VECTOR_MODE_P (mode)
1178 && !MEM_VOLATILE_P (op)
1179 && !mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op)))
1181 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1182 if (temp)
1183 return temp;
1186 break;
1188 case FLOAT_TRUNCATE:
1189 if (DECIMAL_FLOAT_MODE_P (mode))
1190 break;
1192 /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */
1193 if (GET_CODE (op) == FLOAT_EXTEND
1194 && GET_MODE (XEXP (op, 0)) == mode)
1195 return XEXP (op, 0);
1197 /* (float_truncate:SF (float_truncate:DF foo:XF))
1198 = (float_truncate:SF foo:XF).
1199 This may eliminate double rounding, so it is unsafe.
1201 (float_truncate:SF (float_extend:XF foo:DF))
1202 = (float_truncate:SF foo:DF).
1204 (float_truncate:DF (float_extend:XF foo:SF))
1205 = (float_extend:DF foo:SF). */
1206 if ((GET_CODE (op) == FLOAT_TRUNCATE
1207 && flag_unsafe_math_optimizations)
1208 || GET_CODE (op) == FLOAT_EXTEND)
1209 return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op,
1210 0)))
1211 > GET_MODE_SIZE (mode)
1212 ? FLOAT_TRUNCATE : FLOAT_EXTEND,
1213 mode,
1214 XEXP (op, 0), mode);
1216 /* (float_truncate (float x)) is (float x) */
1217 if ((GET_CODE (op) == FLOAT || GET_CODE (op) == UNSIGNED_FLOAT)
1218 && (flag_unsafe_math_optimizations
1219 || exact_int_to_float_conversion_p (op)))
1220 return simplify_gen_unary (GET_CODE (op), mode,
1221 XEXP (op, 0),
1222 GET_MODE (XEXP (op, 0)));
1224 /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
1225 (OP:SF foo:SF) if OP is NEG or ABS. */
1226 if ((GET_CODE (op) == ABS
1227 || GET_CODE (op) == NEG)
1228 && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
1229 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
1230 return simplify_gen_unary (GET_CODE (op), mode,
1231 XEXP (XEXP (op, 0), 0), mode);
1233 /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
1234 is (float_truncate:SF x). */
1235 if (GET_CODE (op) == SUBREG
1236 && subreg_lowpart_p (op)
1237 && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
1238 return SUBREG_REG (op);
1239 break;
1241 case FLOAT_EXTEND:
1242 if (DECIMAL_FLOAT_MODE_P (mode))
1243 break;
1245 /* (float_extend (float_extend x)) is (float_extend x)
1247 (float_extend (float x)) is (float x) assuming that double
 1248 rounding can't happen. */
1250 if (GET_CODE (op) == FLOAT_EXTEND
1251 || ((GET_CODE (op) == FLOAT || GET_CODE (op) == UNSIGNED_FLOAT)
1252 && exact_int_to_float_conversion_p (op)))
1253 return simplify_gen_unary (GET_CODE (op), mode,
1254 XEXP (op, 0),
1255 GET_MODE (XEXP (op, 0)));
1257 break;
1259 case ABS:
1260 /* (abs (neg <foo>)) -> (abs <foo>) */
1261 if (GET_CODE (op) == NEG)
1262 return simplify_gen_unary (ABS, mode, XEXP (op, 0),
1263 GET_MODE (XEXP (op, 0)));
1265 /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
1266 do nothing. */
1267 if (GET_MODE (op) == VOIDmode)
1268 break;
1270 /* If operand is something known to be positive, ignore the ABS. */
1271 if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
1272 || val_signbit_known_clear_p (GET_MODE (op),
1273 nonzero_bits (op, GET_MODE (op))))
1274 return op;
1276 /* If operand is known to be only -1 or 0, convert ABS to NEG. */
1277 if (num_sign_bit_copies (op, mode) == GET_MODE_PRECISION (mode))
1278 return gen_rtx_NEG (mode, op);
1280 break;
1282 case FFS:
1283 /* (ffs (*_extend <X>)) = (ffs <X>) */
1284 if (GET_CODE (op) == SIGN_EXTEND
1285 || GET_CODE (op) == ZERO_EXTEND)
1286 return simplify_gen_unary (FFS, mode, XEXP (op, 0),
1287 GET_MODE (XEXP (op, 0)));
1288 break;
1290 case POPCOUNT:
1291 switch (GET_CODE (op))
1293 case BSWAP:
1294 case ZERO_EXTEND:
1295 /* (popcount (zero_extend <X>)) = (popcount <X>) */
1296 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1297 GET_MODE (XEXP (op, 0)));
1299 case ROTATE:
1300 case ROTATERT:
1301 /* Rotations don't affect popcount. */
1302 if (!side_effects_p (XEXP (op, 1)))
1303 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1304 GET_MODE (XEXP (op, 0)));
1305 break;
1307 default:
1308 break;
1310 break;
1312 case PARITY:
1313 switch (GET_CODE (op))
1315 case NOT:
1316 case BSWAP:
1317 case ZERO_EXTEND:
1318 case SIGN_EXTEND:
1319 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1320 GET_MODE (XEXP (op, 0)));
1322 case ROTATE:
1323 case ROTATERT:
1324 /* Rotations don't affect parity. */
1325 if (!side_effects_p (XEXP (op, 1)))
1326 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1327 GET_MODE (XEXP (op, 0)));
1328 break;
1330 default:
1331 break;
1333 break;
1335 case BSWAP:
1336 /* (bswap (bswap x)) -> x. */
1337 if (GET_CODE (op) == BSWAP)
1338 return XEXP (op, 0);
1339 break;
1341 case FLOAT:
1342 /* (float (sign_extend <X>)) = (float <X>). */
1343 if (GET_CODE (op) == SIGN_EXTEND)
1344 return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
1345 GET_MODE (XEXP (op, 0)));
1346 break;
1348 case SIGN_EXTEND:
1349 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
1350 becomes just the MINUS if its mode is MODE. This allows
1351 folding switch statements on machines using casesi (such as
1352 the VAX). */
1353 if (GET_CODE (op) == TRUNCATE
1354 && GET_MODE (XEXP (op, 0)) == mode
1355 && GET_CODE (XEXP (op, 0)) == MINUS
1356 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
1357 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
1358 return XEXP (op, 0);
1360 /* Extending a widening multiplication should be canonicalized to
1361 a wider widening multiplication. */
1362 if (GET_CODE (op) == MULT)
1364 rtx lhs = XEXP (op, 0);
1365 rtx rhs = XEXP (op, 1);
1366 enum rtx_code lcode = GET_CODE (lhs);
1367 enum rtx_code rcode = GET_CODE (rhs);
1369 /* Widening multiplies usually extend both operands, but sometimes
1370 they use a shift to extract a portion of a register. */
1371 if ((lcode == SIGN_EXTEND
1372 || (lcode == ASHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1373 && (rcode == SIGN_EXTEND
1374 || (rcode == ASHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1376 machine_mode lmode = GET_MODE (lhs);
1377 machine_mode rmode = GET_MODE (rhs);
1378 int bits;
1380 if (lcode == ASHIFTRT)
1381 /* Number of bits not shifted off the end. */
1382 bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
1383 else /* lcode == SIGN_EXTEND */
1384 /* Size of inner mode. */
1385 bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
1387 if (rcode == ASHIFTRT)
1388 bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
1389 else /* rcode == SIGN_EXTEND */
1390 bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
 1392 /* We can only widen multiplies if the result is mathematically
1393 equivalent. I.e. if overflow was impossible. */
1394 if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
1395 return simplify_gen_binary
1396 (MULT, mode,
1397 simplify_gen_unary (SIGN_EXTEND, mode, lhs, lmode),
1398 simplify_gen_unary (SIGN_EXTEND, mode, rhs, rmode));
1402 /* Check for a sign extension of a subreg of a promoted
1403 variable, where the promotion is sign-extended, and the
1404 target mode is the same as the variable's promotion. */
1405 if (GET_CODE (op) == SUBREG
1406 && SUBREG_PROMOTED_VAR_P (op)
1407 && SUBREG_PROMOTED_SIGNED_P (op)
1408 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
1410 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1411 if (temp)
1412 return temp;
1415 /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
1416 (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1417 if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
1419 gcc_assert (GET_MODE_PRECISION (mode)
1420 > GET_MODE_PRECISION (GET_MODE (op)));
1421 return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
1422 GET_MODE (XEXP (op, 0)));
1425 /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
1426 is (sign_extend:M (subreg:O <X>)) if there is mode with
1427 GET_MODE_BITSIZE (N) - I bits.
1428 (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1429 is similarly (zero_extend:M (subreg:O <X>)). */
1430 if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT)
1431 && GET_CODE (XEXP (op, 0)) == ASHIFT
1432 && CONST_INT_P (XEXP (op, 1))
1433 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1434 && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
1436 machine_mode tmode
1437 = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
1438 - INTVAL (XEXP (op, 1)), MODE_INT, 1);
1439 gcc_assert (GET_MODE_BITSIZE (mode)
1440 > GET_MODE_BITSIZE (GET_MODE (op)));
1441 if (tmode != BLKmode)
1443 rtx inner =
1444 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1445 if (inner)
1446 return simplify_gen_unary (GET_CODE (op) == ASHIFTRT
1447 ? SIGN_EXTEND : ZERO_EXTEND,
1448 mode, inner, tmode);
1452 #if defined(POINTERS_EXTEND_UNSIGNED)
1453 /* As we do not know which address space the pointer is referring to,
1454 we can do this only if the target does not support different pointer
1455 or address modes depending on the address space. */
1456 if (target_default_pointer_address_modes_p ()
1457 && ! POINTERS_EXTEND_UNSIGNED
1458 && mode == Pmode && GET_MODE (op) == ptr_mode
1459 && (CONSTANT_P (op)
1460 || (GET_CODE (op) == SUBREG
1461 && REG_P (SUBREG_REG (op))
1462 && REG_POINTER (SUBREG_REG (op))
1463 && GET_MODE (SUBREG_REG (op)) == Pmode))
1464 && !targetm.have_ptr_extend ())
1465 return convert_memory_address (Pmode, op);
1466 #endif
1467 break;
1469 case ZERO_EXTEND:
1470 /* Check for a zero extension of a subreg of a promoted
1471 variable, where the promotion is zero-extended, and the
1472 target mode is the same as the variable's promotion. */
1473 if (GET_CODE (op) == SUBREG
1474 && SUBREG_PROMOTED_VAR_P (op)
1475 && SUBREG_PROMOTED_UNSIGNED_P (op)
1476 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
1478 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1479 if (temp)
1480 return temp;
1483 /* Extending a widening multiplication should be canonicalized to
1484 a wider widening multiplication. */
1485 if (GET_CODE (op) == MULT)
1487 rtx lhs = XEXP (op, 0);
1488 rtx rhs = XEXP (op, 1);
1489 enum rtx_code lcode = GET_CODE (lhs);
1490 enum rtx_code rcode = GET_CODE (rhs);
1492 /* Widening multiplies usually extend both operands, but sometimes
1493 they use a shift to extract a portion of a register. */
1494 if ((lcode == ZERO_EXTEND
1495 || (lcode == LSHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1496 && (rcode == ZERO_EXTEND
1497 || (rcode == LSHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1499 machine_mode lmode = GET_MODE (lhs);
1500 machine_mode rmode = GET_MODE (rhs);
1501 int bits;
1503 if (lcode == LSHIFTRT)
1504 /* Number of bits not shifted off the end. */
1505 bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
1506 else /* lcode == ZERO_EXTEND */
1507 /* Size of inner mode. */
1508 bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
1510 if (rcode == LSHIFTRT)
1511 bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
1512 else /* rcode == ZERO_EXTEND */
1513 bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
 1515 /* We can only widen multiplies if the result is mathematically
1516 equivalent. I.e. if overflow was impossible. */
1517 if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
1518 return simplify_gen_binary
1519 (MULT, mode,
1520 simplify_gen_unary (ZERO_EXTEND, mode, lhs, lmode),
1521 simplify_gen_unary (ZERO_EXTEND, mode, rhs, rmode));
1525 /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1526 if (GET_CODE (op) == ZERO_EXTEND)
1527 return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0),
1528 GET_MODE (XEXP (op, 0)));
1530 /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1531 is (zero_extend:M (subreg:O <X>)) if there is mode with
1532 GET_MODE_PRECISION (N) - I bits. */
1533 if (GET_CODE (op) == LSHIFTRT
1534 && GET_CODE (XEXP (op, 0)) == ASHIFT
1535 && CONST_INT_P (XEXP (op, 1))
1536 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1537 && GET_MODE_PRECISION (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
1539 machine_mode tmode
1540 = mode_for_size (GET_MODE_PRECISION (GET_MODE (op))
1541 - INTVAL (XEXP (op, 1)), MODE_INT, 1);
1542 if (tmode != BLKmode)
1544 rtx inner =
1545 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1546 if (inner)
1547 return simplify_gen_unary (ZERO_EXTEND, mode, inner, tmode);
1551 /* (zero_extend:M (subreg:N <X:O>)) is <X:O> (for M == O) or
1552 (zero_extend:M <X:O>), if X doesn't have any non-zero bits outside
1553 of mode N. E.g.
1554 (zero_extend:SI (subreg:QI (and:SI (reg:SI) (const_int 63)) 0)) is
1555 (and:SI (reg:SI) (const_int 63)). */
1556 if (GET_CODE (op) == SUBREG
1557 && GET_MODE_PRECISION (GET_MODE (op))
1558 < GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
1559 && GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
1560 <= HOST_BITS_PER_WIDE_INT
1561 && GET_MODE_PRECISION (mode)
1562 >= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
1563 && subreg_lowpart_p (op)
1564 && (nonzero_bits (SUBREG_REG (op), GET_MODE (SUBREG_REG (op)))
1565 & ~GET_MODE_MASK (GET_MODE (op))) == 0)
1567 if (GET_MODE_PRECISION (mode)
1568 == GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op))))
1569 return SUBREG_REG (op);
1570 return simplify_gen_unary (ZERO_EXTEND, mode, SUBREG_REG (op),
1571 GET_MODE (SUBREG_REG (op)));
1574 #if defined(POINTERS_EXTEND_UNSIGNED)
1575 /* As we do not know which address space the pointer is referring to,
1576 we can do this only if the target does not support different pointer
1577 or address modes depending on the address space. */
1578 if (target_default_pointer_address_modes_p ()
1579 && POINTERS_EXTEND_UNSIGNED > 0
1580 && mode == Pmode && GET_MODE (op) == ptr_mode
1581 && (CONSTANT_P (op)
1582 || (GET_CODE (op) == SUBREG
1583 && REG_P (SUBREG_REG (op))
1584 && REG_POINTER (SUBREG_REG (op))
1585 && GET_MODE (SUBREG_REG (op)) == Pmode))
1586 && !targetm.have_ptr_extend ())
1587 return convert_memory_address (Pmode, op);
1588 #endif
1589 break;
1591 default:
1592 break;
1595 return 0;
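Several of the NOT and NEG cases above rest on two's-complement identities that are easy to spot-check in plain C (illustrative only; unsigned arithmetic is used so the wrap-around is well defined):

#include <assert.h>
#include <stdint.h>

int
main (void)
{
  uint32_t x = 0xdeadbeefu, y = 0x12345678u;

  assert (~(x - 1) == -x);              /* (not (plus X -1)) == (neg X) */
  assert (-(x + 1) == ~x);              /* (neg (plus X 1)) == (not X) */
  assert (~(x | y) == (~x & ~y));       /* De Morgan for (not (ior X Y)) */
  assert (~(x & y) == (~x | ~y));       /* De Morgan for (not (and X Y)) */
  return 0;
}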
1598 /* Try to compute the value of a unary operation CODE whose output mode is to
1599 be MODE with input operand OP whose mode was originally OP_MODE.
1600 Return zero if the value cannot be computed. */
1602 simplify_const_unary_operation (enum rtx_code code, machine_mode mode,
1603 rtx op, machine_mode op_mode)
1605 unsigned int width = GET_MODE_PRECISION (mode);
1607 if (code == VEC_DUPLICATE)
1609 gcc_assert (VECTOR_MODE_P (mode));
1610 if (GET_MODE (op) != VOIDmode)
1612 if (!VECTOR_MODE_P (GET_MODE (op)))
1613 gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
1614 else
1615 gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
1616 (GET_MODE (op)));
1618 if (CONST_SCALAR_INT_P (op) || CONST_DOUBLE_AS_FLOAT_P (op)
1619 || GET_CODE (op) == CONST_VECTOR)
1621 int elt_size = GET_MODE_UNIT_SIZE (mode);
1622 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1623 rtvec v = rtvec_alloc (n_elts);
1624 unsigned int i;
1626 if (GET_CODE (op) != CONST_VECTOR)
1627 for (i = 0; i < n_elts; i++)
1628 RTVEC_ELT (v, i) = op;
1629 else
1631 machine_mode inmode = GET_MODE (op);
1632 int in_elt_size = GET_MODE_UNIT_SIZE (inmode);
1633 unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
1635 gcc_assert (in_n_elts < n_elts);
1636 gcc_assert ((n_elts % in_n_elts) == 0);
1637 for (i = 0; i < n_elts; i++)
1638 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
1640 return gen_rtx_CONST_VECTOR (mode, v);
1644 if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
1646 int elt_size = GET_MODE_UNIT_SIZE (mode);
1647 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1648 machine_mode opmode = GET_MODE (op);
1649 int op_elt_size = GET_MODE_UNIT_SIZE (opmode);
1650 unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
1651 rtvec v = rtvec_alloc (n_elts);
1652 unsigned int i;
1654 gcc_assert (op_n_elts == n_elts);
1655 for (i = 0; i < n_elts; i++)
1657 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
1658 CONST_VECTOR_ELT (op, i),
1659 GET_MODE_INNER (opmode));
1660 if (!x)
1661 return 0;
1662 RTVEC_ELT (v, i) = x;
1664 return gen_rtx_CONST_VECTOR (mode, v);
1667 /* The order of these tests is critical so that, for example, we don't
1668 check the wrong mode (input vs. output) for a conversion operation,
1669 such as FIX. At some point, this should be simplified. */
1671 if (code == FLOAT && CONST_SCALAR_INT_P (op))
1673 REAL_VALUE_TYPE d;
1675 if (op_mode == VOIDmode)
 1677 /* CONST_INTs have VOIDmode as their mode. We assume that all
 1678 the bits of the constant are significant, though this is
1679 a dangerous assumption as many times CONST_INTs are
1680 created and used with garbage in the bits outside of the
1681 precision of the implied mode of the const_int. */
1682 op_mode = MAX_MODE_INT;
1685 real_from_integer (&d, mode, std::make_pair (op, op_mode), SIGNED);
1686 d = real_value_truncate (mode, d);
1687 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1689 else if (code == UNSIGNED_FLOAT && CONST_SCALAR_INT_P (op))
1691 REAL_VALUE_TYPE d;
1693 if (op_mode == VOIDmode)
 1695 /* CONST_INTs have VOIDmode as their mode. We assume that all
 1696 the bits of the constant are significant, though this is
1697 a dangerous assumption as many times CONST_INTs are
1698 created and used with garbage in the bits outside of the
1699 precision of the implied mode of the const_int. */
1700 op_mode = MAX_MODE_INT;
1703 real_from_integer (&d, mode, std::make_pair (op, op_mode), UNSIGNED);
1704 d = real_value_truncate (mode, d);
1705 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1708 if (CONST_SCALAR_INT_P (op) && width > 0)
1710 wide_int result;
1711 machine_mode imode = op_mode == VOIDmode ? mode : op_mode;
1712 rtx_mode_t op0 = std::make_pair (op, imode);
1713 int int_value;
1715 #if TARGET_SUPPORTS_WIDE_INT == 0
1716 /* This assert keeps the simplification from producing a result
1717 that cannot be represented in a CONST_DOUBLE but a lot of
1718 upstream callers expect that this function never fails to
 1719 simplify something, and so if you added this to the test
 1720 above, the code would die later anyway. If this assert
1721 happens, you just need to make the port support wide int. */
1722 gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT);
1723 #endif
1725 switch (code)
1727 case NOT:
1728 result = wi::bit_not (op0);
1729 break;
1731 case NEG:
1732 result = wi::neg (op0);
1733 break;
1735 case ABS:
1736 result = wi::abs (op0);
1737 break;
1739 case FFS:
1740 result = wi::shwi (wi::ffs (op0), mode);
1741 break;
1743 case CLZ:
1744 if (wi::ne_p (op0, 0))
1745 int_value = wi::clz (op0);
1746 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, int_value))
1747 int_value = GET_MODE_PRECISION (mode);
1748 result = wi::shwi (int_value, mode);
1749 break;
1751 case CLRSB:
1752 result = wi::shwi (wi::clrsb (op0), mode);
1753 break;
1755 case CTZ:
1756 if (wi::ne_p (op0, 0))
1757 int_value = wi::ctz (op0);
1758 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, int_value))
1759 int_value = GET_MODE_PRECISION (mode);
1760 result = wi::shwi (int_value, mode);
1761 break;
1763 case POPCOUNT:
1764 result = wi::shwi (wi::popcount (op0), mode);
1765 break;
1767 case PARITY:
1768 result = wi::shwi (wi::parity (op0), mode);
1769 break;
1771 case BSWAP:
1772 result = wide_int (op0).bswap ();
1773 break;
1775 case TRUNCATE:
1776 case ZERO_EXTEND:
1777 result = wide_int::from (op0, width, UNSIGNED);
1778 break;
1780 case SIGN_EXTEND:
1781 result = wide_int::from (op0, width, SIGNED);
1782 break;
1784 case SQRT:
1785 default:
1786 return 0;
1789 return immed_wide_int_const (result, mode);
1792 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1793 && SCALAR_FLOAT_MODE_P (mode)
1794 && SCALAR_FLOAT_MODE_P (GET_MODE (op)))
1796 REAL_VALUE_TYPE d;
1797 REAL_VALUE_FROM_CONST_DOUBLE (d, op);
1799 switch (code)
1801 case SQRT:
1802 return 0;
1803 case ABS:
1804 d = real_value_abs (&d);
1805 break;
1806 case NEG:
1807 d = real_value_negate (&d);
1808 break;
1809 case FLOAT_TRUNCATE:
1810 d = real_value_truncate (mode, d);
1811 break;
1812 case FLOAT_EXTEND:
1813 /* All this does is change the mode, unless changing
1814 mode class. */
1815 if (GET_MODE_CLASS (mode) != GET_MODE_CLASS (GET_MODE (op)))
1816 real_convert (&d, mode, &d);
1817 break;
1818 case FIX:
1819 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
1820 break;
1821 case NOT:
1823 long tmp[4];
1824 int i;
1826 real_to_target (tmp, &d, GET_MODE (op));
1827 for (i = 0; i < 4; i++)
1828 tmp[i] = ~tmp[i];
1829 real_from_target (&d, tmp, mode);
1830 break;
1832 default:
1833 gcc_unreachable ();
1835 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1837 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1838 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1839 && GET_MODE_CLASS (mode) == MODE_INT
1840 && width > 0)
1842 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1843 operators are intentionally left unspecified (to ease implementation
1844 by target backends), for consistency, this routine implements the
1845 same semantics for constant folding as used by the middle-end. */
1847 /* This was formerly used only for non-IEEE float.
1848 eggert@twinsun.com says it is safe for IEEE also. */
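/* Illustrative example of the saturating behavior below (assuming a
   32-bit SImode target): folding (fix:SI (const_double 1.0e10))
   saturates to the signed maximum, (const_int 2147483647), because
   1.0e10 exceeds it; a NaN operand folds to (const_int 0).  */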
1849 REAL_VALUE_TYPE x, t;
1850 REAL_VALUE_FROM_CONST_DOUBLE (x, op);
1851 wide_int wmax, wmin;
1852 /* This is part of the ABI of real_to_integer, but we check
1853 things before making this call. */
1854 bool fail;
1856 switch (code)
1858 case FIX:
1859 if (REAL_VALUE_ISNAN (x))
1860 return const0_rtx;
1862 /* Test against the signed upper bound. */
1863 wmax = wi::max_value (width, SIGNED);
1864 real_from_integer (&t, VOIDmode, wmax, SIGNED);
1865 if (REAL_VALUES_LESS (t, x))
1866 return immed_wide_int_const (wmax, mode);
1868 /* Test against the signed lower bound. */
1869 wmin = wi::min_value (width, SIGNED);
1870 real_from_integer (&t, VOIDmode, wmin, SIGNED);
1871 if (REAL_VALUES_LESS (x, t))
1872 return immed_wide_int_const (wmin, mode);
1874 return immed_wide_int_const (real_to_integer (&x, &fail, width), mode);
1875 break;
1877 case UNSIGNED_FIX:
1878 if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
1879 return const0_rtx;
1881 /* Test against the unsigned upper bound. */
1882 wmax = wi::max_value (width, UNSIGNED);
1883 real_from_integer (&t, VOIDmode, wmax, UNSIGNED);
1884 if (REAL_VALUES_LESS (t, x))
1885 return immed_wide_int_const (wmax, mode);
1887 return immed_wide_int_const (real_to_integer (&x, &fail, width),
1888 mode);
1889 break;
1891 default:
1892 gcc_unreachable ();
1896 return NULL_RTX;
1899 /* Subroutine of simplify_binary_operation to simplify a binary operation
1900 CODE that can commute with byte swapping, with result mode MODE and
1901 operating on OP0 and OP1. CODE is currently one of AND, IOR or XOR.
1902 Return zero if no simplification or canonicalization is possible. */
1904 static rtx
1905 simplify_byte_swapping_operation (enum rtx_code code, machine_mode mode,
1906 rtx op0, rtx op1)
1908 rtx tem;
1910 /* (op (bswap x) C1) -> (bswap (op x C2)) with C2 swapped. */
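/* For instance (illustrative, assuming 32-bit SImode):
   (and (bswap x) (const_int 0xff)) becomes
   (bswap (and x (const_int 0xff000000))).  */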
1911 if (GET_CODE (op0) == BSWAP && CONST_SCALAR_INT_P (op1))
1913 tem = simplify_gen_binary (code, mode, XEXP (op0, 0),
1914 simplify_gen_unary (BSWAP, mode, op1, mode));
1915 return simplify_gen_unary (BSWAP, mode, tem, mode);
1918 /* (op (bswap x) (bswap y)) -> (bswap (op x y)). */
1919 if (GET_CODE (op0) == BSWAP && GET_CODE (op1) == BSWAP)
1921 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), XEXP (op1, 0));
1922 return simplify_gen_unary (BSWAP, mode, tem, mode);
1925 return NULL_RTX;
1928 /* Subroutine of simplify_binary_operation to simplify a commutative,
1929 associative binary operation CODE with result mode MODE, operating
1930 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
1931 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
1932 canonicalization is possible. */
1934 static rtx
1935 simplify_associative_operation (enum rtx_code code, machine_mode mode,
1936 rtx op0, rtx op1)
1938 rtx tem;
1940 /* Linearize the operator to the left. */
1941 if (GET_CODE (op1) == code)
1943 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)". */
1944 if (GET_CODE (op0) == code)
1946 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
1947 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
1950 /* "a op (b op c)" becomes "(b op c) op a". */
1951 if (! swap_commutative_operands_p (op1, op0))
1952 return simplify_gen_binary (code, mode, op1, op0);
1954 std::swap (op0, op1);
1957 if (GET_CODE (op0) == code)
1959 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
1960 if (swap_commutative_operands_p (XEXP (op0, 1), op1))
1962 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
1963 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1966 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
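/* Illustrative example: (plus (plus x (const_int 1)) (const_int 2))
   is handled by this rule, folding the two constants into
   (plus x (const_int 3)).  */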
1967 tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
1968 if (tem != 0)
1969 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
1971 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
1972 tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
1973 if (tem != 0)
1974 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1977 return 0;
1981 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
1982 and OP1. Return 0 if no simplification is possible.
1984 Don't use this for relational operations such as EQ or LT.
1985 Use simplify_relational_operation instead. */
1987 simplify_binary_operation (enum rtx_code code, machine_mode mode,
1988 rtx op0, rtx op1)
1990 rtx trueop0, trueop1;
1991 rtx tem;
1993 /* Relational operations don't work here. We must know the mode
1994 of the operands in order to do the comparison correctly.
1995 Assuming a full word can give incorrect results.
1996 Consider comparing 128 with -128 in QImode. */
1997 gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
1998 gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
2000 /* Make sure the constant is second. */
2001 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
2002 && swap_commutative_operands_p (op0, op1))
2003 std::swap (op0, op1);
2005 trueop0 = avoid_constant_pool_reference (op0);
2006 trueop1 = avoid_constant_pool_reference (op1);
2008 tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
2009 if (tem)
2010 return tem;
2011 return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
2014 /* Subroutine of simplify_binary_operation. Simplify a binary operation
2015 CODE with result mode MODE, operating on OP0 and OP1. If OP0 and/or
2016 OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
2017 actual constants. */
2019 static rtx
2020 simplify_binary_operation_1 (enum rtx_code code, machine_mode mode,
2021 rtx op0, rtx op1, rtx trueop0, rtx trueop1)
2023 rtx tem, reversed, opleft, opright;
2024 HOST_WIDE_INT val;
2025 unsigned int width = GET_MODE_PRECISION (mode);
2027 /* Even if we can't compute a constant result,
2028 there are some cases worth simplifying. */
2030 switch (code)
2032 case PLUS:
2033 /* Maybe simplify x + 0 to x. The two expressions are equivalent
2034 when x is NaN, infinite, or finite and nonzero. They aren't
2035 when x is -0 and the rounding mode is not towards -infinity,
2036 since (-0) + 0 is then 0. */
2037 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
2038 return op0;
2040 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
2041 transformations are safe even for IEEE. */
2042 if (GET_CODE (op0) == NEG)
2043 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
2044 else if (GET_CODE (op1) == NEG)
2045 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
2047 /* (~a) + 1 -> -a */
2048 if (INTEGRAL_MODE_P (mode)
2049 && GET_CODE (op0) == NOT
2050 && trueop1 == const1_rtx)
2051 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
2053 /* Handle both-operands-constant cases. We can only add
2054 CONST_INTs to constants since the sum of relocatable symbols
2055 can't be handled by most assemblers. Don't add CONST_INT
2056 to CONST_INT since overflow won't be computed properly if wider
2057 than HOST_BITS_PER_WIDE_INT. */
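/* For example (illustrative symbol name): (plus (symbol_ref "sym")
   (const_int 8)) is typically folded by plus_constant into
   (const (plus (symbol_ref "sym") (const_int 8))).  */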
2059 if ((GET_CODE (op0) == CONST
2060 || GET_CODE (op0) == SYMBOL_REF
2061 || GET_CODE (op0) == LABEL_REF)
2062 && CONST_INT_P (op1))
2063 return plus_constant (mode, op0, INTVAL (op1));
2064 else if ((GET_CODE (op1) == CONST
2065 || GET_CODE (op1) == SYMBOL_REF
2066 || GET_CODE (op1) == LABEL_REF)
2067 && CONST_INT_P (op0))
2068 return plus_constant (mode, op1, INTVAL (op0));
2070 /* See if this is something like X * C + X or vice versa or
2071 if the multiplication is written as a shift. If so, we can
2072 distribute and make a new multiply or shift (for instance
2073 X * 2 + X becomes X * 3). But don't make
2074 something more expensive than we had before. */
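/* Illustrative example (scalar integer mode, cost check assumed to
   pass): (plus (ashift x (const_int 2)) x) gives coeff0 == 4 and
   coeff1 == 1, so it becomes (mult x (const_int 5)).  */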
2076 if (SCALAR_INT_MODE_P (mode))
2078 rtx lhs = op0, rhs = op1;
2080 wide_int coeff0 = wi::one (GET_MODE_PRECISION (mode));
2081 wide_int coeff1 = wi::one (GET_MODE_PRECISION (mode));
2083 if (GET_CODE (lhs) == NEG)
2085 coeff0 = wi::minus_one (GET_MODE_PRECISION (mode));
2086 lhs = XEXP (lhs, 0);
2088 else if (GET_CODE (lhs) == MULT
2089 && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
2091 coeff0 = std::make_pair (XEXP (lhs, 1), mode);
2092 lhs = XEXP (lhs, 0);
2094 else if (GET_CODE (lhs) == ASHIFT
2095 && CONST_INT_P (XEXP (lhs, 1))
2096 && INTVAL (XEXP (lhs, 1)) >= 0
2097 && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (mode))
2099 coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
2100 GET_MODE_PRECISION (mode));
2101 lhs = XEXP (lhs, 0);
2104 if (GET_CODE (rhs) == NEG)
2106 coeff1 = wi::minus_one (GET_MODE_PRECISION (mode));
2107 rhs = XEXP (rhs, 0);
2109 else if (GET_CODE (rhs) == MULT
2110 && CONST_INT_P (XEXP (rhs, 1)))
2112 coeff1 = std::make_pair (XEXP (rhs, 1), mode);
2113 rhs = XEXP (rhs, 0);
2115 else if (GET_CODE (rhs) == ASHIFT
2116 && CONST_INT_P (XEXP (rhs, 1))
2117 && INTVAL (XEXP (rhs, 1)) >= 0
2118 && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (mode))
2120 coeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
2121 GET_MODE_PRECISION (mode));
2122 rhs = XEXP (rhs, 0);
2125 if (rtx_equal_p (lhs, rhs))
2127 rtx orig = gen_rtx_PLUS (mode, op0, op1);
2128 rtx coeff;
2129 bool speed = optimize_function_for_speed_p (cfun);
2131 coeff = immed_wide_int_const (coeff0 + coeff1, mode);
2133 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2134 return (set_src_cost (tem, mode, speed)
2135 <= set_src_cost (orig, mode, speed) ? tem : 0);
2139 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
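/* For instance (illustrative, QImode where the sign bit is 0x80):
   (plus (xor x (const_int 3)) (const_int -128)) becomes
   (xor x (const_int -125)), i.e. C1 ^ 0x80.  */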
2140 if (CONST_SCALAR_INT_P (op1)
2141 && GET_CODE (op0) == XOR
2142 && CONST_SCALAR_INT_P (XEXP (op0, 1))
2143 && mode_signbit_p (mode, op1))
2144 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2145 simplify_gen_binary (XOR, mode, op1,
2146 XEXP (op0, 1)));
2148 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
2149 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2150 && GET_CODE (op0) == MULT
2151 && GET_CODE (XEXP (op0, 0)) == NEG)
2153 rtx in1, in2;
2155 in1 = XEXP (XEXP (op0, 0), 0);
2156 in2 = XEXP (op0, 1);
2157 return simplify_gen_binary (MINUS, mode, op1,
2158 simplify_gen_binary (MULT, mode,
2159 in1, in2));
2162 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
2163 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
2164 is 1. */
2165 if (COMPARISON_P (op0)
2166 && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
2167 || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
2168 && (reversed = reversed_comparison (op0, mode)))
2169 return
2170 simplify_gen_unary (NEG, mode, reversed, mode);
2172 /* If one of the operands is a PLUS or a MINUS, see if we can
2173 simplify this by the associative law.
2174 Don't use the associative law for floating point.
2175 The inaccuracy makes it nonassociative,
2176 and subtle programs can break if operations are associated. */
2178 if (INTEGRAL_MODE_P (mode)
2179 && (plus_minus_operand_p (op0)
2180 || plus_minus_operand_p (op1))
2181 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2182 return tem;
2184 /* Reassociate floating point addition only when the user
2185 specifies associative math operations. */
2186 if (FLOAT_MODE_P (mode)
2187 && flag_associative_math)
2189 tem = simplify_associative_operation (code, mode, op0, op1);
2190 if (tem)
2191 return tem;
2193 break;
2195 case COMPARE:
2196 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
2197 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
2198 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
2199 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
2201 rtx xop00 = XEXP (op0, 0);
2202 rtx xop10 = XEXP (op1, 0);
2204 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
2205 return xop00;
2207 if (REG_P (xop00) && REG_P (xop10)
2208 && GET_MODE (xop00) == GET_MODE (xop10)
2209 && REGNO (xop00) == REGNO (xop10)
2210 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
2211 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
2212 return xop00;
2214 break;
2216 case MINUS:
2217 /* We can't assume x-x is 0 even with non-IEEE floating point,
2218 but since it is zero except in very strange circumstances, we
2219 will treat it as zero with -ffinite-math-only. */
2220 if (rtx_equal_p (trueop0, trueop1)
2221 && ! side_effects_p (op0)
2222 && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
2223 return CONST0_RTX (mode);
2225 /* Change subtraction from zero into negation. (0 - x) is the
2226 same as -x when x is NaN, infinite, or finite and nonzero.
2227 But if the mode has signed zeros, and does not round towards
2228 -infinity, then 0 - 0 is 0, not -0. */
2229 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
2230 return simplify_gen_unary (NEG, mode, op1, mode);
2232 /* (-1 - a) is ~a. */
2233 if (trueop0 == constm1_rtx)
2234 return simplify_gen_unary (NOT, mode, op1, mode);
2236 /* Subtracting 0 has no effect unless the mode has signed zeros
2237 and supports rounding towards -infinity. In such a case,
2238 0 - 0 is -0. */
2239 if (!(HONOR_SIGNED_ZEROS (mode)
2240 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
2241 && trueop1 == CONST0_RTX (mode))
2242 return op0;
2244 /* See if this is something like X * C - X or vice versa or
2245 if the multiplication is written as a shift. If so, we can
2246 distribute and make a new multiply, shift, or maybe just
2247 have X (if C is 2 in the example above). But don't make
2248 something more expensive than we had before. */
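/* Illustrative example (scalar integer mode, cost check assumed to
   pass): (minus (mult x (const_int 3)) x) gives coeff0 == 3 and
   negcoeff1 == -1, so it becomes (mult x (const_int 2)).  */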
2250 if (SCALAR_INT_MODE_P (mode))
2252 rtx lhs = op0, rhs = op1;
2254 wide_int coeff0 = wi::one (GET_MODE_PRECISION (mode));
2255 wide_int negcoeff1 = wi::minus_one (GET_MODE_PRECISION (mode));
2257 if (GET_CODE (lhs) == NEG)
2259 coeff0 = wi::minus_one (GET_MODE_PRECISION (mode));
2260 lhs = XEXP (lhs, 0);
2262 else if (GET_CODE (lhs) == MULT
2263 && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
2265 coeff0 = std::make_pair (XEXP (lhs, 1), mode);
2266 lhs = XEXP (lhs, 0);
2268 else if (GET_CODE (lhs) == ASHIFT
2269 && CONST_INT_P (XEXP (lhs, 1))
2270 && INTVAL (XEXP (lhs, 1)) >= 0
2271 && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (mode))
2273 coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
2274 GET_MODE_PRECISION (mode));
2275 lhs = XEXP (lhs, 0);
2278 if (GET_CODE (rhs) == NEG)
2280 negcoeff1 = wi::one (GET_MODE_PRECISION (mode));
2281 rhs = XEXP (rhs, 0);
2283 else if (GET_CODE (rhs) == MULT
2284 && CONST_INT_P (XEXP (rhs, 1)))
2286 negcoeff1 = wi::neg (std::make_pair (XEXP (rhs, 1), mode));
2287 rhs = XEXP (rhs, 0);
2289 else if (GET_CODE (rhs) == ASHIFT
2290 && CONST_INT_P (XEXP (rhs, 1))
2291 && INTVAL (XEXP (rhs, 1)) >= 0
2292 && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (mode))
2294 negcoeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
2295 GET_MODE_PRECISION (mode));
2296 negcoeff1 = -negcoeff1;
2297 rhs = XEXP (rhs, 0);
2300 if (rtx_equal_p (lhs, rhs))
2302 rtx orig = gen_rtx_MINUS (mode, op0, op1);
2303 rtx coeff;
2304 bool speed = optimize_function_for_speed_p (cfun);
2306 coeff = immed_wide_int_const (coeff0 + negcoeff1, mode);
2308 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2309 return (set_src_cost (tem, mode, speed)
2310 <= set_src_cost (orig, mode, speed) ? tem : 0);
2314 /* (a - (-b)) -> (a + b). True even for IEEE. */
2315 if (GET_CODE (op1) == NEG)
2316 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
2318 /* (-x - c) may be simplified as (-c - x). */
2319 if (GET_CODE (op0) == NEG
2320 && (CONST_SCALAR_INT_P (op1) || CONST_DOUBLE_AS_FLOAT_P (op1)))
2322 tem = simplify_unary_operation (NEG, mode, op1, mode);
2323 if (tem)
2324 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
2327 /* Don't let a relocatable value get a negative coeff. */
2328 if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode)
2329 return simplify_gen_binary (PLUS, mode,
2330 op0,
2331 neg_const_int (mode, op1));
2333 /* (x - (x & y)) -> (x & ~y) */
2334 if (INTEGRAL_MODE_P (mode) && GET_CODE (op1) == AND)
2336 if (rtx_equal_p (op0, XEXP (op1, 0)))
2338 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
2339 GET_MODE (XEXP (op1, 1)));
2340 return simplify_gen_binary (AND, mode, op0, tem);
2342 if (rtx_equal_p (op0, XEXP (op1, 1)))
2344 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
2345 GET_MODE (XEXP (op1, 0)));
2346 return simplify_gen_binary (AND, mode, op0, tem);
2350 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
2351 by reversing the comparison code if valid. */
2352 if (STORE_FLAG_VALUE == 1
2353 && trueop0 == const1_rtx
2354 && COMPARISON_P (op1)
2355 && (reversed = reversed_comparison (op1, mode)))
2356 return reversed;
2358 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
2359 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2360 && GET_CODE (op1) == MULT
2361 && GET_CODE (XEXP (op1, 0)) == NEG)
2363 rtx in1, in2;
2365 in1 = XEXP (XEXP (op1, 0), 0);
2366 in2 = XEXP (op1, 1);
2367 return simplify_gen_binary (PLUS, mode,
2368 simplify_gen_binary (MULT, mode,
2369 in1, in2),
2370 op0);
2373 /* Canonicalize (minus (neg A) (mult B C)) to
2374 (minus (mult (neg B) C) A). */
2375 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2376 && GET_CODE (op1) == MULT
2377 && GET_CODE (op0) == NEG)
2379 rtx in1, in2;
2381 in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
2382 in2 = XEXP (op1, 1);
2383 return simplify_gen_binary (MINUS, mode,
2384 simplify_gen_binary (MULT, mode,
2385 in1, in2),
2386 XEXP (op0, 0));
2389 /* If one of the operands is a PLUS or a MINUS, see if we can
2390 simplify this by the associative law. This will, for example,
2391 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
2392 Don't use the associative law for floating point.
2393 The inaccuracy makes it nonassociative,
2394 and subtle programs can break if operations are associated. */
2396 if (INTEGRAL_MODE_P (mode)
2397 && (plus_minus_operand_p (op0)
2398 || plus_minus_operand_p (op1))
2399 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2400 return tem;
2401 break;
2403 case MULT:
2404 if (trueop1 == constm1_rtx)
2405 return simplify_gen_unary (NEG, mode, op0, mode);
2407 if (GET_CODE (op0) == NEG)
2409 rtx temp = simplify_unary_operation (NEG, mode, op1, mode);
2410 /* If op1 is a MULT as well and simplify_unary_operation
2411 just moved the NEG to the second operand, simplify_gen_binary
2412 below could, through simplify_associative_operation, move
2413 the NEG around again and recurse endlessly. */
2414 if (temp
2415 && GET_CODE (op1) == MULT
2416 && GET_CODE (temp) == MULT
2417 && XEXP (op1, 0) == XEXP (temp, 0)
2418 && GET_CODE (XEXP (temp, 1)) == NEG
2419 && XEXP (op1, 1) == XEXP (XEXP (temp, 1), 0))
2420 temp = NULL_RTX;
2421 if (temp)
2422 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), temp);
2424 if (GET_CODE (op1) == NEG)
2426 rtx temp = simplify_unary_operation (NEG, mode, op0, mode);
2427 /* If op0 is a MULT as well and simplify_unary_operation
2428 just moved the NEG to the second operand, simplify_gen_binary
2429 below could, through simplify_associative_operation, move
2430 the NEG around again and recurse endlessly. */
2431 if (temp
2432 && GET_CODE (op0) == MULT
2433 && GET_CODE (temp) == MULT
2434 && XEXP (op0, 0) == XEXP (temp, 0)
2435 && GET_CODE (XEXP (temp, 1)) == NEG
2436 && XEXP (op0, 1) == XEXP (XEXP (temp, 1), 0))
2437 temp = NULL_RTX;
2438 if (temp)
2439 return simplify_gen_binary (MULT, mode, temp, XEXP (op1, 0));
2442 /* Maybe simplify x * 0 to 0. The reduction is not valid if
2443 x is NaN, since x * 0 is then also NaN. Nor is it valid
2444 when the mode has signed zeros, since multiplying a negative
2445 number by 0 will give -0, not 0. */
2446 if (!HONOR_NANS (mode)
2447 && !HONOR_SIGNED_ZEROS (mode)
2448 && trueop1 == CONST0_RTX (mode)
2449 && ! side_effects_p (op0))
2450 return op1;
2452 /* In IEEE floating point, x*1 is not equivalent to x for
2453 signalling NaNs. */
2454 if (!HONOR_SNANS (mode)
2455 && trueop1 == CONST1_RTX (mode))
2456 return op0;
2458 /* Convert multiply by constant power of two into shift. */
2459 if (CONST_SCALAR_INT_P (trueop1))
2461 val = wi::exact_log2 (std::make_pair (trueop1, mode));
2462 if (val >= 0)
2463 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
2466 /* x*2 is x+x and x*(-1) is -x */
2467 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
2468 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
2469 && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
2470 && GET_MODE (op0) == mode)
2472 REAL_VALUE_TYPE d;
2473 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2475 if (REAL_VALUES_EQUAL (d, dconst2))
2476 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
2478 if (!HONOR_SNANS (mode)
2479 && REAL_VALUES_EQUAL (d, dconstm1))
2480 return simplify_gen_unary (NEG, mode, op0, mode);
2483 /* Optimize -x * -x as x * x. */
2484 if (FLOAT_MODE_P (mode)
2485 && GET_CODE (op0) == NEG
2486 && GET_CODE (op1) == NEG
2487 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2488 && !side_effects_p (XEXP (op0, 0)))
2489 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2491 /* Likewise, optimize abs(x) * abs(x) as x * x. */
2492 if (SCALAR_FLOAT_MODE_P (mode)
2493 && GET_CODE (op0) == ABS
2494 && GET_CODE (op1) == ABS
2495 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2496 && !side_effects_p (XEXP (op0, 0)))
2497 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2499 /* Reassociate multiplication, but for floating point MULTs
2500 only when the user specifies unsafe math optimizations. */
2501 if (! FLOAT_MODE_P (mode)
2502 || flag_unsafe_math_optimizations)
2504 tem = simplify_associative_operation (code, mode, op0, op1);
2505 if (tem)
2506 return tem;
2508 break;
2510 case IOR:
2511 if (trueop1 == CONST0_RTX (mode))
2512 return op0;
2513 if (INTEGRAL_MODE_P (mode)
2514 && trueop1 == CONSTM1_RTX (mode)
2515 && !side_effects_p (op0))
2516 return op1;
2517 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2518 return op0;
2519 /* A | (~A) -> -1 */
2520 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2521 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2522 && ! side_effects_p (op0)
2523 && SCALAR_INT_MODE_P (mode))
2524 return constm1_rtx;
2526 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
2527 if (CONST_INT_P (op1)
2528 && HWI_COMPUTABLE_MODE_P (mode)
2529 && (nonzero_bits (op0, mode) & ~UINTVAL (op1)) == 0
2530 && !side_effects_p (op0))
2531 return op1;
2533 /* Canonicalize (X & C1) | C2. */
2534 if (GET_CODE (op0) == AND
2535 && CONST_INT_P (trueop1)
2536 && CONST_INT_P (XEXP (op0, 1)))
2538 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
2539 HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
2540 HOST_WIDE_INT c2 = INTVAL (trueop1);
2542 /* If (C1&C2) == C1, then (X&C1)|C2 becomes X. */
2543 if ((c1 & c2) == c1
2544 && !side_effects_p (XEXP (op0, 0)))
2545 return trueop1;
2547 /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
2548 if (((c1|c2) & mask) == mask)
2549 return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);
2551 /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2. */
2552 if (((c1 & ~c2) & mask) != (c1 & mask))
2554 tem = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2555 gen_int_mode (c1 & ~c2, mode));
2556 return simplify_gen_binary (IOR, mode, tem, op1);
2560 /* Convert (A & B) | A to A. */
2561 if (GET_CODE (op0) == AND
2562 && (rtx_equal_p (XEXP (op0, 0), op1)
2563 || rtx_equal_p (XEXP (op0, 1), op1))
2564 && ! side_effects_p (XEXP (op0, 0))
2565 && ! side_effects_p (XEXP (op0, 1)))
2566 return op1;
2568 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2569 mode size to (rotate A CX). */
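/* Illustrative example (32-bit SImode):
   (ior (ashift x (const_int 24)) (lshiftrt x (const_int 8)))
   has 24 + 8 == 32, so it becomes (rotate x (const_int 24)).  */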
2571 if (GET_CODE (op1) == ASHIFT
2572 || GET_CODE (op1) == SUBREG)
2574 opleft = op1;
2575 opright = op0;
2577 else
2579 opright = op1;
2580 opleft = op0;
2583 if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
2584 && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
2585 && CONST_INT_P (XEXP (opleft, 1))
2586 && CONST_INT_P (XEXP (opright, 1))
2587 && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
2588 == GET_MODE_PRECISION (mode)))
2589 return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
2591 /* Same, but for ashift that has been "simplified" to a wider mode
2592 by simplify_shift_const. */
2594 if (GET_CODE (opleft) == SUBREG
2595 && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
2596 && GET_CODE (opright) == LSHIFTRT
2597 && GET_CODE (XEXP (opright, 0)) == SUBREG
2598 && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
2599 && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
2600 && (GET_MODE_SIZE (GET_MODE (opleft))
2601 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
2602 && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
2603 SUBREG_REG (XEXP (opright, 0)))
2604 && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
2605 && CONST_INT_P (XEXP (opright, 1))
2606 && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
2607 == GET_MODE_PRECISION (mode)))
2608 return gen_rtx_ROTATE (mode, XEXP (opright, 0),
2609 XEXP (SUBREG_REG (opleft), 1));
2611 /* If we have (ior (and X C1) C2), simplify this by making
2612 C1 as small as possible if C1 actually changes. */
2613 if (CONST_INT_P (op1)
2614 && (HWI_COMPUTABLE_MODE_P (mode)
2615 || INTVAL (op1) > 0)
2616 && GET_CODE (op0) == AND
2617 && CONST_INT_P (XEXP (op0, 1))
2618 && CONST_INT_P (op1)
2619 && (UINTVAL (XEXP (op0, 1)) & UINTVAL (op1)) != 0)
2621 rtx tmp = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2622 gen_int_mode (UINTVAL (XEXP (op0, 1))
2623 & ~UINTVAL (op1),
2624 mode));
2625 return simplify_gen_binary (IOR, mode, tmp, op1);
2628 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2629 a (sign_extend (plus ...)). Then check if OP1 is a CONST_INT and
2630 the PLUS does not affect any of the bits in OP1: then we can do
2631 the IOR as a PLUS and we can associate. This is valid if OP1
2632 can be safely shifted left C bits. */
2633 if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
2634 && GET_CODE (XEXP (op0, 0)) == PLUS
2635 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
2636 && CONST_INT_P (XEXP (op0, 1))
2637 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
2639 int count = INTVAL (XEXP (op0, 1));
2640 HOST_WIDE_INT mask = INTVAL (trueop1) << count;
2642 if (mask >> count == INTVAL (trueop1)
2643 && trunc_int_for_mode (mask, mode) == mask
2644 && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
2645 return simplify_gen_binary (ASHIFTRT, mode,
2646 plus_constant (mode, XEXP (op0, 0),
2647 mask),
2648 XEXP (op0, 1));
2651 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
2652 if (tem)
2653 return tem;
2655 tem = simplify_associative_operation (code, mode, op0, op1);
2656 if (tem)
2657 return tem;
2658 break;
2660 case XOR:
2661 if (trueop1 == CONST0_RTX (mode))
2662 return op0;
2663 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2664 return simplify_gen_unary (NOT, mode, op0, mode);
2665 if (rtx_equal_p (trueop0, trueop1)
2666 && ! side_effects_p (op0)
2667 && GET_MODE_CLASS (mode) != MODE_CC)
2668 return CONST0_RTX (mode);
2670 /* Canonicalize XOR of the most significant bit to PLUS. */
2671 if (CONST_SCALAR_INT_P (op1)
2672 && mode_signbit_p (mode, op1))
2673 return simplify_gen_binary (PLUS, mode, op0, op1);
2674 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2675 if (CONST_SCALAR_INT_P (op1)
2676 && GET_CODE (op0) == PLUS
2677 && CONST_SCALAR_INT_P (XEXP (op0, 1))
2678 && mode_signbit_p (mode, XEXP (op0, 1)))
2679 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2680 simplify_gen_binary (XOR, mode, op1,
2681 XEXP (op0, 1)));
2683 /* If we are XORing two things that have no bits in common,
2684 convert them into an IOR. This helps to detect rotation encoded
2685 using those methods and possibly other simplifications. */
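/* Illustrative example (assuming nonzero_bits can prove the operands
   disjoint): (xor (and x (const_int 255)) (ashift y (const_int 8)))
   has no nonzero bits in common, so it becomes
   (ior (and x (const_int 255)) (ashift y (const_int 8))).  */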
2687 if (HWI_COMPUTABLE_MODE_P (mode)
2688 && (nonzero_bits (op0, mode)
2689 & nonzero_bits (op1, mode)) == 0)
2690 return (simplify_gen_binary (IOR, mode, op0, op1));
2692 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2693 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2694 (NOT y). */
2696 int num_negated = 0;
2698 if (GET_CODE (op0) == NOT)
2699 num_negated++, op0 = XEXP (op0, 0);
2700 if (GET_CODE (op1) == NOT)
2701 num_negated++, op1 = XEXP (op1, 0);
2703 if (num_negated == 2)
2704 return simplify_gen_binary (XOR, mode, op0, op1);
2705 else if (num_negated == 1)
2706 return simplify_gen_unary (NOT, mode,
2707 simplify_gen_binary (XOR, mode, op0, op1),
2708 mode);
2711 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2712 correspond to a machine insn or result in further simplifications
2713 if B is a constant. */
2715 if (GET_CODE (op0) == AND
2716 && rtx_equal_p (XEXP (op0, 1), op1)
2717 && ! side_effects_p (op1))
2718 return simplify_gen_binary (AND, mode,
2719 simplify_gen_unary (NOT, mode,
2720 XEXP (op0, 0), mode),
2721 op1);
2723 else if (GET_CODE (op0) == AND
2724 && rtx_equal_p (XEXP (op0, 0), op1)
2725 && ! side_effects_p (op1))
2726 return simplify_gen_binary (AND, mode,
2727 simplify_gen_unary (NOT, mode,
2728 XEXP (op0, 1), mode),
2729 op1);
2731 /* Given (xor (ior (xor A B) C) D), where B, C and D are
2732 constants, simplify to (xor (ior A C) (B&~C)^D), canceling
2733 out bits inverted twice and not set by C. Similarly, given
2734 (xor (and (xor A B) C) D), simplify without inverting C in
2735 the xor operand: (xor (and A C) (B&C)^D). */
2737 else if ((GET_CODE (op0) == IOR || GET_CODE (op0) == AND)
2738 && GET_CODE (XEXP (op0, 0)) == XOR
2739 && CONST_INT_P (op1)
2740 && CONST_INT_P (XEXP (op0, 1))
2741 && CONST_INT_P (XEXP (XEXP (op0, 0), 1)))
2743 enum rtx_code op = GET_CODE (op0);
2744 rtx a = XEXP (XEXP (op0, 0), 0);
2745 rtx b = XEXP (XEXP (op0, 0), 1);
2746 rtx c = XEXP (op0, 1);
2747 rtx d = op1;
2748 HOST_WIDE_INT bval = INTVAL (b);
2749 HOST_WIDE_INT cval = INTVAL (c);
2750 HOST_WIDE_INT dval = INTVAL (d);
2751 HOST_WIDE_INT xcval;
2753 if (op == IOR)
2754 xcval = ~cval;
2755 else
2756 xcval = cval;
2758 return simplify_gen_binary (XOR, mode,
2759 simplify_gen_binary (op, mode, a, c),
2760 gen_int_mode ((bval & xcval) ^ dval,
2761 mode));
2764 /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
2765 we can transform like this:
2766 (A&B)^C == ~(A&B)&C | ~C&(A&B)
2767 == (~A|~B)&C | ~C&(A&B) * DeMorgan's Law
2768 == ~A&C | ~B&C | A&(~C&B) * Distribute and re-order
2769 Attempt a few simplifications when B and C are both constants. */
2770 if (GET_CODE (op0) == AND
2771 && CONST_INT_P (op1)
2772 && CONST_INT_P (XEXP (op0, 1)))
2774 rtx a = XEXP (op0, 0);
2775 rtx b = XEXP (op0, 1);
2776 rtx c = op1;
2777 HOST_WIDE_INT bval = INTVAL (b);
2778 HOST_WIDE_INT cval = INTVAL (c);
2780 /* Instead of computing ~A&C, we compute its negated value,
2781 A|~C. If that yields -1, ~A&C is zero, so we can
2782 optimize for sure. If it does not simplify, we still try
2783 to compute ~A&C below, but since that always allocates
2784 RTL, we don't try that before committing to returning a
2785 simplified expression. */
2786 rtx n_na_c = simplify_binary_operation (IOR, mode, a,
2787 GEN_INT (~cval));
2789 if ((~cval & bval) == 0)
2791 rtx na_c = NULL_RTX;
2792 if (n_na_c)
2793 na_c = simplify_gen_unary (NOT, mode, n_na_c, mode);
2794 else
2796 /* If ~A does not simplify, don't bother: we don't
2797 want to simplify 2 operations into 3, and if na_c
2798 were to simplify with na, n_na_c would have
2799 simplified as well. */
2800 rtx na = simplify_unary_operation (NOT, mode, a, mode);
2801 if (na)
2802 na_c = simplify_gen_binary (AND, mode, na, c);
2805 /* Try to simplify ~A&C | ~B&C. */
2806 if (na_c != NULL_RTX)
2807 return simplify_gen_binary (IOR, mode, na_c,
2808 gen_int_mode (~bval & cval, mode));
2810 else
2812 /* If ~A&C is zero, simplify A&(~C&B) | ~B&C. */
2813 if (n_na_c == CONSTM1_RTX (mode))
2815 rtx a_nc_b = simplify_gen_binary (AND, mode, a,
2816 gen_int_mode (~cval & bval,
2817 mode));
2818 return simplify_gen_binary (IOR, mode, a_nc_b,
2819 gen_int_mode (~bval & cval,
2820 mode));
2825 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2826 comparison if STORE_FLAG_VALUE is 1. */
2827 if (STORE_FLAG_VALUE == 1
2828 && trueop1 == const1_rtx
2829 && COMPARISON_P (op0)
2830 && (reversed = reversed_comparison (op0, mode)))
2831 return reversed;
2833 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2834 is (lt foo (const_int 0)), so we can perform the above
2835 simplification if STORE_FLAG_VALUE is 1. */
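/* Illustrative example (32-bit SImode, STORE_FLAG_VALUE == 1):
   (xor (lshiftrt x (const_int 31)) (const_int 1)) becomes
   (ge x (const_int 0)).  */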
2837 if (STORE_FLAG_VALUE == 1
2838 && trueop1 == const1_rtx
2839 && GET_CODE (op0) == LSHIFTRT
2840 && CONST_INT_P (XEXP (op0, 1))
2841 && INTVAL (XEXP (op0, 1)) == GET_MODE_PRECISION (mode) - 1)
2842 return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);
2844 /* (xor (comparison foo bar) (const_int sign-bit))
2845 when STORE_FLAG_VALUE is the sign bit. */
2846 if (val_signbit_p (mode, STORE_FLAG_VALUE)
2847 && trueop1 == const_true_rtx
2848 && COMPARISON_P (op0)
2849 && (reversed = reversed_comparison (op0, mode)))
2850 return reversed;
2852 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
2853 if (tem)
2854 return tem;
2856 tem = simplify_associative_operation (code, mode, op0, op1);
2857 if (tem)
2858 return tem;
2859 break;
2861 case AND:
2862 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2863 return trueop1;
2864 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2865 return op0;
2866 if (HWI_COMPUTABLE_MODE_P (mode))
2868 HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
2869 HOST_WIDE_INT nzop1;
2870 if (CONST_INT_P (trueop1))
2872 HOST_WIDE_INT val1 = INTVAL (trueop1);
2873 /* If we are turning off bits already known off in OP0, we need
2874 not do an AND. */
2875 if ((nzop0 & ~val1) == 0)
2876 return op0;
2878 nzop1 = nonzero_bits (trueop1, mode);
2879 /* If we are clearing all the nonzero bits, the result is zero. */
2880 if ((nzop1 & nzop0) == 0
2881 && !side_effects_p (op0) && !side_effects_p (op1))
2882 return CONST0_RTX (mode);
2884 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
2885 && GET_MODE_CLASS (mode) != MODE_CC)
2886 return op0;
2887 /* A & (~A) -> 0 */
2888 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2889 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2890 && ! side_effects_p (op0)
2891 && GET_MODE_CLASS (mode) != MODE_CC)
2892 return CONST0_RTX (mode);
2894 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
2895 there are no nonzero bits of C outside of X's mode. */
2896 if ((GET_CODE (op0) == SIGN_EXTEND
2897 || GET_CODE (op0) == ZERO_EXTEND)
2898 && CONST_INT_P (trueop1)
2899 && HWI_COMPUTABLE_MODE_P (mode)
2900 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
2901 & UINTVAL (trueop1)) == 0)
2903 machine_mode imode = GET_MODE (XEXP (op0, 0));
2904 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
2905 gen_int_mode (INTVAL (trueop1),
2906 imode));
2907 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
2910 /* Transform (and (truncate X) C) into (truncate (and X C)). This way
2911 we might be able to further simplify the AND with X and potentially
2912 remove the truncation altogether. */
2913 if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
2915 rtx x = XEXP (op0, 0);
2916 machine_mode xmode = GET_MODE (x);
2917 tem = simplify_gen_binary (AND, xmode, x,
2918 gen_int_mode (INTVAL (trueop1), xmode));
2919 return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
2922 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
2923 if (GET_CODE (op0) == IOR
2924 && CONST_INT_P (trueop1)
2925 && CONST_INT_P (XEXP (op0, 1)))
2927 HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
2928 return simplify_gen_binary (IOR, mode,
2929 simplify_gen_binary (AND, mode,
2930 XEXP (op0, 0), op1),
2931 gen_int_mode (tmp, mode));
2934 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
2935 insn (and may simplify more). */
2936 if (GET_CODE (op0) == XOR
2937 && rtx_equal_p (XEXP (op0, 0), op1)
2938 && ! side_effects_p (op1))
2939 return simplify_gen_binary (AND, mode,
2940 simplify_gen_unary (NOT, mode,
2941 XEXP (op0, 1), mode),
2942 op1);
2944 if (GET_CODE (op0) == XOR
2945 && rtx_equal_p (XEXP (op0, 1), op1)
2946 && ! side_effects_p (op1))
2947 return simplify_gen_binary (AND, mode,
2948 simplify_gen_unary (NOT, mode,
2949 XEXP (op0, 0), mode),
2950 op1);
2952 /* Similarly for (~(A ^ B)) & A. */
2953 if (GET_CODE (op0) == NOT
2954 && GET_CODE (XEXP (op0, 0)) == XOR
2955 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
2956 && ! side_effects_p (op1))
2957 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
2959 if (GET_CODE (op0) == NOT
2960 && GET_CODE (XEXP (op0, 0)) == XOR
2961 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
2962 && ! side_effects_p (op1))
2963 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
2965 /* Convert (A | B) & A to A. */
2966 if (GET_CODE (op0) == IOR
2967 && (rtx_equal_p (XEXP (op0, 0), op1)
2968 || rtx_equal_p (XEXP (op0, 1), op1))
2969 && ! side_effects_p (XEXP (op0, 0))
2970 && ! side_effects_p (XEXP (op0, 1)))
2971 return op1;
2973 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
2974 ((A & N) + B) & M -> (A + B) & M
2975 Similarly if (N & M) == 0,
2976 ((A | N) + B) & M -> (A + B) & M
2977 and for - instead of + and/or ^ instead of |.
2978 Also, if (N & M) == 0, then
2979 (A +- N) & M -> A & M. */
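/* Illustrative example: with M == 0xff and N == 0x100 (so N & M == 0),
   (and (plus (ior a (const_int 256)) b) (const_int 255)) becomes
   (and (plus a b) (const_int 255)).  */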
2980 if (CONST_INT_P (trueop1)
2981 && HWI_COMPUTABLE_MODE_P (mode)
2982 && ~UINTVAL (trueop1)
2983 && (UINTVAL (trueop1) & (UINTVAL (trueop1) + 1)) == 0
2984 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
2986 rtx pmop[2];
2987 int which;
2989 pmop[0] = XEXP (op0, 0);
2990 pmop[1] = XEXP (op0, 1);
2992 if (CONST_INT_P (pmop[1])
2993 && (UINTVAL (pmop[1]) & UINTVAL (trueop1)) == 0)
2994 return simplify_gen_binary (AND, mode, pmop[0], op1);
2996 for (which = 0; which < 2; which++)
2998 tem = pmop[which];
2999 switch (GET_CODE (tem))
3001 case AND:
3002 if (CONST_INT_P (XEXP (tem, 1))
3003 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1))
3004 == UINTVAL (trueop1))
3005 pmop[which] = XEXP (tem, 0);
3006 break;
3007 case IOR:
3008 case XOR:
3009 if (CONST_INT_P (XEXP (tem, 1))
3010 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1)) == 0)
3011 pmop[which] = XEXP (tem, 0);
3012 break;
3013 default:
3014 break;
3018 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
3020 tem = simplify_gen_binary (GET_CODE (op0), mode,
3021 pmop[0], pmop[1]);
3022 return simplify_gen_binary (code, mode, tem, op1);
3026 /* (and X (ior (not X) Y)) -> (and X Y) */
3027 if (GET_CODE (op1) == IOR
3028 && GET_CODE (XEXP (op1, 0)) == NOT
3029 && rtx_equal_p (op0, XEXP (XEXP (op1, 0), 0)))
3030 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));
3032 /* (and (ior (not X) Y) X) -> (and X Y) */
3033 if (GET_CODE (op0) == IOR
3034 && GET_CODE (XEXP (op0, 0)) == NOT
3035 && rtx_equal_p (op1, XEXP (XEXP (op0, 0), 0)))
3036 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));
3038 /* (and X (ior Y (not X))) -> (and X Y) */
3039 if (GET_CODE (op1) == IOR
3040 && GET_CODE (XEXP (op1, 1)) == NOT
3041 && rtx_equal_p (op0, XEXP (XEXP (op1, 1), 0)))
3042 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 0));
3044 /* (and (ior Y (not X)) X) -> (and X Y) */
3045 if (GET_CODE (op0) == IOR
3046 && GET_CODE (XEXP (op0, 1)) == NOT
3047 && rtx_equal_p (op1, XEXP (XEXP (op0, 1), 0)))
3048 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 0));
3050 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
3051 if (tem)
3052 return tem;
3054 tem = simplify_associative_operation (code, mode, op0, op1);
3055 if (tem)
3056 return tem;
3057 break;
3059 case UDIV:
3060 /* 0/x is 0 (or x&0 if x has side-effects). */
3061 if (trueop0 == CONST0_RTX (mode))
3063 if (side_effects_p (op1))
3064 return simplify_gen_binary (AND, mode, op1, trueop0);
3065 return trueop0;
3067 /* x/1 is x. */
3068 if (trueop1 == CONST1_RTX (mode))
3070 tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3071 if (tem)
3072 return tem;
3074 /* Convert divide by power of two into shift. */
3075 if (CONST_INT_P (trueop1)
3076 && (val = exact_log2 (UINTVAL (trueop1))) > 0)
3077 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
3078 break;
3080 case DIV:
3081 /* Handle floating point and integers separately. */
3082 if (SCALAR_FLOAT_MODE_P (mode))
3084 /* Maybe change 0.0 / x to 0.0. This transformation isn't
3085 safe for modes with NaNs, since 0.0 / 0.0 will then be
3086 NaN rather than 0.0. Nor is it safe for modes with signed
3087 zeros, since dividing 0 by a negative number gives -0.0. */
3088 if (trueop0 == CONST0_RTX (mode)
3089 && !HONOR_NANS (mode)
3090 && !HONOR_SIGNED_ZEROS (mode)
3091 && ! side_effects_p (op1))
3092 return op0;
3093 /* x/1.0 is x. */
3094 if (trueop1 == CONST1_RTX (mode)
3095 && !HONOR_SNANS (mode))
3096 return op0;
3098 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
3099 && trueop1 != CONST0_RTX (mode))
3101 REAL_VALUE_TYPE d;
3102 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
3104 /* x/-1.0 is -x. */
3105 if (REAL_VALUES_EQUAL (d, dconstm1)
3106 && !HONOR_SNANS (mode))
3107 return simplify_gen_unary (NEG, mode, op0, mode);
3109 /* Change FP division by a constant into multiplication.
3110 Only do this with -freciprocal-math. */
3111 if (flag_reciprocal_math
3112 && !REAL_VALUES_EQUAL (d, dconst0))
3114 REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
3115 tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
3116 return simplify_gen_binary (MULT, mode, op0, tem);
3120 else if (SCALAR_INT_MODE_P (mode))
3122 /* 0/x is 0 (or x&0 if x has side-effects). */
3123 if (trueop0 == CONST0_RTX (mode)
3124 && !cfun->can_throw_non_call_exceptions)
3126 if (side_effects_p (op1))
3127 return simplify_gen_binary (AND, mode, op1, trueop0);
3128 return trueop0;
3130 /* x/1 is x. */
3131 if (trueop1 == CONST1_RTX (mode))
3133 tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3134 if (tem)
3135 return tem;
3137 /* x/-1 is -x. */
3138 if (trueop1 == constm1_rtx)
3140 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3141 if (x)
3142 return simplify_gen_unary (NEG, mode, x, mode);
3145 break;
3147 case UMOD:
3148 /* 0%x is 0 (or x&0 if x has side-effects). */
3149 if (trueop0 == CONST0_RTX (mode))
3151 if (side_effects_p (op1))
3152 return simplify_gen_binary (AND, mode, op1, trueop0);
3153 return trueop0;
3155 /* x%1 is 0 (or x&0 if x has side-effects). */
3156 if (trueop1 == CONST1_RTX (mode))
3158 if (side_effects_p (op0))
3159 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3160 return CONST0_RTX (mode);
3162 /* Implement modulus by power of two as AND. */
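/* For instance: (umod x (const_int 8)) becomes
   (and x (const_int 7)).  */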
3163 if (CONST_INT_P (trueop1)
3164 && exact_log2 (UINTVAL (trueop1)) > 0)
3165 return simplify_gen_binary (AND, mode, op0,
3166 gen_int_mode (INTVAL (op1) - 1, mode));
3167 break;
3169 case MOD:
3170 /* 0%x is 0 (or x&0 if x has side-effects). */
3171 if (trueop0 == CONST0_RTX (mode))
3173 if (side_effects_p (op1))
3174 return simplify_gen_binary (AND, mode, op1, trueop0);
3175 return trueop0;
3177 /* x%1 and x%-1 is 0 (or x&0 if x has side-effects). */
3178 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
3180 if (side_effects_p (op0))
3181 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3182 return CONST0_RTX (mode);
3184 break;
3186 case ROTATERT:
3187 case ROTATE:
3188 /* Canonicalize rotates by constant amount. If op1 is bitsize / 2,
3189 prefer left rotation; if op1 is from bitsize / 2 + 1 to
3190 bitsize - 1, use the other direction of rotate with a
3191 1 .. bitsize / 2 - 1 amount instead. */
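/* Illustrative example (32-bit SImode, assuming the target provides
   both rotate and rotatert): (rotate x (const_int 30)) is
   canonicalized to (rotatert x (const_int 2)).  */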
3192 #if defined(HAVE_rotate) && defined(HAVE_rotatert)
3193 if (CONST_INT_P (trueop1)
3194 && IN_RANGE (INTVAL (trueop1),
3195 GET_MODE_PRECISION (mode) / 2 + (code == ROTATE),
3196 GET_MODE_PRECISION (mode) - 1))
3197 return simplify_gen_binary (code == ROTATE ? ROTATERT : ROTATE,
3198 mode, op0, GEN_INT (GET_MODE_PRECISION (mode)
3199 - INTVAL (trueop1)));
3200 #endif
3201 /* FALLTHRU */
3202 case ASHIFTRT:
3203 if (trueop1 == CONST0_RTX (mode))
3204 return op0;
3205 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3206 return op0;
3207 /* Rotating ~0 always results in ~0. */
3208 if (CONST_INT_P (trueop0) && width <= HOST_BITS_PER_WIDE_INT
3209 && UINTVAL (trueop0) == GET_MODE_MASK (mode)
3210 && ! side_effects_p (op1))
3211 return op0;
3212 /* Given:
3213 scalar modes M1, M2
3214 scalar constants c1, c2
3215 size (M2) > size (M1)
3216 c1 == size (M2) - size (M1)
3217 optimize:
3218 (ashiftrt:M1 (subreg:M1 (lshiftrt:M2 (reg:M2) (const_int <c1>))
3219 <low_part>)
3220 (const_int <c2>))
3222 (subreg:M1 (ashiftrt:M2 (reg:M2) (const_int <c1 + c2>))
3223 <low_part>). */
3224 if (code == ASHIFTRT
3225 && !VECTOR_MODE_P (mode)
3226 && SUBREG_P (op0)
3227 && CONST_INT_P (op1)
3228 && GET_CODE (SUBREG_REG (op0)) == LSHIFTRT
3229 && !VECTOR_MODE_P (GET_MODE (SUBREG_REG (op0)))
3230 && CONST_INT_P (XEXP (SUBREG_REG (op0), 1))
3231 && (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0)))
3232 > GET_MODE_BITSIZE (mode))
3233 && (INTVAL (XEXP (SUBREG_REG (op0), 1))
3234 == (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0)))
3235 - GET_MODE_BITSIZE (mode)))
3236 && subreg_lowpart_p (op0))
3238 rtx tmp = GEN_INT (INTVAL (XEXP (SUBREG_REG (op0), 1))
3239 + INTVAL (op1));
3240 machine_mode inner_mode = GET_MODE (SUBREG_REG (op0));
3241 tmp = simplify_gen_binary (ASHIFTRT,
3242 GET_MODE (SUBREG_REG (op0)),
3243 XEXP (SUBREG_REG (op0), 0),
3244 tmp);
3245 return lowpart_subreg (mode, tmp, inner_mode);
3247 canonicalize_shift:
3248 if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
3250 val = INTVAL (op1) & (GET_MODE_PRECISION (mode) - 1);
3251 if (val != INTVAL (op1))
3252 return simplify_gen_binary (code, mode, op0, GEN_INT (val));
3254 break;
3256 case ASHIFT:
3257 case SS_ASHIFT:
3258 case US_ASHIFT:
3259 if (trueop1 == CONST0_RTX (mode))
3260 return op0;
3261 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3262 return op0;
3263 goto canonicalize_shift;
3265 case LSHIFTRT:
3266 if (trueop1 == CONST0_RTX (mode))
3267 return op0;
3268 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3269 return op0;
3270 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
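/* Illustrative example (32-bit SImode, CLZ_DEFINED_VALUE_AT_ZERO
   yielding 32, STORE_FLAG_VALUE == 1):
   (lshiftrt (clz:SI x) (const_int 5)) becomes (eq x (const_int 0)),
   since the clz result is 32 (i.e. 1 << 5) only when x is zero.  */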
3271 if (GET_CODE (op0) == CLZ
3272 && CONST_INT_P (trueop1)
3273 && STORE_FLAG_VALUE == 1
3274 && INTVAL (trueop1) < (HOST_WIDE_INT)width)
3276 machine_mode imode = GET_MODE (XEXP (op0, 0));
3277 unsigned HOST_WIDE_INT zero_val = 0;
3279 if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
3280 && zero_val == GET_MODE_PRECISION (imode)
3281 && INTVAL (trueop1) == exact_log2 (zero_val))
3282 return simplify_gen_relational (EQ, mode, imode,
3283 XEXP (op0, 0), const0_rtx);
3285 goto canonicalize_shift;
3287 case SMIN:
3288 if (width <= HOST_BITS_PER_WIDE_INT
3289 && mode_signbit_p (mode, trueop1)
3290 && ! side_effects_p (op0))
3291 return op1;
3292 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3293 return op0;
3294 tem = simplify_associative_operation (code, mode, op0, op1);
3295 if (tem)
3296 return tem;
3297 break;
3299 case SMAX:
3300 if (width <= HOST_BITS_PER_WIDE_INT
3301 && CONST_INT_P (trueop1)
3302 && (UINTVAL (trueop1) == GET_MODE_MASK (mode) >> 1)
3303 && ! side_effects_p (op0))
3304 return op1;
3305 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3306 return op0;
3307 tem = simplify_associative_operation (code, mode, op0, op1);
3308 if (tem)
3309 return tem;
3310 break;
3312 case UMIN:
3313 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
3314 return op1;
3315 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3316 return op0;
3317 tem = simplify_associative_operation (code, mode, op0, op1);
3318 if (tem)
3319 return tem;
3320 break;
3322 case UMAX:
3323 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
3324 return op1;
3325 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3326 return op0;
3327 tem = simplify_associative_operation (code, mode, op0, op1);
3328 if (tem)
3329 return tem;
3330 break;
3332 case SS_PLUS:
3333 case US_PLUS:
3334 case SS_MINUS:
3335 case US_MINUS:
3336 case SS_MULT:
3337 case US_MULT:
3338 case SS_DIV:
3339 case US_DIV:
3340 /* ??? There are simplifications that can be done. */
3341 return 0;
3343 case VEC_SELECT:
3344 if (!VECTOR_MODE_P (mode))
3346 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3347 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
3348 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3349 gcc_assert (XVECLEN (trueop1, 0) == 1);
3350 gcc_assert (CONST_INT_P (XVECEXP (trueop1, 0, 0)));
3352 if (GET_CODE (trueop0) == CONST_VECTOR)
3353 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
3354 (trueop1, 0, 0)));
3356 /* Extract a scalar element from a nested VEC_SELECT expression
3357 (with an optional nested VEC_CONCAT expression). Some targets
3358 (i386) extract a scalar element from a vector using a chain of
3359 nested VEC_SELECT expressions. When the input operand is a memory
3360 operand, this operation can be simplified to a simple scalar
3361 load from an offsetted memory address. */
3362 if (GET_CODE (trueop0) == VEC_SELECT)
3364 rtx op0 = XEXP (trueop0, 0);
3365 rtx op1 = XEXP (trueop0, 1);
3367 machine_mode opmode = GET_MODE (op0);
3368 int elt_size = GET_MODE_UNIT_SIZE (opmode);
3369 int n_elts = GET_MODE_SIZE (opmode) / elt_size;
3371 int i = INTVAL (XVECEXP (trueop1, 0, 0));
3372 int elem;
3374 rtvec vec;
3375 rtx tmp_op, tmp;
3377 gcc_assert (GET_CODE (op1) == PARALLEL);
3378 gcc_assert (i < n_elts);
3381 /* Select the element pointed to by the nested selector. */
3381 elem = INTVAL (XVECEXP (op1, 0, i));
3383 /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */
3384 if (GET_CODE (op0) == VEC_CONCAT)
3386 rtx op00 = XEXP (op0, 0);
3387 rtx op01 = XEXP (op0, 1);
3389 machine_mode mode00, mode01;
3390 int n_elts00, n_elts01;
3392 mode00 = GET_MODE (op00);
3393 mode01 = GET_MODE (op01);
3395 /* Find out the number of elements of each operand. */
3396 if (VECTOR_MODE_P (mode00))
3398 elt_size = GET_MODE_UNIT_SIZE (mode00);
3399 n_elts00 = GET_MODE_SIZE (mode00) / elt_size;
3401 else
3402 n_elts00 = 1;
3404 if (VECTOR_MODE_P (mode01))
3406 elt_size = GET_MODE_UNIT_SIZE (mode01);
3407 n_elts01 = GET_MODE_SIZE (mode01) / elt_size;
3409 else
3410 n_elts01 = 1;
3412 gcc_assert (n_elts == n_elts00 + n_elts01);
3414 /* Select the correct operand of the VEC_CONCAT
3415 and adjust the selector. */
3416 if (elem < n_elts01)
3417 tmp_op = op00;
3418 else
3420 tmp_op = op01;
3421 elem -= n_elts00;
3424 else
3425 tmp_op = op0;
3427 vec = rtvec_alloc (1);
3428 RTVEC_ELT (vec, 0) = GEN_INT (elem);
3430 tmp = gen_rtx_fmt_ee (code, mode,
3431 tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
3432 return tmp;
3434 if (GET_CODE (trueop0) == VEC_DUPLICATE
3435 && GET_MODE (XEXP (trueop0, 0)) == mode)
3436 return XEXP (trueop0, 0);
3438 else
3440 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3441 gcc_assert (GET_MODE_INNER (mode)
3442 == GET_MODE_INNER (GET_MODE (trueop0)));
3443 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3445 if (GET_CODE (trueop0) == CONST_VECTOR)
3447 int elt_size = GET_MODE_UNIT_SIZE (mode);
3448 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3449 rtvec v = rtvec_alloc (n_elts);
3450 unsigned int i;
3452 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
3453 for (i = 0; i < n_elts; i++)
3455 rtx x = XVECEXP (trueop1, 0, i);
3457 gcc_assert (CONST_INT_P (x));
3458 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
3459 INTVAL (x));
3462 return gen_rtx_CONST_VECTOR (mode, v);
3465 /* Recognize the identity. */
3466 if (GET_MODE (trueop0) == mode)
3468 bool maybe_ident = true;
3469 for (int i = 0; i < XVECLEN (trueop1, 0); i++)
3471 rtx j = XVECEXP (trueop1, 0, i);
3472 if (!CONST_INT_P (j) || INTVAL (j) != i)
3474 maybe_ident = false;
3475 break;
3478 if (maybe_ident)
3479 return trueop0;
3482 /* If we build {a,b} then permute it, build the result directly. */
3483 if (XVECLEN (trueop1, 0) == 2
3484 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3485 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3486 && GET_CODE (trueop0) == VEC_CONCAT
3487 && GET_CODE (XEXP (trueop0, 0)) == VEC_CONCAT
3488 && GET_MODE (XEXP (trueop0, 0)) == mode
3489 && GET_CODE (XEXP (trueop0, 1)) == VEC_CONCAT
3490 && GET_MODE (XEXP (trueop0, 1)) == mode)
3492 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3493 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3494 rtx subop0, subop1;
3496 gcc_assert (i0 < 4 && i1 < 4);
3497 subop0 = XEXP (XEXP (trueop0, i0 / 2), i0 % 2);
3498 subop1 = XEXP (XEXP (trueop0, i1 / 2), i1 % 2);
3500 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3503 if (XVECLEN (trueop1, 0) == 2
3504 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3505 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3506 && GET_CODE (trueop0) == VEC_CONCAT
3507 && GET_MODE (trueop0) == mode)
3509 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3510 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3511 rtx subop0, subop1;
3513 gcc_assert (i0 < 2 && i1 < 2);
3514 subop0 = XEXP (trueop0, i0);
3515 subop1 = XEXP (trueop0, i1);
3517 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3520 /* If we select one half of a vec_concat, return that. */
3521 if (GET_CODE (trueop0) == VEC_CONCAT
3522 && CONST_INT_P (XVECEXP (trueop1, 0, 0)))
3524 rtx subop0 = XEXP (trueop0, 0);
3525 rtx subop1 = XEXP (trueop0, 1);
3526 machine_mode mode0 = GET_MODE (subop0);
3527 machine_mode mode1 = GET_MODE (subop1);
3528 int li = GET_MODE_UNIT_SIZE (mode0);
3529 int l0 = GET_MODE_SIZE (mode0) / li;
3530 int l1 = GET_MODE_SIZE (mode1) / li;
3531 int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3532 if (i0 == 0 && !side_effects_p (op1) && mode == mode0)
3534 bool success = true;
3535 for (int i = 1; i < l0; ++i)
3537 rtx j = XVECEXP (trueop1, 0, i);
3538 if (!CONST_INT_P (j) || INTVAL (j) != i)
3540 success = false;
3541 break;
3544 if (success)
3545 return subop0;
3547 if (i0 == l0 && !side_effects_p (op0) && mode == mode1)
3549 bool success = true;
3550 for (int i = 1; i < l1; ++i)
3552 rtx j = XVECEXP (trueop1, 0, i);
3553 if (!CONST_INT_P (j) || INTVAL (j) != i0 + i)
3555 success = false;
3556 break;
3559 if (success)
3560 return subop1;
3565 if (XVECLEN (trueop1, 0) == 1
3566 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3567 && GET_CODE (trueop0) == VEC_CONCAT)
3569 rtx vec = trueop0;
3570 int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
3572 /* Try to find the element in the VEC_CONCAT. */
3573 while (GET_MODE (vec) != mode
3574 && GET_CODE (vec) == VEC_CONCAT)
3576 HOST_WIDE_INT vec_size;
3578 if (CONST_INT_P (XEXP (vec, 0)))
3580 /* vec_concat of two const_ints doesn't make sense with
3581 respect to modes. */
3582 if (CONST_INT_P (XEXP (vec, 1)))
3583 return 0;
3585 vec_size = GET_MODE_SIZE (GET_MODE (trueop0))
3586 - GET_MODE_SIZE (GET_MODE (XEXP (vec, 1)));
3588 else
3589 vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
3591 if (offset < vec_size)
3592 vec = XEXP (vec, 0);
3593 else
3595 offset -= vec_size;
3596 vec = XEXP (vec, 1);
3598 vec = avoid_constant_pool_reference (vec);
3601 if (GET_MODE (vec) == mode)
3602 return vec;
3605 /* If we select elements in a vec_merge that all come from the same
3606 operand, select from that operand directly. */
3607 if (GET_CODE (op0) == VEC_MERGE)
3609 rtx trueop02 = avoid_constant_pool_reference (XEXP (op0, 2));
3610 if (CONST_INT_P (trueop02))
3612 unsigned HOST_WIDE_INT sel = UINTVAL (trueop02);
3613 bool all_operand0 = true;
3614 bool all_operand1 = true;
3615 for (int i = 0; i < XVECLEN (trueop1, 0); i++)
3617 rtx j = XVECEXP (trueop1, 0, i);
3618 if (sel & (1 << UINTVAL (j)))
3619 all_operand1 = false;
3620 else
3621 all_operand0 = false;
3623 if (all_operand0 && !side_effects_p (XEXP (op0, 1)))
3624 return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 0), op1);
3625 if (all_operand1 && !side_effects_p (XEXP (op0, 0)))
3626 return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 1), op1);
3630 /* If we have two nested selects that are inverses of each
3631 other, replace them with the source operand. */
3632 if (GET_CODE (trueop0) == VEC_SELECT
3633 && GET_MODE (XEXP (trueop0, 0)) == mode)
3635 rtx op0_subop1 = XEXP (trueop0, 1);
3636 gcc_assert (GET_CODE (op0_subop1) == PARALLEL);
3637 gcc_assert (XVECLEN (trueop1, 0) == GET_MODE_NUNITS (mode));
3639 /* Apply the outer ordering vector to the inner one. (The inner
3640 ordering vector is expressly permitted to be of a different
3641 length than the outer one.) If the result is { 0, 1, ..., n-1 }
3642 then the two VEC_SELECTs cancel. */
3643 for (int i = 0; i < XVECLEN (trueop1, 0); ++i)
3645 rtx x = XVECEXP (trueop1, 0, i);
3646 if (!CONST_INT_P (x))
3647 return 0;
3648 rtx y = XVECEXP (op0_subop1, 0, INTVAL (x));
3649 if (!CONST_INT_P (y) || i != INTVAL (y))
3650 return 0;
3652 return XEXP (trueop0, 0);
3655 return 0;
3656 case VEC_CONCAT:
3658 machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
3659 ? GET_MODE (trueop0)
3660 : GET_MODE_INNER (mode));
3661 machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
3662 ? GET_MODE (trueop1)
3663 : GET_MODE_INNER (mode));
3665 gcc_assert (VECTOR_MODE_P (mode));
3666 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
3667 == GET_MODE_SIZE (mode));
3669 if (VECTOR_MODE_P (op0_mode))
3670 gcc_assert (GET_MODE_INNER (mode)
3671 == GET_MODE_INNER (op0_mode));
3672 else
3673 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
3675 if (VECTOR_MODE_P (op1_mode))
3676 gcc_assert (GET_MODE_INNER (mode)
3677 == GET_MODE_INNER (op1_mode));
3678 else
3679 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
3681 if ((GET_CODE (trueop0) == CONST_VECTOR
3682 || CONST_SCALAR_INT_P (trueop0)
3683 || CONST_DOUBLE_AS_FLOAT_P (trueop0))
3684 && (GET_CODE (trueop1) == CONST_VECTOR
3685 || CONST_SCALAR_INT_P (trueop1)
3686 || CONST_DOUBLE_AS_FLOAT_P (trueop1)))
3688 int elt_size = GET_MODE_UNIT_SIZE (mode);
3689 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3690 rtvec v = rtvec_alloc (n_elts);
3691 unsigned int i;
3692 unsigned in_n_elts = 1;
3694 if (VECTOR_MODE_P (op0_mode))
3695 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
3696 for (i = 0; i < n_elts; i++)
3698 if (i < in_n_elts)
3700 if (!VECTOR_MODE_P (op0_mode))
3701 RTVEC_ELT (v, i) = trueop0;
3702 else
3703 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
3705 else
3707 if (!VECTOR_MODE_P (op1_mode))
3708 RTVEC_ELT (v, i) = trueop1;
3709 else
3710 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
3711 i - in_n_elts);
3715 return gen_rtx_CONST_VECTOR (mode, v);
3718 /* Try to merge two VEC_SELECTs from the same vector into a single one.
3719 Restrict the transformation to avoid generating a VEC_SELECT with a
3720 mode unrelated to its operand. */
3721 if (GET_CODE (trueop0) == VEC_SELECT
3722 && GET_CODE (trueop1) == VEC_SELECT
3723 && rtx_equal_p (XEXP (trueop0, 0), XEXP (trueop1, 0))
3724 && GET_MODE (XEXP (trueop0, 0)) == mode)
3726 rtx par0 = XEXP (trueop0, 1);
3727 rtx par1 = XEXP (trueop1, 1);
3728 int len0 = XVECLEN (par0, 0);
3729 int len1 = XVECLEN (par1, 0);
3730 rtvec vec = rtvec_alloc (len0 + len1);
3731 for (int i = 0; i < len0; i++)
3732 RTVEC_ELT (vec, i) = XVECEXP (par0, 0, i);
3733 for (int i = 0; i < len1; i++)
3734 RTVEC_ELT (vec, len0 + i) = XVECEXP (par1, 0, i);
3735 return simplify_gen_binary (VEC_SELECT, mode, XEXP (trueop0, 0),
3736 gen_rtx_PARALLEL (VOIDmode, vec));
3739 return 0;
3741 default:
3742 gcc_unreachable ();
3745 return 0;
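/* Illustrative sketch (plain C, not part of the GCC sources): the
   VEC_CONCAT case above merges (vec_concat (vec_select a par0)
   (vec_select a par1)) into a single VEC_SELECT by concatenating the
   two index lists.  The helper names and array-based "vectors" below
   are assumptions made only for this example.  */
static void
vec_select_c (const int *src, const int *sel, int n, int *dst)
{
  for (int i = 0; i < n; i++)
    dst[i] = src[sel[i]];                 /* one element per selector entry */
}

static void
merged_select_c (const int *a, const int *par0, int n0,
                 const int *par1, int n1, int *dst)
{
  int merged[32];                         /* assumes n0 + n1 <= 32 */
  for (int i = 0; i < n0; i++)
    merged[i] = par0[i];                  /* copy the first PARALLEL */
  for (int i = 0; i < n1; i++)
    merged[n0 + i] = par1[i];             /* append the second PARALLEL */
  vec_select_c (a, merged, n0 + n1, dst); /* one select from the shared source */
}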
3749 simplify_const_binary_operation (enum rtx_code code, machine_mode mode,
3750 rtx op0, rtx op1)
3752 unsigned int width = GET_MODE_PRECISION (mode);
3754 if (VECTOR_MODE_P (mode)
3755 && code != VEC_CONCAT
3756 && GET_CODE (op0) == CONST_VECTOR
3757 && GET_CODE (op1) == CONST_VECTOR)
3759 unsigned n_elts = GET_MODE_NUNITS (mode);
3760 machine_mode op0mode = GET_MODE (op0);
3761 unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
3762 machine_mode op1mode = GET_MODE (op1);
3763 unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
3764 rtvec v = rtvec_alloc (n_elts);
3765 unsigned int i;
3767 gcc_assert (op0_n_elts == n_elts);
3768 gcc_assert (op1_n_elts == n_elts);
3769 for (i = 0; i < n_elts; i++)
3771 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
3772 CONST_VECTOR_ELT (op0, i),
3773 CONST_VECTOR_ELT (op1, i));
3774 if (!x)
3775 return 0;
3776 RTVEC_ELT (v, i) = x;
3779 return gen_rtx_CONST_VECTOR (mode, v);
3782 if (VECTOR_MODE_P (mode)
3783 && code == VEC_CONCAT
3784 && (CONST_SCALAR_INT_P (op0)
3785 || GET_CODE (op0) == CONST_FIXED
3786 || CONST_DOUBLE_AS_FLOAT_P (op0))
3787 && (CONST_SCALAR_INT_P (op1)
3788 || CONST_DOUBLE_AS_FLOAT_P (op1)
3789 || GET_CODE (op1) == CONST_FIXED))
3791 unsigned n_elts = GET_MODE_NUNITS (mode);
3792 rtvec v = rtvec_alloc (n_elts);
3794 gcc_assert (n_elts >= 2);
3795 if (n_elts == 2)
3797 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
3798 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
3800 RTVEC_ELT (v, 0) = op0;
3801 RTVEC_ELT (v, 1) = op1;
3803 else
3805 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
3806 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
3807 unsigned i;
3809 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
3810 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
3811 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
3813 for (i = 0; i < op0_n_elts; ++i)
3814 RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
3815 for (i = 0; i < op1_n_elts; ++i)
3816 RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
3819 return gen_rtx_CONST_VECTOR (mode, v);
3822 if (SCALAR_FLOAT_MODE_P (mode)
3823 && CONST_DOUBLE_AS_FLOAT_P (op0)
3824 && CONST_DOUBLE_AS_FLOAT_P (op1)
3825 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
3827 if (code == AND
3828 || code == IOR
3829 || code == XOR)
3831 long tmp0[4];
3832 long tmp1[4];
3833 REAL_VALUE_TYPE r;
3834 int i;
3836 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
3837 GET_MODE (op0));
3838 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
3839 GET_MODE (op1));
3840 for (i = 0; i < 4; i++)
3842 switch (code)
3844 case AND:
3845 tmp0[i] &= tmp1[i];
3846 break;
3847 case IOR:
3848 tmp0[i] |= tmp1[i];
3849 break;
3850 case XOR:
3851 tmp0[i] ^= tmp1[i];
3852 break;
3853 default:
3854 gcc_unreachable ();
3857 real_from_target (&r, tmp0, mode);
3858 return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
3860 else
3862 REAL_VALUE_TYPE f0, f1, value, result;
3863 bool inexact;
3865 REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
3866 REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
3867 real_convert (&f0, mode, &f0);
3868 real_convert (&f1, mode, &f1);
3870 if (HONOR_SNANS (mode)
3871 && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
3872 return 0;
3874 if (code == DIV
3875 && REAL_VALUES_EQUAL (f1, dconst0)
3876 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
3877 return 0;
3879 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3880 && flag_trapping_math
3881 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
3883 int s0 = REAL_VALUE_NEGATIVE (f0);
3884 int s1 = REAL_VALUE_NEGATIVE (f1);
3886 switch (code)
3888 case PLUS:
3889 /* Inf + -Inf = NaN plus exception. */
3890 if (s0 != s1)
3891 return 0;
3892 break;
3893 case MINUS:
3894 /* Inf - Inf = NaN plus exception. */
3895 if (s0 == s1)
3896 return 0;
3897 break;
3898 case DIV:
3899 /* Inf / Inf = NaN plus exception. */
3900 return 0;
3901 default:
3902 break;
3906 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3907 && flag_trapping_math
3908 && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
3909 || (REAL_VALUE_ISINF (f1)
3910 && REAL_VALUES_EQUAL (f0, dconst0))))
3911 /* Inf * 0 = NaN plus exception. */
3912 return 0;
3914 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
3915 &f0, &f1);
3916 real_convert (&result, mode, &value);
3918 /* Don't constant fold this floating point operation if
3919 the result has overflowed and flag_trapping_math is set. */
3921 if (flag_trapping_math
3922 && MODE_HAS_INFINITIES (mode)
3923 && REAL_VALUE_ISINF (result)
3924 && !REAL_VALUE_ISINF (f0)
3925 && !REAL_VALUE_ISINF (f1))
3926 /* Overflow plus exception. */
3927 return 0;
3929 /* Don't constant fold this floating point operation if the
3930 result may depend upon the run-time rounding mode and
3931 flag_rounding_math is set, or if GCC's software emulation
3932 is unable to accurately represent the result. */
3934 if ((flag_rounding_math
3935 || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
3936 && (inexact || !real_identical (&result, &value)))
3937 return NULL_RTX;
3939 return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
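/* Illustrative sketch (plain C, not part of the GCC sources): the
   AND/IOR/XOR branch above operates on the target bit image of the
   floating constants, which is what real_to_target/real_from_target
   provide.  The union-based helper below shows the same idea for a
   64-bit IEEE double; the function name is an assumption made for
   this example only.  */
static double
fold_float_ior (double a, double b)
{
  union { double d; unsigned long long u; } ua, ub, ur;
  ua.d = a;
  ub.d = b;
  ur.u = ua.u | ub.u;      /* IOR applied to the raw bit pattern */
  return ur.d;             /* e.g. fold_float_ior (1.0, -0.0) == -1.0 */
}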
3943 /* We can fold some multi-word operations. */
3944 if ((GET_MODE_CLASS (mode) == MODE_INT
3945 || GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
3946 && CONST_SCALAR_INT_P (op0)
3947 && CONST_SCALAR_INT_P (op1))
3949 wide_int result;
3950 bool overflow;
3951 rtx_mode_t pop0 = std::make_pair (op0, mode);
3952 rtx_mode_t pop1 = std::make_pair (op1, mode);
3954 #if TARGET_SUPPORTS_WIDE_INT == 0
3955 /* This assert keeps the simplification from producing a result
3956 that cannot be represented in a CONST_DOUBLE but a lot of
3957 upstream callers expect that this function never fails to
3958 simplify something, so if you added this to the test
3959 above, the code would die later anyway. If this assert
3960 happens, you just need to make the port support wide int. */
3961 gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT);
3962 #endif
3963 switch (code)
3965 case MINUS:
3966 result = wi::sub (pop0, pop1);
3967 break;
3969 case PLUS:
3970 result = wi::add (pop0, pop1);
3971 break;
3973 case MULT:
3974 result = wi::mul (pop0, pop1);
3975 break;
3977 case DIV:
3978 result = wi::div_trunc (pop0, pop1, SIGNED, &overflow);
3979 if (overflow)
3980 return NULL_RTX;
3981 break;
3983 case MOD:
3984 result = wi::mod_trunc (pop0, pop1, SIGNED, &overflow);
3985 if (overflow)
3986 return NULL_RTX;
3987 break;
3989 case UDIV:
3990 result = wi::div_trunc (pop0, pop1, UNSIGNED, &overflow);
3991 if (overflow)
3992 return NULL_RTX;
3993 break;
3995 case UMOD:
3996 result = wi::mod_trunc (pop0, pop1, UNSIGNED, &overflow);
3997 if (overflow)
3998 return NULL_RTX;
3999 break;
4001 case AND:
4002 result = wi::bit_and (pop0, pop1);
4003 break;
4005 case IOR:
4006 result = wi::bit_or (pop0, pop1);
4007 break;
4009 case XOR:
4010 result = wi::bit_xor (pop0, pop1);
4011 break;
4013 case SMIN:
4014 result = wi::smin (pop0, pop1);
4015 break;
4017 case SMAX:
4018 result = wi::smax (pop0, pop1);
4019 break;
4021 case UMIN:
4022 result = wi::umin (pop0, pop1);
4023 break;
4025 case UMAX:
4026 result = wi::umax (pop0, pop1);
4027 break;
4029 case LSHIFTRT:
4030 case ASHIFTRT:
4031 case ASHIFT:
4033 wide_int wop1 = pop1;
4034 if (SHIFT_COUNT_TRUNCATED)
4035 wop1 = wi::umod_trunc (wop1, width);
4036 else if (wi::geu_p (wop1, width))
4037 return NULL_RTX;
4039 switch (code)
4041 case LSHIFTRT:
4042 result = wi::lrshift (pop0, wop1);
4043 break;
4045 case ASHIFTRT:
4046 result = wi::arshift (pop0, wop1);
4047 break;
4049 case ASHIFT:
4050 result = wi::lshift (pop0, wop1);
4051 break;
4053 default:
4054 gcc_unreachable ();
4056 break;
4058 case ROTATE:
4059 case ROTATERT:
4061 if (wi::neg_p (pop1))
4062 return NULL_RTX;
4064 switch (code)
4066 case ROTATE:
4067 result = wi::lrotate (pop0, pop1);
4068 break;
4070 case ROTATERT:
4071 result = wi::rrotate (pop0, pop1);
4072 break;
4074 default:
4075 gcc_unreachable ();
4077 break;
4079 default:
4080 return NULL_RTX;
4082 return immed_wide_int_const (result, mode);
4085 return NULL_RTX;
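/* Illustrative sketch (plain C, not part of the GCC sources): the
   shift cases above fold a constant shift only when the count is
   usable; with SHIFT_COUNT_TRUNCATED the count is reduced modulo the
   precision, otherwise an out-of-range count makes the fold fail.
   The 32-bit helper below mirrors that rule; its name and the fixed
   width are assumptions made for this example only.  */
static int
fold_lshift_u32 (unsigned int x, unsigned int count,
                 int shift_count_truncated, unsigned int *res)
{
  const unsigned int width = 32;
  if (shift_count_truncated)
    count %= width;                /* wop1 = wi::umod_trunc (wop1, width) */
  else if (count >= width)
    return 0;                      /* corresponds to returning NULL_RTX */
  *res = x << count;               /* wi::lshift (pop0, wop1) */
  return 1;
}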
4090 /* Return a positive integer if X should sort after Y. The value
4091 returned is 1 if and only if X and Y are both regs. */
4093 static int
4094 simplify_plus_minus_op_data_cmp (rtx x, rtx y)
4096 int result;
4098 result = (commutative_operand_precedence (y)
4099 - commutative_operand_precedence (x));
4100 if (result)
4101 return result + result;
4103 /* Group together equal REGs to do more simplification. */
4104 if (REG_P (x) && REG_P (y))
4105 return REGNO (x) > REGNO (y);
4107 return 0;
4110 /* Simplify and canonicalize a PLUS or MINUS, at least one of whose
4111 operands may be another PLUS or MINUS.
4113 Rather than testing for specific cases, we do this by a brute-force method
4114 and do all possible simplifications until no more changes occur. Then
4115 we rebuild the operation.
4117 May return NULL_RTX when no changes were made. */
4119 static rtx
4120 simplify_plus_minus (enum rtx_code code, machine_mode mode, rtx op0,
4121 rtx op1)
4123 struct simplify_plus_minus_op_data
4125 rtx op;
4126 short neg;
4127 } ops[16];
4128 rtx result, tem;
4129 int n_ops = 2;
4130 int changed, n_constants, canonicalized = 0;
4131 int i, j;
4133 memset (ops, 0, sizeof ops);
4135 /* Set up the two operands and then expand them until nothing has been
4136 changed. If we run out of room in our array, give up; this should
4137 almost never happen. */
4139 ops[0].op = op0;
4140 ops[0].neg = 0;
4141 ops[1].op = op1;
4142 ops[1].neg = (code == MINUS);
4146 changed = 0;
4147 n_constants = 0;
4149 for (i = 0; i < n_ops; i++)
4151 rtx this_op = ops[i].op;
4152 int this_neg = ops[i].neg;
4153 enum rtx_code this_code = GET_CODE (this_op);
4155 switch (this_code)
4157 case PLUS:
4158 case MINUS:
4159 if (n_ops == ARRAY_SIZE (ops))
4160 return NULL_RTX;
4162 ops[n_ops].op = XEXP (this_op, 1);
4163 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
4164 n_ops++;
4166 ops[i].op = XEXP (this_op, 0);
4167 changed = 1;
4168 /* If this operand was negated then we will potentially
4169 canonicalize the expression. Similarly, if we don't
4170 place the operands adjacent to each other we're re-ordering the
4171 expression and thus might be performing a
4172 canonicalization. Ignore register re-ordering.
4173 ??? It might be better to shuffle the ops array here,
4174 but then (plus (plus (A, B), plus (C, D))) wouldn't
4175 be seen as non-canonical. */
4176 if (this_neg
4177 || (i != n_ops - 2
4178 && !(REG_P (ops[i].op) && REG_P (ops[n_ops - 1].op))))
4179 canonicalized = 1;
4180 break;
4182 case NEG:
4183 ops[i].op = XEXP (this_op, 0);
4184 ops[i].neg = ! this_neg;
4185 changed = 1;
4186 canonicalized = 1;
4187 break;
4189 case CONST:
4190 if (n_ops != ARRAY_SIZE (ops)
4191 && GET_CODE (XEXP (this_op, 0)) == PLUS
4192 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
4193 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
4195 ops[i].op = XEXP (XEXP (this_op, 0), 0);
4196 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
4197 ops[n_ops].neg = this_neg;
4198 n_ops++;
4199 changed = 1;
4200 canonicalized = 1;
4202 break;
4204 case NOT:
4205 /* ~a -> (-a - 1) */
4206 if (n_ops != ARRAY_SIZE (ops))
4208 ops[n_ops].op = CONSTM1_RTX (mode);
4209 ops[n_ops++].neg = this_neg;
4210 ops[i].op = XEXP (this_op, 0);
4211 ops[i].neg = !this_neg;
4212 changed = 1;
4213 canonicalized = 1;
4215 break;
4217 case CONST_INT:
4218 n_constants++;
4219 if (this_neg)
4221 ops[i].op = neg_const_int (mode, this_op);
4222 ops[i].neg = 0;
4223 changed = 1;
4224 canonicalized = 1;
4226 break;
4228 default:
4229 break;
4233 while (changed);
4235 if (n_constants > 1)
4236 canonicalized = 1;
4238 gcc_assert (n_ops >= 2);
4240 /* If we only have two operands, we can avoid the loops. */
4241 if (n_ops == 2)
4243 enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
4244 rtx lhs, rhs;
4246 /* Get the two operands. Be careful with the order, especially for
4247 the cases where code == MINUS. */
4248 if (ops[0].neg && ops[1].neg)
4250 lhs = gen_rtx_NEG (mode, ops[0].op);
4251 rhs = ops[1].op;
4253 else if (ops[0].neg)
4255 lhs = ops[1].op;
4256 rhs = ops[0].op;
4258 else
4260 lhs = ops[0].op;
4261 rhs = ops[1].op;
4264 return simplify_const_binary_operation (code, mode, lhs, rhs);
4267 /* Now simplify each pair of operands until nothing changes. */
4268 while (1)
4270 /* Insertion sort is good enough for a small array. */
4271 for (i = 1; i < n_ops; i++)
4273 struct simplify_plus_minus_op_data save;
4274 int cmp;
4276 j = i - 1;
4277 cmp = simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op);
4278 if (cmp <= 0)
4279 continue;
4280 /* Just swapping registers doesn't count as canonicalization. */
4281 if (cmp != 1)
4282 canonicalized = 1;
4284 save = ops[i];
4286 ops[j + 1] = ops[j];
4287 while (j--
4288 && simplify_plus_minus_op_data_cmp (ops[j].op, save.op) > 0);
4289 ops[j + 1] = save;
4292 changed = 0;
4293 for (i = n_ops - 1; i > 0; i--)
4294 for (j = i - 1; j >= 0; j--)
4296 rtx lhs = ops[j].op, rhs = ops[i].op;
4297 int lneg = ops[j].neg, rneg = ops[i].neg;
4299 if (lhs != 0 && rhs != 0)
4301 enum rtx_code ncode = PLUS;
4303 if (lneg != rneg)
4305 ncode = MINUS;
4306 if (lneg)
4307 std::swap (lhs, rhs);
4309 else if (swap_commutative_operands_p (lhs, rhs))
4310 std::swap (lhs, rhs);
4312 if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
4313 && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
4315 rtx tem_lhs, tem_rhs;
4317 tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
4318 tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
4319 tem = simplify_binary_operation (ncode, mode, tem_lhs,
4320 tem_rhs);
4322 if (tem && !CONSTANT_P (tem))
4323 tem = gen_rtx_CONST (GET_MODE (tem), tem);
4325 else
4326 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
4328 if (tem)
4330 /* Reject "simplifications" that just wrap the two
4331 arguments in a CONST. Failure to do so can result
4332 in infinite recursion with simplify_binary_operation
4333 when it calls us to simplify CONST operations.
4334 Also, if we find such a simplification, don't try
4335 any more combinations with this rhs: We must have
4336 something like symbol+offset, i.e. one of the
4337 trivial CONST expressions we handle later. */
4338 if (GET_CODE (tem) == CONST
4339 && GET_CODE (XEXP (tem, 0)) == ncode
4340 && XEXP (XEXP (tem, 0), 0) == lhs
4341 && XEXP (XEXP (tem, 0), 1) == rhs)
4342 break;
4343 lneg &= rneg;
4344 if (GET_CODE (tem) == NEG)
4345 tem = XEXP (tem, 0), lneg = !lneg;
4346 if (CONST_INT_P (tem) && lneg)
4347 tem = neg_const_int (mode, tem), lneg = 0;
4349 ops[i].op = tem;
4350 ops[i].neg = lneg;
4351 ops[j].op = NULL_RTX;
4352 changed = 1;
4353 canonicalized = 1;
4358 if (!changed)
4359 break;
4361 /* Pack all the operands to the lower-numbered entries. */
4362 for (i = 0, j = 0; j < n_ops; j++)
4363 if (ops[j].op)
4365 ops[i] = ops[j];
4366 i++;
4368 n_ops = i;
4371 /* If nothing changed, fail. */
4372 if (!canonicalized)
4373 return NULL_RTX;
4375 /* Create (minus -C X) instead of (neg (const (plus X C))). */
4376 if (n_ops == 2
4377 && CONST_INT_P (ops[1].op)
4378 && CONSTANT_P (ops[0].op)
4379 && ops[0].neg)
4380 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
4382 /* We suppressed creation of trivial CONST expressions in the
4383 combination loop to avoid recursion. Create one manually now.
4384 The combination loop should have ensured that there is exactly
4385 one CONST_INT, and the sort will have ensured that it is last
4386 in the array and that any other constant will be next-to-last. */
4388 if (n_ops > 1
4389 && CONST_INT_P (ops[n_ops - 1].op)
4390 && CONSTANT_P (ops[n_ops - 2].op))
4392 rtx value = ops[n_ops - 1].op;
4393 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
4394 value = neg_const_int (mode, value);
4395 ops[n_ops - 2].op = plus_constant (mode, ops[n_ops - 2].op,
4396 INTVAL (value));
4397 n_ops--;
4400 /* Put a non-negated operand first, if possible. */
4402 for (i = 0; i < n_ops && ops[i].neg; i++)
4403 continue;
4404 if (i == n_ops)
4405 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
4406 else if (i != 0)
4408 tem = ops[0].op;
4409 ops[0] = ops[i];
4410 ops[i].op = tem;
4411 ops[i].neg = 1;
4414 /* Now make the result by performing the requested operations. */
4415 result = ops[0].op;
4416 for (i = 1; i < n_ops; i++)
4417 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
4418 mode, result, ops[i].op);
4420 return result;
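/* Illustrative sketch (plain C, not part of the GCC sources): the core
   of simplify_plus_minus is flattening a nested PLUS/MINUS/NEG tree
   into a flat list of (term, negated) pairs, just as ops[] is filled
   in above.  The toy expression type below folds the constant-only
   case by summing the flattened terms; the real routine keeps
   non-constant terms symbolic, sorts them and recombines pairwise.
   All names here are assumptions made for this example only.  */
enum toy_code { TOY_CONST, TOY_PLUS, TOY_MINUS, TOY_NEG };
struct toy_expr
{
  enum toy_code code;
  long long val;                        /* used by TOY_CONST */
  const struct toy_expr *op0, *op1;
};

static void
toy_flatten (const struct toy_expr *e, int neg, long long *sum)
{
  switch (e->code)
    {
    case TOY_CONST:
      *sum += neg ? -e->val : e->val;   /* a constant term with its sign */
      break;
    case TOY_NEG:                       /* NEG only flips the sign */
      toy_flatten (e->op0, !neg, sum);
      break;
    case TOY_MINUS:                     /* the second operand is negated */
      toy_flatten (e->op0, neg, sum);
      toy_flatten (e->op1, !neg, sum);
      break;
    case TOY_PLUS:
      toy_flatten (e->op0, neg, sum);
      toy_flatten (e->op1, neg, sum);
      break;
    }
}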
4423 /* Check whether an operand is suitable for calling simplify_plus_minus. */
4424 static bool
4425 plus_minus_operand_p (const_rtx x)
4427 return GET_CODE (x) == PLUS
4428 || GET_CODE (x) == MINUS
4429 || (GET_CODE (x) == CONST
4430 && GET_CODE (XEXP (x, 0)) == PLUS
4431 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
4432 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
4435 /* Like simplify_binary_operation except used for relational operators.
4436 MODE is the mode of the result. If MODE is VOIDmode, the operands must
4437 not both be VOIDmode as well.
4439 CMP_MODE specifies the mode in which the comparison is done, so it is
4440 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
4441 the operands or, if both are VOIDmode, the operands are compared in
4442 "infinite precision". */
4444 simplify_relational_operation (enum rtx_code code, machine_mode mode,
4445 machine_mode cmp_mode, rtx op0, rtx op1)
4447 rtx tem, trueop0, trueop1;
4449 if (cmp_mode == VOIDmode)
4450 cmp_mode = GET_MODE (op0);
4451 if (cmp_mode == VOIDmode)
4452 cmp_mode = GET_MODE (op1);
4454 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
4455 if (tem)
4457 if (SCALAR_FLOAT_MODE_P (mode))
4459 if (tem == const0_rtx)
4460 return CONST0_RTX (mode);
4461 #ifdef FLOAT_STORE_FLAG_VALUE
4463 REAL_VALUE_TYPE val;
4464 val = FLOAT_STORE_FLAG_VALUE (mode);
4465 return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
4467 #else
4468 return NULL_RTX;
4469 #endif
4471 if (VECTOR_MODE_P (mode))
4473 if (tem == const0_rtx)
4474 return CONST0_RTX (mode);
4475 #ifdef VECTOR_STORE_FLAG_VALUE
4477 int i, units;
4478 rtvec v;
4480 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
4481 if (val == NULL_RTX)
4482 return NULL_RTX;
4483 if (val == const1_rtx)
4484 return CONST1_RTX (mode);
4486 units = GET_MODE_NUNITS (mode);
4487 v = rtvec_alloc (units);
4488 for (i = 0; i < units; i++)
4489 RTVEC_ELT (v, i) = val;
4490 return gen_rtx_raw_CONST_VECTOR (mode, v);
4492 #else
4493 return NULL_RTX;
4494 #endif
4497 return tem;
4500 /* For the following tests, ensure const0_rtx is op1. */
4501 if (swap_commutative_operands_p (op0, op1)
4502 || (op0 == const0_rtx && op1 != const0_rtx))
4503 std::swap (op0, op1), code = swap_condition (code);
4505 /* If op0 is a compare, extract the comparison arguments from it. */
4506 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4507 return simplify_gen_relational (code, mode, VOIDmode,
4508 XEXP (op0, 0), XEXP (op0, 1));
4510 if (GET_MODE_CLASS (cmp_mode) == MODE_CC
4511 || CC0_P (op0))
4512 return NULL_RTX;
4514 trueop0 = avoid_constant_pool_reference (op0);
4515 trueop1 = avoid_constant_pool_reference (op1);
4516 return simplify_relational_operation_1 (code, mode, cmp_mode,
4517 trueop0, trueop1);
4520 /* This part of simplify_relational_operation is only used when CMP_MODE
4521 is not in class MODE_CC (i.e. it is a real comparison).
4523 MODE is the mode of the result, while CMP_MODE specifies the mode
4524 in which the comparison is done, so it is the mode of the operands. */
4526 static rtx
4527 simplify_relational_operation_1 (enum rtx_code code, machine_mode mode,
4528 machine_mode cmp_mode, rtx op0, rtx op1)
4530 enum rtx_code op0code = GET_CODE (op0);
4532 if (op1 == const0_rtx && COMPARISON_P (op0))
4534 /* If op0 is a comparison, extract the comparison arguments
4535 from it. */
4536 if (code == NE)
4538 if (GET_MODE (op0) == mode)
4539 return simplify_rtx (op0);
4540 else
4541 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
4542 XEXP (op0, 0), XEXP (op0, 1));
4544 else if (code == EQ)
4546 enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
4547 if (new_code != UNKNOWN)
4548 return simplify_gen_relational (new_code, mode, VOIDmode,
4549 XEXP (op0, 0), XEXP (op0, 1));
4553 /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
4554 (GEU/LTU a -C). Likewise for (LTU/GEU (PLUS a C) a). */
4555 if ((code == LTU || code == GEU)
4556 && GET_CODE (op0) == PLUS
4557 && CONST_INT_P (XEXP (op0, 1))
4558 && (rtx_equal_p (op1, XEXP (op0, 0))
4559 || rtx_equal_p (op1, XEXP (op0, 1)))
4560 /* (LTU/GEU (PLUS a 0) 0) is not the same as (GEU/LTU a 0). */
4561 && XEXP (op0, 1) != const0_rtx)
4563 rtx new_cmp
4564 = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
4565 return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
4566 cmp_mode, XEXP (op0, 0), new_cmp);
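/* Illustrative check (plain C, not part of the GCC sources): for
   unsigned A and a nonzero constant C, "(a + c) < c" detects the carry
   out of the addition, which in modular arithmetic is exactly
   "a >= -c".  That is the identity behind the LTU/GEU rewrite above;
   the helper name and 32-bit width are assumptions for this example.  */
static int
plus_ltu_rewrite_holds (unsigned int a, unsigned int c)
{
  unsigned int ltu_form = (a + c) < c;              /* (ltu (plus a c) c) */
  unsigned int geu_form = a >= (unsigned int) -c;   /* (geu a (neg c)) */
  return c != 0 && ltu_form == geu_form;            /* 1 for every a when c != 0 */
}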
4569 /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a). */
4570 if ((code == LTU || code == GEU)
4571 && GET_CODE (op0) == PLUS
4572 && rtx_equal_p (op1, XEXP (op0, 1))
4573 /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b). */
4574 && !rtx_equal_p (op1, XEXP (op0, 0)))
4575 return simplify_gen_relational (code, mode, cmp_mode, op0,
4576 copy_rtx (XEXP (op0, 0)));
4578 if (op1 == const0_rtx)
4580 /* Canonicalize (GTU x 0) as (NE x 0). */
4581 if (code == GTU)
4582 return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
4583 /* Canonicalize (LEU x 0) as (EQ x 0). */
4584 if (code == LEU)
4585 return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
4587 else if (op1 == const1_rtx)
4589 switch (code)
4591 case GE:
4592 /* Canonicalize (GE x 1) as (GT x 0). */
4593 return simplify_gen_relational (GT, mode, cmp_mode,
4594 op0, const0_rtx);
4595 case GEU:
4596 /* Canonicalize (GEU x 1) as (NE x 0). */
4597 return simplify_gen_relational (NE, mode, cmp_mode,
4598 op0, const0_rtx);
4599 case LT:
4600 /* Canonicalize (LT x 1) as (LE x 0). */
4601 return simplify_gen_relational (LE, mode, cmp_mode,
4602 op0, const0_rtx);
4603 case LTU:
4604 /* Canonicalize (LTU x 1) as (EQ x 0). */
4605 return simplify_gen_relational (EQ, mode, cmp_mode,
4606 op0, const0_rtx);
4607 default:
4608 break;
4611 else if (op1 == constm1_rtx)
4613 /* Canonicalize (LE x -1) as (LT x 0). */
4614 if (code == LE)
4615 return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
4616 /* Canonicalize (GT x -1) as (GE x 0). */
4617 if (code == GT)
4618 return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
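/* Illustrative spot checks (plain C, not part of the GCC sources) of
   the unsigned canonicalizations above: GTU/LEU against 0 and GEU/LTU
   against 1 are just NE/EQ against 0.  Each helper returns 1 for every
   input; the names are assumptions made for this example only.  */
static int gtu0_is_ne0 (unsigned int x) { return (x > 0u) == (x != 0u); }
static int leu0_is_eq0 (unsigned int x) { return (x <= 0u) == (x == 0u); }
static int geu1_is_ne0 (unsigned int x) { return (x >= 1u) == (x != 0u); }
static int ltu1_is_eq0 (unsigned int x) { return (x < 1u) == (x == 0u); }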
4621 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
4622 if ((code == EQ || code == NE)
4623 && (op0code == PLUS || op0code == MINUS)
4624 && CONSTANT_P (op1)
4625 && CONSTANT_P (XEXP (op0, 1))
4626 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
4628 rtx x = XEXP (op0, 0);
4629 rtx c = XEXP (op0, 1);
4630 enum rtx_code invcode = op0code == PLUS ? MINUS : PLUS;
4631 rtx tem = simplify_gen_binary (invcode, cmp_mode, op1, c);
4633 /* Detect an infinite recursive condition, where we oscillate at this
4634 simplification case between:
4635 A + B == C <---> C - B == A,
4636 where A, B, and C are all constant, non-simplifiable expressions,
4637 usually SYMBOL_REFs. */
4638 if (GET_CODE (tem) == invcode
4639 && CONSTANT_P (x)
4640 && rtx_equal_p (c, XEXP (tem, 1)))
4641 return NULL_RTX;
4643 return simplify_gen_relational (code, mode, cmp_mode, x, tem);
4646 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0)) is
4647 the same as (zero_extract:SI FOO (const_int 1) BAR). */
4648 if (code == NE
4649 && op1 == const0_rtx
4650 && GET_MODE_CLASS (mode) == MODE_INT
4651 && cmp_mode != VOIDmode
4652 /* ??? Work-around BImode bugs in the ia64 backend. */
4653 && mode != BImode
4654 && cmp_mode != BImode
4655 && nonzero_bits (op0, cmp_mode) == 1
4656 && STORE_FLAG_VALUE == 1)
4657 return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
4658 ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
4659 : lowpart_subreg (mode, op0, cmp_mode);
4661 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
4662 if ((code == EQ || code == NE)
4663 && op1 == const0_rtx
4664 && op0code == XOR)
4665 return simplify_gen_relational (code, mode, cmp_mode,
4666 XEXP (op0, 0), XEXP (op0, 1));
4668 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
4669 if ((code == EQ || code == NE)
4670 && op0code == XOR
4671 && rtx_equal_p (XEXP (op0, 0), op1)
4672 && !side_effects_p (XEXP (op0, 0)))
4673 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 1),
4674 CONST0_RTX (mode));
4676 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
4677 if ((code == EQ || code == NE)
4678 && op0code == XOR
4679 && rtx_equal_p (XEXP (op0, 1), op1)
4680 && !side_effects_p (XEXP (op0, 1)))
4681 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4682 CONST0_RTX (mode));
4684 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
4685 if ((code == EQ || code == NE)
4686 && op0code == XOR
4687 && CONST_SCALAR_INT_P (op1)
4688 && CONST_SCALAR_INT_P (XEXP (op0, 1)))
4689 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4690 simplify_gen_binary (XOR, cmp_mode,
4691 XEXP (op0, 1), op1));
4693 /* (eq/ne (and x y) x) simplifies to (eq/ne (and (not y) x) 0), which
4694 can be implemented with a BICS instruction on some targets, or
4695 constant-folded if y is a constant. */
4696 if ((code == EQ || code == NE)
4697 && op0code == AND
4698 && rtx_equal_p (XEXP (op0, 0), op1)
4699 && !side_effects_p (op1)
4700 && op1 != CONST0_RTX (cmp_mode))
4702 rtx not_y = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 1), cmp_mode);
4703 rtx lhs = simplify_gen_binary (AND, cmp_mode, not_y, XEXP (op0, 0));
4705 return simplify_gen_relational (code, mode, cmp_mode, lhs,
4706 CONST0_RTX (cmp_mode));
4709 /* Likewise for (eq/ne (and x y) y). */
4710 if ((code == EQ || code == NE)
4711 && op0code == AND
4712 && rtx_equal_p (XEXP (op0, 1), op1)
4713 && !side_effects_p (op1)
4714 && op1 != CONST0_RTX (cmp_mode))
4716 rtx not_x = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 0), cmp_mode);
4717 rtx lhs = simplify_gen_binary (AND, cmp_mode, not_x, XEXP (op0, 1));
4719 return simplify_gen_relational (code, mode, cmp_mode, lhs,
4720 CONST0_RTX (cmp_mode));
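/* Illustrative checks (plain C, not part of the GCC sources) of the
   XOR and AND rewrites above: (x ^ y) == x exactly when y == 0, and
   (x & y) == x exactly when (~y & x) == 0, which is the BICS form.
   Both helpers return 1 for every pair of inputs; the names are
   assumptions made for this example only.  */
static int
xor_rewrite_holds (unsigned int x, unsigned int y)
{
  return ((x ^ y) == x) == (y == 0u);
}

static int
bics_rewrite_holds (unsigned int x, unsigned int y)
{
  return ((x & y) == x) == ((~y & x) == 0u);
}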
4723 /* (eq/ne (bswap x) C1) simplifies to (eq/ne x C2) with C2 swapped. */
4724 if ((code == EQ || code == NE)
4725 && GET_CODE (op0) == BSWAP
4726 && CONST_SCALAR_INT_P (op1))
4727 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4728 simplify_gen_unary (BSWAP, cmp_mode,
4729 op1, cmp_mode));
4731 /* (eq/ne (bswap x) (bswap y)) simplifies to (eq/ne x y). */
4732 if ((code == EQ || code == NE)
4733 && GET_CODE (op0) == BSWAP
4734 && GET_CODE (op1) == BSWAP)
4735 return simplify_gen_relational (code, mode, cmp_mode,
4736 XEXP (op0, 0), XEXP (op1, 0));
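/* Illustrative sketch (plain C, not part of the GCC sources): byte
   swapping is an involution, so bswap(x) == c is the same test as
   x == bswap(c), which is the rewrite above.  The helper names and the
   fixed 32-bit width are assumptions made for this example only.  */
static unsigned int
bswap32_c (unsigned int x)
{
  return (x >> 24) | ((x >> 8) & 0x0000ff00u)
         | ((x << 8) & 0x00ff0000u) | (x << 24);
}

static int
bswap_rewrite_holds (unsigned int x, unsigned int c)
{
  return (bswap32_c (x) == c) == (x == bswap32_c (c));
}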
4738 if (op0code == POPCOUNT && op1 == const0_rtx)
4739 switch (code)
4741 case EQ:
4742 case LE:
4743 case LEU:
4744 /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */
4745 return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
4746 XEXP (op0, 0), const0_rtx);
4748 case NE:
4749 case GT:
4750 case GTU:
4751 /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */
4752 return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
4753 XEXP (op0, 0), const0_rtx);
4755 default:
4756 break;
4759 return NULL_RTX;
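/* Illustrative check (plain C, not part of the GCC sources, using
   GCC's __builtin_popcount): a value has zero set bits exactly when it
   is zero, so (eq (popcount x) 0) collapses to (eq x 0) and the
   NE/GT/GTU cases collapse to (ne x 0), as above.  The helper name is
   an assumption made for this example only.  */
static int
popcount_rewrite_holds (unsigned int x)
{
  return (__builtin_popcount (x) == 0) == (x == 0u);
}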
4762 enum
4764 CMP_EQ = 1,
4765 CMP_LT = 2,
4766 CMP_GT = 4,
4767 CMP_LTU = 8,
4768 CMP_GTU = 16
4772 /* Convert the known results for EQ, LT, GT, LTU, GTU contained in
4773 KNOWN_RESULT to a CONST_INT, based on the requested comparison CODE.
4774 For KNOWN_RESULT to make sense it should be either CMP_EQ, or the
4775 logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
4776 For floating-point comparisons, assume that the operands were ordered. */
4778 static rtx
4779 comparison_result (enum rtx_code code, int known_results)
4781 switch (code)
4783 case EQ:
4784 case UNEQ:
4785 return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
4786 case NE:
4787 case LTGT:
4788 return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;
4790 case LT:
4791 case UNLT:
4792 return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
4793 case GE:
4794 case UNGE:
4795 return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;
4797 case GT:
4798 case UNGT:
4799 return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
4800 case LE:
4801 case UNLE:
4802 return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;
4804 case LTU:
4805 return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
4806 case GEU:
4807 return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;
4809 case GTU:
4810 return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
4811 case LEU:
4812 return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;
4814 case ORDERED:
4815 return const_true_rtx;
4816 case UNORDERED:
4817 return const0_rtx;
4818 default:
4819 gcc_unreachable ();
4823 /* Check if the given comparison (done in the given MODE) is actually
4824 a tautology or a contradiction. If the mode is VOIDmode, the
4825 comparison is done in "infinite precision". If no simplification
4826 is possible, this function returns zero. Otherwise, it returns
4827 either const_true_rtx or const0_rtx. */
4830 simplify_const_relational_operation (enum rtx_code code,
4831 machine_mode mode,
4832 rtx op0, rtx op1)
4834 rtx tem;
4835 rtx trueop0;
4836 rtx trueop1;
4838 gcc_assert (mode != VOIDmode
4839 || (GET_MODE (op0) == VOIDmode
4840 && GET_MODE (op1) == VOIDmode));
4842 /* If op0 is a compare, extract the comparison arguments from it. */
4843 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4845 op1 = XEXP (op0, 1);
4846 op0 = XEXP (op0, 0);
4848 if (GET_MODE (op0) != VOIDmode)
4849 mode = GET_MODE (op0);
4850 else if (GET_MODE (op1) != VOIDmode)
4851 mode = GET_MODE (op1);
4852 else
4853 return 0;
4856 /* We can't simplify MODE_CC values since we don't know what the
4857 actual comparison is. */
4858 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
4859 return 0;
4861 /* Make sure the constant is second. */
4862 if (swap_commutative_operands_p (op0, op1))
4864 std::swap (op0, op1);
4865 code = swap_condition (code);
4868 trueop0 = avoid_constant_pool_reference (op0);
4869 trueop1 = avoid_constant_pool_reference (op1);
4871 /* For integer comparisons of A and B maybe we can simplify A - B and can
4872 then simplify a comparison of that with zero. If A and B are both either
4873 a register or a CONST_INT, this can't help; testing for these cases will
4874 prevent infinite recursion here and speed things up.
4876 We can only do this for EQ and NE comparisons; otherwise we might
4877 lose or introduce overflow that we cannot disregard as undefined, since
4878 we do not know the signedness of the operation on either the left or
4879 the right hand side of the comparison. */
4881 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
4882 && (code == EQ || code == NE)
4883 && ! ((REG_P (op0) || CONST_INT_P (trueop0))
4884 && (REG_P (op1) || CONST_INT_P (trueop1)))
4885 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
4886 /* We cannot do this if tem is a nonzero address. */
4887 && ! nonzero_address_p (tem))
4888 return simplify_const_relational_operation (signed_condition (code),
4889 mode, tem, const0_rtx);
4891 if (! HONOR_NANS (mode) && code == ORDERED)
4892 return const_true_rtx;
4894 if (! HONOR_NANS (mode) && code == UNORDERED)
4895 return const0_rtx;
4897 /* For modes without NaNs, if the two operands are equal, we know the
4898 result except if they have side-effects. Even with NaNs we know
4899 the result of unordered comparisons and, if signaling NaNs are
4900 irrelevant, also the result of LT/GT/LTGT. */
4901 if ((! HONOR_NANS (trueop0)
4902 || code == UNEQ || code == UNLE || code == UNGE
4903 || ((code == LT || code == GT || code == LTGT)
4904 && ! HONOR_SNANS (trueop0)))
4905 && rtx_equal_p (trueop0, trueop1)
4906 && ! side_effects_p (trueop0))
4907 return comparison_result (code, CMP_EQ);
4909 /* If the operands are floating-point constants, see if we can fold
4910 the result. */
4911 if (CONST_DOUBLE_AS_FLOAT_P (trueop0)
4912 && CONST_DOUBLE_AS_FLOAT_P (trueop1)
4913 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
4915 REAL_VALUE_TYPE d0, d1;
4917 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
4918 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
4920 /* Comparisons are unordered iff at least one of the values is NaN. */
4921 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
4922 switch (code)
4924 case UNEQ:
4925 case UNLT:
4926 case UNGT:
4927 case UNLE:
4928 case UNGE:
4929 case NE:
4930 case UNORDERED:
4931 return const_true_rtx;
4932 case EQ:
4933 case LT:
4934 case GT:
4935 case LE:
4936 case GE:
4937 case LTGT:
4938 case ORDERED:
4939 return const0_rtx;
4940 default:
4941 return 0;
4944 return comparison_result (code,
4945 (REAL_VALUES_EQUAL (d0, d1) ? CMP_EQ :
4946 REAL_VALUES_LESS (d0, d1) ? CMP_LT : CMP_GT));
4949 /* Otherwise, see if the operands are both integers. */
4950 if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
4951 && CONST_SCALAR_INT_P (trueop0) && CONST_SCALAR_INT_P (trueop1))
4953 /* It would be nice if we really had a mode here. However, the
4954 largest int representable on the target is as good as
4955 infinite. */
4956 machine_mode cmode = (mode == VOIDmode) ? MAX_MODE_INT : mode;
4957 rtx_mode_t ptrueop0 = std::make_pair (trueop0, cmode);
4958 rtx_mode_t ptrueop1 = std::make_pair (trueop1, cmode);
4960 if (wi::eq_p (ptrueop0, ptrueop1))
4961 return comparison_result (code, CMP_EQ);
4962 else
4964 int cr = wi::lts_p (ptrueop0, ptrueop1) ? CMP_LT : CMP_GT;
4965 cr |= wi::ltu_p (ptrueop0, ptrueop1) ? CMP_LTU : CMP_GTU;
4966 return comparison_result (code, cr);
4970 /* Optimize comparisons with upper and lower bounds. */
4971 if (HWI_COMPUTABLE_MODE_P (mode)
4972 && CONST_INT_P (trueop1)
4973 && !side_effects_p (trueop0))
4975 int sign;
4976 unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, mode);
4977 HOST_WIDE_INT val = INTVAL (trueop1);
4978 HOST_WIDE_INT mmin, mmax;
4980 if (code == GEU
4981 || code == LEU
4982 || code == GTU
4983 || code == LTU)
4984 sign = 0;
4985 else
4986 sign = 1;
4988 /* Get a reduced range if the sign bit is zero. */
4989 if (nonzero <= (GET_MODE_MASK (mode) >> 1))
4991 mmin = 0;
4992 mmax = nonzero;
4994 else
4996 rtx mmin_rtx, mmax_rtx;
4997 get_mode_bounds (mode, sign, mode, &mmin_rtx, &mmax_rtx);
4999 mmin = INTVAL (mmin_rtx);
5000 mmax = INTVAL (mmax_rtx);
5001 if (sign)
5003 unsigned int sign_copies = num_sign_bit_copies (trueop0, mode);
5005 mmin >>= (sign_copies - 1);
5006 mmax >>= (sign_copies - 1);
5010 switch (code)
5012 /* x >= y is always true for y <= mmin, always false for y > mmax. */
5013 case GEU:
5014 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
5015 return const_true_rtx;
5016 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
5017 return const0_rtx;
5018 break;
5019 case GE:
5020 if (val <= mmin)
5021 return const_true_rtx;
5022 if (val > mmax)
5023 return const0_rtx;
5024 break;
5026 /* x <= y is always true for y >= mmax, always false for y < mmin. */
5027 case LEU:
5028 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
5029 return const_true_rtx;
5030 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
5031 return const0_rtx;
5032 break;
5033 case LE:
5034 if (val >= mmax)
5035 return const_true_rtx;
5036 if (val < mmin)
5037 return const0_rtx;
5038 break;
5040 case EQ:
5041 /* x == y is always false for y out of range. */
5042 if (val < mmin || val > mmax)
5043 return const0_rtx;
5044 break;
5046 /* x > y is always false for y >= mmax, always true for y < mmin. */
5047 case GTU:
5048 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
5049 return const0_rtx;
5050 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
5051 return const_true_rtx;
5052 break;
5053 case GT:
5054 if (val >= mmax)
5055 return const0_rtx;
5056 if (val < mmin)
5057 return const_true_rtx;
5058 break;
5060 /* x < y is always false for y <= mmin, always true for y > mmax. */
5061 case LTU:
5062 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
5063 return const0_rtx;
5064 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
5065 return const_true_rtx;
5066 break;
5067 case LT:
5068 if (val <= mmin)
5069 return const0_rtx;
5070 if (val > mmax)
5071 return const_true_rtx;
5072 break;
5074 case NE:
5075 /* x != y is always true for y out of range. */
5076 if (val < mmin || val > mmax)
5077 return const_true_rtx;
5078 break;
5080 default:
5081 break;
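/* Illustrative sketch (plain C, not part of the GCC sources): once X
   is known to lie in [MMIN, MMAX], a comparison such as X >= VAL is a
   tautology when VAL <= MMIN and a contradiction when VAL > MMAX,
   which is what the GE/GEU cases above return.  The helper name is an
   assumption made for this example only.  */
static int
fold_ge_with_bounds (long long mmin, long long mmax, long long val)
{
  if (val <= mmin)
    return 1;          /* corresponds to const_true_rtx */
  if (val > mmax)
    return 0;          /* corresponds to const0_rtx */
  return -1;           /* not decidable from the bounds alone */
}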
5085 /* Optimize integer comparisons with zero. */
5086 if (trueop1 == const0_rtx && !side_effects_p (trueop0))
5088 /* Some addresses are known to be nonzero. We don't know
5089 their sign, but equality comparisons are known. */
5090 if (nonzero_address_p (trueop0))
5092 if (code == EQ || code == LEU)
5093 return const0_rtx;
5094 if (code == NE || code == GTU)
5095 return const_true_rtx;
5098 /* See if the first operand is an IOR with a constant. If so, we
5099 may be able to determine the result of this comparison. */
5100 if (GET_CODE (op0) == IOR)
5102 rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
5103 if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
5105 int sign_bitnum = GET_MODE_PRECISION (mode) - 1;
5106 int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
5107 && (UINTVAL (inner_const)
5108 & ((unsigned HOST_WIDE_INT) 1
5109 << sign_bitnum)));
5111 switch (code)
5113 case EQ:
5114 case LEU:
5115 return const0_rtx;
5116 case NE:
5117 case GTU:
5118 return const_true_rtx;
5119 case LT:
5120 case LE:
5121 if (has_sign)
5122 return const_true_rtx;
5123 break;
5124 case GT:
5125 case GE:
5126 if (has_sign)
5127 return const0_rtx;
5128 break;
5129 default:
5130 break;
5136 /* Optimize comparison of ABS with zero. */
5137 if (trueop1 == CONST0_RTX (mode) && !side_effects_p (trueop0)
5138 && (GET_CODE (trueop0) == ABS
5139 || (GET_CODE (trueop0) == FLOAT_EXTEND
5140 && GET_CODE (XEXP (trueop0, 0)) == ABS)))
5142 switch (code)
5144 case LT:
5145 /* Optimize abs(x) < 0.0. */
5146 if (!HONOR_SNANS (mode)
5147 && (!INTEGRAL_MODE_P (mode)
5148 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
5150 if (INTEGRAL_MODE_P (mode)
5151 && (issue_strict_overflow_warning
5152 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
5153 warning (OPT_Wstrict_overflow,
5154 ("assuming signed overflow does not occur when "
5155 "assuming abs (x) < 0 is false"));
5156 return const0_rtx;
5158 break;
5160 case GE:
5161 /* Optimize abs(x) >= 0.0. */
5162 if (!HONOR_NANS (mode)
5163 && (!INTEGRAL_MODE_P (mode)
5164 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
5166 if (INTEGRAL_MODE_P (mode)
5167 && (issue_strict_overflow_warning
5168 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
5169 warning (OPT_Wstrict_overflow,
5170 ("assuming signed overflow does not occur when "
5171 "assuming abs (x) >= 0 is true"));
5172 return const_true_rtx;
5174 break;
5176 case UNGE:
5177 /* Optimize ! (abs(x) < 0.0). */
5178 return const_true_rtx;
5180 default:
5181 break;
5185 return 0;
5188 /* Simplify CODE, an operation with result mode MODE and three operands,
5189 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
5190 a constant. Return 0 if no simplification is possible. */
5193 simplify_ternary_operation (enum rtx_code code, machine_mode mode,
5194 machine_mode op0_mode, rtx op0, rtx op1,
5195 rtx op2)
5197 unsigned int width = GET_MODE_PRECISION (mode);
5198 bool any_change = false;
5199 rtx tem, trueop2;
5201 /* VOIDmode means "infinite" precision. */
5202 if (width == 0)
5203 width = HOST_BITS_PER_WIDE_INT;
5205 switch (code)
5207 case FMA:
5208 /* Simplify negations around the multiplication. */
5209 /* -a * -b + c => a * b + c. */
5210 if (GET_CODE (op0) == NEG)
5212 tem = simplify_unary_operation (NEG, mode, op1, mode);
5213 if (tem)
5214 op1 = tem, op0 = XEXP (op0, 0), any_change = true;
5216 else if (GET_CODE (op1) == NEG)
5218 tem = simplify_unary_operation (NEG, mode, op0, mode);
5219 if (tem)
5220 op0 = tem, op1 = XEXP (op1, 0), any_change = true;
5223 /* Canonicalize the two multiplication operands. */
5224 /* a * -b + c => -b * a + c. */
5225 if (swap_commutative_operands_p (op0, op1))
5226 std::swap (op0, op1), any_change = true;
5228 if (any_change)
5229 return gen_rtx_FMA (mode, op0, op1, op2);
5230 return NULL_RTX;
5232 case SIGN_EXTRACT:
5233 case ZERO_EXTRACT:
5234 if (CONST_INT_P (op0)
5235 && CONST_INT_P (op1)
5236 && CONST_INT_P (op2)
5237 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
5238 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
5240 /* Extracting a bit-field from a constant */
5241 unsigned HOST_WIDE_INT val = UINTVAL (op0);
5242 HOST_WIDE_INT op1val = INTVAL (op1);
5243 HOST_WIDE_INT op2val = INTVAL (op2);
5244 if (BITS_BIG_ENDIAN)
5245 val >>= GET_MODE_PRECISION (op0_mode) - op2val - op1val;
5246 else
5247 val >>= op2val;
5249 if (HOST_BITS_PER_WIDE_INT != op1val)
5251 /* First zero-extend. */
5252 val &= ((unsigned HOST_WIDE_INT) 1 << op1val) - 1;
5253 /* If desired, propagate sign bit. */
5254 if (code == SIGN_EXTRACT
5255 && (val & ((unsigned HOST_WIDE_INT) 1 << (op1val - 1)))
5256 != 0)
5257 val |= ~ (((unsigned HOST_WIDE_INT) 1 << op1val) - 1);
5260 return gen_int_mode (val, mode);
5262 break;
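/* Illustrative sketch (plain C, not part of the GCC sources) of the
   bit-field extraction above for little-endian bit numbering: shift
   the constant right to the field's start, mask to the field width,
   and for SIGN_EXTRACT propagate the field's top bit.  The helper name
   and the 64-bit container are assumptions for this example only.  */
static unsigned long long
extract_field_c (unsigned long long val, int width, int start,
                 int sign_extract)
{
  val >>= start;                                   /* drop the low bits */
  if (width < 64)
    {
      val &= (1ull << width) - 1;                  /* zero-extend the field */
      if (sign_extract && (val & (1ull << (width - 1))) != 0)
        val |= ~((1ull << width) - 1);             /* propagate the sign bit */
    }
  return val;
}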
5264 case IF_THEN_ELSE:
5265 if (CONST_INT_P (op0))
5266 return op0 != const0_rtx ? op1 : op2;
5268 /* Convert c ? a : a into "a". */
5269 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
5270 return op1;
5272 /* Convert a != b ? a : b into "a". */
5273 if (GET_CODE (op0) == NE
5274 && ! side_effects_p (op0)
5275 && ! HONOR_NANS (mode)
5276 && ! HONOR_SIGNED_ZEROS (mode)
5277 && ((rtx_equal_p (XEXP (op0, 0), op1)
5278 && rtx_equal_p (XEXP (op0, 1), op2))
5279 || (rtx_equal_p (XEXP (op0, 0), op2)
5280 && rtx_equal_p (XEXP (op0, 1), op1))))
5281 return op1;
5283 /* Convert a == b ? a : b into "b". */
5284 if (GET_CODE (op0) == EQ
5285 && ! side_effects_p (op0)
5286 && ! HONOR_NANS (mode)
5287 && ! HONOR_SIGNED_ZEROS (mode)
5288 && ((rtx_equal_p (XEXP (op0, 0), op1)
5289 && rtx_equal_p (XEXP (op0, 1), op2))
5290 || (rtx_equal_p (XEXP (op0, 0), op2)
5291 && rtx_equal_p (XEXP (op0, 1), op1))))
5292 return op2;
5294 /* Convert (!c) != {0,...,0} ? a : b into
5295 c != {0,...,0} ? b : a for vector modes. */
5296 if (VECTOR_MODE_P (GET_MODE (op1))
5297 && GET_CODE (op0) == NE
5298 && GET_CODE (XEXP (op0, 0)) == NOT
5299 && GET_CODE (XEXP (op0, 1)) == CONST_VECTOR)
5301 rtx cv = XEXP (op0, 1);
5302 int nunits = CONST_VECTOR_NUNITS (cv);
5303 bool ok = true;
5304 for (int i = 0; i < nunits; ++i)
5305 if (CONST_VECTOR_ELT (cv, i) != const0_rtx)
5307 ok = false;
5308 break;
5310 if (ok)
5312 rtx new_op0 = gen_rtx_NE (GET_MODE (op0),
5313 XEXP (XEXP (op0, 0), 0),
5314 XEXP (op0, 1));
5315 rtx retval = gen_rtx_IF_THEN_ELSE (mode, new_op0, op2, op1);
5316 return retval;
5320 if (COMPARISON_P (op0) && ! side_effects_p (op0))
5322 machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
5323 ? GET_MODE (XEXP (op0, 1))
5324 : GET_MODE (XEXP (op0, 0)));
5325 rtx temp;
5327 /* Look for happy constants in op1 and op2. */
5328 if (CONST_INT_P (op1) && CONST_INT_P (op2))
5330 HOST_WIDE_INT t = INTVAL (op1);
5331 HOST_WIDE_INT f = INTVAL (op2);
5333 if (t == STORE_FLAG_VALUE && f == 0)
5334 code = GET_CODE (op0);
5335 else if (t == 0 && f == STORE_FLAG_VALUE)
5337 enum rtx_code tmp;
5338 tmp = reversed_comparison_code (op0, NULL_RTX);
5339 if (tmp == UNKNOWN)
5340 break;
5341 code = tmp;
5343 else
5344 break;
5346 return simplify_gen_relational (code, mode, cmp_mode,
5347 XEXP (op0, 0), XEXP (op0, 1));
5350 if (cmp_mode == VOIDmode)
5351 cmp_mode = op0_mode;
5352 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
5353 cmp_mode, XEXP (op0, 0),
5354 XEXP (op0, 1));
5356 /* See if any simplifications were possible. */
5357 if (temp)
5359 if (CONST_INT_P (temp))
5360 return temp == const0_rtx ? op2 : op1;
5361 else if (temp)
5362 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
5365 break;
5367 case VEC_MERGE:
5368 gcc_assert (GET_MODE (op0) == mode);
5369 gcc_assert (GET_MODE (op1) == mode);
5370 gcc_assert (VECTOR_MODE_P (mode));
5371 trueop2 = avoid_constant_pool_reference (op2);
5372 if (CONST_INT_P (trueop2))
5374 int elt_size = GET_MODE_UNIT_SIZE (mode);
5375 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
5376 unsigned HOST_WIDE_INT sel = UINTVAL (trueop2);
5377 unsigned HOST_WIDE_INT mask;
5378 if (n_elts == HOST_BITS_PER_WIDE_INT)
5379 mask = -1;
5380 else
5381 mask = ((unsigned HOST_WIDE_INT) 1 << n_elts) - 1;
5383 if (!(sel & mask) && !side_effects_p (op0))
5384 return op1;
5385 if ((sel & mask) == mask && !side_effects_p (op1))
5386 return op0;
5388 rtx trueop0 = avoid_constant_pool_reference (op0);
5389 rtx trueop1 = avoid_constant_pool_reference (op1);
5390 if (GET_CODE (trueop0) == CONST_VECTOR
5391 && GET_CODE (trueop1) == CONST_VECTOR)
5393 rtvec v = rtvec_alloc (n_elts);
5394 unsigned int i;
5396 for (i = 0; i < n_elts; i++)
5397 RTVEC_ELT (v, i) = ((sel & ((unsigned HOST_WIDE_INT) 1 << i))
5398 ? CONST_VECTOR_ELT (trueop0, i)
5399 : CONST_VECTOR_ELT (trueop1, i));
5400 return gen_rtx_CONST_VECTOR (mode, v);
5403 /* Replace (vec_merge (vec_merge a b m) c n) with (vec_merge b c n)
5404 if no element from a appears in the result. */
5405 if (GET_CODE (op0) == VEC_MERGE)
5407 tem = avoid_constant_pool_reference (XEXP (op0, 2));
5408 if (CONST_INT_P (tem))
5410 unsigned HOST_WIDE_INT sel0 = UINTVAL (tem);
5411 if (!(sel & sel0 & mask) && !side_effects_p (XEXP (op0, 0)))
5412 return simplify_gen_ternary (code, mode, mode,
5413 XEXP (op0, 1), op1, op2);
5414 if (!(sel & ~sel0 & mask) && !side_effects_p (XEXP (op0, 1)))
5415 return simplify_gen_ternary (code, mode, mode,
5416 XEXP (op0, 0), op1, op2);
5419 if (GET_CODE (op1) == VEC_MERGE)
5421 tem = avoid_constant_pool_reference (XEXP (op1, 2));
5422 if (CONST_INT_P (tem))
5424 unsigned HOST_WIDE_INT sel1 = UINTVAL (tem);
5425 if (!(~sel & sel1 & mask) && !side_effects_p (XEXP (op1, 0)))
5426 return simplify_gen_ternary (code, mode, mode,
5427 op0, XEXP (op1, 1), op2);
5428 if (!(~sel & ~sel1 & mask) && !side_effects_p (XEXP (op1, 1)))
5429 return simplify_gen_ternary (code, mode, mode,
5430 op0, XEXP (op1, 0), op2);
5434 /* Replace (vec_merge (vec_duplicate (vec_select a parallel (i))) a 1 << i)
5435 with a. */
5436 if (GET_CODE (op0) == VEC_DUPLICATE
5437 && GET_CODE (XEXP (op0, 0)) == VEC_SELECT
5438 && GET_CODE (XEXP (XEXP (op0, 0), 1)) == PARALLEL
5439 && mode_nunits[GET_MODE (XEXP (op0, 0))] == 1)
5441 tem = XVECEXP ((XEXP (XEXP (op0, 0), 1)), 0, 0);
5442 if (CONST_INT_P (tem) && CONST_INT_P (op2))
5444 if (XEXP (XEXP (op0, 0), 0) == op1
5445 && UINTVAL (op2) == HOST_WIDE_INT_1U << UINTVAL (tem))
5446 return op1;
5451 if (rtx_equal_p (op0, op1)
5452 && !side_effects_p (op2) && !side_effects_p (op1))
5453 return op0;
5455 break;
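/* Illustrative sketch (plain C, not part of the GCC sources): for a
   constant selector, bit I of SEL picks element I from the first or
   the second source, exactly as the CONST_VECTOR loop above does.  The
   helper name and array-based "vectors" are assumptions made for this
   example only.  */
static void
vec_merge_c (const int *op0, const int *op1, unsigned long long sel,
             int n_elts, int *dst)
{
  for (int i = 0; i < n_elts; i++)
    dst[i] = (sel & (1ull << i)) ? op0[i] : op1[i];
}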
5457 default:
5458 gcc_unreachable ();
5461 return 0;
5464 /* Evaluate a SUBREG of a CONST_INT or CONST_WIDE_INT or CONST_DOUBLE
5465 or CONST_FIXED or CONST_VECTOR, returning another CONST_INT or
5466 CONST_WIDE_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.
5468 Works by unpacking OP into a collection of 8-bit values
5469 represented as a little-endian array of 'unsigned char', selecting by BYTE,
5470 and then repacking them again for OUTERMODE. */
5472 static rtx
5473 simplify_immed_subreg (machine_mode outermode, rtx op,
5474 machine_mode innermode, unsigned int byte)
5476 enum {
5477 value_bit = 8,
5478 value_mask = (1 << value_bit) - 1
5480 unsigned char value[MAX_BITSIZE_MODE_ANY_MODE / value_bit];
5481 int value_start;
5482 int i;
5483 int elem;
5485 int num_elem;
5486 rtx * elems;
5487 int elem_bitsize;
5488 rtx result_s;
5489 rtvec result_v = NULL;
5490 enum mode_class outer_class;
5491 machine_mode outer_submode;
5492 int max_bitsize;
5494 /* Some ports misuse CCmode. */
5495 if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
5496 return op;
5498 /* We have no way to represent a complex constant at the rtl level. */
5499 if (COMPLEX_MODE_P (outermode))
5500 return NULL_RTX;
5502 /* We support any size mode. */
5503 max_bitsize = MAX (GET_MODE_BITSIZE (outermode),
5504 GET_MODE_BITSIZE (innermode));
5506 /* Unpack the value. */
5508 if (GET_CODE (op) == CONST_VECTOR)
5510 num_elem = CONST_VECTOR_NUNITS (op);
5511 elems = &CONST_VECTOR_ELT (op, 0);
5512 elem_bitsize = GET_MODE_UNIT_BITSIZE (innermode);
5514 else
5516 num_elem = 1;
5517 elems = &op;
5518 elem_bitsize = max_bitsize;
5520 /* If this asserts, it is too complicated; reducing value_bit may help. */
5521 gcc_assert (BITS_PER_UNIT % value_bit == 0);
5522 /* I don't know how to handle endianness of sub-units. */
5523 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
5525 for (elem = 0; elem < num_elem; elem++)
5527 unsigned char * vp;
5528 rtx el = elems[elem];
5530 /* Vectors are kept in target memory order. (This is probably
5531 a mistake.) */
5533 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5534 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5535 / BITS_PER_UNIT);
5536 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5537 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5538 unsigned bytele = (subword_byte % UNITS_PER_WORD
5539 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5540 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
5543 switch (GET_CODE (el))
5545 case CONST_INT:
5546 for (i = 0;
5547 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5548 i += value_bit)
5549 *vp++ = INTVAL (el) >> i;
5550 /* CONST_INTs are always logically sign-extended. */
5551 for (; i < elem_bitsize; i += value_bit)
5552 *vp++ = INTVAL (el) < 0 ? -1 : 0;
5553 break;
5555 case CONST_WIDE_INT:
5557 rtx_mode_t val = std::make_pair (el, innermode);
5558 unsigned char extend = wi::sign_mask (val);
5560 for (i = 0; i < elem_bitsize; i += value_bit)
5561 *vp++ = wi::extract_uhwi (val, i, value_bit);
5562 for (; i < elem_bitsize; i += value_bit)
5563 *vp++ = extend;
5565 break;
5567 case CONST_DOUBLE:
5568 if (TARGET_SUPPORTS_WIDE_INT == 0 && GET_MODE (el) == VOIDmode)
5570 unsigned char extend = 0;
5571 /* If this triggers, someone should have generated a
5572 CONST_INT instead. */
5573 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
5575 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5576 *vp++ = CONST_DOUBLE_LOW (el) >> i;
5577 while (i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize)
5579 *vp++
5580 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
5581 i += value_bit;
5584 if (CONST_DOUBLE_HIGH (el) >> (HOST_BITS_PER_WIDE_INT - 1))
5585 extend = -1;
5586 for (; i < elem_bitsize; i += value_bit)
5587 *vp++ = extend;
5589 else
5591 /* This is big enough for anything on the platform. */
5592 long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32];
5593 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
5595 gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
5596 gcc_assert (bitsize <= elem_bitsize);
5597 gcc_assert (bitsize % value_bit == 0);
5599 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
5600 GET_MODE (el));
5602 /* real_to_target produces its result in words affected by
5603 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5604 and use WORDS_BIG_ENDIAN instead; see the documentation
5605 of SUBREG in rtl.texi. */
5606 for (i = 0; i < bitsize; i += value_bit)
5608 int ibase;
5609 if (WORDS_BIG_ENDIAN)
5610 ibase = bitsize - 1 - i;
5611 else
5612 ibase = i;
5613 *vp++ = tmp[ibase / 32] >> i % 32;
5616 /* It shouldn't matter what's done here, so fill it with
5617 zero. */
5618 for (; i < elem_bitsize; i += value_bit)
5619 *vp++ = 0;
5621 break;
5623 case CONST_FIXED:
5624 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
5626 for (i = 0; i < elem_bitsize; i += value_bit)
5627 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5629 else
5631 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5632 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5633 for (; i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize;
5634 i += value_bit)
5635 *vp++ = CONST_FIXED_VALUE_HIGH (el)
5636 >> (i - HOST_BITS_PER_WIDE_INT);
5637 for (; i < elem_bitsize; i += value_bit)
5638 *vp++ = 0;
5640 break;
5642 default:
5643 gcc_unreachable ();
5647 /* Now, pick the right byte to start with. */
5648 /* Renumber BYTE so that the least-significant byte is byte 0. A special
5649 case is paradoxical SUBREGs, which shouldn't be adjusted since they
5650 will already have offset 0. */
5651 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
5653 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
5654 - byte);
5655 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5656 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5657 byte = (subword_byte % UNITS_PER_WORD
5658 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5661 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
5662 so if it's become negative it will instead be very large.) */
5663 gcc_assert (byte < GET_MODE_SIZE (innermode));
5665 /* Convert from bytes to chunks of size value_bit. */
5666 value_start = byte * (BITS_PER_UNIT / value_bit);
5668 /* Re-pack the value. */
5669 num_elem = GET_MODE_NUNITS (outermode);
5671 if (VECTOR_MODE_P (outermode))
5673 result_v = rtvec_alloc (num_elem);
5674 elems = &RTVEC_ELT (result_v, 0);
5676 else
5677 elems = &result_s;
5679 outer_submode = GET_MODE_INNER (outermode);
5680 outer_class = GET_MODE_CLASS (outer_submode);
5681 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
5683 gcc_assert (elem_bitsize % value_bit == 0);
5684 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
5686 for (elem = 0; elem < num_elem; elem++)
5688 unsigned char *vp;
5690 /* Vectors are stored in target memory order. (This is probably
5691 a mistake.) */
5693 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5694 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5695 / BITS_PER_UNIT);
5696 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5697 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5698 unsigned bytele = (subword_byte % UNITS_PER_WORD
5699 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5700 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
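/* Editor's illustrative note (not in the original source), assuming a
   V4SImode result (elem_bitsize == 32, BITS_PER_UNIT == 8) and elem == 1:
   byte == 4 and ibyte == 8, so with UNITS_PER_WORD == 4 the element is
   read from bytele == 4 on a little-endian target but from bytele == 8 on
   a big-endian one, matching target memory order.  */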
5703 switch (outer_class)
5705 case MODE_INT:
5706 case MODE_PARTIAL_INT:
5708 int u;
5709 int base = 0;
5710 int units
5711 = (GET_MODE_BITSIZE (outer_submode) + HOST_BITS_PER_WIDE_INT - 1)
5712 / HOST_BITS_PER_WIDE_INT;
5713 HOST_WIDE_INT tmp[MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_WIDE_INT];
5714 wide_int r;
5716 if (GET_MODE_PRECISION (outer_submode) > MAX_BITSIZE_MODE_ANY_INT)
5717 return NULL_RTX;
5718 for (u = 0; u < units; u++)
5720 unsigned HOST_WIDE_INT buf = 0;
5721 for (i = 0;
5722 i < HOST_BITS_PER_WIDE_INT && base + i < elem_bitsize;
5723 i += value_bit)
5724 buf |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5726 tmp[u] = buf;
5727 base += HOST_BITS_PER_WIDE_INT;
5729 r = wide_int::from_array (tmp, units,
5730 GET_MODE_PRECISION (outer_submode));
5731 #if TARGET_SUPPORTS_WIDE_INT == 0
5732 /* Make sure r will fit into CONST_INT or CONST_DOUBLE. */
5733 if (wi::min_precision (r, SIGNED) > HOST_BITS_PER_DOUBLE_INT)
5734 return NULL_RTX;
5735 #endif
5736 elems[elem] = immed_wide_int_const (r, outer_submode);
5738 break;
5740 case MODE_FLOAT:
5741 case MODE_DECIMAL_FLOAT:
5743 REAL_VALUE_TYPE r;
5744 long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32];
5746 /* real_from_target wants its input in words affected by
5747 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5748 and use WORDS_BIG_ENDIAN instead; see the documentation
5749 of SUBREG in rtl.texi. */
5750 for (i = 0; i < max_bitsize / 32; i++)
5751 tmp[i] = 0;
5752 for (i = 0; i < elem_bitsize; i += value_bit)
5754 int ibase;
5755 if (WORDS_BIG_ENDIAN)
5756 ibase = elem_bitsize - 1 - i;
5757 else
5758 ibase = i;
5759 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
5762 real_from_target (&r, tmp, outer_submode);
5763 elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
5765 break;
5767 case MODE_FRACT:
5768 case MODE_UFRACT:
5769 case MODE_ACCUM:
5770 case MODE_UACCUM:
5772 FIXED_VALUE_TYPE f;
5773 f.data.low = 0;
5774 f.data.high = 0;
5775 f.mode = outer_submode;
5777 for (i = 0;
5778 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5779 i += value_bit)
5780 f.data.low |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5781 for (; i < elem_bitsize; i += value_bit)
5782 f.data.high |= ((unsigned HOST_WIDE_INT)(*vp++ & value_mask)
5783 << (i - HOST_BITS_PER_WIDE_INT));
5785 elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
5787 break;
5789 default:
5790 gcc_unreachable ();
5793 if (VECTOR_MODE_P (outermode))
5794 return gen_rtx_CONST_VECTOR (outermode, result_v);
5795 else
5796 return result_s;
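/* Editor's illustrative sketch (not part of the original source): a
   standalone model of the decompose/re-pack scheme simplify_immed_subreg
   uses, with plain C types and byte-sized chunks on a little-endian
   layout; the helper name is hypothetical.  Guarded out of the build.  */
#if 0
static unsigned long long
repack_bytes (unsigned long long x, unsigned int byte, unsigned int nbytes)
{
  unsigned char buf[sizeof x];
  unsigned long long r = 0;
  unsigned int i;

  /* Decompose X into byte-sized chunks, least significant first,
     mirroring how the constant is split into VALUE_BIT-sized pieces.  */
  for (i = 0; i < sizeof buf; i++)
    buf[i] = (x >> (8 * i)) & 0xff;

  /* Re-pack NBYTES chunks starting at offset BYTE, mirroring the
     re-packing loop above.  */
  for (i = 0; i < nbytes && byte + i < sizeof buf; i++)
    r |= (unsigned long long) buf[byte + i] << (8 * i);
  return r;
}
/* e.g. repack_bytes (0x1122334455667788ULL, 2, 4) == 0x33445566.  */
#endif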
5799 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
5800 Return 0 if no simplifications are possible. */
5802 simplify_subreg (machine_mode outermode, rtx op,
5803 machine_mode innermode, unsigned int byte)
5805 /* A little bit of sanity checking. */
5806 gcc_assert (innermode != VOIDmode);
5807 gcc_assert (outermode != VOIDmode);
5808 gcc_assert (innermode != BLKmode);
5809 gcc_assert (outermode != BLKmode);
5811 gcc_assert (GET_MODE (op) == innermode
5812 || GET_MODE (op) == VOIDmode);
5814 if ((byte % GET_MODE_SIZE (outermode)) != 0)
5815 return NULL_RTX;
5817 if (byte >= GET_MODE_SIZE (innermode))
5818 return NULL_RTX;
5820 if (outermode == innermode && !byte)
5821 return op;
5823 if (CONST_SCALAR_INT_P (op)
5824 || CONST_DOUBLE_AS_FLOAT_P (op)
5825 || GET_CODE (op) == CONST_FIXED
5826 || GET_CODE (op) == CONST_VECTOR)
5827 return simplify_immed_subreg (outermode, op, innermode, byte);
5829 /* Changing mode twice with SUBREG => just change it once,
5830 or not at all if changing back to the starting mode of OP. */
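/* Editor's illustrative example (not in the original source; R is a
   hypothetical register): (subreg:QI (subreg:HI (reg:SI R) 0) 0) collapses
   to (subreg:QI (reg:SI R) 0) when that subreg is valid, and
   (subreg:SI (subreg:HI (reg:SI R) 0) 0) simply returns (reg:SI R).  */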
5831 if (GET_CODE (op) == SUBREG)
5833 machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
5834 int final_offset = byte + SUBREG_BYTE (op);
5835 rtx newx;
5837 if (outermode == innermostmode
5838 && byte == 0 && SUBREG_BYTE (op) == 0)
5839 return SUBREG_REG (op);
5841 /* The SUBREG_BYTE represents the offset as if the value were stored
5842 in memory. The irritating exception is a paradoxical subreg, where
5843 we define SUBREG_BYTE to be 0; on big-endian machines the value
5844 would otherwise be negative. For a moment, undo this exception. */
5845 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5847 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
5848 if (WORDS_BIG_ENDIAN)
5849 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5850 if (BYTES_BIG_ENDIAN)
5851 final_offset += difference % UNITS_PER_WORD;
5853 if (SUBREG_BYTE (op) == 0
5854 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
5856 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
5857 if (WORDS_BIG_ENDIAN)
5858 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5859 if (BYTES_BIG_ENDIAN)
5860 final_offset += difference % UNITS_PER_WORD;
5863 /* See whether the resulting subreg will be paradoxical. */
5864 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
5866 /* In nonparadoxical subregs we can't handle negative offsets. */
5867 if (final_offset < 0)
5868 return NULL_RTX;
5869 /* Bail out in case the resulting subreg would be incorrect. */
5870 if (final_offset % GET_MODE_SIZE (outermode)
5871 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
5872 return NULL_RTX;
5874 else
5876 int offset = 0;
5877 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
5879 /* In a paradoxical subreg, see whether we are still looking at the
5880 lower part. If so, our SUBREG_BYTE will be 0. */
5881 if (WORDS_BIG_ENDIAN)
5882 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5883 if (BYTES_BIG_ENDIAN)
5884 offset += difference % UNITS_PER_WORD;
5885 if (offset == final_offset)
5886 final_offset = 0;
5887 else
5888 return NULL_RTX;
5891 /* Recurse for further possible simplifications. */
5892 newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
5893 final_offset);
5894 if (newx)
5895 return newx;
5896 if (validate_subreg (outermode, innermostmode,
5897 SUBREG_REG (op), final_offset))
5899 newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
5900 if (SUBREG_PROMOTED_VAR_P (op)
5901 && SUBREG_PROMOTED_SIGN (op) >= 0
5902 && GET_MODE_CLASS (outermode) == MODE_INT
5903 && IN_RANGE (GET_MODE_SIZE (outermode),
5904 GET_MODE_SIZE (innermode),
5905 GET_MODE_SIZE (innermostmode))
5906 && subreg_lowpart_p (newx))
5908 SUBREG_PROMOTED_VAR_P (newx) = 1;
5909 SUBREG_PROMOTED_SET (newx, SUBREG_PROMOTED_GET (op));
5911 return newx;
5913 return NULL_RTX;
5916 /* SUBREG of a hard register => just change the register number
5917 and/or mode. If the hard register is not valid in that mode,
5918 suppress this simplification. If the hard register is the stack,
5919 frame, or argument pointer, leave this as a SUBREG. */
5921 if (REG_P (op) && HARD_REGISTER_P (op))
5923 unsigned int regno, final_regno;
5925 regno = REGNO (op);
5926 final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
5927 if (HARD_REGISTER_NUM_P (final_regno))
5929 rtx x;
5930 int final_offset = byte;
5932 /* Adjust offset for paradoxical subregs. */
5933 if (byte == 0
5934 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5936 int difference = (GET_MODE_SIZE (innermode)
5937 - GET_MODE_SIZE (outermode));
5938 if (WORDS_BIG_ENDIAN)
5939 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5940 if (BYTES_BIG_ENDIAN)
5941 final_offset += difference % UNITS_PER_WORD;
5944 x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);
5946 /* Propagate the original regno. We don't have any way to specify
5947 the offset inside the original regno, so do so only for the lowpart.
5948 The information is used only by alias analysis, which cannot
5949 grok partial registers anyway. */
5951 if (subreg_lowpart_offset (outermode, innermode) == byte)
5952 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
5953 return x;
5957 /* If we have a SUBREG of a register that we are replacing with a MEM,
5958 make a new MEM and try replacing the
5959 SUBREG with it. Don't do this if the MEM has a mode-dependent address
5960 or if we would be widening it. */
5962 if (MEM_P (op)
5963 && ! mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op))
5964 /* Allow splitting of volatile memory references in case we don't
5965 have an instruction to move the whole thing. */
5966 && (! MEM_VOLATILE_P (op)
5967 || ! have_insn_for (SET, innermode))
5968 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
5969 return adjust_address_nv (op, outermode, byte);
5971 /* Handle complex values represented as CONCAT
5972 of real and imaginary part. */
5973 if (GET_CODE (op) == CONCAT)
5975 unsigned int part_size, final_offset;
5976 rtx part, res;
5978 part_size = GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)));
5979 if (byte < part_size)
5981 part = XEXP (op, 0);
5982 final_offset = byte;
5984 else
5986 part = XEXP (op, 1);
5987 final_offset = byte - part_size;
5990 if (final_offset + GET_MODE_SIZE (outermode) > part_size)
5991 return NULL_RTX;
5993 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
5994 if (res)
5995 return res;
5996 if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
5997 return gen_rtx_SUBREG (outermode, part, final_offset);
5998 return NULL_RTX;
6001 /* A SUBREG resulting from a zero extension may fold to zero if
6002 it extracts higher bits than the ZERO_EXTEND's source provides. */
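/* Editor's illustrative example (not in the original source), assuming a
   little-endian target: (subreg:SI (zero_extend:DI (reg:HI R)) 4) selects
   bits 32..63 of a value whose nonzero bits all lie below bit 16, so it
   folds to (const_int 0).  */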
6003 if (GET_CODE (op) == ZERO_EXTEND && SCALAR_INT_MODE_P (innermode))
6005 unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
6006 if (bitpos >= GET_MODE_PRECISION (GET_MODE (XEXP (op, 0))))
6007 return CONST0_RTX (outermode);
6010 if (SCALAR_INT_MODE_P (outermode)
6011 && SCALAR_INT_MODE_P (innermode)
6012 && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode)
6013 && byte == subreg_lowpart_offset (outermode, innermode))
6015 rtx tem = simplify_truncation (outermode, op, innermode);
6016 if (tem)
6017 return tem;
6020 return NULL_RTX;
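/* Editor's illustrative sketch (not part of the original source): how a
   caller might fold a constant SUBREG through simplify_subreg; the results
   shown assume a little-endian target.  Guarded out of the build.  */
#if 0
static void
example_fold_constant_subreg (void)
{
  /* (subreg:HI (const_int 0x12345678) 0) -> (const_int 0x5678).  */
  rtx lo = simplify_subreg (HImode, GEN_INT (0x12345678), SImode, 0);
  /* (subreg:HI (const_int 0x12345678) 2) -> (const_int 0x1234).  */
  rtx hi = simplify_subreg (HImode, GEN_INT (0x12345678), SImode, 2);
  gcc_assert (lo != NULL_RTX && hi != NULL_RTX);
}
#endif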
6023 /* Make a SUBREG operation or equivalent if it folds. */
6026 simplify_gen_subreg (machine_mode outermode, rtx op,
6027 machine_mode innermode, unsigned int byte)
6029 rtx newx;
6031 newx = simplify_subreg (outermode, op, innermode, byte);
6032 if (newx)
6033 return newx;
6035 if (GET_CODE (op) == SUBREG
6036 || GET_CODE (op) == CONCAT
6037 || GET_MODE (op) == VOIDmode)
6038 return NULL_RTX;
6040 if (validate_subreg (outermode, innermode, op, byte))
6041 return gen_rtx_SUBREG (outermode, op, byte);
6043 return NULL_RTX;
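/* Editor's note (not in the original source): unlike simplify_subreg,
   this entry point falls back to generating a raw SUBREG when no folding
   applies; e.g. with R a hypothetical pseudo,
   simplify_gen_subreg (HImode, gen_rtx_REG (SImode, R), SImode, 0)
   returns (subreg:HI (reg:SI R) 0) if validate_subreg accepts it.  */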
6046 /* Generate a SUBREG to get the least significant part of EXPR (in mode
6047 INNER_MODE) as a value of mode OUTER_MODE. */
6050 lowpart_subreg (machine_mode outer_mode, rtx expr,
6051 machine_mode inner_mode)
6053 return simplify_gen_subreg (outer_mode, expr, inner_mode,
6054 subreg_lowpart_offset (outer_mode, inner_mode));
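/* Editor's illustrative example (not in the original source):
   lowpart_subreg (QImode, X, SImode) asks for the least significant byte
   of X, which lives at byte offset 0 on a little-endian target and at
   byte offset 3 on a big-endian one; subreg_lowpart_offset supplies that
   offset.  */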
6057 /* Simplify X, an rtx expression.
6059 Return the simplified expression or NULL if no simplifications
6060 were possible.
6062 This is the preferred entry point into the simplification routines;
6063 however, we still allow passes to call the more specific routines.
6065 Right now GCC has three (yes, three) major bodies of RTL simplification
6066 code that need to be unified.
6068 1. fold_rtx in cse.c. This code uses various CSE specific
6069 information to aid in RTL simplification.
6071 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
6072 it uses combine specific information to aid in RTL
6073 simplification.
6075 3. The routines in this file.
6078 Long term we want to only have one body of simplification code; to
6079 get to that state I recommend the following steps:
6081 1. Pore over fold_rtx & simplify_rtx and move any simplifications
6082 which do not depend on pass-specific state into these routines.
6084 2. As code is moved by #1, change fold_rtx & simplify_rtx to
6085 use this routine whenever possible.
6087 3. Allow for pass dependent state to be provided to these
6088 routines and add simplifications based on the pass dependent
6089 state. Remove code from cse.c & combine.c that becomes
6090 redundant/dead.
6092 It will take time, but ultimately the compiler will be easier to
6093 maintain and improve. It's totally silly that when we add a
6094 simplification it needs to be added to 4 places (3 for RTL
6095 simplification and 1 for tree simplification). */
6098 simplify_rtx (const_rtx x)
6100 const enum rtx_code code = GET_CODE (x);
6101 const machine_mode mode = GET_MODE (x);
6103 switch (GET_RTX_CLASS (code))
6105 case RTX_UNARY:
6106 return simplify_unary_operation (code, mode,
6107 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
6108 case RTX_COMM_ARITH:
6109 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
6110 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
6112 /* Fall through.  */
6114 case RTX_BIN_ARITH:
6115 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
6117 case RTX_TERNARY:
6118 case RTX_BITFIELD_OPS:
6119 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
6120 XEXP (x, 0), XEXP (x, 1),
6121 XEXP (x, 2));
6123 case RTX_COMPARE:
6124 case RTX_COMM_COMPARE:
6125 return simplify_relational_operation (code, mode,
6126 ((GET_MODE (XEXP (x, 0))
6127 != VOIDmode)
6128 ? GET_MODE (XEXP (x, 0))
6129 : GET_MODE (XEXP (x, 1))),
6130 XEXP (x, 0),
6131 XEXP (x, 1));
6133 case RTX_EXTRA:
6134 if (code == SUBREG)
6135 return simplify_subreg (mode, SUBREG_REG (x),
6136 GET_MODE (SUBREG_REG (x)),
6137 SUBREG_BYTE (x));
6138 break;
6140 case RTX_OBJ:
6141 if (code == LO_SUM)
6143 /* Convert (lo_sum (high FOO) FOO) to FOO. */
6144 if (GET_CODE (XEXP (x, 0)) == HIGH
6145 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
6146 return XEXP (x, 1);
6148 break;
6150 default:
6151 break;
6153 return NULL;
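/* Editor's illustrative sketch (not part of the original source): folding
   a constant expression through the generic entry point.  Guarded out of
   the build.  */
#if 0
static rtx
example_fold_plus (void)
{
  /* (plus:SI (const_int 1) (const_int 2)) simplifies to (const_int 3).  */
  rtx sum = gen_rtx_PLUS (SImode, const1_rtx, GEN_INT (2));
  rtx folded = simplify_rtx (sum);
  return folded ? folded : sum;
}
#endif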