1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987-2017 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "backend.h"
25 #include "target.h"
26 #include "rtl.h"
27 #include "tree.h"
28 #include "predict.h"
29 #include "memmodel.h"
30 #include "optabs.h"
31 #include "emit-rtl.h"
32 #include "recog.h"
33 #include "diagnostic-core.h"
34 #include "varasm.h"
35 #include "flags.h"
37 /* Simplification and canonicalization of RTL. */
39 /* Much code operates on (low, high) pairs; the low value is an
40 unsigned wide int, the high value a signed wide int. We
41 occasionally need to sign extend from low to high as if low were a
42 signed wide int. */
43 #define HWI_SIGN_EXTEND(low) \
44 ((((HOST_WIDE_INT) low) < 0) ? HOST_WIDE_INT_M1 : HOST_WIDE_INT_0)
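/* Illustrative example, assuming a 64-bit HOST_WIDE_INT:
   HWI_SIGN_EXTEND (HOST_WIDE_INT_1U << 63) is HOST_WIDE_INT_M1 and
   HWI_SIGN_EXTEND (1) is zero, i.e. the high word is filled with
   copies of the sign bit of the low word.  */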
46 static rtx neg_const_int (machine_mode, const_rtx);
47 static bool plus_minus_operand_p (const_rtx);
48 static rtx simplify_plus_minus (enum rtx_code, machine_mode, rtx, rtx);
49 static rtx simplify_immed_subreg (machine_mode, rtx, machine_mode,
50 unsigned int);
51 static rtx simplify_associative_operation (enum rtx_code, machine_mode,
52 rtx, rtx);
53 static rtx simplify_relational_operation_1 (enum rtx_code, machine_mode,
54 machine_mode, rtx, rtx);
55 static rtx simplify_unary_operation_1 (enum rtx_code, machine_mode, rtx);
56 static rtx simplify_binary_operation_1 (enum rtx_code, machine_mode,
57 rtx, rtx, rtx, rtx);
59 /* Negate a CONST_INT rtx. */
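/* For example, assuming a 64-bit HOST_WIDE_INT, negating a CONST_INT
   whose value is HOST_WIDE_INT_MIN in a mode wider than 64 bits cannot
   be represented by another CONST_INT, so that case is handed off to
   simplify_const_unary_operation below.  */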
60 static rtx
61 neg_const_int (machine_mode mode, const_rtx i)
63 unsigned HOST_WIDE_INT val = -UINTVAL (i);
65 if (!HWI_COMPUTABLE_MODE_P (mode)
66 && val == UINTVAL (i))
67 return simplify_const_unary_operation (NEG, mode, CONST_CAST_RTX (i),
68 mode);
69 return gen_int_mode (val, mode);
72 /* Test whether expression, X, is an immediate constant that represents
73 the most significant bit of machine mode MODE. */
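/* E.g. in SImode the only such value is 0x80000000, and in DImode it
   is HOST_WIDE_INT_1U << 63.  */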
75 bool
76 mode_signbit_p (machine_mode mode, const_rtx x)
78 unsigned HOST_WIDE_INT val;
79 unsigned int width;
80 scalar_int_mode int_mode;
82 if (!is_int_mode (mode, &int_mode))
83 return false;
85 width = GET_MODE_PRECISION (int_mode);
86 if (width == 0)
87 return false;
89 if (width <= HOST_BITS_PER_WIDE_INT
90 && CONST_INT_P (x))
91 val = INTVAL (x);
92 #if TARGET_SUPPORTS_WIDE_INT
93 else if (CONST_WIDE_INT_P (x))
95 unsigned int i;
96 unsigned int elts = CONST_WIDE_INT_NUNITS (x);
97 if (elts != (width + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT)
98 return false;
99 for (i = 0; i < elts - 1; i++)
100 if (CONST_WIDE_INT_ELT (x, i) != 0)
101 return false;
102 val = CONST_WIDE_INT_ELT (x, elts - 1);
103 width %= HOST_BITS_PER_WIDE_INT;
104 if (width == 0)
105 width = HOST_BITS_PER_WIDE_INT;
107 #else
108 else if (width <= HOST_BITS_PER_DOUBLE_INT
109 && CONST_DOUBLE_AS_INT_P (x)
110 && CONST_DOUBLE_LOW (x) == 0)
112 val = CONST_DOUBLE_HIGH (x);
113 width -= HOST_BITS_PER_WIDE_INT;
115 #endif
116 else
117 /* X is not an integer constant. */
118 return false;
120 if (width < HOST_BITS_PER_WIDE_INT)
121 val &= (HOST_WIDE_INT_1U << width) - 1;
122 return val == (HOST_WIDE_INT_1U << (width - 1));
125 /* Test whether VAL is equal to the most significant bit of mode MODE
126 (after masking with the mode mask of MODE). Returns false if the
127 precision of MODE is too large to handle. */
129 bool
130 val_signbit_p (machine_mode mode, unsigned HOST_WIDE_INT val)
132 unsigned int width;
133 scalar_int_mode int_mode;
135 if (!is_int_mode (mode, &int_mode))
136 return false;
138 width = GET_MODE_PRECISION (int_mode);
139 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
140 return false;
142 val &= GET_MODE_MASK (int_mode);
143 return val == (HOST_WIDE_INT_1U << (width - 1));
146 /* Test whether the most significant bit of mode MODE is set in VAL.
147 Returns false if the precision of MODE is too large to handle. */
148 bool
149 val_signbit_known_set_p (machine_mode mode, unsigned HOST_WIDE_INT val)
151 unsigned int width;
153 scalar_int_mode int_mode;
154 if (!is_int_mode (mode, &int_mode))
155 return false;
157 width = GET_MODE_PRECISION (int_mode);
158 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
159 return false;
161 val &= HOST_WIDE_INT_1U << (width - 1);
162 return val != 0;
165 /* Test whether the most significant bit of mode MODE is clear in VAL.
166 Returns false if the precision of MODE is too large to handle. */
167 bool
168 val_signbit_known_clear_p (machine_mode mode, unsigned HOST_WIDE_INT val)
170 unsigned int width;
172 scalar_int_mode int_mode;
173 if (!is_int_mode (mode, &int_mode))
174 return false;
176 width = GET_MODE_PRECISION (int_mode);
177 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
178 return false;
180 val &= HOST_WIDE_INT_1U << (width - 1);
181 return val == 0;
184 /* Make a binary operation by properly ordering the operands and
185 seeing if the expression folds. */
187 rtx
188 simplify_gen_binary (enum rtx_code code, machine_mode mode, rtx op0,
189 rtx op1)
191 rtx tem;
193 /* If this simplifies, do it. */
194 tem = simplify_binary_operation (code, mode, op0, op1);
195 if (tem)
196 return tem;
198 /* Put complex operands first and constants second if commutative. */
199 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
200 && swap_commutative_operands_p (op0, op1))
201 std::swap (op0, op1);
203 return gen_rtx_fmt_ee (code, mode, op0, op1);
206 /* If X is a MEM referencing the constant pool, return the real value.
207 Otherwise return X. */
208 rtx
209 avoid_constant_pool_reference (rtx x)
211 rtx c, tmp, addr;
212 machine_mode cmode;
213 HOST_WIDE_INT offset = 0;
215 switch (GET_CODE (x))
217 case MEM:
218 break;
220 case FLOAT_EXTEND:
221 /* Handle float extensions of constant pool references. */
222 tmp = XEXP (x, 0);
223 c = avoid_constant_pool_reference (tmp);
224 if (c != tmp && CONST_DOUBLE_AS_FLOAT_P (c))
225 return const_double_from_real_value (*CONST_DOUBLE_REAL_VALUE (c),
226 GET_MODE (x));
227 return x;
229 default:
230 return x;
233 if (GET_MODE (x) == BLKmode)
234 return x;
236 addr = XEXP (x, 0);
238 /* Call target hook to avoid the effects of -fpic etc.... */
239 addr = targetm.delegitimize_address (addr);
241 /* Split the address into a base and integer offset. */
242 if (GET_CODE (addr) == CONST
243 && GET_CODE (XEXP (addr, 0)) == PLUS
244 && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
246 offset = INTVAL (XEXP (XEXP (addr, 0), 1));
247 addr = XEXP (XEXP (addr, 0), 0);
250 if (GET_CODE (addr) == LO_SUM)
251 addr = XEXP (addr, 1);
253 /* If this is a constant pool reference, we can turn it into its
254 constant and hope that simplifications happen. */
255 if (GET_CODE (addr) == SYMBOL_REF
256 && CONSTANT_POOL_ADDRESS_P (addr))
258 c = get_pool_constant (addr);
259 cmode = get_pool_mode (addr);
261 /* If we're accessing the constant in a different mode than it was
262 originally stored, attempt to fix that up via subreg simplifications.
263 If that fails we have no choice but to return the original memory. */
264 if (offset == 0 && cmode == GET_MODE (x))
265 return c;
266 else if (offset >= 0 && offset < GET_MODE_SIZE (cmode))
268 rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
269 if (tem && CONSTANT_P (tem))
270 return tem;
274 return x;
277 /* Simplify a MEM based on its attributes. This is the default
278 delegitimize_address target hook, and it's recommended that every
279 overrider call it. */
281 rtx
282 delegitimize_mem_from_attrs (rtx x)
284 /* MEMs without MEM_OFFSETs may have been offset, so we can't just
285 use their base addresses as equivalent. */
286 if (MEM_P (x)
287 && MEM_EXPR (x)
288 && MEM_OFFSET_KNOWN_P (x))
290 tree decl = MEM_EXPR (x);
291 machine_mode mode = GET_MODE (x);
292 HOST_WIDE_INT offset = 0;
294 switch (TREE_CODE (decl))
296 default:
297 decl = NULL;
298 break;
300 case VAR_DECL:
301 break;
303 case ARRAY_REF:
304 case ARRAY_RANGE_REF:
305 case COMPONENT_REF:
306 case BIT_FIELD_REF:
307 case REALPART_EXPR:
308 case IMAGPART_EXPR:
309 case VIEW_CONVERT_EXPR:
311 HOST_WIDE_INT bitsize, bitpos;
312 tree toffset;
313 int unsignedp, reversep, volatilep = 0;
315 decl
316 = get_inner_reference (decl, &bitsize, &bitpos, &toffset, &mode,
317 &unsignedp, &reversep, &volatilep);
318 if (bitsize != GET_MODE_BITSIZE (mode)
319 || (bitpos % BITS_PER_UNIT)
320 || (toffset && !tree_fits_shwi_p (toffset)))
321 decl = NULL;
322 else
324 offset += bitpos / BITS_PER_UNIT;
325 if (toffset)
326 offset += tree_to_shwi (toffset);
328 break;
332 if (decl
333 && mode == GET_MODE (x)
334 && VAR_P (decl)
335 && (TREE_STATIC (decl)
336 || DECL_THREAD_LOCAL_P (decl))
337 && DECL_RTL_SET_P (decl)
338 && MEM_P (DECL_RTL (decl)))
340 rtx newx;
342 offset += MEM_OFFSET (x);
344 newx = DECL_RTL (decl);
346 if (MEM_P (newx))
348 rtx n = XEXP (newx, 0), o = XEXP (x, 0);
350 /* Avoid creating a new MEM needlessly if we already had
351 the same address. We do if there's no OFFSET and the
352 old address X is identical to NEWX, or if X is of the
353 form (plus NEWX OFFSET), or the NEWX is of the form
354 (plus Y (const_int Z)) and X is that with the offset
355 added: (plus Y (const_int Z+OFFSET)). */
356 if (!((offset == 0
357 || (GET_CODE (o) == PLUS
358 && GET_CODE (XEXP (o, 1)) == CONST_INT
359 && (offset == INTVAL (XEXP (o, 1))
360 || (GET_CODE (n) == PLUS
361 && GET_CODE (XEXP (n, 1)) == CONST_INT
362 && (INTVAL (XEXP (n, 1)) + offset
363 == INTVAL (XEXP (o, 1)))
364 && (n = XEXP (n, 0))))
365 && (o = XEXP (o, 0))))
366 && rtx_equal_p (o, n)))
367 x = adjust_address_nv (newx, mode, offset);
369 else if (GET_MODE (x) == GET_MODE (newx)
370 && offset == 0)
371 x = newx;
375 return x;
378 /* Make a unary operation by first seeing if it folds and otherwise making
379 the specified operation. */
381 rtx
382 simplify_gen_unary (enum rtx_code code, machine_mode mode, rtx op,
383 machine_mode op_mode)
385 rtx tem;
387 /* If this simplifies, use it. */
388 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
389 return tem;
391 return gen_rtx_fmt_e (code, mode, op);
394 /* Likewise for ternary operations. */
396 rtx
397 simplify_gen_ternary (enum rtx_code code, machine_mode mode,
398 machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
400 rtx tem;
402 /* If this simplifies, use it. */
403 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
404 op0, op1, op2)))
405 return tem;
407 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
410 /* Likewise, for relational operations.
411 CMP_MODE specifies mode comparison is done in. */
413 rtx
414 simplify_gen_relational (enum rtx_code code, machine_mode mode,
415 machine_mode cmp_mode, rtx op0, rtx op1)
417 rtx tem;
419 if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
420 op0, op1)))
421 return tem;
423 return gen_rtx_fmt_ee (code, mode, op0, op1);
426 /* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
427 and simplify the result. If FN is non-NULL, call this callback on each
428 X, if it returns non-NULL, replace X with its return value and simplify the
429 result. */
431 rtx
432 simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
433 rtx (*fn) (rtx, const_rtx, void *), void *data)
435 enum rtx_code code = GET_CODE (x);
436 machine_mode mode = GET_MODE (x);
437 machine_mode op_mode;
438 const char *fmt;
439 rtx op0, op1, op2, newx, op;
440 rtvec vec, newvec;
441 int i, j;
443 if (__builtin_expect (fn != NULL, 0))
445 newx = fn (x, old_rtx, data);
446 if (newx)
447 return newx;
449 else if (rtx_equal_p (x, old_rtx))
450 return copy_rtx ((rtx) data);
452 switch (GET_RTX_CLASS (code))
454 case RTX_UNARY:
455 op0 = XEXP (x, 0);
456 op_mode = GET_MODE (op0);
457 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
458 if (op0 == XEXP (x, 0))
459 return x;
460 return simplify_gen_unary (code, mode, op0, op_mode);
462 case RTX_BIN_ARITH:
463 case RTX_COMM_ARITH:
464 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
465 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
466 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
467 return x;
468 return simplify_gen_binary (code, mode, op0, op1);
470 case RTX_COMPARE:
471 case RTX_COMM_COMPARE:
472 op0 = XEXP (x, 0);
473 op1 = XEXP (x, 1);
474 op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
475 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
476 op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
477 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
478 return x;
479 return simplify_gen_relational (code, mode, op_mode, op0, op1);
481 case RTX_TERNARY:
482 case RTX_BITFIELD_OPS:
483 op0 = XEXP (x, 0);
484 op_mode = GET_MODE (op0);
485 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
486 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
487 op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
488 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
489 return x;
490 if (op_mode == VOIDmode)
491 op_mode = GET_MODE (op0);
492 return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
494 case RTX_EXTRA:
495 if (code == SUBREG)
497 op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
498 if (op0 == SUBREG_REG (x))
499 return x;
500 op0 = simplify_gen_subreg (GET_MODE (x), op0,
501 GET_MODE (SUBREG_REG (x)),
502 SUBREG_BYTE (x));
503 return op0 ? op0 : x;
505 break;
507 case RTX_OBJ:
508 if (code == MEM)
510 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
511 if (op0 == XEXP (x, 0))
512 return x;
513 return replace_equiv_address_nv (x, op0);
515 else if (code == LO_SUM)
517 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
518 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
520 /* (lo_sum (high x) y) -> y where x and y have the same base. */
521 if (GET_CODE (op0) == HIGH)
523 rtx base0, base1, offset0, offset1;
524 split_const (XEXP (op0, 0), &base0, &offset0);
525 split_const (op1, &base1, &offset1);
526 if (rtx_equal_p (base0, base1))
527 return op1;
530 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
531 return x;
532 return gen_rtx_LO_SUM (mode, op0, op1);
534 break;
536 default:
537 break;
540 newx = x;
541 fmt = GET_RTX_FORMAT (code);
542 for (i = 0; fmt[i]; i++)
543 switch (fmt[i])
545 case 'E':
546 vec = XVEC (x, i);
547 newvec = XVEC (newx, i);
548 for (j = 0; j < GET_NUM_ELEM (vec); j++)
550 op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
551 old_rtx, fn, data);
552 if (op != RTVEC_ELT (vec, j))
554 if (newvec == vec)
556 newvec = shallow_copy_rtvec (vec);
557 if (x == newx)
558 newx = shallow_copy_rtx (x);
559 XVEC (newx, i) = newvec;
561 RTVEC_ELT (newvec, j) = op;
564 break;
566 case 'e':
567 if (XEXP (x, i))
569 op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
570 if (op != XEXP (x, i))
572 if (x == newx)
573 newx = shallow_copy_rtx (x);
574 XEXP (newx, i) = op;
577 break;
579 return newx;
582 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
583 resulting RTX. Return a new RTX which is as simplified as possible. */
585 rtx
586 simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
588 return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
591 /* Try to simplify a MODE truncation of OP, which has OP_MODE.
592 Only handle cases where the truncated value is inherently an rvalue.
594 RTL provides two ways of truncating a value:
596 1. a lowpart subreg. This form is only a truncation when both
597 the outer and inner modes (here MODE and OP_MODE respectively)
598 are scalar integers, and only then when the subreg is used as
599 an rvalue.
601 It is only valid to form such truncating subregs if the
602 truncation requires no action by the target. The onus for
603 proving this is on the creator of the subreg -- e.g. the
604 caller to simplify_subreg or simplify_gen_subreg -- and typically
605 involves either TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode.
607 2. a TRUNCATE. This form handles both scalar and compound integers.
609 The first form is preferred where valid. However, the TRUNCATE
610 handling in simplify_unary_operation turns the second form into the
611 first form when TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode allow,
612 so it is generally safe to form rvalue truncations using:
614 simplify_gen_unary (TRUNCATE, ...)
616 and leave simplify_unary_operation to work out which representation
617 should be used.
619 Because of the proof requirements on (1), simplify_truncation must
620 also use simplify_gen_unary (TRUNCATE, ...) to truncate parts of OP,
621 regardless of whether the outer truncation came from a SUBREG or a
622 TRUNCATE. For example, if the caller has proven that an SImode
623 truncation of:
625 (and:DI X Y)
627 is a no-op and can be represented as a subreg, it does not follow
628 that SImode truncations of X and Y are also no-ops. On a target
629 like 64-bit MIPS that requires SImode values to be stored in
630 sign-extended form, an SImode truncation of:
632 (and:DI (reg:DI X) (const_int 63))
634 is trivially a no-op because only the lower 6 bits can be set.
635 However, X is still an arbitrary 64-bit number and so we cannot
636 assume that truncating it too is a no-op. */
638 static rtx
639 simplify_truncation (machine_mode mode, rtx op,
640 machine_mode op_mode)
642 unsigned int precision = GET_MODE_UNIT_PRECISION (mode);
643 unsigned int op_precision = GET_MODE_UNIT_PRECISION (op_mode);
644 scalar_int_mode int_mode, int_op_mode, subreg_mode;
646 gcc_assert (precision <= op_precision);
648 /* Optimize truncations of zero and sign extended values. */
649 if (GET_CODE (op) == ZERO_EXTEND
650 || GET_CODE (op) == SIGN_EXTEND)
652 /* There are three possibilities. If MODE is the same as the
653 origmode, we can omit both the extension and the subreg.
654 If MODE is not larger than the origmode, we can apply the
655 truncation without the extension. Finally, if the outermode
656 is larger than the origmode, we can just extend to the appropriate
657 mode. */
658 machine_mode origmode = GET_MODE (XEXP (op, 0));
659 if (mode == origmode)
660 return XEXP (op, 0);
661 else if (precision <= GET_MODE_UNIT_PRECISION (origmode))
662 return simplify_gen_unary (TRUNCATE, mode,
663 XEXP (op, 0), origmode);
664 else
665 return simplify_gen_unary (GET_CODE (op), mode,
666 XEXP (op, 0), origmode);
669 /* If the machine can perform operations in the truncated mode, distribute
670 the truncation, i.e. simplify (truncate:QI (op:SI (x:SI) (y:SI))) into
671 (op:QI (truncate:QI (x:SI)) (truncate:QI (y:SI))). */
672 if (1
673 && (!WORD_REGISTER_OPERATIONS || precision >= BITS_PER_WORD)
674 && (GET_CODE (op) == PLUS
675 || GET_CODE (op) == MINUS
676 || GET_CODE (op) == MULT))
678 rtx op0 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0), op_mode);
679 if (op0)
681 rtx op1 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 1), op_mode);
682 if (op1)
683 return simplify_gen_binary (GET_CODE (op), mode, op0, op1);
687 /* Simplify (truncate:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C)) into
688 (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
689 the outer subreg is effectively a truncation to the original mode. */
690 if ((GET_CODE (op) == LSHIFTRT
691 || GET_CODE (op) == ASHIFTRT)
692 /* Ensure that OP_MODE is at least twice as wide as MODE
693 to avoid the possibility that an outer LSHIFTRT shifts by more
694 than the sign extension's sign_bit_copies and introduces zeros
695 into the high bits of the result. */
696 && 2 * precision <= op_precision
697 && CONST_INT_P (XEXP (op, 1))
698 && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
699 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
700 && UINTVAL (XEXP (op, 1)) < precision)
701 return simplify_gen_binary (ASHIFTRT, mode,
702 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
704 /* Likewise (truncate:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C)) into
705 (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
706 the outer subreg is effectively a truncation to the original mode. */
707 if ((GET_CODE (op) == LSHIFTRT
708 || GET_CODE (op) == ASHIFTRT)
709 && CONST_INT_P (XEXP (op, 1))
710 && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
711 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
712 && UINTVAL (XEXP (op, 1)) < precision)
713 return simplify_gen_binary (LSHIFTRT, mode,
714 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
716 /* Likewise (truncate:QI (ashift:SI (zero_extend:SI (x:QI)) C)) into
717 (ashift:QI (x:QI) C), where C is a suitable small constant and
718 the outer subreg is effectively a truncation to the original mode. */
719 if (GET_CODE (op) == ASHIFT
720 && CONST_INT_P (XEXP (op, 1))
721 && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
722 || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
723 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
724 && UINTVAL (XEXP (op, 1)) < precision)
725 return simplify_gen_binary (ASHIFT, mode,
726 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
728 /* Likewise (truncate:QI (and:SI (lshiftrt:SI (x:SI) C) C2)) into
729 (and:QI (lshiftrt:QI (truncate:QI (x:SI)) C) C2) for suitable C
730 and C2. */
731 if (GET_CODE (op) == AND
732 && (GET_CODE (XEXP (op, 0)) == LSHIFTRT
733 || GET_CODE (XEXP (op, 0)) == ASHIFTRT)
734 && CONST_INT_P (XEXP (XEXP (op, 0), 1))
735 && CONST_INT_P (XEXP (op, 1)))
737 rtx op0 = (XEXP (XEXP (op, 0), 0));
738 rtx shift_op = XEXP (XEXP (op, 0), 1);
739 rtx mask_op = XEXP (op, 1);
740 unsigned HOST_WIDE_INT shift = UINTVAL (shift_op);
741 unsigned HOST_WIDE_INT mask = UINTVAL (mask_op);
743 if (shift < precision
744 /* If doing this transform works for an X with all bits set,
745 it works for any X. */
746 && ((GET_MODE_MASK (mode) >> shift) & mask)
747 == ((GET_MODE_MASK (op_mode) >> shift) & mask)
748 && (op0 = simplify_gen_unary (TRUNCATE, mode, op0, op_mode))
749 && (op0 = simplify_gen_binary (LSHIFTRT, mode, op0, shift_op)))
751 mask_op = GEN_INT (trunc_int_for_mode (mask, mode));
752 return simplify_gen_binary (AND, mode, op0, mask_op);
756 /* Turn (truncate:M1 (*_extract:M2 (reg:M2) (len) (pos))) into
757 (*_extract:M1 (truncate:M1 (reg:M2)) (len) (pos')) if possible without
758 changing len. */
759 if ((GET_CODE (op) == ZERO_EXTRACT || GET_CODE (op) == SIGN_EXTRACT)
760 && REG_P (XEXP (op, 0))
761 && GET_MODE (XEXP (op, 0)) == GET_MODE (op)
762 && CONST_INT_P (XEXP (op, 1))
763 && CONST_INT_P (XEXP (op, 2)))
765 rtx op0 = XEXP (op, 0);
766 unsigned HOST_WIDE_INT len = UINTVAL (XEXP (op, 1));
767 unsigned HOST_WIDE_INT pos = UINTVAL (XEXP (op, 2));
768 if (BITS_BIG_ENDIAN && pos >= op_precision - precision)
770 op0 = simplify_gen_unary (TRUNCATE, mode, op0, GET_MODE (op0));
771 if (op0)
773 pos -= op_precision - precision;
774 return simplify_gen_ternary (GET_CODE (op), mode, mode, op0,
775 XEXP (op, 1), GEN_INT (pos));
778 else if (!BITS_BIG_ENDIAN && precision >= len + pos)
780 op0 = simplify_gen_unary (TRUNCATE, mode, op0, GET_MODE (op0));
781 if (op0)
782 return simplify_gen_ternary (GET_CODE (op), mode, mode, op0,
783 XEXP (op, 1), XEXP (op, 2));
787 /* Recognize a word extraction from a multi-word subreg. */
788 if ((GET_CODE (op) == LSHIFTRT
789 || GET_CODE (op) == ASHIFTRT)
790 && SCALAR_INT_MODE_P (mode)
791 && SCALAR_INT_MODE_P (op_mode)
792 && precision >= BITS_PER_WORD
793 && 2 * precision <= op_precision
794 && CONST_INT_P (XEXP (op, 1))
795 && (INTVAL (XEXP (op, 1)) & (precision - 1)) == 0
796 && UINTVAL (XEXP (op, 1)) < op_precision)
798 int byte = subreg_lowpart_offset (mode, op_mode);
799 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
800 return simplify_gen_subreg (mode, XEXP (op, 0), op_mode,
801 (WORDS_BIG_ENDIAN
802 ? byte - shifted_bytes
803 : byte + shifted_bytes));
806 /* If we have a TRUNCATE of a right shift of MEM, make a new MEM
807 and try replacing the TRUNCATE and shift with it. Don't do this
808 if the MEM has a mode-dependent address. */
809 if ((GET_CODE (op) == LSHIFTRT
810 || GET_CODE (op) == ASHIFTRT)
811 && is_a <scalar_int_mode> (mode, &int_mode)
812 && is_a <scalar_int_mode> (op_mode, &int_op_mode)
813 && MEM_P (XEXP (op, 0))
814 && CONST_INT_P (XEXP (op, 1))
815 && INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (int_mode) == 0
816 && INTVAL (XEXP (op, 1)) > 0
817 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (int_op_mode)
818 && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0),
819 MEM_ADDR_SPACE (XEXP (op, 0)))
820 && ! MEM_VOLATILE_P (XEXP (op, 0))
821 && (GET_MODE_SIZE (int_mode) >= UNITS_PER_WORD
822 || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
824 int byte = subreg_lowpart_offset (int_mode, int_op_mode);
825 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
826 return adjust_address_nv (XEXP (op, 0), int_mode,
827 (WORDS_BIG_ENDIAN
828 ? byte - shifted_bytes
829 : byte + shifted_bytes));
832 /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
833 (OP:SI foo:SI) if OP is NEG or ABS. */
834 if ((GET_CODE (op) == ABS
835 || GET_CODE (op) == NEG)
836 && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
837 || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
838 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
839 return simplify_gen_unary (GET_CODE (op), mode,
840 XEXP (XEXP (op, 0), 0), mode);
842 /* (truncate:A (subreg:B (truncate:C X) 0)) is
843 (truncate:A X). */
844 if (GET_CODE (op) == SUBREG
845 && is_a <scalar_int_mode> (mode, &int_mode)
846 && SCALAR_INT_MODE_P (op_mode)
847 && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (op)), &subreg_mode)
848 && GET_CODE (SUBREG_REG (op)) == TRUNCATE
849 && subreg_lowpart_p (op))
851 rtx inner = XEXP (SUBREG_REG (op), 0);
852 if (GET_MODE_PRECISION (int_mode) <= GET_MODE_PRECISION (subreg_mode))
853 return simplify_gen_unary (TRUNCATE, int_mode, inner,
854 GET_MODE (inner));
855 else
856 /* If subreg above is paradoxical and C is narrower
857 than A, return (subreg:A (truncate:C X) 0). */
858 return simplify_gen_subreg (int_mode, SUBREG_REG (op), subreg_mode, 0);
861 /* (truncate:A (truncate:B X)) is (truncate:A X). */
862 if (GET_CODE (op) == TRUNCATE)
863 return simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0),
864 GET_MODE (XEXP (op, 0)));
866 /* (truncate:A (ior X C)) is (const_int -1) if C is equal to that already,
867 in mode A. */
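/* E.g. (truncate:QI (ior:SI (reg:SI X) (const_int 255))) is
   (const_int -1), since 255 truncated to QImode is already all ones.  */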
868 if (GET_CODE (op) == IOR
869 && SCALAR_INT_MODE_P (mode)
870 && SCALAR_INT_MODE_P (op_mode)
871 && CONST_INT_P (XEXP (op, 1))
872 && trunc_int_for_mode (INTVAL (XEXP (op, 1)), mode) == -1)
873 return constm1_rtx;
875 return NULL_RTX;
878 /* Try to simplify a unary operation CODE whose output mode is to be
879 MODE with input operand OP whose mode was originally OP_MODE.
880 Return zero if no simplification can be made. */
881 rtx
882 simplify_unary_operation (enum rtx_code code, machine_mode mode,
883 rtx op, machine_mode op_mode)
885 rtx trueop, tem;
887 trueop = avoid_constant_pool_reference (op);
889 tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
890 if (tem)
891 return tem;
893 return simplify_unary_operation_1 (code, mode, op);
896 /* Return true if FLOAT or UNSIGNED_FLOAT operation OP is known
897 to be exact. */
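/* For instance, assuming IEEE single and double formats (24- and 53-bit
   significands), converting a full 32-bit SImode value to DFmode is
   always exact, while converting it to SFmode is exact only if, after
   discounting known sign-bit copies and trailing zero bits, at most
   24 significant bits remain.  */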
899 static bool
900 exact_int_to_float_conversion_p (const_rtx op)
902 int out_bits = significand_size (GET_MODE_INNER (GET_MODE (op)));
903 machine_mode op0_mode = GET_MODE (XEXP (op, 0));
904 /* Constants shouldn't reach here. */
905 gcc_assert (op0_mode != VOIDmode);
906 int in_prec = GET_MODE_UNIT_PRECISION (op0_mode);
907 int in_bits = in_prec;
908 if (HWI_COMPUTABLE_MODE_P (op0_mode))
910 unsigned HOST_WIDE_INT nonzero = nonzero_bits (XEXP (op, 0), op0_mode);
911 if (GET_CODE (op) == FLOAT)
912 in_bits -= num_sign_bit_copies (XEXP (op, 0), op0_mode);
913 else if (GET_CODE (op) == UNSIGNED_FLOAT)
914 in_bits = wi::min_precision (wi::uhwi (nonzero, in_prec), UNSIGNED);
915 else
916 gcc_unreachable ();
917 in_bits -= wi::ctz (wi::uhwi (nonzero, in_prec));
919 return in_bits <= out_bits;
922 /* Perform some simplifications we can do even if the operands
923 aren't constant. */
924 static rtx
925 simplify_unary_operation_1 (enum rtx_code code, machine_mode mode, rtx op)
927 enum rtx_code reversed;
928 rtx temp;
929 scalar_int_mode inner, int_mode, op_mode, op0_mode;
931 switch (code)
933 case NOT:
934 /* (not (not X)) == X. */
935 if (GET_CODE (op) == NOT)
936 return XEXP (op, 0);
938 /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
939 comparison is all ones. */
940 if (COMPARISON_P (op)
941 && (mode == BImode || STORE_FLAG_VALUE == -1)
942 && ((reversed = reversed_comparison_code (op, NULL)) != UNKNOWN))
943 return simplify_gen_relational (reversed, mode, VOIDmode,
944 XEXP (op, 0), XEXP (op, 1));
946 /* (not (plus X -1)) can become (neg X). */
947 if (GET_CODE (op) == PLUS
948 && XEXP (op, 1) == constm1_rtx)
949 return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
951 /* Similarly, (not (neg X)) is (plus X -1). Only do this for
952 modes that have CONSTM1_RTX, i.e. MODE_INT, MODE_PARTIAL_INT
953 and MODE_VECTOR_INT. */
954 if (GET_CODE (op) == NEG && CONSTM1_RTX (mode))
955 return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
956 CONSTM1_RTX (mode));
958 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
959 if (GET_CODE (op) == XOR
960 && CONST_INT_P (XEXP (op, 1))
961 && (temp = simplify_unary_operation (NOT, mode,
962 XEXP (op, 1), mode)) != 0)
963 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
965 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
966 if (GET_CODE (op) == PLUS
967 && CONST_INT_P (XEXP (op, 1))
968 && mode_signbit_p (mode, XEXP (op, 1))
969 && (temp = simplify_unary_operation (NOT, mode,
970 XEXP (op, 1), mode)) != 0)
971 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
974 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
975 operands other than 1, but that is not valid. We could do a
976 similar simplification for (not (lshiftrt C X)) where C is
977 just the sign bit, but this doesn't seem common enough to
978 bother with. */
979 if (GET_CODE (op) == ASHIFT
980 && XEXP (op, 0) == const1_rtx)
982 temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
983 return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
986 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
987 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
988 so we can perform the above simplification. */
989 if (STORE_FLAG_VALUE == -1
990 && is_a <scalar_int_mode> (mode, &int_mode)
991 && GET_CODE (op) == ASHIFTRT
992 && CONST_INT_P (XEXP (op, 1))
993 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (int_mode) - 1)
994 return simplify_gen_relational (GE, int_mode, VOIDmode,
995 XEXP (op, 0), const0_rtx);
998 if (partial_subreg_p (op)
999 && subreg_lowpart_p (op)
1000 && GET_CODE (SUBREG_REG (op)) == ASHIFT
1001 && XEXP (SUBREG_REG (op), 0) == const1_rtx)
1003 machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
1004 rtx x;
1006 x = gen_rtx_ROTATE (inner_mode,
1007 simplify_gen_unary (NOT, inner_mode, const1_rtx,
1008 inner_mode),
1009 XEXP (SUBREG_REG (op), 1));
1010 temp = rtl_hooks.gen_lowpart_no_emit (mode, x);
1011 if (temp)
1012 return temp;
1015 /* Apply De Morgan's laws to reduce number of patterns for machines
1016 with negating logical insns (and-not, nand, etc.). If result has
1017 only one NOT, put it first, since that is how the patterns are
1018 coded. */
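/* E.g. (not (and X Y)) becomes (ior (not X) (not Y)) and
   (not (ior X Y)) becomes (and (not X) (not Y)); when only one
   operand remains negated after simplification, it is placed first.  */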
1019 if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
1021 rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
1022 machine_mode op_mode;
1024 op_mode = GET_MODE (in1);
1025 in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);
1027 op_mode = GET_MODE (in2);
1028 if (op_mode == VOIDmode)
1029 op_mode = mode;
1030 in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);
1032 if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
1033 std::swap (in1, in2);
1035 return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
1036 mode, in1, in2);
1039 /* (not (bswap x)) -> (bswap (not x)). */
1040 if (GET_CODE (op) == BSWAP)
1042 rtx x = simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
1043 return simplify_gen_unary (BSWAP, mode, x, mode);
1045 break;
1047 case NEG:
1048 /* (neg (neg X)) == X. */
1049 if (GET_CODE (op) == NEG)
1050 return XEXP (op, 0);
1052 /* (neg (x ? (neg y) : y)) == !x ? (neg y) : y.
1053 If comparison is not reversible use
1054 x ? y : (neg y). */
1055 if (GET_CODE (op) == IF_THEN_ELSE)
1057 rtx cond = XEXP (op, 0);
1058 rtx true_rtx = XEXP (op, 1);
1059 rtx false_rtx = XEXP (op, 2);
1061 if ((GET_CODE (true_rtx) == NEG
1062 && rtx_equal_p (XEXP (true_rtx, 0), false_rtx))
1063 || (GET_CODE (false_rtx) == NEG
1064 && rtx_equal_p (XEXP (false_rtx, 0), true_rtx)))
1066 if (reversed_comparison_code (cond, NULL) != UNKNOWN)
1067 temp = reversed_comparison (cond, mode);
1068 else
1070 temp = cond;
1071 std::swap (true_rtx, false_rtx);
1073 return simplify_gen_ternary (IF_THEN_ELSE, mode,
1074 mode, temp, true_rtx, false_rtx);
1078 /* (neg (plus X 1)) can become (not X). */
1079 if (GET_CODE (op) == PLUS
1080 && XEXP (op, 1) == const1_rtx)
1081 return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
1083 /* Similarly, (neg (not X)) is (plus X 1). */
1084 if (GET_CODE (op) == NOT)
1085 return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
1086 CONST1_RTX (mode));
1088 /* (neg (minus X Y)) can become (minus Y X). This transformation
1089 isn't safe for modes with signed zeros, since if X and Y are
1090 both +0, (minus Y X) is the same as (minus X Y). If the
1091 rounding mode is towards +infinity (or -infinity) then the two
1092 expressions will be rounded differently. */
1093 if (GET_CODE (op) == MINUS
1094 && !HONOR_SIGNED_ZEROS (mode)
1095 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1096 return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
1098 if (GET_CODE (op) == PLUS
1099 && !HONOR_SIGNED_ZEROS (mode)
1100 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1102 /* (neg (plus A C)) is simplified to (minus -C A). */
1103 if (CONST_SCALAR_INT_P (XEXP (op, 1))
1104 || CONST_DOUBLE_AS_FLOAT_P (XEXP (op, 1)))
1106 temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
1107 if (temp)
1108 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
1111 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
1112 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
1113 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
1116 /* (neg (mult A B)) becomes (mult A (neg B)).
1117 This works even for floating-point values. */
1118 if (GET_CODE (op) == MULT
1119 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1121 temp = simplify_gen_unary (NEG, mode, XEXP (op, 1), mode);
1122 return simplify_gen_binary (MULT, mode, XEXP (op, 0), temp);
1125 /* NEG commutes with ASHIFT since it is multiplication. Only do
1126 this if we can then eliminate the NEG (e.g., if the operand
1127 is a constant). */
1128 if (GET_CODE (op) == ASHIFT)
1130 temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
1131 if (temp)
1132 return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
1135 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
1136 C is equal to the width of MODE minus 1. */
1137 if (GET_CODE (op) == ASHIFTRT
1138 && CONST_INT_P (XEXP (op, 1))
1139 && INTVAL (XEXP (op, 1)) == GET_MODE_UNIT_PRECISION (mode) - 1)
1140 return simplify_gen_binary (LSHIFTRT, mode,
1141 XEXP (op, 0), XEXP (op, 1));
1143 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
1144 C is equal to the width of MODE minus 1. */
1145 if (GET_CODE (op) == LSHIFTRT
1146 && CONST_INT_P (XEXP (op, 1))
1147 && INTVAL (XEXP (op, 1)) == GET_MODE_UNIT_PRECISION (mode) - 1)
1148 return simplify_gen_binary (ASHIFTRT, mode,
1149 XEXP (op, 0), XEXP (op, 1));
1151 /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
1152 if (GET_CODE (op) == XOR
1153 && XEXP (op, 1) == const1_rtx
1154 && nonzero_bits (XEXP (op, 0), mode) == 1)
1155 return plus_constant (mode, XEXP (op, 0), -1);
1157 /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1. */
1158 /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1. */
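/* Reasoning sketch: when STORE_FLAG_VALUE is 1, (lt X 0) is 1 exactly
   when the sign bit of X is set, so its negation is -1 for negative X
   and 0 otherwise, which is exactly what (ashiftrt X (width - 1))
   computes.  When STORE_FLAG_VALUE is -1 the comparison is already
   -1 or 0, and its negation 1 or 0 matches (lshiftrt X (width - 1)).  */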
1159 if (GET_CODE (op) == LT
1160 && XEXP (op, 1) == const0_rtx
1161 && is_a <scalar_int_mode> (GET_MODE (XEXP (op, 0)), &inner))
1163 int_mode = as_a <scalar_int_mode> (mode);
1164 int isize = GET_MODE_PRECISION (inner);
1165 if (STORE_FLAG_VALUE == 1)
1167 temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
1168 GEN_INT (isize - 1));
1169 if (int_mode == inner)
1170 return temp;
1171 if (GET_MODE_PRECISION (int_mode) > isize)
1172 return simplify_gen_unary (SIGN_EXTEND, int_mode, temp, inner);
1173 return simplify_gen_unary (TRUNCATE, int_mode, temp, inner);
1175 else if (STORE_FLAG_VALUE == -1)
1177 temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
1178 GEN_INT (isize - 1));
1179 if (int_mode == inner)
1180 return temp;
1181 if (GET_MODE_PRECISION (int_mode) > isize)
1182 return simplify_gen_unary (ZERO_EXTEND, int_mode, temp, inner);
1183 return simplify_gen_unary (TRUNCATE, int_mode, temp, inner);
1186 break;
1188 case TRUNCATE:
1189 /* Don't optimize (lshiftrt (mult ...)) as it would interfere
1190 with the umulXi3_highpart patterns. */
1191 if (GET_CODE (op) == LSHIFTRT
1192 && GET_CODE (XEXP (op, 0)) == MULT)
1193 break;
1195 if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
1197 if (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op)))
1199 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1200 if (temp)
1201 return temp;
1203 /* We can't handle truncation to a partial integer mode here
1204 because we don't know the real bitsize of the partial
1205 integer mode. */
1206 break;
1209 if (GET_MODE (op) != VOIDmode)
1211 temp = simplify_truncation (mode, op, GET_MODE (op));
1212 if (temp)
1213 return temp;
1216 /* If we know that the value is already truncated, we can
1217 replace the TRUNCATE with a SUBREG. */
1218 if (GET_MODE_NUNITS (mode) == 1
1219 && (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op))
1220 || truncated_to_mode (mode, op)))
1222 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1223 if (temp)
1224 return temp;
1227 /* A truncate of a comparison can be replaced with a subreg if
1228 STORE_FLAG_VALUE permits. This is like the previous test,
1229 but it works even if the comparison is done in a mode larger
1230 than HOST_BITS_PER_WIDE_INT. */
1231 if (HWI_COMPUTABLE_MODE_P (mode)
1232 && COMPARISON_P (op)
1233 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
1235 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1236 if (temp)
1237 return temp;
1240 /* A truncate of a memory is just loading the low part of the memory
1241 if we are not changing the meaning of the address. */
1242 if (GET_CODE (op) == MEM
1243 && !VECTOR_MODE_P (mode)
1244 && !MEM_VOLATILE_P (op)
1245 && !mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op)))
1247 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1248 if (temp)
1249 return temp;
1252 break;
1254 case FLOAT_TRUNCATE:
1255 if (DECIMAL_FLOAT_MODE_P (mode))
1256 break;
1258 /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */
1259 if (GET_CODE (op) == FLOAT_EXTEND
1260 && GET_MODE (XEXP (op, 0)) == mode)
1261 return XEXP (op, 0);
1263 /* (float_truncate:SF (float_truncate:DF foo:XF))
1264 = (float_truncate:SF foo:XF).
1265 This may eliminate double rounding, so it is unsafe.
1267 (float_truncate:SF (float_extend:XF foo:DF))
1268 = (float_truncate:SF foo:DF).
1270 (float_truncate:DF (float_extend:XF foo:SF))
1271 = (float_extend:DF foo:SF). */
1272 if ((GET_CODE (op) == FLOAT_TRUNCATE
1273 && flag_unsafe_math_optimizations)
1274 || GET_CODE (op) == FLOAT_EXTEND)
1275 return simplify_gen_unary (GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)))
1276 > GET_MODE_UNIT_SIZE (mode)
1277 ? FLOAT_TRUNCATE : FLOAT_EXTEND,
1278 mode,
1279 XEXP (op, 0), mode);
1281 /* (float_truncate (float x)) is (float x) */
1282 if ((GET_CODE (op) == FLOAT || GET_CODE (op) == UNSIGNED_FLOAT)
1283 && (flag_unsafe_math_optimizations
1284 || exact_int_to_float_conversion_p (op)))
1285 return simplify_gen_unary (GET_CODE (op), mode,
1286 XEXP (op, 0),
1287 GET_MODE (XEXP (op, 0)));
1289 /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
1290 (OP:SF foo:SF) if OP is NEG or ABS. */
1291 if ((GET_CODE (op) == ABS
1292 || GET_CODE (op) == NEG)
1293 && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
1294 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
1295 return simplify_gen_unary (GET_CODE (op), mode,
1296 XEXP (XEXP (op, 0), 0), mode);
1298 /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
1299 is (float_truncate:SF x). */
1300 if (GET_CODE (op) == SUBREG
1301 && subreg_lowpart_p (op)
1302 && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
1303 return SUBREG_REG (op);
1304 break;
1306 case FLOAT_EXTEND:
1307 if (DECIMAL_FLOAT_MODE_P (mode))
1308 break;
1310 /* (float_extend (float_extend x)) is (float_extend x)
1312 (float_extend (float x)) is (float x) assuming that double
1313 rounding can't happen. */
1315 if (GET_CODE (op) == FLOAT_EXTEND
1316 || ((GET_CODE (op) == FLOAT || GET_CODE (op) == UNSIGNED_FLOAT)
1317 && exact_int_to_float_conversion_p (op)))
1318 return simplify_gen_unary (GET_CODE (op), mode,
1319 XEXP (op, 0),
1320 GET_MODE (XEXP (op, 0)));
1322 break;
1324 case ABS:
1325 /* (abs (neg <foo>)) -> (abs <foo>) */
1326 if (GET_CODE (op) == NEG)
1327 return simplify_gen_unary (ABS, mode, XEXP (op, 0),
1328 GET_MODE (XEXP (op, 0)));
1330 /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
1331 do nothing. */
1332 if (GET_MODE (op) == VOIDmode)
1333 break;
1335 /* If operand is something known to be positive, ignore the ABS. */
1336 if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
1337 || val_signbit_known_clear_p (GET_MODE (op),
1338 nonzero_bits (op, GET_MODE (op))))
1339 return op;
1341 /* If operand is known to be only -1 or 0, convert ABS to NEG. */
1342 if (is_a <scalar_int_mode> (mode, &int_mode)
1343 && (num_sign_bit_copies (op, int_mode)
1344 == GET_MODE_PRECISION (int_mode)))
1345 return gen_rtx_NEG (int_mode, op);
1347 break;
1349 case FFS:
1350 /* (ffs (*_extend <X>)) = (ffs <X>) */
1351 if (GET_CODE (op) == SIGN_EXTEND
1352 || GET_CODE (op) == ZERO_EXTEND)
1353 return simplify_gen_unary (FFS, mode, XEXP (op, 0),
1354 GET_MODE (XEXP (op, 0)));
1355 break;
1357 case POPCOUNT:
1358 switch (GET_CODE (op))
1360 case BSWAP:
1361 case ZERO_EXTEND:
1362 /* (popcount (zero_extend <X>)) = (popcount <X>) */
1363 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1364 GET_MODE (XEXP (op, 0)));
1366 case ROTATE:
1367 case ROTATERT:
1368 /* Rotations don't affect popcount. */
1369 if (!side_effects_p (XEXP (op, 1)))
1370 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1371 GET_MODE (XEXP (op, 0)));
1372 break;
1374 default:
1375 break;
1377 break;
1379 case PARITY:
1380 switch (GET_CODE (op))
1382 case NOT:
1383 case BSWAP:
1384 case ZERO_EXTEND:
1385 case SIGN_EXTEND:
1386 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1387 GET_MODE (XEXP (op, 0)));
1389 case ROTATE:
1390 case ROTATERT:
1391 /* Rotations don't affect parity. */
1392 if (!side_effects_p (XEXP (op, 1)))
1393 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1394 GET_MODE (XEXP (op, 0)));
1395 break;
1397 default:
1398 break;
1400 break;
1402 case BSWAP:
1403 /* (bswap (bswap x)) -> x. */
1404 if (GET_CODE (op) == BSWAP)
1405 return XEXP (op, 0);
1406 break;
1408 case FLOAT:
1409 /* (float (sign_extend <X>)) = (float <X>). */
1410 if (GET_CODE (op) == SIGN_EXTEND)
1411 return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
1412 GET_MODE (XEXP (op, 0)));
1413 break;
1415 case SIGN_EXTEND:
1416 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
1417 becomes just the MINUS if its mode is MODE. This allows
1418 folding switch statements on machines using casesi (such as
1419 the VAX). */
1420 if (GET_CODE (op) == TRUNCATE
1421 && GET_MODE (XEXP (op, 0)) == mode
1422 && GET_CODE (XEXP (op, 0)) == MINUS
1423 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
1424 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
1425 return XEXP (op, 0);
1427 /* Extending a widening multiplication should be canonicalized to
1428 a wider widening multiplication. */
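/* E.g. (sign_extend:DI (mult:SI (sign_extend:SI (reg:HI a))
   (sign_extend:SI (reg:HI b)))) becomes
   (mult:DI (sign_extend:DI (reg:HI a)) (sign_extend:DI (reg:HI b))),
   which is safe because the signed 16 x 16 bit product always fits
   in 32 bits and so cannot overflow SImode.  */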
1429 if (GET_CODE (op) == MULT)
1431 rtx lhs = XEXP (op, 0);
1432 rtx rhs = XEXP (op, 1);
1433 enum rtx_code lcode = GET_CODE (lhs);
1434 enum rtx_code rcode = GET_CODE (rhs);
1436 /* Widening multiplies usually extend both operands, but sometimes
1437 they use a shift to extract a portion of a register. */
1438 if ((lcode == SIGN_EXTEND
1439 || (lcode == ASHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1440 && (rcode == SIGN_EXTEND
1441 || (rcode == ASHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1443 machine_mode lmode = GET_MODE (lhs);
1444 machine_mode rmode = GET_MODE (rhs);
1445 int bits;
1447 if (lcode == ASHIFTRT)
1448 /* Number of bits not shifted off the end. */
1449 bits = (GET_MODE_UNIT_PRECISION (lmode)
1450 - INTVAL (XEXP (lhs, 1)));
1451 else /* lcode == SIGN_EXTEND */
1452 /* Size of inner mode. */
1453 bits = GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (lhs, 0)));
1455 if (rcode == ASHIFTRT)
1456 bits += (GET_MODE_UNIT_PRECISION (rmode)
1457 - INTVAL (XEXP (rhs, 1)));
1458 else /* rcode == SIGN_EXTEND */
1459 bits += GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (rhs, 0)));
1461 /* We can only widen multiplies if the result is mathematically
1462 equivalent. I.e. if overflow was impossible. */
1463 if (bits <= GET_MODE_UNIT_PRECISION (GET_MODE (op)))
1464 return simplify_gen_binary
1465 (MULT, mode,
1466 simplify_gen_unary (SIGN_EXTEND, mode, lhs, lmode),
1467 simplify_gen_unary (SIGN_EXTEND, mode, rhs, rmode));
1471 /* Check for a sign extension of a subreg of a promoted
1472 variable, where the promotion is sign-extended, and the
1473 target mode is the same as the variable's promotion. */
1474 if (GET_CODE (op) == SUBREG
1475 && SUBREG_PROMOTED_VAR_P (op)
1476 && SUBREG_PROMOTED_SIGNED_P (op)
1477 && !paradoxical_subreg_p (mode, GET_MODE (SUBREG_REG (op))))
1479 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1480 if (temp)
1481 return temp;
1484 /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
1485 (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1486 if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
1488 gcc_assert (GET_MODE_UNIT_PRECISION (mode)
1489 > GET_MODE_UNIT_PRECISION (GET_MODE (op)));
1490 return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
1491 GET_MODE (XEXP (op, 0)));
1494 /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
1495 is (sign_extend:M (subreg:O <X>)) if there is mode with
1496 GET_MODE_BITSIZE (N) - I bits.
1497 (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1498 is similarly (zero_extend:M (subreg:O <X>)). */
1499 if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT)
1500 && GET_CODE (XEXP (op, 0)) == ASHIFT
1501 && is_a <scalar_int_mode> (mode, &int_mode)
1502 && CONST_INT_P (XEXP (op, 1))
1503 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1504 && (op_mode = as_a <scalar_int_mode> (GET_MODE (op)),
1505 GET_MODE_BITSIZE (op_mode) > INTVAL (XEXP (op, 1))))
1507 scalar_int_mode tmode;
1508 gcc_assert (GET_MODE_BITSIZE (int_mode)
1509 > GET_MODE_BITSIZE (op_mode));
1510 if (int_mode_for_size (GET_MODE_BITSIZE (op_mode)
1511 - INTVAL (XEXP (op, 1)), 1).exists (&tmode))
1513 rtx inner =
1514 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1515 if (inner)
1516 return simplify_gen_unary (GET_CODE (op) == ASHIFTRT
1517 ? SIGN_EXTEND : ZERO_EXTEND,
1518 int_mode, inner, tmode);
1522 /* (sign_extend:M (lshiftrt:N <X> (const_int I))) is better as
1523 (zero_extend:M (lshiftrt:N <X> (const_int I))) if I is not 0. */
1524 if (GET_CODE (op) == LSHIFTRT
1525 && CONST_INT_P (XEXP (op, 1))
1526 && XEXP (op, 1) != const0_rtx)
1527 return simplify_gen_unary (ZERO_EXTEND, mode, op, GET_MODE (op));
1529 #if defined(POINTERS_EXTEND_UNSIGNED)
1530 /* As we do not know which address space the pointer is referring to,
1531 we can do this only if the target does not support different pointer
1532 or address modes depending on the address space. */
1533 if (target_default_pointer_address_modes_p ()
1534 && ! POINTERS_EXTEND_UNSIGNED
1535 && mode == Pmode && GET_MODE (op) == ptr_mode
1536 && (CONSTANT_P (op)
1537 || (GET_CODE (op) == SUBREG
1538 && REG_P (SUBREG_REG (op))
1539 && REG_POINTER (SUBREG_REG (op))
1540 && GET_MODE (SUBREG_REG (op)) == Pmode))
1541 && !targetm.have_ptr_extend ())
1543 temp
1544 = convert_memory_address_addr_space_1 (Pmode, op,
1545 ADDR_SPACE_GENERIC, false,
1546 true);
1547 if (temp)
1548 return temp;
1550 #endif
1551 break;
1553 case ZERO_EXTEND:
1554 /* Check for a zero extension of a subreg of a promoted
1555 variable, where the promotion is zero-extended, and the
1556 target mode is the same as the variable's promotion. */
1557 if (GET_CODE (op) == SUBREG
1558 && SUBREG_PROMOTED_VAR_P (op)
1559 && SUBREG_PROMOTED_UNSIGNED_P (op)
1560 && !paradoxical_subreg_p (mode, GET_MODE (SUBREG_REG (op))))
1562 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1563 if (temp)
1564 return temp;
1567 /* Extending a widening multiplication should be canonicalized to
1568 a wider widening multiplication. */
1569 if (GET_CODE (op) == MULT)
1571 rtx lhs = XEXP (op, 0);
1572 rtx rhs = XEXP (op, 1);
1573 enum rtx_code lcode = GET_CODE (lhs);
1574 enum rtx_code rcode = GET_CODE (rhs);
1576 /* Widening multiplies usually extend both operands, but sometimes
1577 they use a shift to extract a portion of a register. */
1578 if ((lcode == ZERO_EXTEND
1579 || (lcode == LSHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1580 && (rcode == ZERO_EXTEND
1581 || (rcode == LSHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1583 machine_mode lmode = GET_MODE (lhs);
1584 machine_mode rmode = GET_MODE (rhs);
1585 int bits;
1587 if (lcode == LSHIFTRT)
1588 /* Number of bits not shifted off the end. */
1589 bits = (GET_MODE_UNIT_PRECISION (lmode)
1590 - INTVAL (XEXP (lhs, 1)));
1591 else /* lcode == ZERO_EXTEND */
1592 /* Size of inner mode. */
1593 bits = GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (lhs, 0)));
1595 if (rcode == LSHIFTRT)
1596 bits += (GET_MODE_UNIT_PRECISION (rmode)
1597 - INTVAL (XEXP (rhs, 1)));
1598 else /* rcode == ZERO_EXTEND */
1599 bits += GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (rhs, 0)));
1601 /* We can only widen multiplies if the result is mathematically
1602 equivalent. I.e. if overflow was impossible. */
1603 if (bits <= GET_MODE_UNIT_PRECISION (GET_MODE (op)))
1604 return simplify_gen_binary
1605 (MULT, mode,
1606 simplify_gen_unary (ZERO_EXTEND, mode, lhs, lmode),
1607 simplify_gen_unary (ZERO_EXTEND, mode, rhs, rmode));
1611 /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1612 if (GET_CODE (op) == ZERO_EXTEND)
1613 return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0),
1614 GET_MODE (XEXP (op, 0)));
1616 /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1617 is (zero_extend:M (subreg:O <X>)) if there is mode with
1618 GET_MODE_PRECISION (N) - I bits. */
1619 if (GET_CODE (op) == LSHIFTRT
1620 && GET_CODE (XEXP (op, 0)) == ASHIFT
1621 && is_a <scalar_int_mode> (mode, &int_mode)
1622 && CONST_INT_P (XEXP (op, 1))
1623 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1624 && (op_mode = as_a <scalar_int_mode> (GET_MODE (op)),
1625 GET_MODE_PRECISION (op_mode) > INTVAL (XEXP (op, 1))))
1627 scalar_int_mode tmode;
1628 if (int_mode_for_size (GET_MODE_PRECISION (op_mode)
1629 - INTVAL (XEXP (op, 1)), 1).exists (&tmode))
1631 rtx inner =
1632 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1633 if (inner)
1634 return simplify_gen_unary (ZERO_EXTEND, int_mode,
1635 inner, tmode);
1639 /* (zero_extend:M (subreg:N <X:O>)) is <X:O> (for M == O) or
1640 (zero_extend:M <X:O>), if X doesn't have any non-zero bits outside
1641 of mode N. E.g.
1642 (zero_extend:SI (subreg:QI (and:SI (reg:SI) (const_int 63)) 0)) is
1643 (and:SI (reg:SI) (const_int 63)). */
1644 if (partial_subreg_p (op)
1645 && is_a <scalar_int_mode> (mode, &int_mode)
1646 && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (op)), &op0_mode)
1647 && GET_MODE_PRECISION (op0_mode) <= HOST_BITS_PER_WIDE_INT
1648 && GET_MODE_PRECISION (int_mode) >= GET_MODE_PRECISION (op0_mode)
1649 && subreg_lowpart_p (op)
1650 && (nonzero_bits (SUBREG_REG (op), op0_mode)
1651 & ~GET_MODE_MASK (GET_MODE (op))) == 0)
1653 if (GET_MODE_PRECISION (int_mode) == GET_MODE_PRECISION (op0_mode))
1654 return SUBREG_REG (op);
1655 return simplify_gen_unary (ZERO_EXTEND, int_mode, SUBREG_REG (op),
1656 op0_mode);
1659 #if defined(POINTERS_EXTEND_UNSIGNED)
1660 /* As we do not know which address space the pointer is referring to,
1661 we can do this only if the target does not support different pointer
1662 or address modes depending on the address space. */
1663 if (target_default_pointer_address_modes_p ()
1664 && POINTERS_EXTEND_UNSIGNED > 0
1665 && mode == Pmode && GET_MODE (op) == ptr_mode
1666 && (CONSTANT_P (op)
1667 || (GET_CODE (op) == SUBREG
1668 && REG_P (SUBREG_REG (op))
1669 && REG_POINTER (SUBREG_REG (op))
1670 && GET_MODE (SUBREG_REG (op)) == Pmode))
1671 && !targetm.have_ptr_extend ())
1673 temp
1674 = convert_memory_address_addr_space_1 (Pmode, op,
1675 ADDR_SPACE_GENERIC, false,
1676 true);
1677 if (temp)
1678 return temp;
1680 #endif
1681 break;
1683 default:
1684 break;
1687 return 0;
1690 /* Try to compute the value of a unary operation CODE whose output mode is to
1691 be MODE with input operand OP whose mode was originally OP_MODE.
1692 Return zero if the value cannot be computed. */
1693 rtx
1694 simplify_const_unary_operation (enum rtx_code code, machine_mode mode,
1695 rtx op, machine_mode op_mode)
1697 scalar_int_mode result_mode;
1699 if (code == VEC_DUPLICATE)
1701 gcc_assert (VECTOR_MODE_P (mode));
1702 if (GET_MODE (op) != VOIDmode)
1704 if (!VECTOR_MODE_P (GET_MODE (op)))
1705 gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
1706 else
1707 gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
1708 (GET_MODE (op)));
1710 if (CONST_SCALAR_INT_P (op) || CONST_DOUBLE_AS_FLOAT_P (op)
1711 || GET_CODE (op) == CONST_VECTOR)
1713 int elt_size = GET_MODE_UNIT_SIZE (mode);
1714 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1715 rtvec v = rtvec_alloc (n_elts);
1716 unsigned int i;
1718 if (GET_CODE (op) != CONST_VECTOR)
1719 for (i = 0; i < n_elts; i++)
1720 RTVEC_ELT (v, i) = op;
1721 else
1723 machine_mode inmode = GET_MODE (op);
1724 int in_elt_size = GET_MODE_UNIT_SIZE (inmode);
1725 unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
1727 gcc_assert (in_n_elts < n_elts);
1728 gcc_assert ((n_elts % in_n_elts) == 0);
1729 for (i = 0; i < n_elts; i++)
1730 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
1732 return gen_rtx_CONST_VECTOR (mode, v);
1736 if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
1738 int elt_size = GET_MODE_UNIT_SIZE (mode);
1739 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1740 machine_mode opmode = GET_MODE (op);
1741 int op_elt_size = GET_MODE_UNIT_SIZE (opmode);
1742 unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
1743 rtvec v = rtvec_alloc (n_elts);
1744 unsigned int i;
1746 gcc_assert (op_n_elts == n_elts);
1747 for (i = 0; i < n_elts; i++)
1749 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
1750 CONST_VECTOR_ELT (op, i),
1751 GET_MODE_INNER (opmode));
1752 if (!x)
1753 return 0;
1754 RTVEC_ELT (v, i) = x;
1756 return gen_rtx_CONST_VECTOR (mode, v);
1759 /* The order of these tests is critical so that, for example, we don't
1760 check the wrong mode (input vs. output) for a conversion operation,
1761 such as FIX. At some point, this should be simplified. */
1763 if (code == FLOAT && CONST_SCALAR_INT_P (op))
1765 REAL_VALUE_TYPE d;
1767 if (op_mode == VOIDmode)
1769 /* CONST_INTs have VOIDmode as the mode. We assume that all
1770 the bits of the constant are significant, though this is
1771 a dangerous assumption, as many times CONST_INTs are
1772 created and used with garbage in the bits outside of the
1773 precision of the implied mode of the const_int. */
1774 op_mode = MAX_MODE_INT;
1777 real_from_integer (&d, mode, rtx_mode_t (op, op_mode), SIGNED);
1779 /* Avoid the folding if flag_signaling_nans is on and
1780 operand is a signaling NaN. */
1781 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1782 return 0;
1784 d = real_value_truncate (mode, d);
1785 return const_double_from_real_value (d, mode);
1787 else if (code == UNSIGNED_FLOAT && CONST_SCALAR_INT_P (op))
1789 REAL_VALUE_TYPE d;
1791 if (op_mode == VOIDmode)
1793 /* CONST_INTs have VOIDmode as their mode. We assume that all
1794 the bits of the constant are significant; this is a dangerous
1795 assumption, though, as CONST_INTs are often created and used
1796 with garbage in the bits outside of the precision of the
1797 implied mode of the const_int. */
1798 op_mode = MAX_MODE_INT;
1801 real_from_integer (&d, mode, rtx_mode_t (op, op_mode), UNSIGNED);
1803 /* Avoid the folding if flag_signaling_nans is on and
1804 operand is a signaling NaN. */
1805 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1806 return 0;
1808 d = real_value_truncate (mode, d);
1809 return const_double_from_real_value (d, mode);
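   /* For example, (float:DF (const_int 3)) folds to 3.0, while
      (unsigned_float:DF (const_int -1)) with an SImode operand folds to
      4294967295.0, since the bits are interpreted as unsigned.  */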
1812 if (CONST_SCALAR_INT_P (op) && is_a <scalar_int_mode> (mode, &result_mode))
1814 unsigned int width = GET_MODE_PRECISION (result_mode);
1815 wide_int result;
1816 scalar_int_mode imode = (op_mode == VOIDmode
1817 ? result_mode
1818 : as_a <scalar_int_mode> (op_mode));
1819 rtx_mode_t op0 = rtx_mode_t (op, imode);
1820 int int_value;
1822 #if TARGET_SUPPORTS_WIDE_INT == 0
1823 /* This assert keeps the simplification from producing a result
1824 that cannot be represented in a CONST_DOUBLE, but a lot of
1825 upstream callers expect that this function never fails to
1826 simplify something, so if you added this check to the test
1827 above, the code would die later anyway. If this assert
1828 fires, you just need to make the port support wide int. */
1829 gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT);
1830 #endif
1832 switch (code)
1834 case NOT:
1835 result = wi::bit_not (op0);
1836 break;
1838 case NEG:
1839 result = wi::neg (op0);
1840 break;
1842 case ABS:
1843 result = wi::abs (op0);
1844 break;
1846 case FFS:
1847 result = wi::shwi (wi::ffs (op0), result_mode);
1848 break;
1850 case CLZ:
1851 if (wi::ne_p (op0, 0))
1852 int_value = wi::clz (op0);
1853 else if (! CLZ_DEFINED_VALUE_AT_ZERO (imode, int_value))
1854 int_value = GET_MODE_PRECISION (imode);
1855 result = wi::shwi (int_value, result_mode);
1856 break;
1858 case CLRSB:
1859 result = wi::shwi (wi::clrsb (op0), result_mode);
1860 break;
1862 case CTZ:
1863 if (wi::ne_p (op0, 0))
1864 int_value = wi::ctz (op0);
1865 else if (! CTZ_DEFINED_VALUE_AT_ZERO (imode, int_value))
1866 int_value = GET_MODE_PRECISION (imode);
1867 result = wi::shwi (int_value, result_mode);
1868 break;
1870 case POPCOUNT:
1871 result = wi::shwi (wi::popcount (op0), result_mode);
1872 break;
1874 case PARITY:
1875 result = wi::shwi (wi::parity (op0), result_mode);
1876 break;
1878 case BSWAP:
1879 result = wide_int (op0).bswap ();
1880 break;
1882 case TRUNCATE:
1883 case ZERO_EXTEND:
1884 result = wide_int::from (op0, width, UNSIGNED);
1885 break;
1887 case SIGN_EXTEND:
1888 result = wide_int::from (op0, width, SIGNED);
1889 break;
1891 case SQRT:
1892 default:
1893 return 0;
1896 return immed_wide_int_const (result, result_mode);
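   /* Examples of the wide-int folds above: (clz:SI (const_int 1)) is 31,
      (ffs:SI (const_int 8)) is 4, (bswap:SI (const_int 0x12345678)) is
      0x78563412, and (sign_extend:SI (const_int -128)) from QImode
      stays -128.  */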
1899 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1900 && SCALAR_FLOAT_MODE_P (mode)
1901 && SCALAR_FLOAT_MODE_P (GET_MODE (op)))
1903 REAL_VALUE_TYPE d = *CONST_DOUBLE_REAL_VALUE (op);
1904 switch (code)
1906 case SQRT:
1907 return 0;
1908 case ABS:
1909 d = real_value_abs (&d);
1910 break;
1911 case NEG:
1912 d = real_value_negate (&d);
1913 break;
1914 case FLOAT_TRUNCATE:
1915 /* Don't perform the operation if flag_signaling_nans is on
1916 and the operand is a signaling NaN. */
1917 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1918 return NULL_RTX;
1919 d = real_value_truncate (mode, d);
1920 break;
1921 case FLOAT_EXTEND:
1922 /* Don't perform the operation if flag_signaling_nans is on
1923 and the operand is a signaling NaN. */
1924 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1925 return NULL_RTX;
1926 /* All this does is change the mode, unless the mode class
1927 changes, in which case a real conversion is needed. */
1928 if (GET_MODE_CLASS (mode) != GET_MODE_CLASS (GET_MODE (op)))
1929 real_convert (&d, mode, &d);
1930 break;
1931 case FIX:
1932 /* Don't perform the operation if flag_signaling_nans is on
1933 and the operand is a signaling NaN. */
1934 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1935 return NULL_RTX;
1936 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
1937 break;
1938 case NOT:
1940 long tmp[4];
1941 int i;
1943 real_to_target (tmp, &d, GET_MODE (op));
1944 for (i = 0; i < 4; i++)
1945 tmp[i] = ~tmp[i];
1946 real_from_target (&d, tmp, mode);
1947 break;
1949 default:
1950 gcc_unreachable ();
1952 return const_double_from_real_value (d, mode);
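   /* For example, (abs:DF (const_double -2.5)) folds to 2.5 and
      (neg:DF (const_double 1.0)) folds to -1.0; FLOAT_TRUNCATE and
      FLOAT_EXTEND are skipped for signaling NaNs as noted above.  */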
1954 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1955 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1956 && is_int_mode (mode, &result_mode))
1958 unsigned int width = GET_MODE_PRECISION (result_mode);
1959 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1960 operators are intentionally left unspecified (to ease implementation
1961 by target backends), for consistency, this routine implements the
1962 same semantics for constant folding as used by the middle-end. */
1964 /* This was formerly used only for non-IEEE float.
1965 eggert@twinsun.com says it is safe for IEEE also. */
1966 REAL_VALUE_TYPE t;
1967 const REAL_VALUE_TYPE *x = CONST_DOUBLE_REAL_VALUE (op);
1968 wide_int wmax, wmin;
1969 /* This is part of the ABI of real_to_integer, but we check
1970 things before making this call. */
1971 bool fail;
1973 switch (code)
1975 case FIX:
1976 if (REAL_VALUE_ISNAN (*x))
1977 return const0_rtx;
1979 /* Test against the signed upper bound. */
1980 wmax = wi::max_value (width, SIGNED);
1981 real_from_integer (&t, VOIDmode, wmax, SIGNED);
1982 if (real_less (&t, x))
1983 return immed_wide_int_const (wmax, mode);
1985 /* Test against the signed lower bound. */
1986 wmin = wi::min_value (width, SIGNED);
1987 real_from_integer (&t, VOIDmode, wmin, SIGNED);
1988 if (real_less (x, &t))
1989 return immed_wide_int_const (wmin, mode);
1991 return immed_wide_int_const (real_to_integer (x, &fail, width),
1992 mode);
1994 case UNSIGNED_FIX:
1995 if (REAL_VALUE_ISNAN (*x) || REAL_VALUE_NEGATIVE (*x))
1996 return const0_rtx;
1998 /* Test against the unsigned upper bound. */
1999 wmax = wi::max_value (width, UNSIGNED);
2000 real_from_integer (&t, VOIDmode, wmax, UNSIGNED);
2001 if (real_less (&t, x))
2002 return immed_wide_int_const (wmax, mode);
2004 return immed_wide_int_const (real_to_integer (x, &fail, width),
2005 mode);
2007 default:
2008 gcc_unreachable ();
2012 return NULL_RTX;
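   /* Illustration of the FIX/UNSIGNED_FIX clamping above: a NaN folds
      to 0, FIX of 1e30 to SImode folds to 2147483647 (the signed
      maximum), and UNSIGNED_FIX of any negative value folds to 0.  */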
2015 /* Subroutine of simplify_binary_operation to simplify a binary operation
2016 CODE that can commute with byte swapping, with result mode MODE and
2017 operating on OP0 and OP1. CODE is currently one of AND, IOR or XOR.
2018 Return zero if no simplification or canonicalization is possible. */
2020 static rtx
2021 simplify_byte_swapping_operation (enum rtx_code code, machine_mode mode,
2022 rtx op0, rtx op1)
2024 rtx tem;
2026 /* (op (bswap x) C1)) -> (bswap (op x C2)) with C2 swapped. */
2027 if (GET_CODE (op0) == BSWAP && CONST_SCALAR_INT_P (op1))
2029 tem = simplify_gen_binary (code, mode, XEXP (op0, 0),
2030 simplify_gen_unary (BSWAP, mode, op1, mode));
2031 return simplify_gen_unary (BSWAP, mode, tem, mode);
2034 /* (op (bswap x) (bswap y)) -> (bswap (op x y)). */
2035 if (GET_CODE (op0) == BSWAP && GET_CODE (op1) == BSWAP)
2037 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), XEXP (op1, 0));
2038 return simplify_gen_unary (BSWAP, mode, tem, mode);
2041 return NULL_RTX;
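   /* For example, (and:SI (bswap:SI X) (const_int 0xff)) becomes
      (bswap:SI (and:SI X (const_int 0xff000000))), and
      (ior (bswap X) (bswap Y)) becomes (bswap (ior X Y)), per the
      rules above.  */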
2044 /* Subroutine of simplify_binary_operation to simplify a commutative,
2045 associative binary operation CODE with result mode MODE, operating
2046 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
2047 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
2048 canonicalization is possible. */
2050 static rtx
2051 simplify_associative_operation (enum rtx_code code, machine_mode mode,
2052 rtx op0, rtx op1)
2054 rtx tem;
2056 /* Linearize the operator to the left. */
2057 if (GET_CODE (op1) == code)
2059 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)". */
2060 if (GET_CODE (op0) == code)
2062 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
2063 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
2066 /* "a op (b op c)" becomes "(b op c) op a". */
2067 if (! swap_commutative_operands_p (op1, op0))
2068 return simplify_gen_binary (code, mode, op1, op0);
2070 std::swap (op0, op1);
2073 if (GET_CODE (op0) == code)
2075 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
2076 if (swap_commutative_operands_p (XEXP (op0, 1), op1))
2078 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
2079 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
2082 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
2083 tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
2084 if (tem != 0)
2085 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
2087 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
2088 tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
2089 if (tem != 0)
2090 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
2093 return 0;
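   /* For example, (plus (plus X (const_int 1)) (const_int 2)) is
      reassociated by the rules above to (plus X (const_int 3)),
      because the inner "b op c" operation (1 + 2) simplifies to a
      constant.  */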
2097 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
2098 and OP1. Return 0 if no simplification is possible.
2100 Don't use this for relational operations such as EQ or LT.
2101 Use simplify_relational_operation instead. */
2102 rtx
2103 simplify_binary_operation (enum rtx_code code, machine_mode mode,
2104 rtx op0, rtx op1)
2106 rtx trueop0, trueop1;
2107 rtx tem;
2109 /* Relational operations don't work here. We must know the mode
2110 of the operands in order to do the comparison correctly.
2111 Assuming a full word can give incorrect results.
2112 Consider comparing 128 with -128 in QImode. */
2113 gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
2114 gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
2116 /* Make sure the constant is second. */
2117 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
2118 && swap_commutative_operands_p (op0, op1))
2119 std::swap (op0, op1);
2121 trueop0 = avoid_constant_pool_reference (op0);
2122 trueop1 = avoid_constant_pool_reference (op1);
2124 tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
2125 if (tem)
2126 return tem;
2127 tem = simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
2129 if (tem)
2130 return tem;
2132 /* If the above steps did not result in a simplification and op0 or op1
2133 were constant pool references, use the referenced constants directly. */
2134 if (trueop0 != op0 || trueop1 != op1)
2135 return simplify_gen_binary (code, mode, trueop0, trueop1);
2137 return NULL_RTX;
2140 /* Subroutine of simplify_binary_operation. Simplify a binary operation
2141 CODE with result mode MODE, operating on OP0 and OP1. If OP0 and/or
2142 OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
2143 actual constants. */
2145 static rtx
2146 simplify_binary_operation_1 (enum rtx_code code, machine_mode mode,
2147 rtx op0, rtx op1, rtx trueop0, rtx trueop1)
2149 rtx tem, reversed, opleft, opright;
2150 HOST_WIDE_INT val;
2151 scalar_int_mode int_mode, inner_mode;
2153 /* Even if we can't compute a constant result,
2154 there are some cases worth simplifying. */
2156 switch (code)
2158 case PLUS:
2159 /* Maybe simplify x + 0 to x. The two expressions are equivalent
2160 when x is NaN, infinite, or finite and nonzero. They aren't
2161 when x is -0 and the rounding mode is not towards -infinity,
2162 since (-0) + 0 is then 0. */
2163 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
2164 return op0;
2166 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
2167 transformations are safe even for IEEE. */
2168 if (GET_CODE (op0) == NEG)
2169 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
2170 else if (GET_CODE (op1) == NEG)
2171 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
2173 /* (~a) + 1 -> -a */
2174 if (INTEGRAL_MODE_P (mode)
2175 && GET_CODE (op0) == NOT
2176 && trueop1 == const1_rtx)
2177 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
2179 /* Handle both-operands-constant cases. We can only add
2180 CONST_INTs to constants since the sum of relocatable symbols
2181 can't be handled by most assemblers. Don't add CONST_INT
2182 to CONST_INT since overflow won't be computed properly if wider
2183 than HOST_BITS_PER_WIDE_INT. */
2185 if ((GET_CODE (op0) == CONST
2186 || GET_CODE (op0) == SYMBOL_REF
2187 || GET_CODE (op0) == LABEL_REF)
2188 && CONST_INT_P (op1))
2189 return plus_constant (mode, op0, INTVAL (op1));
2190 else if ((GET_CODE (op1) == CONST
2191 || GET_CODE (op1) == SYMBOL_REF
2192 || GET_CODE (op1) == LABEL_REF)
2193 && CONST_INT_P (op0))
2194 return plus_constant (mode, op1, INTVAL (op0));
2196 /* See if this is something like X * C - X or vice versa or
2197 if the multiplication is written as a shift. If so, we can
2198 distribute and make a new multiply, shift, or maybe just
2199 have X (if C is 2 in the example above). But don't make
2200 something more expensive than we had before. */
2202 if (is_a <scalar_int_mode> (mode, &int_mode))
2204 rtx lhs = op0, rhs = op1;
2206 wide_int coeff0 = wi::one (GET_MODE_PRECISION (int_mode));
2207 wide_int coeff1 = wi::one (GET_MODE_PRECISION (int_mode));
2209 if (GET_CODE (lhs) == NEG)
2211 coeff0 = wi::minus_one (GET_MODE_PRECISION (int_mode));
2212 lhs = XEXP (lhs, 0);
2214 else if (GET_CODE (lhs) == MULT
2215 && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
2217 coeff0 = rtx_mode_t (XEXP (lhs, 1), int_mode);
2218 lhs = XEXP (lhs, 0);
2220 else if (GET_CODE (lhs) == ASHIFT
2221 && CONST_INT_P (XEXP (lhs, 1))
2222 && INTVAL (XEXP (lhs, 1)) >= 0
2223 && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (int_mode))
2225 coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
2226 GET_MODE_PRECISION (int_mode));
2227 lhs = XEXP (lhs, 0);
2230 if (GET_CODE (rhs) == NEG)
2232 coeff1 = wi::minus_one (GET_MODE_PRECISION (int_mode));
2233 rhs = XEXP (rhs, 0);
2235 else if (GET_CODE (rhs) == MULT
2236 && CONST_INT_P (XEXP (rhs, 1)))
2238 coeff1 = rtx_mode_t (XEXP (rhs, 1), int_mode);
2239 rhs = XEXP (rhs, 0);
2241 else if (GET_CODE (rhs) == ASHIFT
2242 && CONST_INT_P (XEXP (rhs, 1))
2243 && INTVAL (XEXP (rhs, 1)) >= 0
2244 && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (int_mode))
2246 coeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
2247 GET_MODE_PRECISION (int_mode));
2248 rhs = XEXP (rhs, 0);
2251 if (rtx_equal_p (lhs, rhs))
2253 rtx orig = gen_rtx_PLUS (int_mode, op0, op1);
2254 rtx coeff;
2255 bool speed = optimize_function_for_speed_p (cfun);
2257 coeff = immed_wide_int_const (coeff0 + coeff1, int_mode);
2259 tem = simplify_gen_binary (MULT, int_mode, lhs, coeff);
2260 return (set_src_cost (tem, int_mode, speed)
2261 <= set_src_cost (orig, int_mode, speed) ? tem : 0);
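     /* For example, (plus (mult X (const_int 4)) X) becomes
	(mult X (const_int 5)), and (plus (ashift X (const_int 2)) X) is
	handled the same way, provided the result is not more expensive
	than the original expression.  */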
2265 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
2266 if (CONST_SCALAR_INT_P (op1)
2267 && GET_CODE (op0) == XOR
2268 && CONST_SCALAR_INT_P (XEXP (op0, 1))
2269 && mode_signbit_p (mode, op1))
2270 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2271 simplify_gen_binary (XOR, mode, op1,
2272 XEXP (op0, 1)));
2274 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
2275 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2276 && GET_CODE (op0) == MULT
2277 && GET_CODE (XEXP (op0, 0)) == NEG)
2279 rtx in1, in2;
2281 in1 = XEXP (XEXP (op0, 0), 0);
2282 in2 = XEXP (op0, 1);
2283 return simplify_gen_binary (MINUS, mode, op1,
2284 simplify_gen_binary (MULT, mode,
2285 in1, in2));
2288 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
2289 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
2290 is 1. */
2291 if (COMPARISON_P (op0)
2292 && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
2293 || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
2294 && (reversed = reversed_comparison (op0, mode)))
2295 return
2296 simplify_gen_unary (NEG, mode, reversed, mode);
2298 /* If one of the operands is a PLUS or a MINUS, see if we can
2299 simplify this by the associative law.
2300 Don't use the associative law for floating point.
2301 The inaccuracy makes it nonassociative,
2302 and subtle programs can break if operations are associated. */
2304 if (INTEGRAL_MODE_P (mode)
2305 && (plus_minus_operand_p (op0)
2306 || plus_minus_operand_p (op1))
2307 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2308 return tem;
2310 /* Reassociate floating point addition only when the user
2311 specifies associative math operations. */
2312 if (FLOAT_MODE_P (mode)
2313 && flag_associative_math)
2315 tem = simplify_associative_operation (code, mode, op0, op1);
2316 if (tem)
2317 return tem;
2319 break;
2321 case COMPARE:
2322 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
2323 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
2324 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
2325 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
2327 rtx xop00 = XEXP (op0, 0);
2328 rtx xop10 = XEXP (op1, 0);
2330 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
2331 return xop00;
2333 if (REG_P (xop00) && REG_P (xop10)
2334 && REGNO (xop00) == REGNO (xop10)
2335 && GET_MODE (xop00) == mode
2336 && GET_MODE (xop10) == mode
2337 && GET_MODE_CLASS (mode) == MODE_CC)
2338 return xop00;
2340 break;
2342 case MINUS:
2343 /* We can't assume x-x is 0 even with non-IEEE floating point,
2344 but since it is zero except in very strange circumstances, we
2345 will treat it as zero with -ffinite-math-only. */
2346 if (rtx_equal_p (trueop0, trueop1)
2347 && ! side_effects_p (op0)
2348 && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
2349 return CONST0_RTX (mode);
2351 /* Change subtraction from zero into negation. (0 - x) is the
2352 same as -x when x is NaN, infinite, or finite and nonzero.
2353 But if the mode has signed zeros, and does not round towards
2354 -infinity, then 0 - 0 is 0, not -0. */
2355 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
2356 return simplify_gen_unary (NEG, mode, op1, mode);
2358 /* (-1 - a) is ~a, unless the expression contains symbolic
2359 constants, in which case not retaining additions and
2360 subtractions could cause invalid assembly to be produced. */
2361 if (trueop0 == constm1_rtx
2362 && !contains_symbolic_reference_p (op1))
2363 return simplify_gen_unary (NOT, mode, op1, mode);
2365 /* Subtracting 0 has no effect unless the mode has signed zeros
2366 and supports rounding towards -infinity. In such a case,
2367 0 - 0 is -0. */
2368 if (!(HONOR_SIGNED_ZEROS (mode)
2369 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
2370 && trueop1 == CONST0_RTX (mode))
2371 return op0;
2373 /* See if this is something like X * C - X or vice versa or
2374 if the multiplication is written as a shift. If so, we can
2375 distribute and make a new multiply, shift, or maybe just
2376 have X (if C is 2 in the example above). But don't make
2377 something more expensive than we had before. */
2379 if (is_a <scalar_int_mode> (mode, &int_mode))
2381 rtx lhs = op0, rhs = op1;
2383 wide_int coeff0 = wi::one (GET_MODE_PRECISION (int_mode));
2384 wide_int negcoeff1 = wi::minus_one (GET_MODE_PRECISION (int_mode));
2386 if (GET_CODE (lhs) == NEG)
2388 coeff0 = wi::minus_one (GET_MODE_PRECISION (int_mode));
2389 lhs = XEXP (lhs, 0);
2391 else if (GET_CODE (lhs) == MULT
2392 && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
2394 coeff0 = rtx_mode_t (XEXP (lhs, 1), int_mode);
2395 lhs = XEXP (lhs, 0);
2397 else if (GET_CODE (lhs) == ASHIFT
2398 && CONST_INT_P (XEXP (lhs, 1))
2399 && INTVAL (XEXP (lhs, 1)) >= 0
2400 && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (int_mode))
2402 coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
2403 GET_MODE_PRECISION (int_mode));
2404 lhs = XEXP (lhs, 0);
2407 if (GET_CODE (rhs) == NEG)
2409 negcoeff1 = wi::one (GET_MODE_PRECISION (int_mode));
2410 rhs = XEXP (rhs, 0);
2412 else if (GET_CODE (rhs) == MULT
2413 && CONST_INT_P (XEXP (rhs, 1)))
2415 negcoeff1 = wi::neg (rtx_mode_t (XEXP (rhs, 1), int_mode));
2416 rhs = XEXP (rhs, 0);
2418 else if (GET_CODE (rhs) == ASHIFT
2419 && CONST_INT_P (XEXP (rhs, 1))
2420 && INTVAL (XEXP (rhs, 1)) >= 0
2421 && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (int_mode))
2423 negcoeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
2424 GET_MODE_PRECISION (int_mode));
2425 negcoeff1 = -negcoeff1;
2426 rhs = XEXP (rhs, 0);
2429 if (rtx_equal_p (lhs, rhs))
2431 rtx orig = gen_rtx_MINUS (int_mode, op0, op1);
2432 rtx coeff;
2433 bool speed = optimize_function_for_speed_p (cfun);
2435 coeff = immed_wide_int_const (coeff0 + negcoeff1, int_mode);
2437 tem = simplify_gen_binary (MULT, int_mode, lhs, coeff);
2438 return (set_src_cost (tem, int_mode, speed)
2439 <= set_src_cost (orig, int_mode, speed) ? tem : 0);
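     /* For example, (minus (mult X (const_int 3)) X) becomes
	(mult X (const_int 2)), again only if the new form is not more
	expensive than the original.  */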
2443 /* (a - (-b)) -> (a + b). True even for IEEE. */
2444 if (GET_CODE (op1) == NEG)
2445 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
2447 /* (-x - c) may be simplified as (-c - x). */
2448 if (GET_CODE (op0) == NEG
2449 && (CONST_SCALAR_INT_P (op1) || CONST_DOUBLE_AS_FLOAT_P (op1)))
2451 tem = simplify_unary_operation (NEG, mode, op1, mode);
2452 if (tem)
2453 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
2456 /* Don't let a relocatable value get a negative coeff. */
2457 if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode)
2458 return simplify_gen_binary (PLUS, mode,
2459 op0,
2460 neg_const_int (mode, op1));
2462 /* (x - (x & y)) -> (x & ~y) */
2463 if (INTEGRAL_MODE_P (mode) && GET_CODE (op1) == AND)
2465 if (rtx_equal_p (op0, XEXP (op1, 0)))
2467 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
2468 GET_MODE (XEXP (op1, 1)));
2469 return simplify_gen_binary (AND, mode, op0, tem);
2471 if (rtx_equal_p (op0, XEXP (op1, 1)))
2473 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
2474 GET_MODE (XEXP (op1, 0)));
2475 return simplify_gen_binary (AND, mode, op0, tem);
2479 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
2480 by reversing the comparison code if valid. */
2481 if (STORE_FLAG_VALUE == 1
2482 && trueop0 == const1_rtx
2483 && COMPARISON_P (op1)
2484 && (reversed = reversed_comparison (op1, mode)))
2485 return reversed;
2487 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
2488 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2489 && GET_CODE (op1) == MULT
2490 && GET_CODE (XEXP (op1, 0)) == NEG)
2492 rtx in1, in2;
2494 in1 = XEXP (XEXP (op1, 0), 0);
2495 in2 = XEXP (op1, 1);
2496 return simplify_gen_binary (PLUS, mode,
2497 simplify_gen_binary (MULT, mode,
2498 in1, in2),
2499 op0);
2502 /* Canonicalize (minus (neg A) (mult B C)) to
2503 (minus (mult (neg B) C) A). */
2504 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2505 && GET_CODE (op1) == MULT
2506 && GET_CODE (op0) == NEG)
2508 rtx in1, in2;
2510 in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
2511 in2 = XEXP (op1, 1);
2512 return simplify_gen_binary (MINUS, mode,
2513 simplify_gen_binary (MULT, mode,
2514 in1, in2),
2515 XEXP (op0, 0));
2518 /* If one of the operands is a PLUS or a MINUS, see if we can
2519 simplify this by the associative law. This will, for example,
2520 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
2521 Don't use the associative law for floating point.
2522 The inaccuracy makes it nonassociative,
2523 and subtle programs can break if operations are associated. */
2525 if (INTEGRAL_MODE_P (mode)
2526 && (plus_minus_operand_p (op0)
2527 || plus_minus_operand_p (op1))
2528 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2529 return tem;
2530 break;
2532 case MULT:
2533 if (trueop1 == constm1_rtx)
2534 return simplify_gen_unary (NEG, mode, op0, mode);
2536 if (GET_CODE (op0) == NEG)
2538 rtx temp = simplify_unary_operation (NEG, mode, op1, mode);
2539 /* If op1 is a MULT as well and simplify_unary_operation
2540 just moved the NEG to the second operand, simplify_gen_binary
2541 below could, through simplify_associative_operation, move
2542 the NEG around again and recurse endlessly. */
2543 if (temp
2544 && GET_CODE (op1) == MULT
2545 && GET_CODE (temp) == MULT
2546 && XEXP (op1, 0) == XEXP (temp, 0)
2547 && GET_CODE (XEXP (temp, 1)) == NEG
2548 && XEXP (op1, 1) == XEXP (XEXP (temp, 1), 0))
2549 temp = NULL_RTX;
2550 if (temp)
2551 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), temp);
2553 if (GET_CODE (op1) == NEG)
2555 rtx temp = simplify_unary_operation (NEG, mode, op0, mode);
2556 /* If op0 is a MULT as well and simplify_unary_operation
2557 just moved the NEG to the second operand, simplify_gen_binary
2558 below could, through simplify_associative_operation, move
2559 the NEG around again and recurse endlessly. */
2560 if (temp
2561 && GET_CODE (op0) == MULT
2562 && GET_CODE (temp) == MULT
2563 && XEXP (op0, 0) == XEXP (temp, 0)
2564 && GET_CODE (XEXP (temp, 1)) == NEG
2565 && XEXP (op0, 1) == XEXP (XEXP (temp, 1), 0))
2566 temp = NULL_RTX;
2567 if (temp)
2568 return simplify_gen_binary (MULT, mode, temp, XEXP (op1, 0));
2571 /* Maybe simplify x * 0 to 0. The reduction is not valid if
2572 x is NaN, since x * 0 is then also NaN. Nor is it valid
2573 when the mode has signed zeros, since multiplying a negative
2574 number by 0 will give -0, not 0. */
2575 if (!HONOR_NANS (mode)
2576 && !HONOR_SIGNED_ZEROS (mode)
2577 && trueop1 == CONST0_RTX (mode)
2578 && ! side_effects_p (op0))
2579 return op1;
2581 /* In IEEE floating point, x*1 is not equivalent to x for
2582 signaling NaNs. */
2583 if (!HONOR_SNANS (mode)
2584 && trueop1 == CONST1_RTX (mode))
2585 return op0;
2587 /* Convert multiply by constant power of two into shift. */
2588 if (CONST_SCALAR_INT_P (trueop1))
2590 val = wi::exact_log2 (rtx_mode_t (trueop1, mode));
2591 if (val >= 0)
2592 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
2595 /* x*2 is x+x and x*(-1) is -x */
2596 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
2597 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
2598 && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
2599 && GET_MODE (op0) == mode)
2601 const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);
2603 if (real_equal (d1, &dconst2))
2604 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
2606 if (!HONOR_SNANS (mode)
2607 && real_equal (d1, &dconstm1))
2608 return simplify_gen_unary (NEG, mode, op0, mode);
2611 /* Optimize -x * -x as x * x. */
2612 if (FLOAT_MODE_P (mode)
2613 && GET_CODE (op0) == NEG
2614 && GET_CODE (op1) == NEG
2615 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2616 && !side_effects_p (XEXP (op0, 0)))
2617 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2619 /* Likewise, optimize abs(x) * abs(x) as x * x. */
2620 if (SCALAR_FLOAT_MODE_P (mode)
2621 && GET_CODE (op0) == ABS
2622 && GET_CODE (op1) == ABS
2623 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2624 && !side_effects_p (XEXP (op0, 0)))
2625 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2627 /* Reassociate multiplication, but for floating point MULTs
2628 only when the user specifies unsafe math optimizations. */
2629 if (! FLOAT_MODE_P (mode)
2630 || flag_unsafe_math_optimizations)
2632 tem = simplify_associative_operation (code, mode, op0, op1);
2633 if (tem)
2634 return tem;
2636 break;
2638 case IOR:
2639 if (trueop1 == CONST0_RTX (mode))
2640 return op0;
2641 if (INTEGRAL_MODE_P (mode)
2642 && trueop1 == CONSTM1_RTX (mode)
2643 && !side_effects_p (op0))
2644 return op1;
2645 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2646 return op0;
2647 /* A | (~A) -> -1 */
2648 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2649 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2650 && ! side_effects_p (op0)
2651 && SCALAR_INT_MODE_P (mode))
2652 return constm1_rtx;
2654 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
2655 if (CONST_INT_P (op1)
2656 && HWI_COMPUTABLE_MODE_P (mode)
2657 && (nonzero_bits (op0, mode) & ~UINTVAL (op1)) == 0
2658 && !side_effects_p (op0))
2659 return op1;
2661 /* Canonicalize (X & C1) | C2. */
2662 if (GET_CODE (op0) == AND
2663 && CONST_INT_P (trueop1)
2664 && CONST_INT_P (XEXP (op0, 1)))
2666 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
2667 HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
2668 HOST_WIDE_INT c2 = INTVAL (trueop1);
2670 /* If (C1&C2) == C1, then (X&C1)|C2 becomes C2. */
2671 if ((c1 & c2) == c1
2672 && !side_effects_p (XEXP (op0, 0)))
2673 return trueop1;
2675 /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
2676 if (((c1|c2) & mask) == mask)
2677 return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);
2680 /* Convert (A & B) | A to A. */
2681 if (GET_CODE (op0) == AND
2682 && (rtx_equal_p (XEXP (op0, 0), op1)
2683 || rtx_equal_p (XEXP (op0, 1), op1))
2684 && ! side_effects_p (XEXP (op0, 0))
2685 && ! side_effects_p (XEXP (op0, 1)))
2686 return op1;
2688 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2689 mode size to (rotate A CX). */
2691 if (GET_CODE (op1) == ASHIFT
2692 || GET_CODE (op1) == SUBREG)
2694 opleft = op1;
2695 opright = op0;
2697 else
2699 opright = op1;
2700 opleft = op0;
2703 if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
2704 && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
2705 && CONST_INT_P (XEXP (opleft, 1))
2706 && CONST_INT_P (XEXP (opright, 1))
2707 && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
2708 == GET_MODE_UNIT_PRECISION (mode)))
2709 return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
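      /* For example, in SImode (ior (ashift X (const_int 8))
	 (lshiftrt X (const_int 24))) becomes (rotate X (const_int 8)),
	 since the shift counts sum to the 32-bit precision.  */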
2711 /* Same, but for ashift that has been "simplified" to a wider mode
2712 by simplify_shift_const. */
2714 if (GET_CODE (opleft) == SUBREG
2715 && is_a <scalar_int_mode> (mode, &int_mode)
2716 && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (opleft)),
2717 &inner_mode)
2718 && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
2719 && GET_CODE (opright) == LSHIFTRT
2720 && GET_CODE (XEXP (opright, 0)) == SUBREG
2721 && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
2722 && GET_MODE_SIZE (int_mode) < GET_MODE_SIZE (inner_mode)
2723 && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
2724 SUBREG_REG (XEXP (opright, 0)))
2725 && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
2726 && CONST_INT_P (XEXP (opright, 1))
2727 && (INTVAL (XEXP (SUBREG_REG (opleft), 1))
2728 + INTVAL (XEXP (opright, 1))
2729 == GET_MODE_PRECISION (int_mode)))
2730 return gen_rtx_ROTATE (int_mode, XEXP (opright, 0),
2731 XEXP (SUBREG_REG (opleft), 1));
2733 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2734 a (sign_extend (plus ...)). Then check if OP1 is a CONST_INT and
2735 the PLUS does not affect any of the bits in OP1: then we can do
2736 the IOR as a PLUS and we can associate. This is valid if OP1
2737 can be safely shifted left C bits. */
2738 if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
2739 && GET_CODE (XEXP (op0, 0)) == PLUS
2740 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
2741 && CONST_INT_P (XEXP (op0, 1))
2742 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
2744 int count = INTVAL (XEXP (op0, 1));
2745 HOST_WIDE_INT mask = UINTVAL (trueop1) << count;
2747 if (mask >> count == INTVAL (trueop1)
2748 && trunc_int_for_mode (mask, mode) == mask
2749 && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
2750 return simplify_gen_binary (ASHIFTRT, mode,
2751 plus_constant (mode, XEXP (op0, 0),
2752 mask),
2753 XEXP (op0, 1));
2756 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
2757 if (tem)
2758 return tem;
2760 tem = simplify_associative_operation (code, mode, op0, op1);
2761 if (tem)
2762 return tem;
2763 break;
2765 case XOR:
2766 if (trueop1 == CONST0_RTX (mode))
2767 return op0;
2768 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2769 return simplify_gen_unary (NOT, mode, op0, mode);
2770 if (rtx_equal_p (trueop0, trueop1)
2771 && ! side_effects_p (op0)
2772 && GET_MODE_CLASS (mode) != MODE_CC)
2773 return CONST0_RTX (mode);
2775 /* Canonicalize XOR of the most significant bit to PLUS. */
2776 if (CONST_SCALAR_INT_P (op1)
2777 && mode_signbit_p (mode, op1))
2778 return simplify_gen_binary (PLUS, mode, op0, op1);
2779 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2780 if (CONST_SCALAR_INT_P (op1)
2781 && GET_CODE (op0) == PLUS
2782 && CONST_SCALAR_INT_P (XEXP (op0, 1))
2783 && mode_signbit_p (mode, XEXP (op0, 1)))
2784 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2785 simplify_gen_binary (XOR, mode, op1,
2786 XEXP (op0, 1)));
2788 /* If we are XORing two things that have no bits in common,
2789 convert them into an IOR. This helps to detect rotation encoded
2790 using those methods and possibly other simplifications. */
2792 if (HWI_COMPUTABLE_MODE_P (mode)
2793 && (nonzero_bits (op0, mode)
2794 & nonzero_bits (op1, mode)) == 0)
2795 return (simplify_gen_binary (IOR, mode, op0, op1));
2797 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2798 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2799 (NOT y). */
2801 int num_negated = 0;
2803 if (GET_CODE (op0) == NOT)
2804 num_negated++, op0 = XEXP (op0, 0);
2805 if (GET_CODE (op1) == NOT)
2806 num_negated++, op1 = XEXP (op1, 0);
2808 if (num_negated == 2)
2809 return simplify_gen_binary (XOR, mode, op0, op1);
2810 else if (num_negated == 1)
2811 return simplify_gen_unary (NOT, mode,
2812 simplify_gen_binary (XOR, mode, op0, op1),
2813 mode);
2816 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2817 correspond to a machine insn or result in further simplifications
2818 if B is a constant. */
2820 if (GET_CODE (op0) == AND
2821 && rtx_equal_p (XEXP (op0, 1), op1)
2822 && ! side_effects_p (op1))
2823 return simplify_gen_binary (AND, mode,
2824 simplify_gen_unary (NOT, mode,
2825 XEXP (op0, 0), mode),
2826 op1);
2828 else if (GET_CODE (op0) == AND
2829 && rtx_equal_p (XEXP (op0, 0), op1)
2830 && ! side_effects_p (op1))
2831 return simplify_gen_binary (AND, mode,
2832 simplify_gen_unary (NOT, mode,
2833 XEXP (op0, 1), mode),
2834 op1);
2836 /* Given (xor (ior (xor A B) C) D), where B, C and D are
2837 constants, simplify to (xor (ior A C) (B&~C)^D), canceling
2838 out bits inverted twice and not set by C. Similarly, given
2839 (xor (and (xor A B) C) D), simplify without inverting C in
2840 the xor operand: (xor (and A C) (B&C)^D). */
2842 else if ((GET_CODE (op0) == IOR || GET_CODE (op0) == AND)
2843 && GET_CODE (XEXP (op0, 0)) == XOR
2844 && CONST_INT_P (op1)
2845 && CONST_INT_P (XEXP (op0, 1))
2846 && CONST_INT_P (XEXP (XEXP (op0, 0), 1)))
2848 enum rtx_code op = GET_CODE (op0);
2849 rtx a = XEXP (XEXP (op0, 0), 0);
2850 rtx b = XEXP (XEXP (op0, 0), 1);
2851 rtx c = XEXP (op0, 1);
2852 rtx d = op1;
2853 HOST_WIDE_INT bval = INTVAL (b);
2854 HOST_WIDE_INT cval = INTVAL (c);
2855 HOST_WIDE_INT dval = INTVAL (d);
2856 HOST_WIDE_INT xcval;
2858 if (op == IOR)
2859 xcval = ~cval;
2860 else
2861 xcval = cval;
2863 return simplify_gen_binary (XOR, mode,
2864 simplify_gen_binary (op, mode, a, c),
2865 gen_int_mode ((bval & xcval) ^ dval,
2866 mode));
2869 /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
2870 we can transform like this:
2871 (A&B)^C == ~(A&B)&C | ~C&(A&B)
2872 == (~A|~B)&C | ~C&(A&B) * DeMorgan's Law
2873 == ~A&C | ~B&C | A&(~C&B) * Distribute and re-order
2874 Attempt a few simplifications when B and C are both constants. */
2875 if (GET_CODE (op0) == AND
2876 && CONST_INT_P (op1)
2877 && CONST_INT_P (XEXP (op0, 1)))
2879 rtx a = XEXP (op0, 0);
2880 rtx b = XEXP (op0, 1);
2881 rtx c = op1;
2882 HOST_WIDE_INT bval = INTVAL (b);
2883 HOST_WIDE_INT cval = INTVAL (c);
2885 /* Instead of computing ~A&C, we compute its negated value,
2886 ~(A|~C). If it yields -1, ~A&C is zero, so we can
2887 optimize for sure. If it does not simplify, we still try
2888 to compute ~A&C below, but since that always allocates
2889 RTL, we don't try that before committing to returning a
2890 simplified expression. */
2891 rtx n_na_c = simplify_binary_operation (IOR, mode, a,
2892 GEN_INT (~cval));
2894 if ((~cval & bval) == 0)
2896 rtx na_c = NULL_RTX;
2897 if (n_na_c)
2898 na_c = simplify_gen_unary (NOT, mode, n_na_c, mode);
2899 else
2901 /* If ~A does not simplify, don't bother: we don't
2902 want to simplify 2 operations into 3, and if na_c
2903 were to simplify with na, n_na_c would have
2904 simplified as well. */
2905 rtx na = simplify_unary_operation (NOT, mode, a, mode);
2906 if (na)
2907 na_c = simplify_gen_binary (AND, mode, na, c);
2910 /* Try to simplify ~A&C | ~B&C. */
2911 if (na_c != NULL_RTX)
2912 return simplify_gen_binary (IOR, mode, na_c,
2913 gen_int_mode (~bval & cval, mode));
2915 else
2917 /* If ~A&C is zero, simplify A&(~C&B) | ~B&C. */
2918 if (n_na_c == CONSTM1_RTX (mode))
2920 rtx a_nc_b = simplify_gen_binary (AND, mode, a,
2921 gen_int_mode (~cval & bval,
2922 mode));
2923 return simplify_gen_binary (IOR, mode, a_nc_b,
2924 gen_int_mode (~bval & cval,
2925 mode));
2930 /* If we have (xor (and (xor A B) C) A) with C a constant we can instead
2931 do (ior (and A ~C) (and B C)) which is a machine instruction on some
2932 machines, and also has shorter instruction path length. */
2933 if (GET_CODE (op0) == AND
2934 && GET_CODE (XEXP (op0, 0)) == XOR
2935 && CONST_INT_P (XEXP (op0, 1))
2936 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), trueop1))
2938 rtx a = trueop1;
2939 rtx b = XEXP (XEXP (op0, 0), 1);
2940 rtx c = XEXP (op0, 1);
2941 rtx nc = simplify_gen_unary (NOT, mode, c, mode);
2942 rtx a_nc = simplify_gen_binary (AND, mode, a, nc);
2943 rtx bc = simplify_gen_binary (AND, mode, b, c);
2944 return simplify_gen_binary (IOR, mode, a_nc, bc);
2946 /* Similarly, (xor (and (xor A B) C) B) as (ior (and A C) (and B ~C)) */
2947 else if (GET_CODE (op0) == AND
2948 && GET_CODE (XEXP (op0, 0)) == XOR
2949 && CONST_INT_P (XEXP (op0, 1))
2950 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), trueop1))
2952 rtx a = XEXP (XEXP (op0, 0), 0);
2953 rtx b = trueop1;
2954 rtx c = XEXP (op0, 1);
2955 rtx nc = simplify_gen_unary (NOT, mode, c, mode);
2956 rtx b_nc = simplify_gen_binary (AND, mode, b, nc);
2957 rtx ac = simplify_gen_binary (AND, mode, a, c);
2958 return simplify_gen_binary (IOR, mode, ac, b_nc);
2961 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2962 comparison if STORE_FLAG_VALUE is 1. */
2963 if (STORE_FLAG_VALUE == 1
2964 && trueop1 == const1_rtx
2965 && COMPARISON_P (op0)
2966 && (reversed = reversed_comparison (op0, mode)))
2967 return reversed;
2969 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2970 is (lt foo (const_int 0)), so we can perform the above
2971 simplification if STORE_FLAG_VALUE is 1. */
2973 if (is_a <scalar_int_mode> (mode, &int_mode)
2974 && STORE_FLAG_VALUE == 1
2975 && trueop1 == const1_rtx
2976 && GET_CODE (op0) == LSHIFTRT
2977 && CONST_INT_P (XEXP (op0, 1))
2978 && INTVAL (XEXP (op0, 1)) == GET_MODE_PRECISION (int_mode) - 1)
2979 return gen_rtx_GE (int_mode, XEXP (op0, 0), const0_rtx);
2981 /* (xor (comparison foo bar) (const_int sign-bit))
2982 when STORE_FLAG_VALUE is the sign bit. */
2983 if (is_a <scalar_int_mode> (mode, &int_mode)
2984 && val_signbit_p (int_mode, STORE_FLAG_VALUE)
2985 && trueop1 == const_true_rtx
2986 && COMPARISON_P (op0)
2987 && (reversed = reversed_comparison (op0, int_mode)))
2988 return reversed;
2990 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
2991 if (tem)
2992 return tem;
2994 tem = simplify_associative_operation (code, mode, op0, op1);
2995 if (tem)
2996 return tem;
2997 break;
2999 case AND:
3000 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
3001 return trueop1;
3002 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
3003 return op0;
3004 if (HWI_COMPUTABLE_MODE_P (mode))
3006 HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
3007 HOST_WIDE_INT nzop1;
3008 if (CONST_INT_P (trueop1))
3010 HOST_WIDE_INT val1 = INTVAL (trueop1);
3011 /* If we are turning off bits already known off in OP0, we need
3012 not do an AND. */
3013 if ((nzop0 & ~val1) == 0)
3014 return op0;
3016 nzop1 = nonzero_bits (trueop1, mode);
3017 /* If we are clearing all the nonzero bits, the result is zero. */
3018 if ((nzop1 & nzop0) == 0
3019 && !side_effects_p (op0) && !side_effects_p (op1))
3020 return CONST0_RTX (mode);
3022 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
3023 && GET_MODE_CLASS (mode) != MODE_CC)
3024 return op0;
3025 /* A & (~A) -> 0 */
3026 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
3027 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
3028 && ! side_effects_p (op0)
3029 && GET_MODE_CLASS (mode) != MODE_CC)
3030 return CONST0_RTX (mode);
3032 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
3033 there are no nonzero bits of C outside of X's mode. */
3034 if ((GET_CODE (op0) == SIGN_EXTEND
3035 || GET_CODE (op0) == ZERO_EXTEND)
3036 && CONST_INT_P (trueop1)
3037 && HWI_COMPUTABLE_MODE_P (mode)
3038 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
3039 & UINTVAL (trueop1)) == 0)
3041 machine_mode imode = GET_MODE (XEXP (op0, 0));
3042 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
3043 gen_int_mode (INTVAL (trueop1),
3044 imode));
3045 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
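      /* For example, (and:SI (zero_extend:SI (reg:QI X)) (const_int 0x7c))
	 becomes (zero_extend:SI (and:QI X (const_int 0x7c))), since 0x7c
	 has no bits outside the QImode mask.  */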
3048 /* Transform (and (truncate X) C) into (truncate (and X C)). This way
3049 we might be able to further simplify the AND with X and potentially
3050 remove the truncation altogether. */
3051 if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
3053 rtx x = XEXP (op0, 0);
3054 machine_mode xmode = GET_MODE (x);
3055 tem = simplify_gen_binary (AND, xmode, x,
3056 gen_int_mode (INTVAL (trueop1), xmode));
3057 return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
3060 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
3061 if (GET_CODE (op0) == IOR
3062 && CONST_INT_P (trueop1)
3063 && CONST_INT_P (XEXP (op0, 1)))
3065 HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
3066 return simplify_gen_binary (IOR, mode,
3067 simplify_gen_binary (AND, mode,
3068 XEXP (op0, 0), op1),
3069 gen_int_mode (tmp, mode));
3072 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
3073 insn (and may simplify more). */
3074 if (GET_CODE (op0) == XOR
3075 && rtx_equal_p (XEXP (op0, 0), op1)
3076 && ! side_effects_p (op1))
3077 return simplify_gen_binary (AND, mode,
3078 simplify_gen_unary (NOT, mode,
3079 XEXP (op0, 1), mode),
3080 op1);
3082 if (GET_CODE (op0) == XOR
3083 && rtx_equal_p (XEXP (op0, 1), op1)
3084 && ! side_effects_p (op1))
3085 return simplify_gen_binary (AND, mode,
3086 simplify_gen_unary (NOT, mode,
3087 XEXP (op0, 0), mode),
3088 op1);
3090 /* Similarly for (~(A ^ B)) & A. */
3091 if (GET_CODE (op0) == NOT
3092 && GET_CODE (XEXP (op0, 0)) == XOR
3093 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
3094 && ! side_effects_p (op1))
3095 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
3097 if (GET_CODE (op0) == NOT
3098 && GET_CODE (XEXP (op0, 0)) == XOR
3099 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
3100 && ! side_effects_p (op1))
3101 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
3103 /* Convert (A | B) & A to A. */
3104 if (GET_CODE (op0) == IOR
3105 && (rtx_equal_p (XEXP (op0, 0), op1)
3106 || rtx_equal_p (XEXP (op0, 1), op1))
3107 && ! side_effects_p (XEXP (op0, 0))
3108 && ! side_effects_p (XEXP (op0, 1)))
3109 return op1;
3111 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
3112 ((A & N) + B) & M -> (A + B) & M
3113 Similarly if (N & M) == 0,
3114 ((A | N) + B) & M -> (A + B) & M
3115 and for - instead of + and/or ^ instead of |.
3116 Also, if (N & M) == 0, then
3117 (A +- N) & M -> A & M. */
3118 if (CONST_INT_P (trueop1)
3119 && HWI_COMPUTABLE_MODE_P (mode)
3120 && ~UINTVAL (trueop1)
3121 && (UINTVAL (trueop1) & (UINTVAL (trueop1) + 1)) == 0
3122 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
3124 rtx pmop[2];
3125 int which;
3127 pmop[0] = XEXP (op0, 0);
3128 pmop[1] = XEXP (op0, 1);
3130 if (CONST_INT_P (pmop[1])
3131 && (UINTVAL (pmop[1]) & UINTVAL (trueop1)) == 0)
3132 return simplify_gen_binary (AND, mode, pmop[0], op1);
3134 for (which = 0; which < 2; which++)
3136 tem = pmop[which];
3137 switch (GET_CODE (tem))
3139 case AND:
3140 if (CONST_INT_P (XEXP (tem, 1))
3141 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1))
3142 == UINTVAL (trueop1))
3143 pmop[which] = XEXP (tem, 0);
3144 break;
3145 case IOR:
3146 case XOR:
3147 if (CONST_INT_P (XEXP (tem, 1))
3148 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1)) == 0)
3149 pmop[which] = XEXP (tem, 0);
3150 break;
3151 default:
3152 break;
3156 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
3158 tem = simplify_gen_binary (GET_CODE (op0), mode,
3159 pmop[0], pmop[1]);
3160 return simplify_gen_binary (code, mode, tem, op1);
3164 /* (and X (ior (not X) Y)) -> (and X Y) */
3165 if (GET_CODE (op1) == IOR
3166 && GET_CODE (XEXP (op1, 0)) == NOT
3167 && rtx_equal_p (op0, XEXP (XEXP (op1, 0), 0)))
3168 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));
3170 /* (and (ior (not X) Y) X) -> (and X Y) */
3171 if (GET_CODE (op0) == IOR
3172 && GET_CODE (XEXP (op0, 0)) == NOT
3173 && rtx_equal_p (op1, XEXP (XEXP (op0, 0), 0)))
3174 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));
3176 /* (and X (ior Y (not X))) -> (and X Y) */
3177 if (GET_CODE (op1) == IOR
3178 && GET_CODE (XEXP (op1, 1)) == NOT
3179 && rtx_equal_p (op0, XEXP (XEXP (op1, 1), 0)))
3180 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 0));
3182 /* (and (ior Y (not X)) X) -> (and X Y) */
3183 if (GET_CODE (op0) == IOR
3184 && GET_CODE (XEXP (op0, 1)) == NOT
3185 && rtx_equal_p (op1, XEXP (XEXP (op0, 1), 0)))
3186 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 0));
3188 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
3189 if (tem)
3190 return tem;
3192 tem = simplify_associative_operation (code, mode, op0, op1);
3193 if (tem)
3194 return tem;
3195 break;
3197 case UDIV:
3198 /* 0/x is 0 (or x&0 if x has side-effects). */
3199 if (trueop0 == CONST0_RTX (mode)
3200 && !cfun->can_throw_non_call_exceptions)
3202 if (side_effects_p (op1))
3203 return simplify_gen_binary (AND, mode, op1, trueop0);
3204 return trueop0;
3206 /* x/1 is x. */
3207 if (trueop1 == CONST1_RTX (mode))
3209 tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3210 if (tem)
3211 return tem;
3213 /* Convert divide by power of two into shift. */
3214 if (CONST_INT_P (trueop1)
3215 && (val = exact_log2 (UINTVAL (trueop1))) > 0)
3216 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
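      /* For example, (udiv:SI X (const_int 16)) becomes
	 (lshiftrt:SI X (const_int 4)).  */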
3217 break;
3219 case DIV:
3220 /* Handle floating point and integers separately. */
3221 if (SCALAR_FLOAT_MODE_P (mode))
3223 /* Maybe change 0.0 / x to 0.0. This transformation isn't
3224 safe for modes with NaNs, since 0.0 / 0.0 will then be
3225 NaN rather than 0.0. Nor is it safe for modes with signed
3226 zeros, since dividing 0 by a negative number gives -0.0 */
3227 if (trueop0 == CONST0_RTX (mode)
3228 && !HONOR_NANS (mode)
3229 && !HONOR_SIGNED_ZEROS (mode)
3230 && ! side_effects_p (op1))
3231 return op0;
3232 /* x/1.0 is x. */
3233 if (trueop1 == CONST1_RTX (mode)
3234 && !HONOR_SNANS (mode))
3235 return op0;
3237 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
3238 && trueop1 != CONST0_RTX (mode))
3240 const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);
3242 /* x/-1.0 is -x. */
3243 if (real_equal (d1, &dconstm1)
3244 && !HONOR_SNANS (mode))
3245 return simplify_gen_unary (NEG, mode, op0, mode);
3247 /* Change FP division by a constant into multiplication.
3248 Only do this with -freciprocal-math. */
3249 if (flag_reciprocal_math
3250 && !real_equal (d1, &dconst0))
3252 REAL_VALUE_TYPE d;
3253 real_arithmetic (&d, RDIV_EXPR, &dconst1, d1);
3254 tem = const_double_from_real_value (d, mode);
3255 return simplify_gen_binary (MULT, mode, op0, tem);
3259 else if (SCALAR_INT_MODE_P (mode))
3261 /* 0/x is 0 (or x&0 if x has side-effects). */
3262 if (trueop0 == CONST0_RTX (mode)
3263 && !cfun->can_throw_non_call_exceptions)
3265 if (side_effects_p (op1))
3266 return simplify_gen_binary (AND, mode, op1, trueop0);
3267 return trueop0;
3269 /* x/1 is x. */
3270 if (trueop1 == CONST1_RTX (mode))
3272 tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3273 if (tem)
3274 return tem;
3276 /* x/-1 is -x. */
3277 if (trueop1 == constm1_rtx)
3279 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3280 if (x)
3281 return simplify_gen_unary (NEG, mode, x, mode);
3284 break;
3286 case UMOD:
3287 /* 0%x is 0 (or x&0 if x has side-effects). */
3288 if (trueop0 == CONST0_RTX (mode))
3290 if (side_effects_p (op1))
3291 return simplify_gen_binary (AND, mode, op1, trueop0);
3292 return trueop0;
3294 /* x%1 is 0 (or x&0 if x has side-effects). */
3295 if (trueop1 == CONST1_RTX (mode))
3297 if (side_effects_p (op0))
3298 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3299 return CONST0_RTX (mode);
3301 /* Implement modulus by power of two as AND. */
3302 if (CONST_INT_P (trueop1)
3303 && exact_log2 (UINTVAL (trueop1)) > 0)
3304 return simplify_gen_binary (AND, mode, op0,
3305 gen_int_mode (INTVAL (op1) - 1, mode));
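      /* For example, (umod:SI X (const_int 8)) becomes
	 (and:SI X (const_int 7)).  */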
3306 break;
3308 case MOD:
3309 /* 0%x is 0 (or x&0 if x has side-effects). */
3310 if (trueop0 == CONST0_RTX (mode))
3312 if (side_effects_p (op1))
3313 return simplify_gen_binary (AND, mode, op1, trueop0);
3314 return trueop0;
3316 /* x%1 and x%-1 is 0 (or x&0 if x has side-effects). */
3317 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
3319 if (side_effects_p (op0))
3320 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3321 return CONST0_RTX (mode);
3323 break;
3325 case ROTATERT:
3326 case ROTATE:
3327 /* Canonicalize rotates by a constant amount. If op1 is bitsize / 2,
3328 prefer left rotation; if op1 is from bitsize / 2 + 1 to
3329 bitsize - 1, use the other rotate direction with an amount of
3330 1 .. bitsize / 2 - 1 instead. */
3331 #if defined(HAVE_rotate) && defined(HAVE_rotatert)
3332 if (CONST_INT_P (trueop1)
3333 && IN_RANGE (INTVAL (trueop1),
3334 GET_MODE_UNIT_PRECISION (mode) / 2 + (code == ROTATE),
3335 GET_MODE_UNIT_PRECISION (mode) - 1))
3336 return simplify_gen_binary (code == ROTATE ? ROTATERT : ROTATE,
3337 mode, op0,
3338 GEN_INT (GET_MODE_UNIT_PRECISION (mode)
3339 - INTVAL (trueop1)));
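      /* For example, when the target has both rotate patterns, in SImode
	 (rotate X (const_int 24)) is canonicalized to
	 (rotatert X (const_int 8)), and (rotatert X (const_int 20)) to
	 (rotate X (const_int 12)).  */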
3340 #endif
3341 /* FALLTHRU */
3342 case ASHIFTRT:
3343 if (trueop1 == CONST0_RTX (mode))
3344 return op0;
3345 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3346 return op0;
3347 /* Rotating ~0 always results in ~0. */
3348 if (CONST_INT_P (trueop0)
3349 && HWI_COMPUTABLE_MODE_P (mode)
3350 && UINTVAL (trueop0) == GET_MODE_MASK (mode)
3351 && ! side_effects_p (op1))
3352 return op0;
3354 canonicalize_shift:
3355 /* Given:
3356 scalar modes M1, M2
3357 scalar constants c1, c2
3358 size (M2) > size (M1)
3359 c1 == size (M2) - size (M1)
3360 optimize:
3361 ([a|l]shiftrt:M1 (subreg:M1 (lshiftrt:M2 (reg:M2) (const_int <c1>))
3362 <low_part>)
3363 (const_int <c2>))
3365 (subreg:M1 ([a|l]shiftrt:M2 (reg:M2) (const_int <c1 + c2>))
3366 <low_part>). */
3367 if ((code == ASHIFTRT || code == LSHIFTRT)
3368 && is_a <scalar_int_mode> (mode, &int_mode)
3369 && SUBREG_P (op0)
3370 && CONST_INT_P (op1)
3371 && GET_CODE (SUBREG_REG (op0)) == LSHIFTRT
3372 && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (op0)),
3373 &inner_mode)
3374 && CONST_INT_P (XEXP (SUBREG_REG (op0), 1))
3375 && GET_MODE_BITSIZE (inner_mode) > GET_MODE_BITSIZE (int_mode)
3376 && (INTVAL (XEXP (SUBREG_REG (op0), 1))
3377 == GET_MODE_BITSIZE (inner_mode) - GET_MODE_BITSIZE (int_mode))
3378 && subreg_lowpart_p (op0))
3380 rtx tmp = GEN_INT (INTVAL (XEXP (SUBREG_REG (op0), 1))
3381 + INTVAL (op1));
3382 tmp = simplify_gen_binary (code, inner_mode,
3383 XEXP (SUBREG_REG (op0), 0),
3384 tmp);
3385 return lowpart_subreg (int_mode, tmp, inner_mode);
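      /* For example, with M1 = SImode and M2 = DImode,
	 (lshiftrt:SI (subreg:SI (lshiftrt:DI X (const_int 32)) <low_part>)
	 (const_int 5)) becomes
	 (subreg:SI (lshiftrt:DI X (const_int 37)) <low_part>).  */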
3388 if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
3390 val = INTVAL (op1) & (GET_MODE_UNIT_PRECISION (mode) - 1);
3391 if (val != INTVAL (op1))
3392 return simplify_gen_binary (code, mode, op0, GEN_INT (val));
3394 break;
3396 case ASHIFT:
3397 case SS_ASHIFT:
3398 case US_ASHIFT:
3399 if (trueop1 == CONST0_RTX (mode))
3400 return op0;
3401 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3402 return op0;
3403 goto canonicalize_shift;
3405 case LSHIFTRT:
3406 if (trueop1 == CONST0_RTX (mode))
3407 return op0;
3408 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3409 return op0;
3410 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
3411 if (GET_CODE (op0) == CLZ
3412 && is_a <scalar_int_mode> (GET_MODE (XEXP (op0, 0)), &inner_mode)
3413 && CONST_INT_P (trueop1)
3414 && STORE_FLAG_VALUE == 1
3415 && INTVAL (trueop1) < GET_MODE_UNIT_PRECISION (mode))
3417 unsigned HOST_WIDE_INT zero_val = 0;
3419 if (CLZ_DEFINED_VALUE_AT_ZERO (inner_mode, zero_val)
3420 && zero_val == GET_MODE_PRECISION (inner_mode)
3421 && INTVAL (trueop1) == exact_log2 (zero_val))
3422 return simplify_gen_relational (EQ, mode, inner_mode,
3423 XEXP (op0, 0), const0_rtx);
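      /* For example, if CLZ_DEFINED_VALUE_AT_ZERO yields 32 for SImode,
	 (lshiftrt:SI (clz:SI X) (const_int 5)) becomes
	 (eq:SI X (const_int 0)).  */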
3425 goto canonicalize_shift;
3427 case SMIN:
3428 if (HWI_COMPUTABLE_MODE_P (mode)
3429 && mode_signbit_p (mode, trueop1)
3430 && ! side_effects_p (op0))
3431 return op1;
3432 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3433 return op0;
3434 tem = simplify_associative_operation (code, mode, op0, op1);
3435 if (tem)
3436 return tem;
3437 break;
3439 case SMAX:
3440 if (HWI_COMPUTABLE_MODE_P (mode)
3441 && CONST_INT_P (trueop1)
3442 && (UINTVAL (trueop1) == GET_MODE_MASK (mode) >> 1)
3443 && ! side_effects_p (op0))
3444 return op1;
3445 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3446 return op0;
3447 tem = simplify_associative_operation (code, mode, op0, op1);
3448 if (tem)
3449 return tem;
3450 break;
3452 case UMIN:
3453 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
3454 return op1;
3455 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3456 return op0;
3457 tem = simplify_associative_operation (code, mode, op0, op1);
3458 if (tem)
3459 return tem;
3460 break;
3462 case UMAX:
3463 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
3464 return op1;
3465 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3466 return op0;
3467 tem = simplify_associative_operation (code, mode, op0, op1);
3468 if (tem)
3469 return tem;
3470 break;
3472 case SS_PLUS:
3473 case US_PLUS:
3474 case SS_MINUS:
3475 case US_MINUS:
3476 case SS_MULT:
3477 case US_MULT:
3478 case SS_DIV:
3479 case US_DIV:
3480 /* ??? There are simplifications that can be done. */
3481 return 0;
3483 case VEC_SELECT:
3484 if (!VECTOR_MODE_P (mode))
3486 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3487 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
3488 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3489 gcc_assert (XVECLEN (trueop1, 0) == 1);
3490 gcc_assert (CONST_INT_P (XVECEXP (trueop1, 0, 0)));
3492 if (GET_CODE (trueop0) == CONST_VECTOR)
3493 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
3494 (trueop1, 0, 0)));
3496 /* Extract a scalar element from a nested VEC_SELECT expression
3497 (with optional nested VEC_CONCAT expression). Some targets
3498 (i386) extract a scalar element from a vector using a chain of
3499 nested VEC_SELECT expressions. When the input operand is a memory
3500 operand, this operation can be simplified to a simple scalar
3501 load from an offset memory address. */
3502 if (GET_CODE (trueop0) == VEC_SELECT)
3504 rtx op0 = XEXP (trueop0, 0);
3505 rtx op1 = XEXP (trueop0, 1);
3507 machine_mode opmode = GET_MODE (op0);
3508 int elt_size = GET_MODE_UNIT_SIZE (opmode);
3509 int n_elts = GET_MODE_SIZE (opmode) / elt_size;
3511 int i = INTVAL (XVECEXP (trueop1, 0, 0));
3512 int elem;
3514 rtvec vec;
3515 rtx tmp_op, tmp;
3517 gcc_assert (GET_CODE (op1) == PARALLEL);
3518 gcc_assert (i < n_elts);
3520 /* Select the element pointed to by the nested selector. */
3521 elem = INTVAL (XVECEXP (op1, 0, i));
3523 /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */
3524 if (GET_CODE (op0) == VEC_CONCAT)
3526 rtx op00 = XEXP (op0, 0);
3527 rtx op01 = XEXP (op0, 1);
3529 machine_mode mode00, mode01;
3530 int n_elts00, n_elts01;
3532 mode00 = GET_MODE (op00);
3533 mode01 = GET_MODE (op01);
3535 /* Find out number of elements of each operand. */
3536 if (VECTOR_MODE_P (mode00))
3538 elt_size = GET_MODE_UNIT_SIZE (mode00);
3539 n_elts00 = GET_MODE_SIZE (mode00) / elt_size;
3541 else
3542 n_elts00 = 1;
3544 if (VECTOR_MODE_P (mode01))
3546 elt_size = GET_MODE_UNIT_SIZE (mode01);
3547 n_elts01 = GET_MODE_SIZE (mode01) / elt_size;
3549 else
3550 n_elts01 = 1;
3552 gcc_assert (n_elts == n_elts00 + n_elts01);
3554 /* Select correct operand of VEC_CONCAT
3555 and adjust selector. */
3556 if (elem < n_elts01)
3557 tmp_op = op00;
3558 else
3560 tmp_op = op01;
3561 elem -= n_elts00;
3564 else
3565 tmp_op = op0;
3567 vec = rtvec_alloc (1);
3568 RTVEC_ELT (vec, 0) = GEN_INT (elem);
3570 tmp = gen_rtx_fmt_ee (code, mode,
3571 tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
3572 return tmp;
3574 if (GET_CODE (trueop0) == VEC_DUPLICATE
3575 && GET_MODE (XEXP (trueop0, 0)) == mode)
3576 return XEXP (trueop0, 0);
3578 else
3580 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3581 gcc_assert (GET_MODE_INNER (mode)
3582 == GET_MODE_INNER (GET_MODE (trueop0)));
3583 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3585 if (GET_CODE (trueop0) == CONST_VECTOR)
3587 int elt_size = GET_MODE_UNIT_SIZE (mode);
3588 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3589 rtvec v = rtvec_alloc (n_elts);
3590 unsigned int i;
3592 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
3593 for (i = 0; i < n_elts; i++)
3595 rtx x = XVECEXP (trueop1, 0, i);
3597 gcc_assert (CONST_INT_P (x));
3598 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
3599 INTVAL (x));
3602 return gen_rtx_CONST_VECTOR (mode, v);
3605 /* Recognize the identity. */
3606 if (GET_MODE (trueop0) == mode)
3608 bool maybe_ident = true;
3609 for (int i = 0; i < XVECLEN (trueop1, 0); i++)
3611 rtx j = XVECEXP (trueop1, 0, i);
3612 if (!CONST_INT_P (j) || INTVAL (j) != i)
3614 maybe_ident = false;
3615 break;
3618 if (maybe_ident)
3619 return trueop0;
3622 /* If we build {a,b} then permute it, build the result directly. */
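/* Illustrative sketch (the operand names are only an example): with
     trueop0 = (vec_concat (vec_concat a b) (vec_concat c d))
   and a selector of (parallel [(const_int 3) (const_int 1)]),
   element 3 is d and element 1 is b, so the result is built directly
   as (vec_concat d b).  */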
3623 if (XVECLEN (trueop1, 0) == 2
3624 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3625 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3626 && GET_CODE (trueop0) == VEC_CONCAT
3627 && GET_CODE (XEXP (trueop0, 0)) == VEC_CONCAT
3628 && GET_MODE (XEXP (trueop0, 0)) == mode
3629 && GET_CODE (XEXP (trueop0, 1)) == VEC_CONCAT
3630 && GET_MODE (XEXP (trueop0, 1)) == mode)
3632 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3633 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3634 rtx subop0, subop1;
3636 gcc_assert (i0 < 4 && i1 < 4);
3637 subop0 = XEXP (XEXP (trueop0, i0 / 2), i0 % 2);
3638 subop1 = XEXP (XEXP (trueop0, i1 / 2), i1 % 2);
3640 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3643 if (XVECLEN (trueop1, 0) == 2
3644 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3645 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3646 && GET_CODE (trueop0) == VEC_CONCAT
3647 && GET_MODE (trueop0) == mode)
3649 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3650 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3651 rtx subop0, subop1;
3653 gcc_assert (i0 < 2 && i1 < 2);
3654 subop0 = XEXP (trueop0, i0);
3655 subop1 = XEXP (trueop0, i1);
3657 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3660 /* If we select one half of a vec_concat, return that. */
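/* Illustrative sketch (the modes are only an example):
     (vec_select:V2SI (vec_concat:V4SI x:V2SI y:V2SI)
                      (parallel [(const_int 2) (const_int 3)]))
   selects exactly the second half and simplifies to y, while a selector
   of [(const_int 0) (const_int 1)] would simplify to x.  */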
3661 if (GET_CODE (trueop0) == VEC_CONCAT
3662 && CONST_INT_P (XVECEXP (trueop1, 0, 0)))
3664 rtx subop0 = XEXP (trueop0, 0);
3665 rtx subop1 = XEXP (trueop0, 1);
3666 machine_mode mode0 = GET_MODE (subop0);
3667 machine_mode mode1 = GET_MODE (subop1);
3668 int li = GET_MODE_UNIT_SIZE (mode0);
3669 int l0 = GET_MODE_SIZE (mode0) / li;
3670 int l1 = GET_MODE_SIZE (mode1) / li;
3671 int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3672 if (i0 == 0 && !side_effects_p (op1) && mode == mode0)
3674 bool success = true;
3675 for (int i = 1; i < l0; ++i)
3677 rtx j = XVECEXP (trueop1, 0, i);
3678 if (!CONST_INT_P (j) || INTVAL (j) != i)
3680 success = false;
3681 break;
3684 if (success)
3685 return subop0;
3687 if (i0 == l0 && !side_effects_p (op0) && mode == mode1)
3689 bool success = true;
3690 for (int i = 1; i < l1; ++i)
3692 rtx j = XVECEXP (trueop1, 0, i);
3693 if (!CONST_INT_P (j) || INTVAL (j) != i0 + i)
3695 success = false;
3696 break;
3699 if (success)
3700 return subop1;
3705 if (XVECLEN (trueop1, 0) == 1
3706 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3707 && GET_CODE (trueop0) == VEC_CONCAT)
3709 rtx vec = trueop0;
3710 int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
3712 /* Try to find the element in the VEC_CONCAT. */
3713 while (GET_MODE (vec) != mode
3714 && GET_CODE (vec) == VEC_CONCAT)
3716 HOST_WIDE_INT vec_size;
3718 if (CONST_INT_P (XEXP (vec, 0)))
3720 /* vec_concat of two const_ints doesn't make sense with
3721 respect to modes. */
3722 if (CONST_INT_P (XEXP (vec, 1)))
3723 return 0;
3725 vec_size = GET_MODE_SIZE (GET_MODE (trueop0))
3726 - GET_MODE_SIZE (GET_MODE (XEXP (vec, 1)));
3728 else
3729 vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
3731 if (offset < vec_size)
3732 vec = XEXP (vec, 0);
3733 else
3735 offset -= vec_size;
3736 vec = XEXP (vec, 1);
3738 vec = avoid_constant_pool_reference (vec);
3741 if (GET_MODE (vec) == mode)
3742 return vec;
3745 /* If we select elements in a vec_merge that all come from the same
3746 operand, select from that operand directly. */
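/* Illustrative sketch (the mask value is only an example): in
     (vec_select (vec_merge a b (const_int 5))
                 (parallel [(const_int 0) (const_int 2)]))
   mask 5 (binary 101) takes elements 0 and 2 from a, so the selection
   never reads b and becomes
     (vec_select a (parallel [(const_int 0) (const_int 2)])).  */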
3747 if (GET_CODE (op0) == VEC_MERGE)
3749 rtx trueop02 = avoid_constant_pool_reference (XEXP (op0, 2));
3750 if (CONST_INT_P (trueop02))
3752 unsigned HOST_WIDE_INT sel = UINTVAL (trueop02);
3753 bool all_operand0 = true;
3754 bool all_operand1 = true;
3755 for (int i = 0; i < XVECLEN (trueop1, 0); i++)
3757 rtx j = XVECEXP (trueop1, 0, i);
3758 if (sel & (HOST_WIDE_INT_1U << UINTVAL (j)))
3759 all_operand1 = false;
3760 else
3761 all_operand0 = false;
3763 if (all_operand0 && !side_effects_p (XEXP (op0, 1)))
3764 return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 0), op1);
3765 if (all_operand1 && !side_effects_p (XEXP (op0, 0)))
3766 return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 1), op1);
3770 /* If we have two nested selects that are inverses of each
3771 other, replace them with the source operand. */
3772 if (GET_CODE (trueop0) == VEC_SELECT
3773 && GET_MODE (XEXP (trueop0, 0)) == mode)
3775 rtx op0_subop1 = XEXP (trueop0, 1);
3776 gcc_assert (GET_CODE (op0_subop1) == PARALLEL);
3777 gcc_assert (XVECLEN (trueop1, 0) == GET_MODE_NUNITS (mode));
3779 /* Apply the outer ordering vector to the inner one. (The inner
3780 ordering vector is expressly permitted to be of a different
3781 length than the outer one.) If the result is { 0, 1, ..., n-1 }
3782 then the two VEC_SELECTs cancel. */
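/* Illustrative sketch: if the inner VEC_SELECT reverses a two-element
   vector with (parallel [(const_int 1) (const_int 0)]) and the outer
   selector is also [(const_int 1) (const_int 0)], then outer element 0
   maps to inner element 1, which is source element 0, and outer element
   1 maps to source element 1; the composition is { 0, 1 }, so both
   selects cancel and the source vector is returned.  */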
3783 for (int i = 0; i < XVECLEN (trueop1, 0); ++i)
3785 rtx x = XVECEXP (trueop1, 0, i);
3786 if (!CONST_INT_P (x))
3787 return 0;
3788 rtx y = XVECEXP (op0_subop1, 0, INTVAL (x));
3789 if (!CONST_INT_P (y) || i != INTVAL (y))
3790 return 0;
3792 return XEXP (trueop0, 0);
3795 return 0;
3796 case VEC_CONCAT:
3798 machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
3799 ? GET_MODE (trueop0)
3800 : GET_MODE_INNER (mode));
3801 machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
3802 ? GET_MODE (trueop1)
3803 : GET_MODE_INNER (mode));
3805 gcc_assert (VECTOR_MODE_P (mode));
3806 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
3807 == GET_MODE_SIZE (mode));
3809 if (VECTOR_MODE_P (op0_mode))
3810 gcc_assert (GET_MODE_INNER (mode)
3811 == GET_MODE_INNER (op0_mode));
3812 else
3813 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
3815 if (VECTOR_MODE_P (op1_mode))
3816 gcc_assert (GET_MODE_INNER (mode)
3817 == GET_MODE_INNER (op1_mode));
3818 else
3819 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
3821 if ((GET_CODE (trueop0) == CONST_VECTOR
3822 || CONST_SCALAR_INT_P (trueop0)
3823 || CONST_DOUBLE_AS_FLOAT_P (trueop0))
3824 && (GET_CODE (trueop1) == CONST_VECTOR
3825 || CONST_SCALAR_INT_P (trueop1)
3826 || CONST_DOUBLE_AS_FLOAT_P (trueop1)))
3828 int elt_size = GET_MODE_UNIT_SIZE (mode);
3829 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3830 rtvec v = rtvec_alloc (n_elts);
3831 unsigned int i;
3832 unsigned in_n_elts = 1;
3834 if (VECTOR_MODE_P (op0_mode))
3835 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
3836 for (i = 0; i < n_elts; i++)
3838 if (i < in_n_elts)
3840 if (!VECTOR_MODE_P (op0_mode))
3841 RTVEC_ELT (v, i) = trueop0;
3842 else
3843 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
3845 else
3847 if (!VECTOR_MODE_P (op1_mode))
3848 RTVEC_ELT (v, i) = trueop1;
3849 else
3850 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
3851 i - in_n_elts);
3855 return gen_rtx_CONST_VECTOR (mode, v);
3858 /* Try to merge two VEC_SELECTs from the same vector into a single one.
3859 Restrict the transformation to avoid generating a VEC_SELECT with a
3860 mode unrelated to its operand. */
3861 if (GET_CODE (trueop0) == VEC_SELECT
3862 && GET_CODE (trueop1) == VEC_SELECT
3863 && rtx_equal_p (XEXP (trueop0, 0), XEXP (trueop1, 0))
3864 && GET_MODE (XEXP (trueop0, 0)) == mode)
3866 rtx par0 = XEXP (trueop0, 1);
3867 rtx par1 = XEXP (trueop1, 1);
3868 int len0 = XVECLEN (par0, 0);
3869 int len1 = XVECLEN (par1, 0);
3870 rtvec vec = rtvec_alloc (len0 + len1);
3871 for (int i = 0; i < len0; i++)
3872 RTVEC_ELT (vec, i) = XVECEXP (par0, 0, i);
3873 for (int i = 0; i < len1; i++)
3874 RTVEC_ELT (vec, len0 + i) = XVECEXP (par1, 0, i);
3875 return simplify_gen_binary (VEC_SELECT, mode, XEXP (trueop0, 0),
3876 gen_rtx_PARALLEL (VOIDmode, vec));
3879 return 0;
3881 default:
3882 gcc_unreachable ();
3885 return 0;
3889 simplify_const_binary_operation (enum rtx_code code, machine_mode mode,
3890 rtx op0, rtx op1)
3892 if (VECTOR_MODE_P (mode)
3893 && code != VEC_CONCAT
3894 && GET_CODE (op0) == CONST_VECTOR
3895 && GET_CODE (op1) == CONST_VECTOR)
3897 unsigned n_elts = GET_MODE_NUNITS (mode);
3898 machine_mode op0mode = GET_MODE (op0);
3899 unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
3900 machine_mode op1mode = GET_MODE (op1);
3901 unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
3902 rtvec v = rtvec_alloc (n_elts);
3903 unsigned int i;
3905 gcc_assert (op0_n_elts == n_elts);
3906 gcc_assert (op1_n_elts == n_elts);
3907 for (i = 0; i < n_elts; i++)
3909 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
3910 CONST_VECTOR_ELT (op0, i),
3911 CONST_VECTOR_ELT (op1, i));
3912 if (!x)
3913 return 0;
3914 RTVEC_ELT (v, i) = x;
3917 return gen_rtx_CONST_VECTOR (mode, v);
3920 if (VECTOR_MODE_P (mode)
3921 && code == VEC_CONCAT
3922 && (CONST_SCALAR_INT_P (op0)
3923 || GET_CODE (op0) == CONST_FIXED
3924 || CONST_DOUBLE_AS_FLOAT_P (op0))
3925 && (CONST_SCALAR_INT_P (op1)
3926 || CONST_DOUBLE_AS_FLOAT_P (op1)
3927 || GET_CODE (op1) == CONST_FIXED))
3929 unsigned n_elts = GET_MODE_NUNITS (mode);
3930 rtvec v = rtvec_alloc (n_elts);
3932 gcc_assert (n_elts >= 2);
3933 if (n_elts == 2)
3935 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
3936 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
3938 RTVEC_ELT (v, 0) = op0;
3939 RTVEC_ELT (v, 1) = op1;
3941 else
3943 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
3944 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
3945 unsigned i;
3947 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
3948 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
3949 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
3951 for (i = 0; i < op0_n_elts; ++i)
3952 RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
3953 for (i = 0; i < op1_n_elts; ++i)
3954 RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
3957 return gen_rtx_CONST_VECTOR (mode, v);
3960 if (SCALAR_FLOAT_MODE_P (mode)
3961 && CONST_DOUBLE_AS_FLOAT_P (op0)
3962 && CONST_DOUBLE_AS_FLOAT_P (op1)
3963 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
3965 if (code == AND
3966 || code == IOR
3967 || code == XOR)
3969 long tmp0[4];
3970 long tmp1[4];
3971 REAL_VALUE_TYPE r;
3972 int i;
3974 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
3975 GET_MODE (op0));
3976 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
3977 GET_MODE (op1));
3978 for (i = 0; i < 4; i++)
3980 switch (code)
3982 case AND:
3983 tmp0[i] &= tmp1[i];
3984 break;
3985 case IOR:
3986 tmp0[i] |= tmp1[i];
3987 break;
3988 case XOR:
3989 tmp0[i] ^= tmp1[i];
3990 break;
3991 default:
3992 gcc_unreachable ();
3995 real_from_target (&r, tmp0, mode);
3996 return const_double_from_real_value (r, mode);
3998 else
4000 REAL_VALUE_TYPE f0, f1, value, result;
4001 const REAL_VALUE_TYPE *opr0, *opr1;
4002 bool inexact;
4004 opr0 = CONST_DOUBLE_REAL_VALUE (op0);
4005 opr1 = CONST_DOUBLE_REAL_VALUE (op1);
4007 if (HONOR_SNANS (mode)
4008 && (REAL_VALUE_ISSIGNALING_NAN (*opr0)
4009 || REAL_VALUE_ISSIGNALING_NAN (*opr1)))
4010 return 0;
4012 real_convert (&f0, mode, opr0);
4013 real_convert (&f1, mode, opr1);
4015 if (code == DIV
4016 && real_equal (&f1, &dconst0)
4017 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
4018 return 0;
4020 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
4021 && flag_trapping_math
4022 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
4024 int s0 = REAL_VALUE_NEGATIVE (f0);
4025 int s1 = REAL_VALUE_NEGATIVE (f1);
4027 switch (code)
4029 case PLUS:
4030 /* Inf + -Inf = NaN plus exception. */
4031 if (s0 != s1)
4032 return 0;
4033 break;
4034 case MINUS:
4035 /* Inf - Inf = NaN plus exception. */
4036 if (s0 == s1)
4037 return 0;
4038 break;
4039 case DIV:
4040 /* Inf / Inf = NaN plus exception. */
4041 return 0;
4042 default:
4043 break;
4047 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
4048 && flag_trapping_math
4049 && ((REAL_VALUE_ISINF (f0) && real_equal (&f1, &dconst0))
4050 || (REAL_VALUE_ISINF (f1)
4051 && real_equal (&f0, &dconst0))))
4052 /* Inf * 0 = NaN plus exception. */
4053 return 0;
4055 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
4056 &f0, &f1);
4057 real_convert (&result, mode, &value);
4059 /* Don't constant fold this floating point operation if
4060 the result has overflowed and flag_trapping_math is set. */
4062 if (flag_trapping_math
4063 && MODE_HAS_INFINITIES (mode)
4064 && REAL_VALUE_ISINF (result)
4065 && !REAL_VALUE_ISINF (f0)
4066 && !REAL_VALUE_ISINF (f1))
4067 /* Overflow plus exception. */
4068 return 0;
4070 /* Don't constant fold this floating point operation if the
4071 result may depend upon the run-time rounding mode and
4072 flag_rounding_math is set, or if GCC's software emulation
4073 is unable to accurately represent the result. */
4075 if ((flag_rounding_math
4076 || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
4077 && (inexact || !real_identical (&result, &value)))
4078 return NULL_RTX;
4080 return const_double_from_real_value (result, mode);
4084 /* We can fold some multi-word operations. */
4085 scalar_int_mode int_mode;
4086 if (is_a <scalar_int_mode> (mode, &int_mode)
4087 && CONST_SCALAR_INT_P (op0)
4088 && CONST_SCALAR_INT_P (op1))
4090 wide_int result;
4091 bool overflow;
4092 rtx_mode_t pop0 = rtx_mode_t (op0, int_mode);
4093 rtx_mode_t pop1 = rtx_mode_t (op1, int_mode);
4095 #if TARGET_SUPPORTS_WIDE_INT == 0
4096 /* This assert keeps the simplification from producing a result
4097 that cannot be represented in a CONST_DOUBLE, but a lot of
4098 upstream callers expect that this function never fails to
4099 simplify something, so if you added this check to the test
4100 above, the code would die later anyway. If this assert
4101 fires, you just need to make the port support wide int. */
4102 gcc_assert (GET_MODE_PRECISION (int_mode) <= HOST_BITS_PER_DOUBLE_INT);
4103 #endif
4104 switch (code)
4106 case MINUS:
4107 result = wi::sub (pop0, pop1);
4108 break;
4110 case PLUS:
4111 result = wi::add (pop0, pop1);
4112 break;
4114 case MULT:
4115 result = wi::mul (pop0, pop1);
4116 break;
4118 case DIV:
4119 result = wi::div_trunc (pop0, pop1, SIGNED, &overflow);
4120 if (overflow)
4121 return NULL_RTX;
4122 break;
4124 case MOD:
4125 result = wi::mod_trunc (pop0, pop1, SIGNED, &overflow);
4126 if (overflow)
4127 return NULL_RTX;
4128 break;
4130 case UDIV:
4131 result = wi::div_trunc (pop0, pop1, UNSIGNED, &overflow);
4132 if (overflow)
4133 return NULL_RTX;
4134 break;
4136 case UMOD:
4137 result = wi::mod_trunc (pop0, pop1, UNSIGNED, &overflow);
4138 if (overflow)
4139 return NULL_RTX;
4140 break;
4142 case AND:
4143 result = wi::bit_and (pop0, pop1);
4144 break;
4146 case IOR:
4147 result = wi::bit_or (pop0, pop1);
4148 break;
4150 case XOR:
4151 result = wi::bit_xor (pop0, pop1);
4152 break;
4154 case SMIN:
4155 result = wi::smin (pop0, pop1);
4156 break;
4158 case SMAX:
4159 result = wi::smax (pop0, pop1);
4160 break;
4162 case UMIN:
4163 result = wi::umin (pop0, pop1);
4164 break;
4166 case UMAX:
4167 result = wi::umax (pop0, pop1);
4168 break;
4170 case LSHIFTRT:
4171 case ASHIFTRT:
4172 case ASHIFT:
4174 wide_int wop1 = pop1;
4175 if (SHIFT_COUNT_TRUNCATED)
4176 wop1 = wi::umod_trunc (wop1, GET_MODE_PRECISION (int_mode));
4177 else if (wi::geu_p (wop1, GET_MODE_PRECISION (int_mode)))
4178 return NULL_RTX;
4180 switch (code)
4182 case LSHIFTRT:
4183 result = wi::lrshift (pop0, wop1);
4184 break;
4186 case ASHIFTRT:
4187 result = wi::arshift (pop0, wop1);
4188 break;
4190 case ASHIFT:
4191 result = wi::lshift (pop0, wop1);
4192 break;
4194 default:
4195 gcc_unreachable ();
4197 break;
4199 case ROTATE:
4200 case ROTATERT:
4202 if (wi::neg_p (pop1))
4203 return NULL_RTX;
4205 switch (code)
4207 case ROTATE:
4208 result = wi::lrotate (pop0, pop1);
4209 break;
4211 case ROTATERT:
4212 result = wi::rrotate (pop0, pop1);
4213 break;
4215 default:
4216 gcc_unreachable ();
4218 break;
4220 default:
4221 return NULL_RTX;
4223 return immed_wide_int_const (result, int_mode);
4226 return NULL_RTX;
4231 /* Return a positive integer if X should sort after Y. The value
4232 returned is 1 if and only if X and Y are both regs. */
4234 static int
4235 simplify_plus_minus_op_data_cmp (rtx x, rtx y)
4237 int result;
4239 result = (commutative_operand_precedence (y)
4240 - commutative_operand_precedence (x));
4241 if (result)
4242 return result + result;
4244 /* Group together equal REGs to do more simplification. */
4245 if (REG_P (x) && REG_P (y))
4246 return REGNO (x) > REGNO (y);
4248 return 0;
4251 /* Simplify and canonicalize a PLUS or MINUS, at least one of whose
4252 operands may be another PLUS or MINUS.
4254 Rather than test for specific cases, we do this by a brute-force method
4255 and do all possible simplifications until no more changes occur. Then
4256 we rebuild the operation.
4258 May return NULL_RTX when no changes were made. */
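/* An illustrative sketch of the approach (the operand names are only an
   example): for (plus (plus a b) (minus c a)) the operands are
   flattened into the signed multiset { +a, +b, +c, -a }; the pairwise
   simplification loop cancels a against -a, and the result is rebuilt
   as (plus b c), modulo operand ordering.  */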
4260 static rtx
4261 simplify_plus_minus (enum rtx_code code, machine_mode mode, rtx op0,
4262 rtx op1)
4264 struct simplify_plus_minus_op_data
4266 rtx op;
4267 short neg;
4268 } ops[16];
4269 rtx result, tem;
4270 int n_ops = 2;
4271 int changed, n_constants, canonicalized = 0;
4272 int i, j;
4274 memset (ops, 0, sizeof ops);
4276 /* Set up the two operands and then expand them until nothing has been
4277 changed. If we run out of room in our array, give up; this should
4278 almost never happen. */
4280 ops[0].op = op0;
4281 ops[0].neg = 0;
4282 ops[1].op = op1;
4283 ops[1].neg = (code == MINUS);
4287 changed = 0;
4288 n_constants = 0;
4290 for (i = 0; i < n_ops; i++)
4292 rtx this_op = ops[i].op;
4293 int this_neg = ops[i].neg;
4294 enum rtx_code this_code = GET_CODE (this_op);
4296 switch (this_code)
4298 case PLUS:
4299 case MINUS:
4300 if (n_ops == ARRAY_SIZE (ops))
4301 return NULL_RTX;
4303 ops[n_ops].op = XEXP (this_op, 1);
4304 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
4305 n_ops++;
4307 ops[i].op = XEXP (this_op, 0);
4308 changed = 1;
4309 /* If this operand was negated then we will potentially
4310 canonicalize the expression. Similarly if we don't
4311 place the operands adjacent we're re-ordering the
4312 expression and thus might be performing a
4313 canonicalization. Ignore register re-ordering.
4314 ??? It might be better to shuffle the ops array here,
4315 but then (plus (plus (A, B), plus (C, D))) wouldn't
4316 be seen as non-canonical. */
4317 if (this_neg
4318 || (i != n_ops - 2
4319 && !(REG_P (ops[i].op) && REG_P (ops[n_ops - 1].op))))
4320 canonicalized = 1;
4321 break;
4323 case NEG:
4324 ops[i].op = XEXP (this_op, 0);
4325 ops[i].neg = ! this_neg;
4326 changed = 1;
4327 canonicalized = 1;
4328 break;
4330 case CONST:
4331 if (n_ops != ARRAY_SIZE (ops)
4332 && GET_CODE (XEXP (this_op, 0)) == PLUS
4333 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
4334 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
4336 ops[i].op = XEXP (XEXP (this_op, 0), 0);
4337 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
4338 ops[n_ops].neg = this_neg;
4339 n_ops++;
4340 changed = 1;
4341 canonicalized = 1;
4343 break;
4345 case NOT:
4346 /* ~a -> (-a - 1) */
4347 if (n_ops != ARRAY_SIZE (ops))
4349 ops[n_ops].op = CONSTM1_RTX (mode);
4350 ops[n_ops++].neg = this_neg;
4351 ops[i].op = XEXP (this_op, 0);
4352 ops[i].neg = !this_neg;
4353 changed = 1;
4354 canonicalized = 1;
4356 break;
4358 case CONST_INT:
4359 n_constants++;
4360 if (this_neg)
4362 ops[i].op = neg_const_int (mode, this_op);
4363 ops[i].neg = 0;
4364 changed = 1;
4365 canonicalized = 1;
4367 break;
4369 default:
4370 break;
4374 while (changed);
4376 if (n_constants > 1)
4377 canonicalized = 1;
4379 gcc_assert (n_ops >= 2);
4381 /* If we only have two operands, we can avoid the loops. */
4382 if (n_ops == 2)
4384 enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
4385 rtx lhs, rhs;
4387 /* Get the two operands. Be careful with the order, especially for
4388 the cases where code == MINUS. */
4389 if (ops[0].neg && ops[1].neg)
4391 lhs = gen_rtx_NEG (mode, ops[0].op);
4392 rhs = ops[1].op;
4394 else if (ops[0].neg)
4396 lhs = ops[1].op;
4397 rhs = ops[0].op;
4399 else
4401 lhs = ops[0].op;
4402 rhs = ops[1].op;
4405 return simplify_const_binary_operation (code, mode, lhs, rhs);
4408 /* Now simplify each pair of operands until nothing changes. */
4409 while (1)
4411 /* Insertion sort is good enough for a small array. */
4412 for (i = 1; i < n_ops; i++)
4414 struct simplify_plus_minus_op_data save;
4415 int cmp;
4417 j = i - 1;
4418 cmp = simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op);
4419 if (cmp <= 0)
4420 continue;
4421 /* Just swapping registers doesn't count as canonicalization. */
4422 if (cmp != 1)
4423 canonicalized = 1;
4425 save = ops[i];
4427 ops[j + 1] = ops[j];
4428 while (j--
4429 && simplify_plus_minus_op_data_cmp (ops[j].op, save.op) > 0);
4430 ops[j + 1] = save;
4433 changed = 0;
4434 for (i = n_ops - 1; i > 0; i--)
4435 for (j = i - 1; j >= 0; j--)
4437 rtx lhs = ops[j].op, rhs = ops[i].op;
4438 int lneg = ops[j].neg, rneg = ops[i].neg;
4440 if (lhs != 0 && rhs != 0)
4442 enum rtx_code ncode = PLUS;
4444 if (lneg != rneg)
4446 ncode = MINUS;
4447 if (lneg)
4448 std::swap (lhs, rhs);
4450 else if (swap_commutative_operands_p (lhs, rhs))
4451 std::swap (lhs, rhs);
4453 if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
4454 && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
4456 rtx tem_lhs, tem_rhs;
4458 tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
4459 tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
4460 tem = simplify_binary_operation (ncode, mode, tem_lhs,
4461 tem_rhs);
4463 if (tem && !CONSTANT_P (tem))
4464 tem = gen_rtx_CONST (GET_MODE (tem), tem);
4466 else
4467 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
4469 if (tem)
4471 /* Reject "simplifications" that just wrap the two
4472 arguments in a CONST. Failure to do so can result
4473 in infinite recursion with simplify_binary_operation
4474 when it calls us to simplify CONST operations.
4475 Also, if we find such a simplification, don't try
4476 any more combinations with this rhs: We must have
4477 something like symbol+offset, i.e. one of the
4478 trivial CONST expressions we handle later. */
4479 if (GET_CODE (tem) == CONST
4480 && GET_CODE (XEXP (tem, 0)) == ncode
4481 && XEXP (XEXP (tem, 0), 0) == lhs
4482 && XEXP (XEXP (tem, 0), 1) == rhs)
4483 break;
4484 lneg &= rneg;
4485 if (GET_CODE (tem) == NEG)
4486 tem = XEXP (tem, 0), lneg = !lneg;
4487 if (CONST_INT_P (tem) && lneg)
4488 tem = neg_const_int (mode, tem), lneg = 0;
4490 ops[i].op = tem;
4491 ops[i].neg = lneg;
4492 ops[j].op = NULL_RTX;
4493 changed = 1;
4494 canonicalized = 1;
4499 if (!changed)
4500 break;
4502 /* Pack all the operands to the lower-numbered entries. */
4503 for (i = 0, j = 0; j < n_ops; j++)
4504 if (ops[j].op)
4506 ops[i] = ops[j];
4507 i++;
4509 n_ops = i;
4512 /* If nothing changed, check that rematerialization of rtl instructions
4513 is still required. */
4514 if (!canonicalized)
4516 /* Perform rematerialization only if all operands are registers and
4517 all operations are PLUS. */
4518 /* ??? Also disallow (non-global, non-frame) fixed registers to work
4519 around rs6000 and how it uses the CA register. See PR67145. */
4520 for (i = 0; i < n_ops; i++)
4521 if (ops[i].neg
4522 || !REG_P (ops[i].op)
4523 || (REGNO (ops[i].op) < FIRST_PSEUDO_REGISTER
4524 && fixed_regs[REGNO (ops[i].op)]
4525 && !global_regs[REGNO (ops[i].op)]
4526 && ops[i].op != frame_pointer_rtx
4527 && ops[i].op != arg_pointer_rtx
4528 && ops[i].op != stack_pointer_rtx))
4529 return NULL_RTX;
4530 goto gen_result;
4533 /* Create (minus -C X) instead of (neg (const (plus X C))). */
4534 if (n_ops == 2
4535 && CONST_INT_P (ops[1].op)
4536 && CONSTANT_P (ops[0].op)
4537 && ops[0].neg)
4538 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
4540 /* We suppressed creation of trivial CONST expressions in the
4541 combination loop to avoid recursion. Create one manually now.
4542 The combination loop should have ensured that there is exactly
4543 one CONST_INT, and the sort will have ensured that it is last
4544 in the array and that any other constant will be next-to-last. */
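/* Illustrative sketch (the symbol and offset are only an example): with
   the last two entries being (symbol_ref s) and (const_int 4),
   plus_constant folds them into (const (plus (symbol_ref s) (const_int 4))),
   i.e. the trivial CONST expression mentioned above.  */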
4546 if (n_ops > 1
4547 && CONST_INT_P (ops[n_ops - 1].op)
4548 && CONSTANT_P (ops[n_ops - 2].op))
4550 rtx value = ops[n_ops - 1].op;
4551 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
4552 value = neg_const_int (mode, value);
4553 if (CONST_INT_P (value))
4555 ops[n_ops - 2].op = plus_constant (mode, ops[n_ops - 2].op,
4556 INTVAL (value));
4557 n_ops--;
4561 /* Put a non-negated operand first, if possible. */
4563 for (i = 0; i < n_ops && ops[i].neg; i++)
4564 continue;
4565 if (i == n_ops)
4566 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
4567 else if (i != 0)
4569 tem = ops[0].op;
4570 ops[0] = ops[i];
4571 ops[i].op = tem;
4572 ops[i].neg = 1;
4575 /* Now make the result by performing the requested operations. */
4576 gen_result:
4577 result = ops[0].op;
4578 for (i = 1; i < n_ops; i++)
4579 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
4580 mode, result, ops[i].op);
4582 return result;
4585 /* Check whether an operand is suitable for calling simplify_plus_minus. */
4586 static bool
4587 plus_minus_operand_p (const_rtx x)
4589 return GET_CODE (x) == PLUS
4590 || GET_CODE (x) == MINUS
4591 || (GET_CODE (x) == CONST
4592 && GET_CODE (XEXP (x, 0)) == PLUS
4593 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
4594 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
4597 /* Like simplify_binary_operation except used for relational operators.
4598 MODE is the mode of the result. If MODE is VOIDmode, the operands must
4599 not both be VOIDmode.
4601 CMP_MODE specifies the mode in which the comparison is done, so it is
4602 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
4603 the operands or, if both are VOIDmode, the operands are compared in
4604 "infinite precision". */
4606 simplify_relational_operation (enum rtx_code code, machine_mode mode,
4607 machine_mode cmp_mode, rtx op0, rtx op1)
4609 rtx tem, trueop0, trueop1;
4611 if (cmp_mode == VOIDmode)
4612 cmp_mode = GET_MODE (op0);
4613 if (cmp_mode == VOIDmode)
4614 cmp_mode = GET_MODE (op1);
4616 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
4617 if (tem)
4619 if (SCALAR_FLOAT_MODE_P (mode))
4621 if (tem == const0_rtx)
4622 return CONST0_RTX (mode);
4623 #ifdef FLOAT_STORE_FLAG_VALUE
4625 REAL_VALUE_TYPE val;
4626 val = FLOAT_STORE_FLAG_VALUE (mode);
4627 return const_double_from_real_value (val, mode);
4629 #else
4630 return NULL_RTX;
4631 #endif
4633 if (VECTOR_MODE_P (mode))
4635 if (tem == const0_rtx)
4636 return CONST0_RTX (mode);
4637 #ifdef VECTOR_STORE_FLAG_VALUE
4639 int i, units;
4640 rtvec v;
4642 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
4643 if (val == NULL_RTX)
4644 return NULL_RTX;
4645 if (val == const1_rtx)
4646 return CONST1_RTX (mode);
4648 units = GET_MODE_NUNITS (mode);
4649 v = rtvec_alloc (units);
4650 for (i = 0; i < units; i++)
4651 RTVEC_ELT (v, i) = val;
4652 return gen_rtx_raw_CONST_VECTOR (mode, v);
4654 #else
4655 return NULL_RTX;
4656 #endif
4659 return tem;
4662 /* For the following tests, ensure const0_rtx is op1. */
4663 if (swap_commutative_operands_p (op0, op1)
4664 || (op0 == const0_rtx && op1 != const0_rtx))
4665 std::swap (op0, op1), code = swap_condition (code);
4667 /* If op0 is a compare, extract the comparison arguments from it. */
4668 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4669 return simplify_gen_relational (code, mode, VOIDmode,
4670 XEXP (op0, 0), XEXP (op0, 1));
4672 if (GET_MODE_CLASS (cmp_mode) == MODE_CC
4673 || CC0_P (op0))
4674 return NULL_RTX;
4676 trueop0 = avoid_constant_pool_reference (op0);
4677 trueop1 = avoid_constant_pool_reference (op1);
4678 return simplify_relational_operation_1 (code, mode, cmp_mode,
4679 trueop0, trueop1);
4682 /* This part of simplify_relational_operation is only used when CMP_MODE
4683 is not in class MODE_CC (i.e. it is a real comparison).
4685 MODE is the mode of the result, while CMP_MODE specifies the mode
4686 in which the comparison is done, so it is the mode of the operands. */
4688 static rtx
4689 simplify_relational_operation_1 (enum rtx_code code, machine_mode mode,
4690 machine_mode cmp_mode, rtx op0, rtx op1)
4692 enum rtx_code op0code = GET_CODE (op0);
4694 if (op1 == const0_rtx && COMPARISON_P (op0))
4696 /* If op0 is a comparison, extract the comparison arguments
4697 from it. */
4698 if (code == NE)
4700 if (GET_MODE (op0) == mode)
4701 return simplify_rtx (op0);
4702 else
4703 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
4704 XEXP (op0, 0), XEXP (op0, 1));
4706 else if (code == EQ)
4708 enum rtx_code new_code = reversed_comparison_code (op0, NULL);
4709 if (new_code != UNKNOWN)
4710 return simplify_gen_relational (new_code, mode, VOIDmode,
4711 XEXP (op0, 0), XEXP (op0, 1));
4715 /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
4716 (GEU/LTU a -C). Likewise for (LTU/GEU (PLUS a C) a). */
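/* Illustrative sketch (QImode and the constant are only an example):
   (ltu (plus a 3) 3) is true exactly when a + 3 wraps around, i.e. when
   a >= 253 = (unsigned char) -3, so it becomes (geu a (const_int -3)),
   with -3 read as 253 in the unsigned comparison.  */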
4717 if ((code == LTU || code == GEU)
4718 && GET_CODE (op0) == PLUS
4719 && CONST_INT_P (XEXP (op0, 1))
4720 && (rtx_equal_p (op1, XEXP (op0, 0))
4721 || rtx_equal_p (op1, XEXP (op0, 1)))
4722 /* (LTU/GEU (PLUS a 0) 0) is not the same as (GEU/LTU a 0). */
4723 && XEXP (op0, 1) != const0_rtx)
4725 rtx new_cmp
4726 = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
4727 return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
4728 cmp_mode, XEXP (op0, 0), new_cmp);
4731 /* (GTU (PLUS a C) (C - 1)) where C is a non-zero constant can be
4732 transformed into (LTU a -C). */
4733 if (code == GTU && GET_CODE (op0) == PLUS && CONST_INT_P (op1)
4734 && CONST_INT_P (XEXP (op0, 1))
4735 && (UINTVAL (op1) == UINTVAL (XEXP (op0, 1)) - 1)
4736 && XEXP (op0, 1) != const0_rtx)
4738 rtx new_cmp
4739 = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
4740 return simplify_gen_relational (LTU, mode, cmp_mode,
4741 XEXP (op0, 0), new_cmp);
4744 /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a). */
4745 if ((code == LTU || code == GEU)
4746 && GET_CODE (op0) == PLUS
4747 && rtx_equal_p (op1, XEXP (op0, 1))
4748 /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b). */
4749 && !rtx_equal_p (op1, XEXP (op0, 0)))
4750 return simplify_gen_relational (code, mode, cmp_mode, op0,
4751 copy_rtx (XEXP (op0, 0)));
4753 if (op1 == const0_rtx)
4755 /* Canonicalize (GTU x 0) as (NE x 0). */
4756 if (code == GTU)
4757 return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
4758 /* Canonicalize (LEU x 0) as (EQ x 0). */
4759 if (code == LEU)
4760 return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
4762 else if (op1 == const1_rtx)
4764 switch (code)
4766 case GE:
4767 /* Canonicalize (GE x 1) as (GT x 0). */
4768 return simplify_gen_relational (GT, mode, cmp_mode,
4769 op0, const0_rtx);
4770 case GEU:
4771 /* Canonicalize (GEU x 1) as (NE x 0). */
4772 return simplify_gen_relational (NE, mode, cmp_mode,
4773 op0, const0_rtx);
4774 case LT:
4775 /* Canonicalize (LT x 1) as (LE x 0). */
4776 return simplify_gen_relational (LE, mode, cmp_mode,
4777 op0, const0_rtx);
4778 case LTU:
4779 /* Canonicalize (LTU x 1) as (EQ x 0). */
4780 return simplify_gen_relational (EQ, mode, cmp_mode,
4781 op0, const0_rtx);
4782 default:
4783 break;
4786 else if (op1 == constm1_rtx)
4788 /* Canonicalize (LE x -1) as (LT x 0). */
4789 if (code == LE)
4790 return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
4791 /* Canonicalize (GT x -1) as (GE x 0). */
4792 if (code == GT)
4793 return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
4796 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
4797 if ((code == EQ || code == NE)
4798 && (op0code == PLUS || op0code == MINUS)
4799 && CONSTANT_P (op1)
4800 && CONSTANT_P (XEXP (op0, 1))
4801 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
4803 rtx x = XEXP (op0, 0);
4804 rtx c = XEXP (op0, 1);
4805 enum rtx_code invcode = op0code == PLUS ? MINUS : PLUS;
4806 rtx tem = simplify_gen_binary (invcode, cmp_mode, op1, c);
4808 /* Detect an infinite recursion, where we oscillate at this
4809 simplification case between:
4810 A + B == C <---> C - B == A,
4811 where A, B, and C are all constants with non-simplifiable expressions,
4812 usually SYMBOL_REFs. */
4813 if (GET_CODE (tem) == invcode
4814 && CONSTANT_P (x)
4815 && rtx_equal_p (c, XEXP (tem, 1)))
4816 return NULL_RTX;
4818 return simplify_gen_relational (code, mode, cmp_mode, x, tem);
4821 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0)) is
4822 the same as (zero_extract:SI FOO (const_int 1) BAR). */
4823 scalar_int_mode int_mode, int_cmp_mode;
4824 if (code == NE
4825 && op1 == const0_rtx
4826 && is_int_mode (mode, &int_mode)
4827 && is_a <scalar_int_mode> (cmp_mode, &int_cmp_mode)
4828 /* ??? Work-around BImode bugs in the ia64 backend. */
4829 && int_mode != BImode
4830 && int_cmp_mode != BImode
4831 && nonzero_bits (op0, int_cmp_mode) == 1
4832 && STORE_FLAG_VALUE == 1)
4833 return GET_MODE_SIZE (int_mode) > GET_MODE_SIZE (int_cmp_mode)
4834 ? simplify_gen_unary (ZERO_EXTEND, int_mode, op0, int_cmp_mode)
4835 : lowpart_subreg (int_mode, op0, int_cmp_mode);
4837 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
4838 if ((code == EQ || code == NE)
4839 && op1 == const0_rtx
4840 && op0code == XOR)
4841 return simplify_gen_relational (code, mode, cmp_mode,
4842 XEXP (op0, 0), XEXP (op0, 1));
4844 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
4845 if ((code == EQ || code == NE)
4846 && op0code == XOR
4847 && rtx_equal_p (XEXP (op0, 0), op1)
4848 && !side_effects_p (XEXP (op0, 0)))
4849 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 1),
4850 CONST0_RTX (mode));
4852 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
4853 if ((code == EQ || code == NE)
4854 && op0code == XOR
4855 && rtx_equal_p (XEXP (op0, 1), op1)
4856 && !side_effects_p (XEXP (op0, 1)))
4857 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4858 CONST0_RTX (mode));
4860 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
4861 if ((code == EQ || code == NE)
4862 && op0code == XOR
4863 && CONST_SCALAR_INT_P (op1)
4864 && CONST_SCALAR_INT_P (XEXP (op0, 1)))
4865 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4866 simplify_gen_binary (XOR, cmp_mode,
4867 XEXP (op0, 1), op1));
4869 /* (eq/ne (and x y) x) simplifies to (eq/ne (and (not y) x) 0), which
4870 can be implemented with a BICS instruction on some targets, or
4871 constant-folded if y is a constant. */
4872 if ((code == EQ || code == NE)
4873 && op0code == AND
4874 && rtx_equal_p (XEXP (op0, 0), op1)
4875 && !side_effects_p (op1)
4876 && op1 != CONST0_RTX (cmp_mode))
4878 rtx not_y = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 1), cmp_mode);
4879 rtx lhs = simplify_gen_binary (AND, cmp_mode, not_y, XEXP (op0, 0));
4881 return simplify_gen_relational (code, mode, cmp_mode, lhs,
4882 CONST0_RTX (cmp_mode));
4885 /* Likewise for (eq/ne (and x y) y). */
4886 if ((code == EQ || code == NE)
4887 && op0code == AND
4888 && rtx_equal_p (XEXP (op0, 1), op1)
4889 && !side_effects_p (op1)
4890 && op1 != CONST0_RTX (cmp_mode))
4892 rtx not_x = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 0), cmp_mode);
4893 rtx lhs = simplify_gen_binary (AND, cmp_mode, not_x, XEXP (op0, 1));
4895 return simplify_gen_relational (code, mode, cmp_mode, lhs,
4896 CONST0_RTX (cmp_mode));
4899 /* (eq/ne (bswap x) C1) simplifies to (eq/ne x C2) with C2 swapped. */
4900 if ((code == EQ || code == NE)
4901 && GET_CODE (op0) == BSWAP
4902 && CONST_SCALAR_INT_P (op1))
4903 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4904 simplify_gen_unary (BSWAP, cmp_mode,
4905 op1, cmp_mode));
4907 /* (eq/ne (bswap x) (bswap y)) simplifies to (eq/ne x y). */
4908 if ((code == EQ || code == NE)
4909 && GET_CODE (op0) == BSWAP
4910 && GET_CODE (op1) == BSWAP)
4911 return simplify_gen_relational (code, mode, cmp_mode,
4912 XEXP (op0, 0), XEXP (op1, 0));
4914 if (op0code == POPCOUNT && op1 == const0_rtx)
4915 switch (code)
4917 case EQ:
4918 case LE:
4919 case LEU:
4920 /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */
4921 return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
4922 XEXP (op0, 0), const0_rtx);
4924 case NE:
4925 case GT:
4926 case GTU:
4927 /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */
4928 return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
4929 XEXP (op0, 0), const0_rtx);
4931 default:
4932 break;
4935 return NULL_RTX;
4938 enum
4940 CMP_EQ = 1,
4941 CMP_LT = 2,
4942 CMP_GT = 4,
4943 CMP_LTU = 8,
4944 CMP_GTU = 16
4948 /* Convert the known results for EQ, LT, GT, LTU, GTU contained in
4949 KNOWN_RESULT to a CONST_INT, based on the requested comparison CODE.
4950 For KNOWN_RESULT to make sense it should be either CMP_EQ, or the
4951 logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
4952 For floating-point comparisons, assume that the operands were ordered. */
4954 static rtx
4955 comparison_result (enum rtx_code code, int known_results)
4957 switch (code)
4959 case EQ:
4960 case UNEQ:
4961 return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
4962 case NE:
4963 case LTGT:
4964 return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;
4966 case LT:
4967 case UNLT:
4968 return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
4969 case GE:
4970 case UNGE:
4971 return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;
4973 case GT:
4974 case UNGT:
4975 return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
4976 case LE:
4977 case UNLE:
4978 return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;
4980 case LTU:
4981 return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
4982 case GEU:
4983 return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;
4985 case GTU:
4986 return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
4987 case LEU:
4988 return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;
4990 case ORDERED:
4991 return const_true_rtx;
4992 case UNORDERED:
4993 return const0_rtx;
4994 default:
4995 gcc_unreachable ();
4999 /* Check if the given comparison (done in the given MODE) is actually
5000 a tautology or a contradiction. If the mode is VOIDmode, the
5001 comparison is done in "infinite precision". If no simplification
5002 is possible, this function returns zero. Otherwise, it returns
5003 either const_true_rtx or const0_rtx. */
5006 simplify_const_relational_operation (enum rtx_code code,
5007 machine_mode mode,
5008 rtx op0, rtx op1)
5010 rtx tem;
5011 rtx trueop0;
5012 rtx trueop1;
5014 gcc_assert (mode != VOIDmode
5015 || (GET_MODE (op0) == VOIDmode
5016 && GET_MODE (op1) == VOIDmode));
5018 /* If op0 is a compare, extract the comparison arguments from it. */
5019 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
5021 op1 = XEXP (op0, 1);
5022 op0 = XEXP (op0, 0);
5024 if (GET_MODE (op0) != VOIDmode)
5025 mode = GET_MODE (op0);
5026 else if (GET_MODE (op1) != VOIDmode)
5027 mode = GET_MODE (op1);
5028 else
5029 return 0;
5032 /* We can't simplify MODE_CC values since we don't know what the
5033 actual comparison is. */
5034 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
5035 return 0;
5037 /* Make sure the constant is second. */
5038 if (swap_commutative_operands_p (op0, op1))
5040 std::swap (op0, op1);
5041 code = swap_condition (code);
5044 trueop0 = avoid_constant_pool_reference (op0);
5045 trueop1 = avoid_constant_pool_reference (op1);
5047 /* For integer comparisons of A and B, maybe we can simplify A - B and
5048 then simplify a comparison of that with zero. If A and B are both either
5049 a register or a CONST_INT, this can't help; testing for these cases will
5050 prevent infinite recursion here and speed things up.
5052 We can only do this for EQ and NE comparisons, as otherwise we may
5053 lose or introduce overflow, which we cannot disregard as undefined, since
5054 we do not know the signedness of the operation on either the left or
5055 the right hand side of the comparison. */
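/* Illustrative sketch (the operands are only an example): for
   (eq (plus a 4) (plus a 12)) the difference simplifies to
   (const_int -8), and the recursive comparison of that with zero
   folds the whole expression to const0_rtx.  */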
5057 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
5058 && (code == EQ || code == NE)
5059 && ! ((REG_P (op0) || CONST_INT_P (trueop0))
5060 && (REG_P (op1) || CONST_INT_P (trueop1)))
5061 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
5062 /* We cannot do this if tem is a nonzero address. */
5063 && ! nonzero_address_p (tem))
5064 return simplify_const_relational_operation (signed_condition (code),
5065 mode, tem, const0_rtx);
5067 if (! HONOR_NANS (mode) && code == ORDERED)
5068 return const_true_rtx;
5070 if (! HONOR_NANS (mode) && code == UNORDERED)
5071 return const0_rtx;
5073 /* For modes without NaNs, if the two operands are equal, we know the
5074 result except if they have side-effects. Even with NaNs we know
5075 the result of unordered comparisons and, if signaling NaNs are
5076 irrelevant, also the result of LT/GT/LTGT. */
5077 if ((! HONOR_NANS (trueop0)
5078 || code == UNEQ || code == UNLE || code == UNGE
5079 || ((code == LT || code == GT || code == LTGT)
5080 && ! HONOR_SNANS (trueop0)))
5081 && rtx_equal_p (trueop0, trueop1)
5082 && ! side_effects_p (trueop0))
5083 return comparison_result (code, CMP_EQ);
5085 /* If the operands are floating-point constants, see if we can fold
5086 the result. */
5087 if (CONST_DOUBLE_AS_FLOAT_P (trueop0)
5088 && CONST_DOUBLE_AS_FLOAT_P (trueop1)
5089 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
5091 const REAL_VALUE_TYPE *d0 = CONST_DOUBLE_REAL_VALUE (trueop0);
5092 const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);
5094 /* Comparisons are unordered iff at least one of the values is NaN. */
5095 if (REAL_VALUE_ISNAN (*d0) || REAL_VALUE_ISNAN (*d1))
5096 switch (code)
5098 case UNEQ:
5099 case UNLT:
5100 case UNGT:
5101 case UNLE:
5102 case UNGE:
5103 case NE:
5104 case UNORDERED:
5105 return const_true_rtx;
5106 case EQ:
5107 case LT:
5108 case GT:
5109 case LE:
5110 case GE:
5111 case LTGT:
5112 case ORDERED:
5113 return const0_rtx;
5114 default:
5115 return 0;
5118 return comparison_result (code,
5119 (real_equal (d0, d1) ? CMP_EQ :
5120 real_less (d0, d1) ? CMP_LT : CMP_GT));
5123 /* Otherwise, see if the operands are both integers. */
5124 if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
5125 && CONST_SCALAR_INT_P (trueop0) && CONST_SCALAR_INT_P (trueop1))
5127 /* It would be nice if we really had a mode here. However, the
5128 largest int representable on the target is as good as
5129 infinite. */
5130 machine_mode cmode = (mode == VOIDmode) ? MAX_MODE_INT : mode;
5131 rtx_mode_t ptrueop0 = rtx_mode_t (trueop0, cmode);
5132 rtx_mode_t ptrueop1 = rtx_mode_t (trueop1, cmode);
5134 if (wi::eq_p (ptrueop0, ptrueop1))
5135 return comparison_result (code, CMP_EQ);
5136 else
5138 int cr = wi::lts_p (ptrueop0, ptrueop1) ? CMP_LT : CMP_GT;
5139 cr |= wi::ltu_p (ptrueop0, ptrueop1) ? CMP_LTU : CMP_GTU;
5140 return comparison_result (code, cr);
5144 /* Optimize comparisons with upper and lower bounds. */
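/* Illustrative sketch (the constants are only an example): if nonzero_bits
   shows that trueop0 fits in [0, 15] (say it is (and x 15)), then
   (gtu trueop0 (const_int 20)) folds to const0_rtx and
   (leu trueop0 (const_int 15)) folds to const_true_rtx.  */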
5145 scalar_int_mode int_mode;
5146 if (CONST_INT_P (trueop1)
5147 && is_a <scalar_int_mode> (mode, &int_mode)
5148 && HWI_COMPUTABLE_MODE_P (int_mode)
5149 && !side_effects_p (trueop0))
5151 int sign;
5152 unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, int_mode);
5153 HOST_WIDE_INT val = INTVAL (trueop1);
5154 HOST_WIDE_INT mmin, mmax;
5156 if (code == GEU
5157 || code == LEU
5158 || code == GTU
5159 || code == LTU)
5160 sign = 0;
5161 else
5162 sign = 1;
5164 /* Get a reduced range if the sign bit is zero. */
5165 if (nonzero <= (GET_MODE_MASK (int_mode) >> 1))
5167 mmin = 0;
5168 mmax = nonzero;
5170 else
5172 rtx mmin_rtx, mmax_rtx;
5173 get_mode_bounds (int_mode, sign, int_mode, &mmin_rtx, &mmax_rtx);
5175 mmin = INTVAL (mmin_rtx);
5176 mmax = INTVAL (mmax_rtx);
5177 if (sign)
5179 unsigned int sign_copies
5180 = num_sign_bit_copies (trueop0, int_mode);
5182 mmin >>= (sign_copies - 1);
5183 mmax >>= (sign_copies - 1);
5187 switch (code)
5189 /* x >= y is always true for y <= mmin, always false for y > mmax. */
5190 case GEU:
5191 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
5192 return const_true_rtx;
5193 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
5194 return const0_rtx;
5195 break;
5196 case GE:
5197 if (val <= mmin)
5198 return const_true_rtx;
5199 if (val > mmax)
5200 return const0_rtx;
5201 break;
5203 /* x <= y is always true for y >= mmax, always false for y < mmin. */
5204 case LEU:
5205 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
5206 return const_true_rtx;
5207 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
5208 return const0_rtx;
5209 break;
5210 case LE:
5211 if (val >= mmax)
5212 return const_true_rtx;
5213 if (val < mmin)
5214 return const0_rtx;
5215 break;
5217 case EQ:
5218 /* x == y is always false for y out of range. */
5219 if (val < mmin || val > mmax)
5220 return const0_rtx;
5221 break;
5223 /* x > y is always false for y >= mmax, always true for y < mmin. */
5224 case GTU:
5225 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
5226 return const0_rtx;
5227 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
5228 return const_true_rtx;
5229 break;
5230 case GT:
5231 if (val >= mmax)
5232 return const0_rtx;
5233 if (val < mmin)
5234 return const_true_rtx;
5235 break;
5237 /* x < y is always false for y <= mmin, always true for y > mmax. */
5238 case LTU:
5239 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
5240 return const0_rtx;
5241 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
5242 return const_true_rtx;
5243 break;
5244 case LT:
5245 if (val <= mmin)
5246 return const0_rtx;
5247 if (val > mmax)
5248 return const_true_rtx;
5249 break;
5251 case NE:
5252 /* x != y is always true for y out of range. */
5253 if (val < mmin || val > mmax)
5254 return const_true_rtx;
5255 break;
5257 default:
5258 break;
5262 /* Optimize integer comparisons with zero. */
5263 if (is_a <scalar_int_mode> (mode, &int_mode)
5264 && trueop1 == const0_rtx
5265 && !side_effects_p (trueop0))
5267 /* Some addresses are known to be nonzero. We don't know
5268 their sign, but equality comparisons are known. */
5269 if (nonzero_address_p (trueop0))
5271 if (code == EQ || code == LEU)
5272 return const0_rtx;
5273 if (code == NE || code == GTU)
5274 return const_true_rtx;
5277 /* See if the first operand is an IOR with a constant. If so, we
5278 may be able to determine the result of this comparison. */
5279 if (GET_CODE (op0) == IOR)
5281 rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
5282 if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
5284 int sign_bitnum = GET_MODE_PRECISION (int_mode) - 1;
5285 int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
5286 && (UINTVAL (inner_const)
5287 & (HOST_WIDE_INT_1U
5288 << sign_bitnum)));
5290 switch (code)
5292 case EQ:
5293 case LEU:
5294 return const0_rtx;
5295 case NE:
5296 case GTU:
5297 return const_true_rtx;
5298 case LT:
5299 case LE:
5300 if (has_sign)
5301 return const_true_rtx;
5302 break;
5303 case GT:
5304 case GE:
5305 if (has_sign)
5306 return const0_rtx;
5307 break;
5308 default:
5309 break;
5315 /* Optimize comparison of ABS with zero. */
5316 if (trueop1 == CONST0_RTX (mode) && !side_effects_p (trueop0)
5317 && (GET_CODE (trueop0) == ABS
5318 || (GET_CODE (trueop0) == FLOAT_EXTEND
5319 && GET_CODE (XEXP (trueop0, 0)) == ABS)))
5321 switch (code)
5323 case LT:
5324 /* Optimize abs(x) < 0.0. */
5325 if (!INTEGRAL_MODE_P (mode) && !HONOR_SNANS (mode))
5326 return const0_rtx;
5327 break;
5329 case GE:
5330 /* Optimize abs(x) >= 0.0. */
5331 if (!INTEGRAL_MODE_P (mode) && !HONOR_NANS (mode))
5332 return const_true_rtx;
5333 break;
5335 case UNGE:
5336 /* Optimize ! (abs(x) < 0.0). */
5337 return const_true_rtx;
5339 default:
5340 break;
5344 return 0;
5347 /* Recognize expressions of the form (X CMP 0) ? VAL : OP (X),
5348 where OP is CLZ or CTZ and VAL is the value from CLZ_DEFINED_VALUE_AT_ZERO
5349 or CTZ_DEFINED_VALUE_AT_ZERO respectively. Return OP (X) if the expression
5350 can be simplified to that, or NULL_RTX if not.
5351 Assume X is compared against zero with CMP_CODE, the true
5352 arm is TRUE_VAL, and the false arm is FALSE_VAL. */
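/* Illustrative sketch (the value 32 is only an example): if a target's
   CLZ_DEFINED_VALUE_AT_ZERO yields 32 for SImode, then
     (eq x 0) ? (const_int 32) : (clz x)
   carries no extra information and reduces to (clz x).  */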
5354 static rtx
5355 simplify_cond_clz_ctz (rtx x, rtx_code cmp_code, rtx true_val, rtx false_val)
5357 if (cmp_code != EQ && cmp_code != NE)
5358 return NULL_RTX;
5360 /* Result on X == 0 and X != 0, respectively. */
5361 rtx on_zero, on_nonzero;
5362 if (cmp_code == EQ)
5364 on_zero = true_val;
5365 on_nonzero = false_val;
5367 else
5369 on_zero = false_val;
5370 on_nonzero = true_val;
5373 rtx_code op_code = GET_CODE (on_nonzero);
5374 if ((op_code != CLZ && op_code != CTZ)
5375 || !rtx_equal_p (XEXP (on_nonzero, 0), x)
5376 || !CONST_INT_P (on_zero))
5377 return NULL_RTX;
5379 HOST_WIDE_INT op_val;
5380 scalar_int_mode mode ATTRIBUTE_UNUSED
5381 = as_a <scalar_int_mode> (GET_MODE (XEXP (on_nonzero, 0)));
5382 if (((op_code == CLZ && CLZ_DEFINED_VALUE_AT_ZERO (mode, op_val))
5383 || (op_code == CTZ && CTZ_DEFINED_VALUE_AT_ZERO (mode, op_val)))
5384 && op_val == INTVAL (on_zero))
5385 return on_nonzero;
5387 return NULL_RTX;
5391 /* Simplify CODE, an operation with result mode MODE and three operands,
5392 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
5393 a constant. Return 0 if no simplification is possible. */
5396 simplify_ternary_operation (enum rtx_code code, machine_mode mode,
5397 machine_mode op0_mode, rtx op0, rtx op1,
5398 rtx op2)
5400 bool any_change = false;
5401 rtx tem, trueop2;
5402 scalar_int_mode int_mode, int_op0_mode;
5404 switch (code)
5406 case FMA:
5407 /* Simplify negations around the multiplication. */
5408 /* -a * -b + c => a * b + c. */
5409 if (GET_CODE (op0) == NEG)
5411 tem = simplify_unary_operation (NEG, mode, op1, mode);
5412 if (tem)
5413 op1 = tem, op0 = XEXP (op0, 0), any_change = true;
5415 else if (GET_CODE (op1) == NEG)
5417 tem = simplify_unary_operation (NEG, mode, op0, mode);
5418 if (tem)
5419 op0 = tem, op1 = XEXP (op1, 0), any_change = true;
5422 /* Canonicalize the two multiplication operands. */
5423 /* a * -b + c => -b * a + c. */
5424 if (swap_commutative_operands_p (op0, op1))
5425 std::swap (op0, op1), any_change = true;
5427 if (any_change)
5428 return gen_rtx_FMA (mode, op0, op1, op2);
5429 return NULL_RTX;
5431 case SIGN_EXTRACT:
5432 case ZERO_EXTRACT:
5433 if (CONST_INT_P (op0)
5434 && CONST_INT_P (op1)
5435 && CONST_INT_P (op2)
5436 && is_a <scalar_int_mode> (mode, &int_mode)
5437 && INTVAL (op1) + INTVAL (op2) <= GET_MODE_PRECISION (int_mode)
5438 && HWI_COMPUTABLE_MODE_P (int_mode))
5440 /* Extracting a bit-field from a constant. */
5441 unsigned HOST_WIDE_INT val = UINTVAL (op0);
5442 HOST_WIDE_INT op1val = INTVAL (op1);
5443 HOST_WIDE_INT op2val = INTVAL (op2);
5444 if (!BITS_BIG_ENDIAN)
5445 val >>= op2val;
5446 else if (is_a <scalar_int_mode> (op0_mode, &int_op0_mode))
5447 val >>= GET_MODE_PRECISION (int_op0_mode) - op2val - op1val;
5448 else
5449 /* Not enough information to calculate the bit position. */
5450 break;
5452 if (HOST_BITS_PER_WIDE_INT != op1val)
5454 /* First zero-extend. */
5455 val &= (HOST_WIDE_INT_1U << op1val) - 1;
5456 /* If desired, propagate sign bit. */
5457 if (code == SIGN_EXTRACT
5458 && (val & (HOST_WIDE_INT_1U << (op1val - 1)))
5459 != 0)
5460 val |= ~ ((HOST_WIDE_INT_1U << op1val) - 1);
5463 return gen_int_mode (val, int_mode);
5465 break;
5467 case IF_THEN_ELSE:
5468 if (CONST_INT_P (op0))
5469 return op0 != const0_rtx ? op1 : op2;
5471 /* Convert c ? a : a into "a". */
5472 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
5473 return op1;
5475 /* Convert a != b ? a : b into "a". */
5476 if (GET_CODE (op0) == NE
5477 && ! side_effects_p (op0)
5478 && ! HONOR_NANS (mode)
5479 && ! HONOR_SIGNED_ZEROS (mode)
5480 && ((rtx_equal_p (XEXP (op0, 0), op1)
5481 && rtx_equal_p (XEXP (op0, 1), op2))
5482 || (rtx_equal_p (XEXP (op0, 0), op2)
5483 && rtx_equal_p (XEXP (op0, 1), op1))))
5484 return op1;
5486 /* Convert a == b ? a : b into "b". */
5487 if (GET_CODE (op0) == EQ
5488 && ! side_effects_p (op0)
5489 && ! HONOR_NANS (mode)
5490 && ! HONOR_SIGNED_ZEROS (mode)
5491 && ((rtx_equal_p (XEXP (op0, 0), op1)
5492 && rtx_equal_p (XEXP (op0, 1), op2))
5493 || (rtx_equal_p (XEXP (op0, 0), op2)
5494 && rtx_equal_p (XEXP (op0, 1), op1))))
5495 return op2;
5497 /* Convert (!c) != {0,...,0} ? a : b into
5498 c != {0,...,0} ? b : a for vector modes. */
5499 if (VECTOR_MODE_P (GET_MODE (op1))
5500 && GET_CODE (op0) == NE
5501 && GET_CODE (XEXP (op0, 0)) == NOT
5502 && GET_CODE (XEXP (op0, 1)) == CONST_VECTOR)
5504 rtx cv = XEXP (op0, 1);
5505 int nunits = CONST_VECTOR_NUNITS (cv);
5506 bool ok = true;
5507 for (int i = 0; i < nunits; ++i)
5508 if (CONST_VECTOR_ELT (cv, i) != const0_rtx)
5510 ok = false;
5511 break;
5513 if (ok)
5515 rtx new_op0 = gen_rtx_NE (GET_MODE (op0),
5516 XEXP (XEXP (op0, 0), 0),
5517 XEXP (op0, 1));
5518 rtx retval = gen_rtx_IF_THEN_ELSE (mode, new_op0, op2, op1);
5519 return retval;
5523 /* Convert x == 0 ? N : clz (x) into clz (x) when
5524 CLZ_DEFINED_VALUE_AT_ZERO is defined to N for the mode of x.
5525 Similarly for ctz (x). */
5526 if (COMPARISON_P (op0) && !side_effects_p (op0)
5527 && XEXP (op0, 1) == const0_rtx)
5529 rtx simplified
5530 = simplify_cond_clz_ctz (XEXP (op0, 0), GET_CODE (op0),
5531 op1, op2);
5532 if (simplified)
5533 return simplified;
5536 if (COMPARISON_P (op0) && ! side_effects_p (op0))
5538 machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
5539 ? GET_MODE (XEXP (op0, 1))
5540 : GET_MODE (XEXP (op0, 0)));
5541 rtx temp;
5543 /* Look for happy constants in op1 and op2. */
5544 if (CONST_INT_P (op1) && CONST_INT_P (op2))
5546 HOST_WIDE_INT t = INTVAL (op1);
5547 HOST_WIDE_INT f = INTVAL (op2);
5549 if (t == STORE_FLAG_VALUE && f == 0)
5550 code = GET_CODE (op0);
5551 else if (t == 0 && f == STORE_FLAG_VALUE)
5553 enum rtx_code tmp;
5554 tmp = reversed_comparison_code (op0, NULL);
5555 if (tmp == UNKNOWN)
5556 break;
5557 code = tmp;
5559 else
5560 break;
5562 return simplify_gen_relational (code, mode, cmp_mode,
5563 XEXP (op0, 0), XEXP (op0, 1));
5566 if (cmp_mode == VOIDmode)
5567 cmp_mode = op0_mode;
5568 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
5569 cmp_mode, XEXP (op0, 0),
5570 XEXP (op0, 1));
5572 /* See if any simplifications were possible. */
5573 if (temp)
5575 if (CONST_INT_P (temp))
5576 return temp == const0_rtx ? op2 : op1;
5577 else if (temp)
5578 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
5581 break;
5583 case VEC_MERGE:
5584 gcc_assert (GET_MODE (op0) == mode);
5585 gcc_assert (GET_MODE (op1) == mode);
5586 gcc_assert (VECTOR_MODE_P (mode));
5587 trueop2 = avoid_constant_pool_reference (op2);
5588 if (CONST_INT_P (trueop2))
5590 int elt_size = GET_MODE_UNIT_SIZE (mode);
5591 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
5592 unsigned HOST_WIDE_INT sel = UINTVAL (trueop2);
5593 unsigned HOST_WIDE_INT mask;
5594 if (n_elts == HOST_BITS_PER_WIDE_INT)
5595 mask = -1;
5596 else
5597 mask = (HOST_WIDE_INT_1U << n_elts) - 1;
5599 if (!(sel & mask) && !side_effects_p (op0))
5600 return op1;
5601 if ((sel & mask) == mask && !side_effects_p (op1))
5602 return op0;
5604 rtx trueop0 = avoid_constant_pool_reference (op0);
5605 rtx trueop1 = avoid_constant_pool_reference (op1);
5606 if (GET_CODE (trueop0) == CONST_VECTOR
5607 && GET_CODE (trueop1) == CONST_VECTOR)
5609 rtvec v = rtvec_alloc (n_elts);
5610 unsigned int i;
5612 for (i = 0; i < n_elts; i++)
5613 RTVEC_ELT (v, i) = ((sel & (HOST_WIDE_INT_1U << i))
5614 ? CONST_VECTOR_ELT (trueop0, i)
5615 : CONST_VECTOR_ELT (trueop1, i));
5616 return gen_rtx_CONST_VECTOR (mode, v);
5619 /* Replace (vec_merge (vec_merge a b m) c n) with (vec_merge b c n)
5620 if no element from a appears in the result. */
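/* Illustrative sketch (the mask values are only an example): in
     (vec_merge (vec_merge a b (const_int 1)) c (const_int 2))
   the outer mask keeps only element 1, which the inner merge takes
   from b (bit 1 of its mask is clear), so a is dead and the whole
   expression becomes (vec_merge b c (const_int 2)).  */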
5621 if (GET_CODE (op0) == VEC_MERGE)
5623 tem = avoid_constant_pool_reference (XEXP (op0, 2));
5624 if (CONST_INT_P (tem))
5626 unsigned HOST_WIDE_INT sel0 = UINTVAL (tem);
5627 if (!(sel & sel0 & mask) && !side_effects_p (XEXP (op0, 0)))
5628 return simplify_gen_ternary (code, mode, mode,
5629 XEXP (op0, 1), op1, op2);
5630 if (!(sel & ~sel0 & mask) && !side_effects_p (XEXP (op0, 1)))
5631 return simplify_gen_ternary (code, mode, mode,
5632 XEXP (op0, 0), op1, op2);
5635 if (GET_CODE (op1) == VEC_MERGE)
5637 tem = avoid_constant_pool_reference (XEXP (op1, 2));
5638 if (CONST_INT_P (tem))
5640 unsigned HOST_WIDE_INT sel1 = UINTVAL (tem);
5641 if (!(~sel & sel1 & mask) && !side_effects_p (XEXP (op1, 0)))
5642 return simplify_gen_ternary (code, mode, mode,
5643 op0, XEXP (op1, 1), op2);
5644 if (!(~sel & ~sel1 & mask) && !side_effects_p (XEXP (op1, 1)))
5645 return simplify_gen_ternary (code, mode, mode,
5646 op0, XEXP (op1, 0), op2);
5650 /* Replace (vec_merge (vec_duplicate (vec_select a parallel (i))) a 1 << i)
5651 with a. */
5652 if (GET_CODE (op0) == VEC_DUPLICATE
5653 && GET_CODE (XEXP (op0, 0)) == VEC_SELECT
5654 && GET_CODE (XEXP (XEXP (op0, 0), 1)) == PARALLEL
5655 && mode_nunits[GET_MODE (XEXP (op0, 0))] == 1)
5657 tem = XVECEXP ((XEXP (XEXP (op0, 0), 1)), 0, 0);
5658 if (CONST_INT_P (tem) && CONST_INT_P (op2))
5660 if (XEXP (XEXP (op0, 0), 0) == op1
5661 && UINTVAL (op2) == HOST_WIDE_INT_1U << UINTVAL (tem))
5662 return op1;
5667 if (rtx_equal_p (op0, op1)
5668 && !side_effects_p (op2) && !side_effects_p (op1))
5669 return op0;
5671 break;
5673 default:
5674 gcc_unreachable ();
5677 return 0;
5680 /* Evaluate a SUBREG of a CONST_INT or CONST_WIDE_INT or CONST_DOUBLE
5681 or CONST_FIXED or CONST_VECTOR, returning another CONST_INT or
5682 CONST_WIDE_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.
5684 Works by unpacking OP into a collection of 8-bit values
5685 represented as a little-endian array of 'unsigned char', selecting by BYTE,
5686 and then repacking them again for OUTERMODE. */
5688 static rtx
5689 simplify_immed_subreg (machine_mode outermode, rtx op,
5690 machine_mode innermode, unsigned int byte)
5692 enum {
5693 value_bit = 8,
5694 value_mask = (1 << value_bit) - 1
5696 unsigned char value[MAX_BITSIZE_MODE_ANY_MODE / value_bit];
5697 int value_start;
5698 int i;
5699 int elem;
5701 int num_elem;
5702 rtx * elems;
5703 int elem_bitsize;
5704 rtx result_s = NULL;
5705 rtvec result_v = NULL;
5706 enum mode_class outer_class;
5707 scalar_mode outer_submode;
5708 int max_bitsize;
5710 /* Some ports misuse CCmode. */
5711 if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
5712 return op;
5714 /* We have no way to represent a complex constant at the rtl level. */
5715 if (COMPLEX_MODE_P (outermode))
5716 return NULL_RTX;
5718 /* We support any size mode. */
5719 max_bitsize = MAX (GET_MODE_BITSIZE (outermode),
5720 GET_MODE_BITSIZE (innermode));
5722 /* Unpack the value. */
5724 if (GET_CODE (op) == CONST_VECTOR)
5726 num_elem = CONST_VECTOR_NUNITS (op);
5727 elems = &CONST_VECTOR_ELT (op, 0);
5728 elem_bitsize = GET_MODE_UNIT_BITSIZE (innermode);
5730 else
5732 num_elem = 1;
5733 elems = &op;
5734 elem_bitsize = max_bitsize;
5736 /* If this asserts, it is too complicated; reducing value_bit may help. */
5737 gcc_assert (BITS_PER_UNIT % value_bit == 0);
5738 /* I don't know how to handle endianness of sub-units. */
5739 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
5741 for (elem = 0; elem < num_elem; elem++)
5743 unsigned char * vp;
5744 rtx el = elems[elem];
5746 /* Vectors are kept in target memory order. (This is probably
5747 a mistake.) */
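      /* BYTE is the element's offset in target memory order and IBYTE the
         offset counted from the opposite end; WORDS_BIG_ENDIAN and
         BYTES_BIG_ENDIAN decide which of the two applies at the word and
         sub-word level, giving BYTELE, the element's starting position
         within VALUE.  */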
5749 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5750 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5751 / BITS_PER_UNIT);
5752 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5753 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5754 unsigned bytele = (subword_byte % UNITS_PER_WORD
5755 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5756 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
5759 switch (GET_CODE (el))
5761 case CONST_INT:
5762 for (i = 0;
5763 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5764 i += value_bit)
5765 *vp++ = INTVAL (el) >> i;
5766 /* CONST_INTs are always logically sign-extended. */
5767 for (; i < elem_bitsize; i += value_bit)
5768 *vp++ = INTVAL (el) < 0 ? -1 : 0;
5769 break;
5771 case CONST_WIDE_INT:
5773 rtx_mode_t val = rtx_mode_t (el, GET_MODE_INNER (innermode));
5774 unsigned char extend = wi::sign_mask (val);
5775 int prec = wi::get_precision (val);
5777 for (i = 0; i < prec && i < elem_bitsize; i += value_bit)
5778 *vp++ = wi::extract_uhwi (val, i, value_bit);
5779 for (; i < elem_bitsize; i += value_bit)
5780 *vp++ = extend;
5782 break;
5784 case CONST_DOUBLE:
5785 if (TARGET_SUPPORTS_WIDE_INT == 0 && GET_MODE (el) == VOIDmode)
5787 unsigned char extend = 0;
5788 /* If this triggers, someone should have generated a
5789 CONST_INT instead. */
5790 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
5792 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5793 *vp++ = CONST_DOUBLE_LOW (el) >> i;
5794 while (i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize)
5796 *vp++
5797 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
5798 i += value_bit;
5801 if (CONST_DOUBLE_HIGH (el) >> (HOST_BITS_PER_WIDE_INT - 1))
5802 extend = -1;
5803 for (; i < elem_bitsize; i += value_bit)
5804 *vp++ = extend;
5806 else
5808 /* This is big enough for anything on the platform. */
5809 long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32];
5810 scalar_float_mode el_mode;
5812 el_mode = as_a <scalar_float_mode> (GET_MODE (el));
5813 int bitsize = GET_MODE_BITSIZE (el_mode);
5815 gcc_assert (bitsize <= elem_bitsize);
5816 gcc_assert (bitsize % value_bit == 0);
5818 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
5819 GET_MODE (el));
5821 /* real_to_target produces its result in words affected by
5822 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5823 and use WORDS_BIG_ENDIAN instead; see the documentation
5824 of SUBREG in rtl.texi. */
5825 for (i = 0; i < bitsize; i += value_bit)
5827 int ibase;
5828 if (WORDS_BIG_ENDIAN)
5829 ibase = bitsize - 1 - i;
5830 else
5831 ibase = i;
5832 *vp++ = tmp[ibase / 32] >> i % 32;
5835 /* It shouldn't matter what's done here, so fill it with
5836 zero. */
5837 for (; i < elem_bitsize; i += value_bit)
5838 *vp++ = 0;
5840 break;
5842 case CONST_FIXED:
5843 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
5845 for (i = 0; i < elem_bitsize; i += value_bit)
5846 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5848 else
5850 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5851 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5852 for (; i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize;
5853 i += value_bit)
5854 *vp++ = CONST_FIXED_VALUE_HIGH (el)
5855 >> (i - HOST_BITS_PER_WIDE_INT);
5856 for (; i < elem_bitsize; i += value_bit)
5857 *vp++ = 0;
5859 break;
5861 default:
5862 gcc_unreachable ();
5866 /* Now, pick the right byte to start with. */
5867 /* Renumber BYTE so that the least-significant byte is byte 0. A special
5868 case is paradoxical SUBREGs, which shouldn't be adjusted since they
5869 will already have offset 0. */
5870 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
5872 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
5873 - byte);
5874 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5875 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5876 byte = (subword_byte % UNITS_PER_WORD
5877 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5880 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
5881 so if it's become negative it will instead be very large.) */
5882 gcc_assert (byte < GET_MODE_SIZE (innermode));
5884 /* Convert from bytes to chunks of size value_bit. */
5885 value_start = byte * (BITS_PER_UNIT / value_bit);
5887 /* Re-pack the value. */
5888 num_elem = GET_MODE_NUNITS (outermode);
5890 if (VECTOR_MODE_P (outermode))
5892 result_v = rtvec_alloc (num_elem);
5893 elems = &RTVEC_ELT (result_v, 0);
5895 else
5896 elems = &result_s;
5898 outer_submode = GET_MODE_INNER (outermode);
5899 outer_class = GET_MODE_CLASS (outer_submode);
5900 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
5902 gcc_assert (elem_bitsize % value_bit == 0);
5903 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
5905 for (elem = 0; elem < num_elem; elem++)
5907 unsigned char *vp;
5909 /* Vectors are stored in target memory order. (This is probably
5910 a mistake.) */
5912 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5913 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5914 / BITS_PER_UNIT);
5915 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5916 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5917 unsigned bytele = (subword_byte % UNITS_PER_WORD
5918 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5919 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
5922 switch (outer_class)
5924 case MODE_INT:
5925 case MODE_PARTIAL_INT:
5927 int u;
5928 int base = 0;
5929 int units
5930 = (GET_MODE_BITSIZE (outer_submode) + HOST_BITS_PER_WIDE_INT - 1)
5931 / HOST_BITS_PER_WIDE_INT;
5932 HOST_WIDE_INT tmp[MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_WIDE_INT];
5933 wide_int r;
5935 if (GET_MODE_PRECISION (outer_submode) > MAX_BITSIZE_MODE_ANY_INT)
5936 return NULL_RTX;
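        /* Gather the value_bit-sized chunks into HOST_WIDE_INT-sized words,
           least significant first, then build a wide_int of the outer
           mode's precision from them.  */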
5937 for (u = 0; u < units; u++)
5939 unsigned HOST_WIDE_INT buf = 0;
5940 for (i = 0;
5941 i < HOST_BITS_PER_WIDE_INT && base + i < elem_bitsize;
5942 i += value_bit)
5943 buf |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5945 tmp[u] = buf;
5946 base += HOST_BITS_PER_WIDE_INT;
5948 r = wide_int::from_array (tmp, units,
5949 GET_MODE_PRECISION (outer_submode));
5950 #if TARGET_SUPPORTS_WIDE_INT == 0
5951 /* Make sure r will fit into CONST_INT or CONST_DOUBLE. */
5952 if (wi::min_precision (r, SIGNED) > HOST_BITS_PER_DOUBLE_INT)
5953 return NULL_RTX;
5954 #endif
5955 elems[elem] = immed_wide_int_const (r, outer_submode);
5957 break;
5959 case MODE_FLOAT:
5960 case MODE_DECIMAL_FLOAT:
5962 REAL_VALUE_TYPE r;
5963 long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32] = { 0 };
5965 /* real_from_target wants its input in words affected by
5966 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5967 and use WORDS_BIG_ENDIAN instead; see the documentation
5968 of SUBREG in rtl.texi. */
5969 for (i = 0; i < elem_bitsize; i += value_bit)
5971 int ibase;
5972 if (WORDS_BIG_ENDIAN)
5973 ibase = elem_bitsize - 1 - i;
5974 else
5975 ibase = i;
5976 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
5979 real_from_target (&r, tmp, outer_submode);
5980 elems[elem] = const_double_from_real_value (r, outer_submode);
5982 break;
5984 case MODE_FRACT:
5985 case MODE_UFRACT:
5986 case MODE_ACCUM:
5987 case MODE_UACCUM:
5989 FIXED_VALUE_TYPE f;
5990 f.data.low = 0;
5991 f.data.high = 0;
5992 f.mode = outer_submode;
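        /* Reassemble the fixed-point payload: the first HOST_WIDE_INT's
           worth of chunks fills f.data.low, any remaining chunks fill
           f.data.high.  */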
5994 for (i = 0;
5995 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5996 i += value_bit)
5997 f.data.low |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5998 for (; i < elem_bitsize; i += value_bit)
5999 f.data.high |= ((unsigned HOST_WIDE_INT)(*vp++ & value_mask)
6000 << (i - HOST_BITS_PER_WIDE_INT));
6002 elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
6004 break;
6006 default:
6007 gcc_unreachable ();
6010 if (VECTOR_MODE_P (outermode))
6011 return gen_rtx_CONST_VECTOR (outermode, result_v);
6012 else
6013 return result_s;
6016 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
6017 Return 0 if no simplifications are possible. */
6019 simplify_subreg (machine_mode outermode, rtx op,
6020 machine_mode innermode, unsigned int byte)
6022 /* Little bit of sanity checking. */
6023 gcc_assert (innermode != VOIDmode);
6024 gcc_assert (outermode != VOIDmode);
6025 gcc_assert (innermode != BLKmode);
6026 gcc_assert (outermode != BLKmode);
6028 gcc_assert (GET_MODE (op) == innermode
6029 || GET_MODE (op) == VOIDmode);
6031 if ((byte % GET_MODE_SIZE (outermode)) != 0)
6032 return NULL_RTX;
6034 if (byte >= GET_MODE_SIZE (innermode))
6035 return NULL_RTX;
6037 if (outermode == innermode && !byte)
6038 return op;
6040 if (CONST_SCALAR_INT_P (op)
6041 || CONST_DOUBLE_AS_FLOAT_P (op)
6042 || GET_CODE (op) == CONST_FIXED
6043 || GET_CODE (op) == CONST_VECTOR)
6044 return simplify_immed_subreg (outermode, op, innermode, byte);
6046 /* Changing mode twice with SUBREG => just change it once,
 6047 or not at all if changing back to op's starting mode.  */
6048 if (GET_CODE (op) == SUBREG)
6050 machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
6051 rtx newx;
6053 if (outermode == innermostmode
6054 && byte == 0 && SUBREG_BYTE (op) == 0)
6055 return SUBREG_REG (op);
6057 /* Work out the memory offset of the final OUTERMODE value relative
6058 to the inner value of OP. */
6059 HOST_WIDE_INT mem_offset = subreg_memory_offset (outermode,
6060 innermode, byte);
6061 HOST_WIDE_INT op_mem_offset = subreg_memory_offset (op);
6062 HOST_WIDE_INT final_offset = mem_offset + op_mem_offset;
6064 /* See whether resulting subreg will be paradoxical. */
6065 if (!paradoxical_subreg_p (outermode, innermostmode))
6067 /* In nonparadoxical subregs we can't handle negative offsets. */
6068 if (final_offset < 0)
6069 return NULL_RTX;
6070 /* Bail out in case resulting subreg would be incorrect. */
6071 if (final_offset % GET_MODE_SIZE (outermode)
6072 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
6073 return NULL_RTX;
6075 else
6077 HOST_WIDE_INT required_offset
6078 = subreg_memory_offset (outermode, innermostmode, 0);
6079 if (final_offset != required_offset)
6080 return NULL_RTX;
6081 /* Paradoxical subregs always have byte offset 0. */
6082 final_offset = 0;
6085 /* Recurse for further possible simplifications. */
6086 newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
6087 final_offset);
6088 if (newx)
6089 return newx;
6090 if (validate_subreg (outermode, innermostmode,
6091 SUBREG_REG (op), final_offset))
6093 newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
6094 if (SUBREG_PROMOTED_VAR_P (op)
6095 && SUBREG_PROMOTED_SIGN (op) >= 0
6096 && GET_MODE_CLASS (outermode) == MODE_INT
6097 && IN_RANGE (GET_MODE_SIZE (outermode),
6098 GET_MODE_SIZE (innermode),
6099 GET_MODE_SIZE (innermostmode))
6100 && subreg_lowpart_p (newx))
6102 SUBREG_PROMOTED_VAR_P (newx) = 1;
6103 SUBREG_PROMOTED_SET (newx, SUBREG_PROMOTED_GET (op));
6105 return newx;
6107 return NULL_RTX;
6110 /* SUBREG of a hard register => just change the register number
6111 and/or mode. If the hard register is not valid in that mode,
6112 suppress this simplification. If the hard register is the stack,
6113 frame, or argument pointer, leave this as a SUBREG. */
6115 if (REG_P (op) && HARD_REGISTER_P (op))
6117 unsigned int regno, final_regno;
6119 regno = REGNO (op);
6120 final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
6121 if (HARD_REGISTER_NUM_P (final_regno))
6123 rtx x = gen_rtx_REG_offset (op, outermode, final_regno,
6124 subreg_memory_offset (outermode,
6125 innermode, byte));
6127 /* Propagate original regno. We don't have any way to specify
6128 the offset inside original regno, so do so only for lowpart.
 6129 The information is used only by alias analysis, which cannot
 6130 grok partial registers anyway.  */
6132 if (subreg_lowpart_offset (outermode, innermode) == byte)
6133 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
6134 return x;
6138 /* If we have a SUBREG of a register that we are replacing and we are
6139 replacing it with a MEM, make a new MEM and try replacing the
6140 SUBREG with it. Don't do this if the MEM has a mode-dependent address
6141 or if we would be widening it. */
6143 if (MEM_P (op)
6144 && ! mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op))
6145 /* Allow splitting of volatile memory references in case we don't
 6146 have an instruction to move the whole thing.  */
6147 && (! MEM_VOLATILE_P (op)
6148 || ! have_insn_for (SET, innermode))
6149 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
6150 return adjust_address_nv (op, outermode, byte);
6152 /* Handle complex or vector values represented as CONCAT or VEC_CONCAT
6153 of two parts. */
6154 if (GET_CODE (op) == CONCAT
6155 || GET_CODE (op) == VEC_CONCAT)
6157 unsigned int part_size, final_offset;
6158 rtx part, res;
6160 machine_mode part_mode = GET_MODE (XEXP (op, 0));
6161 if (part_mode == VOIDmode)
6162 part_mode = GET_MODE_INNER (GET_MODE (op));
6163 part_size = GET_MODE_SIZE (part_mode);
6164 if (byte < part_size)
6166 part = XEXP (op, 0);
6167 final_offset = byte;
6169 else
6171 part = XEXP (op, 1);
6172 final_offset = byte - part_size;
6175 if (final_offset + GET_MODE_SIZE (outermode) > part_size)
6176 return NULL_RTX;
6178 part_mode = GET_MODE (part);
6179 if (part_mode == VOIDmode)
6180 part_mode = GET_MODE_INNER (GET_MODE (op));
6181 res = simplify_subreg (outermode, part, part_mode, final_offset);
6182 if (res)
6183 return res;
6184 if (validate_subreg (outermode, part_mode, part, final_offset))
6185 return gen_rtx_SUBREG (outermode, part, final_offset);
6186 return NULL_RTX;
6189 /* A SUBREG resulting from a zero extension may fold to zero if
 6190 it extracts higher bits than the ZERO_EXTEND's source bits.  */
6191 if (GET_CODE (op) == ZERO_EXTEND && SCALAR_INT_MODE_P (innermode))
6193 unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
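      /* BITPOS is the position of the subreg's least significant bit within
         the inner value; if it lies entirely above the zero-extended source,
         every extracted bit is known to be zero.  */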
6194 if (bitpos >= GET_MODE_PRECISION (GET_MODE (XEXP (op, 0))))
6195 return CONST0_RTX (outermode);
6198 scalar_int_mode int_outermode, int_innermode;
6199 if (is_a <scalar_int_mode> (outermode, &int_outermode)
6200 && is_a <scalar_int_mode> (innermode, &int_innermode)
6201 && (GET_MODE_PRECISION (int_outermode)
6202 < GET_MODE_PRECISION (int_innermode))
6203 && byte == subreg_lowpart_offset (int_outermode, int_innermode))
6205 rtx tem = simplify_truncation (int_outermode, op, int_innermode);
6206 if (tem)
6207 return tem;
6210 return NULL_RTX;
6213 /* Make a SUBREG operation or equivalent if it folds. */
6216 simplify_gen_subreg (machine_mode outermode, rtx op,
6217 machine_mode innermode, unsigned int byte)
6219 rtx newx;
6221 newx = simplify_subreg (outermode, op, innermode, byte);
6222 if (newx)
6223 return newx;
6225 if (GET_CODE (op) == SUBREG
6226 || GET_CODE (op) == CONCAT
6227 || GET_MODE (op) == VOIDmode)
6228 return NULL_RTX;
6230 if (validate_subreg (outermode, innermode, op, byte))
6231 return gen_rtx_SUBREG (outermode, op, byte);
6233 return NULL_RTX;
6236 /* Generates a subreg to get the least significant part of EXPR (in mode
6237 INNER_MODE) to OUTER_MODE. */
6240 lowpart_subreg (machine_mode outer_mode, rtx expr,
6241 machine_mode inner_mode)
6243 return simplify_gen_subreg (outer_mode, expr, inner_mode,
6244 subreg_lowpart_offset (outer_mode, inner_mode));
6247 /* Simplify X, an rtx expression.
6249 Return the simplified expression or NULL if no simplifications
6250 were possible.
6252 This is the preferred entry point into the simplification routines;
6253 however, we still allow passes to call the more specific routines.
6255 Right now GCC has three (yes, three) major bodies of RTL simplification
6256 code that need to be unified.
6258 1. fold_rtx in cse.c. This code uses various CSE specific
6259 information to aid in RTL simplification.
6261 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
6262 it uses combine specific information to aid in RTL
6263 simplification.
6265 3. The routines in this file.
6268 Long term we want to only have one body of simplification code; to
6269 get to that state I recommend the following steps:
 6271 1. Pore over fold_rtx & simplify_rtx and move any simplifications
6272 which are not pass dependent state into these routines.
6274 2. As code is moved by #1, change fold_rtx & simplify_rtx to
6275 use this routine whenever possible.
6277 3. Allow for pass dependent state to be provided to these
6278 routines and add simplifications based on the pass dependent
6279 state. Remove code from cse.c & combine.c that becomes
6280 redundant/dead.
6282 It will take time, but ultimately the compiler will be easier to
6283 maintain and improve. It's totally silly that when we add a
 6284 simplification it needs to be added to 4 places (3 for RTL
 6285 simplification and 1 for tree simplification).  */
6288 simplify_rtx (const_rtx x)
6290 const enum rtx_code code = GET_CODE (x);
6291 const machine_mode mode = GET_MODE (x);
6293 switch (GET_RTX_CLASS (code))
6295 case RTX_UNARY:
6296 return simplify_unary_operation (code, mode,
6297 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
6298 case RTX_COMM_ARITH:
6299 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
6300 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
6302 /* Fall through. */
6304 case RTX_BIN_ARITH:
6305 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
6307 case RTX_TERNARY:
6308 case RTX_BITFIELD_OPS:
6309 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
6310 XEXP (x, 0), XEXP (x, 1),
6311 XEXP (x, 2));
6313 case RTX_COMPARE:
6314 case RTX_COMM_COMPARE:
6315 return simplify_relational_operation (code, mode,
6316 ((GET_MODE (XEXP (x, 0))
6317 != VOIDmode)
6318 ? GET_MODE (XEXP (x, 0))
6319 : GET_MODE (XEXP (x, 1))),
6320 XEXP (x, 0),
6321 XEXP (x, 1));
6323 case RTX_EXTRA:
6324 if (code == SUBREG)
6325 return simplify_subreg (mode, SUBREG_REG (x),
6326 GET_MODE (SUBREG_REG (x)),
6327 SUBREG_BYTE (x));
6328 break;
6330 case RTX_OBJ:
6331 if (code == LO_SUM)
6333 /* Convert (lo_sum (high FOO) FOO) to FOO. */
6334 if (GET_CODE (XEXP (x, 0)) == HIGH
6335 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
6336 return XEXP (x, 1);
6338 break;
6340 default:
6341 break;
6343 return NULL;