gcc/simplify-rtx.c
1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987-2017 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "backend.h"
25 #include "target.h"
26 #include "rtl.h"
27 #include "tree.h"
28 #include "predict.h"
29 #include "memmodel.h"
30 #include "optabs.h"
31 #include "emit-rtl.h"
32 #include "recog.h"
33 #include "diagnostic-core.h"
34 #include "varasm.h"
35 #include "flags.h"
37 /* Simplification and canonicalization of RTL. */
39 /* Much code operates on (low, high) pairs; the low value is an
40 unsigned wide int, the high value a signed wide int. We
41 occasionally need to sign extend from low to high as if low were a
42 signed wide int. */
43 #define HWI_SIGN_EXTEND(low) \
44 ((((HOST_WIDE_INT) low) < 0) ? HOST_WIDE_INT_M1 : HOST_WIDE_INT_0)
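/* For illustration (an editorial sketch, not part of the original source):
   with a 64-bit HOST_WIDE_INT, HWI_SIGN_EXTEND ((HOST_WIDE_INT) -5) is
   HOST_WIDE_INT_M1 (all bits set) and HWI_SIGN_EXTEND ((HOST_WIDE_INT) 42)
   is HOST_WIDE_INT_0, i.e. the high word that a (low, high) pair needs
   when LOW is reinterpreted as a signed value.  */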
46 static rtx neg_const_int (machine_mode, const_rtx);
47 static bool plus_minus_operand_p (const_rtx);
48 static rtx simplify_plus_minus (enum rtx_code, machine_mode, rtx, rtx);
49 static rtx simplify_immed_subreg (machine_mode, rtx, machine_mode,
50 unsigned int);
51 static rtx simplify_associative_operation (enum rtx_code, machine_mode,
52 rtx, rtx);
53 static rtx simplify_relational_operation_1 (enum rtx_code, machine_mode,
54 machine_mode, rtx, rtx);
55 static rtx simplify_unary_operation_1 (enum rtx_code, machine_mode, rtx);
56 static rtx simplify_binary_operation_1 (enum rtx_code, machine_mode,
57 rtx, rtx, rtx, rtx);
59 /* Negate a CONST_INT rtx. */
60 static rtx
61 neg_const_int (machine_mode mode, const_rtx i)
63 unsigned HOST_WIDE_INT val = -UINTVAL (i);
65 if (GET_MODE_PRECISION (mode) > HOST_BITS_PER_WIDE_INT
66 && val == UINTVAL (i))
67 return simplify_const_unary_operation (NEG, mode, CONST_CAST_RTX (i),
68 mode);
69 return gen_int_mode (val, mode);
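/* Example (editorial sketch, not part of the original source):
   neg_const_int (SImode, (const_int 5)) yields (const_int -5).  The call
   into simplify_const_unary_operation above covers the one case
   gen_int_mode cannot: negating the most negative HOST_WIDE_INT value in
   a mode wider than HOST_WIDE_INT, where the result no longer fits in a
   CONST_INT.  */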
 72 /* Test whether expression X is an immediate constant that represents
73 the most significant bit of machine mode MODE. */
75 bool
76 mode_signbit_p (machine_mode mode, const_rtx x)
78 unsigned HOST_WIDE_INT val;
79 unsigned int width;
80 scalar_int_mode int_mode;
82 if (!is_int_mode (mode, &int_mode))
83 return false;
85 width = GET_MODE_PRECISION (int_mode);
86 if (width == 0)
87 return false;
89 if (width <= HOST_BITS_PER_WIDE_INT
90 && CONST_INT_P (x))
91 val = INTVAL (x);
92 #if TARGET_SUPPORTS_WIDE_INT
93 else if (CONST_WIDE_INT_P (x))
95 unsigned int i;
96 unsigned int elts = CONST_WIDE_INT_NUNITS (x);
97 if (elts != (width + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT)
98 return false;
99 for (i = 0; i < elts - 1; i++)
100 if (CONST_WIDE_INT_ELT (x, i) != 0)
101 return false;
102 val = CONST_WIDE_INT_ELT (x, elts - 1);
103 width %= HOST_BITS_PER_WIDE_INT;
104 if (width == 0)
105 width = HOST_BITS_PER_WIDE_INT;
107 #else
108 else if (width <= HOST_BITS_PER_DOUBLE_INT
109 && CONST_DOUBLE_AS_INT_P (x)
110 && CONST_DOUBLE_LOW (x) == 0)
112 val = CONST_DOUBLE_HIGH (x);
113 width -= HOST_BITS_PER_WIDE_INT;
115 #endif
116 else
117 /* X is not an integer constant. */
118 return false;
120 if (width < HOST_BITS_PER_WIDE_INT)
121 val &= (HOST_WIDE_INT_1U << width) - 1;
122 return val == (HOST_WIDE_INT_1U << (width - 1));
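/* Example (editorial sketch): for SImode the sign bit is bit 31, so
   mode_signbit_p returns true exactly for an integer constant whose
   SImode value is 0x80000000, and false for every other constant.  */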
125 /* Test whether VAL is equal to the most significant bit of mode MODE
126 (after masking with the mode mask of MODE). Returns false if the
127 precision of MODE is too large to handle. */
129 bool
130 val_signbit_p (machine_mode mode, unsigned HOST_WIDE_INT val)
132 unsigned int width;
133 scalar_int_mode int_mode;
135 if (!is_int_mode (mode, &int_mode))
136 return false;
138 width = GET_MODE_PRECISION (int_mode);
139 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
140 return false;
142 val &= GET_MODE_MASK (int_mode);
143 return val == (HOST_WIDE_INT_1U << (width - 1));
146 /* Test whether the most significant bit of mode MODE is set in VAL.
147 Returns false if the precision of MODE is too large to handle. */
148 bool
149 val_signbit_known_set_p (machine_mode mode, unsigned HOST_WIDE_INT val)
151 unsigned int width;
153 scalar_int_mode int_mode;
154 if (!is_int_mode (mode, &int_mode))
155 return false;
157 width = GET_MODE_PRECISION (int_mode);
158 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
159 return false;
161 val &= HOST_WIDE_INT_1U << (width - 1);
162 return val != 0;
165 /* Test whether the most significant bit of mode MODE is clear in VAL.
166 Returns false if the precision of MODE is too large to handle. */
167 bool
168 val_signbit_known_clear_p (machine_mode mode, unsigned HOST_WIDE_INT val)
170 unsigned int width;
172 scalar_int_mode int_mode;
173 if (!is_int_mode (mode, &int_mode))
174 return false;
176 width = GET_MODE_PRECISION (int_mode);
177 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
178 return false;
180 val &= HOST_WIDE_INT_1U << (width - 1);
181 return val == 0;
184 /* Make a binary operation by properly ordering the operands and
185 seeing if the expression folds. */
188 simplify_gen_binary (enum rtx_code code, machine_mode mode, rtx op0,
189 rtx op1)
191 rtx tem;
193 /* If this simplifies, do it. */
194 tem = simplify_binary_operation (code, mode, op0, op1);
195 if (tem)
196 return tem;
198 /* Put complex operands first and constants second if commutative. */
199 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
200 && swap_commutative_operands_p (op0, op1))
201 std::swap (op0, op1);
203 return gen_rtx_fmt_ee (code, mode, op0, op1);
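/* Usage sketch (editorial, not part of the original source): when no
   folding applies, simplify_gen_binary (PLUS, SImode, (const_int 4),
   (reg:SI x)) returns (plus:SI (reg:SI x) (const_int 4));
   swap_commutative_operands_p orders the register before the constant
   for commutative codes, giving the canonical RTL form.  */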
206 /* If X is a MEM referencing the constant pool, return the real value.
207 Otherwise return X. */
209 avoid_constant_pool_reference (rtx x)
211 rtx c, tmp, addr;
212 machine_mode cmode;
213 HOST_WIDE_INT offset = 0;
215 switch (GET_CODE (x))
217 case MEM:
218 break;
220 case FLOAT_EXTEND:
221 /* Handle float extensions of constant pool references. */
222 tmp = XEXP (x, 0);
223 c = avoid_constant_pool_reference (tmp);
224 if (c != tmp && CONST_DOUBLE_AS_FLOAT_P (c))
225 return const_double_from_real_value (*CONST_DOUBLE_REAL_VALUE (c),
226 GET_MODE (x));
227 return x;
229 default:
230 return x;
233 if (GET_MODE (x) == BLKmode)
234 return x;
236 addr = XEXP (x, 0);
238 /* Call target hook to avoid the effects of -fpic etc.... */
239 addr = targetm.delegitimize_address (addr);
241 /* Split the address into a base and integer offset. */
242 if (GET_CODE (addr) == CONST
243 && GET_CODE (XEXP (addr, 0)) == PLUS
244 && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
246 offset = INTVAL (XEXP (XEXP (addr, 0), 1));
247 addr = XEXP (XEXP (addr, 0), 0);
250 if (GET_CODE (addr) == LO_SUM)
251 addr = XEXP (addr, 1);
253 /* If this is a constant pool reference, we can turn it into its
254 constant and hope that simplifications happen. */
255 if (GET_CODE (addr) == SYMBOL_REF
256 && CONSTANT_POOL_ADDRESS_P (addr))
258 c = get_pool_constant (addr);
259 cmode = get_pool_mode (addr);
261 /* If we're accessing the constant in a different mode than it was
262 originally stored, attempt to fix that up via subreg simplifications.
263 If that fails we have no choice but to return the original memory. */
264 if (offset == 0 && cmode == GET_MODE (x))
265 return c;
266 else if (offset >= 0 && offset < GET_MODE_SIZE (cmode))
268 rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
269 if (tem && CONSTANT_P (tem))
270 return tem;
274 return x;
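/* Example (editorial sketch): for a (mem:SF ...) whose address is a
   CONSTANT_POOL_ADDRESS_P SYMBOL_REF holding the value 1.0,
   avoid_constant_pool_reference returns the CONST_DOUBLE for 1.0 rather
   than the MEM, so callers can fold against the real value.  */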
277 /* Simplify a MEM based on its attributes. This is the default
278 delegitimize_address target hook, and it's recommended that every
279 overrider call it. */
282 delegitimize_mem_from_attrs (rtx x)
284 /* MEMs without MEM_OFFSETs may have been offset, so we can't just
285 use their base addresses as equivalent. */
286 if (MEM_P (x)
287 && MEM_EXPR (x)
288 && MEM_OFFSET_KNOWN_P (x))
290 tree decl = MEM_EXPR (x);
291 machine_mode mode = GET_MODE (x);
292 HOST_WIDE_INT offset = 0;
294 switch (TREE_CODE (decl))
296 default:
297 decl = NULL;
298 break;
300 case VAR_DECL:
301 break;
303 case ARRAY_REF:
304 case ARRAY_RANGE_REF:
305 case COMPONENT_REF:
306 case BIT_FIELD_REF:
307 case REALPART_EXPR:
308 case IMAGPART_EXPR:
309 case VIEW_CONVERT_EXPR:
311 HOST_WIDE_INT bitsize, bitpos;
312 tree toffset;
313 int unsignedp, reversep, volatilep = 0;
315 decl
316 = get_inner_reference (decl, &bitsize, &bitpos, &toffset, &mode,
317 &unsignedp, &reversep, &volatilep);
318 if (bitsize != GET_MODE_BITSIZE (mode)
319 || (bitpos % BITS_PER_UNIT)
320 || (toffset && !tree_fits_shwi_p (toffset)))
321 decl = NULL;
322 else
324 offset += bitpos / BITS_PER_UNIT;
325 if (toffset)
326 offset += tree_to_shwi (toffset);
328 break;
332 if (decl
333 && mode == GET_MODE (x)
334 && VAR_P (decl)
335 && (TREE_STATIC (decl)
336 || DECL_THREAD_LOCAL_P (decl))
337 && DECL_RTL_SET_P (decl)
338 && MEM_P (DECL_RTL (decl)))
340 rtx newx;
342 offset += MEM_OFFSET (x);
344 newx = DECL_RTL (decl);
346 if (MEM_P (newx))
348 rtx n = XEXP (newx, 0), o = XEXP (x, 0);
350 /* Avoid creating a new MEM needlessly if we already had
 351 the same address.  We do this if there's no OFFSET and the
352 old address X is identical to NEWX, or if X is of the
353 form (plus NEWX OFFSET), or the NEWX is of the form
354 (plus Y (const_int Z)) and X is that with the offset
355 added: (plus Y (const_int Z+OFFSET)). */
356 if (!((offset == 0
357 || (GET_CODE (o) == PLUS
358 && GET_CODE (XEXP (o, 1)) == CONST_INT
359 && (offset == INTVAL (XEXP (o, 1))
360 || (GET_CODE (n) == PLUS
361 && GET_CODE (XEXP (n, 1)) == CONST_INT
362 && (INTVAL (XEXP (n, 1)) + offset
363 == INTVAL (XEXP (o, 1)))
364 && (n = XEXP (n, 0))))
365 && (o = XEXP (o, 0))))
366 && rtx_equal_p (o, n)))
367 x = adjust_address_nv (newx, mode, offset);
369 else if (GET_MODE (x) == GET_MODE (newx)
370 && offset == 0)
371 x = newx;
375 return x;
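/* Example (editorial sketch): if X is a MEM whose MEM_EXPR is a static
   VAR_DECL (call it "v") with a known MEM_OFFSET, the code above can
   rebuild X from DECL_RTL (v) via adjust_address_nv, recovering a
   SYMBOL_REF-based address and thereby undoing -fpic-style address
   legitimization for analysis purposes.  */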
378 /* Make a unary operation by first seeing if it folds and otherwise making
379 the specified operation. */
382 simplify_gen_unary (enum rtx_code code, machine_mode mode, rtx op,
383 machine_mode op_mode)
385 rtx tem;
387 /* If this simplifies, use it. */
388 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
389 return tem;
391 return gen_rtx_fmt_e (code, mode, op);
394 /* Likewise for ternary operations. */
397 simplify_gen_ternary (enum rtx_code code, machine_mode mode,
398 machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
400 rtx tem;
402 /* If this simplifies, use it. */
403 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
404 op0, op1, op2)))
405 return tem;
407 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
410 /* Likewise, for relational operations.
411 CMP_MODE specifies mode comparison is done in. */
414 simplify_gen_relational (enum rtx_code code, machine_mode mode,
415 machine_mode cmp_mode, rtx op0, rtx op1)
417 rtx tem;
419 if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
420 op0, op1)))
421 return tem;
423 return gen_rtx_fmt_ee (code, mode, op0, op1);
426 /* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
 427 and simplify the result.  If FN is non-NULL, call this callback on each
 428 X; if it returns non-NULL, replace X with its return value and simplify the
429 result. */
432 simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
433 rtx (*fn) (rtx, const_rtx, void *), void *data)
435 enum rtx_code code = GET_CODE (x);
436 machine_mode mode = GET_MODE (x);
437 machine_mode op_mode;
438 const char *fmt;
439 rtx op0, op1, op2, newx, op;
440 rtvec vec, newvec;
441 int i, j;
443 if (__builtin_expect (fn != NULL, 0))
445 newx = fn (x, old_rtx, data);
446 if (newx)
447 return newx;
449 else if (rtx_equal_p (x, old_rtx))
450 return copy_rtx ((rtx) data);
452 switch (GET_RTX_CLASS (code))
454 case RTX_UNARY:
455 op0 = XEXP (x, 0);
456 op_mode = GET_MODE (op0);
457 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
458 if (op0 == XEXP (x, 0))
459 return x;
460 return simplify_gen_unary (code, mode, op0, op_mode);
462 case RTX_BIN_ARITH:
463 case RTX_COMM_ARITH:
464 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
465 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
466 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
467 return x;
468 return simplify_gen_binary (code, mode, op0, op1);
470 case RTX_COMPARE:
471 case RTX_COMM_COMPARE:
472 op0 = XEXP (x, 0);
473 op1 = XEXP (x, 1);
474 op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
475 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
476 op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
477 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
478 return x;
479 return simplify_gen_relational (code, mode, op_mode, op0, op1);
481 case RTX_TERNARY:
482 case RTX_BITFIELD_OPS:
483 op0 = XEXP (x, 0);
484 op_mode = GET_MODE (op0);
485 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
486 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
487 op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
488 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
489 return x;
490 if (op_mode == VOIDmode)
491 op_mode = GET_MODE (op0);
492 return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
494 case RTX_EXTRA:
495 if (code == SUBREG)
497 op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
498 if (op0 == SUBREG_REG (x))
499 return x;
500 op0 = simplify_gen_subreg (GET_MODE (x), op0,
501 GET_MODE (SUBREG_REG (x)),
502 SUBREG_BYTE (x));
503 return op0 ? op0 : x;
505 break;
507 case RTX_OBJ:
508 if (code == MEM)
510 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
511 if (op0 == XEXP (x, 0))
512 return x;
513 return replace_equiv_address_nv (x, op0);
515 else if (code == LO_SUM)
517 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
518 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
520 /* (lo_sum (high x) y) -> y where x and y have the same base. */
521 if (GET_CODE (op0) == HIGH)
523 rtx base0, base1, offset0, offset1;
524 split_const (XEXP (op0, 0), &base0, &offset0);
525 split_const (op1, &base1, &offset1);
526 if (rtx_equal_p (base0, base1))
527 return op1;
530 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
531 return x;
532 return gen_rtx_LO_SUM (mode, op0, op1);
534 break;
536 default:
537 break;
540 newx = x;
541 fmt = GET_RTX_FORMAT (code);
542 for (i = 0; fmt[i]; i++)
543 switch (fmt[i])
545 case 'E':
546 vec = XVEC (x, i);
547 newvec = XVEC (newx, i);
548 for (j = 0; j < GET_NUM_ELEM (vec); j++)
550 op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
551 old_rtx, fn, data);
552 if (op != RTVEC_ELT (vec, j))
554 if (newvec == vec)
556 newvec = shallow_copy_rtvec (vec);
557 if (x == newx)
558 newx = shallow_copy_rtx (x);
559 XVEC (newx, i) = newvec;
561 RTVEC_ELT (newvec, j) = op;
564 break;
566 case 'e':
567 if (XEXP (x, i))
569 op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
570 if (op != XEXP (x, i))
572 if (x == newx)
573 newx = shallow_copy_rtx (x);
574 XEXP (newx, i) = op;
577 break;
579 return newx;
582 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
583 resulting RTX. Return a new RTX which is as simplified as possible. */
586 simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
588 return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
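/* Example (editorial sketch): with X = (plus:SI (reg:SI r) (const_int 1)),
   OLD_RTX = (reg:SI r) and NEW_RTX = (const_int 3),
   simplify_replace_rtx returns (const_int 4), because the substituted
   operands are re-simplified through simplify_gen_binary.  */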
591 /* Try to simplify a MODE truncation of OP, which has OP_MODE.
592 Only handle cases where the truncated value is inherently an rvalue.
594 RTL provides two ways of truncating a value:
596 1. a lowpart subreg. This form is only a truncation when both
597 the outer and inner modes (here MODE and OP_MODE respectively)
598 are scalar integers, and only then when the subreg is used as
599 an rvalue.
601 It is only valid to form such truncating subregs if the
602 truncation requires no action by the target. The onus for
603 proving this is on the creator of the subreg -- e.g. the
604 caller to simplify_subreg or simplify_gen_subreg -- and typically
605 involves either TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode.
607 2. a TRUNCATE. This form handles both scalar and compound integers.
609 The first form is preferred where valid. However, the TRUNCATE
610 handling in simplify_unary_operation turns the second form into the
611 first form when TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode allow,
612 so it is generally safe to form rvalue truncations using:
614 simplify_gen_unary (TRUNCATE, ...)
616 and leave simplify_unary_operation to work out which representation
617 should be used.
619 Because of the proof requirements on (1), simplify_truncation must
620 also use simplify_gen_unary (TRUNCATE, ...) to truncate parts of OP,
621 regardless of whether the outer truncation came from a SUBREG or a
622 TRUNCATE. For example, if the caller has proven that an SImode
623 truncation of:
625 (and:DI X Y)
627 is a no-op and can be represented as a subreg, it does not follow
628 that SImode truncations of X and Y are also no-ops. On a target
629 like 64-bit MIPS that requires SImode values to be stored in
630 sign-extended form, an SImode truncation of:
632 (and:DI (reg:DI X) (const_int 63))
634 is trivially a no-op because only the lower 6 bits can be set.
635 However, X is still an arbitrary 64-bit number and so we cannot
636 assume that truncating it too is a no-op. */
638 static rtx
639 simplify_truncation (machine_mode mode, rtx op,
640 machine_mode op_mode)
642 unsigned int precision = GET_MODE_UNIT_PRECISION (mode);
643 unsigned int op_precision = GET_MODE_UNIT_PRECISION (op_mode);
644 scalar_int_mode int_mode, int_op_mode, subreg_mode;
646 gcc_assert (precision <= op_precision);
648 /* Optimize truncations of zero and sign extended values. */
649 if (GET_CODE (op) == ZERO_EXTEND
650 || GET_CODE (op) == SIGN_EXTEND)
652 /* There are three possibilities. If MODE is the same as the
653 origmode, we can omit both the extension and the subreg.
654 If MODE is not larger than the origmode, we can apply the
655 truncation without the extension. Finally, if the outermode
656 is larger than the origmode, we can just extend to the appropriate
657 mode. */
658 machine_mode origmode = GET_MODE (XEXP (op, 0));
659 if (mode == origmode)
660 return XEXP (op, 0);
661 else if (precision <= GET_MODE_UNIT_PRECISION (origmode))
662 return simplify_gen_unary (TRUNCATE, mode,
663 XEXP (op, 0), origmode);
664 else
665 return simplify_gen_unary (GET_CODE (op), mode,
666 XEXP (op, 0), origmode);
669 /* If the machine can perform operations in the truncated mode, distribute
670 the truncation, i.e. simplify (truncate:QI (op:SI (x:SI) (y:SI))) into
671 (op:QI (truncate:QI (x:SI)) (truncate:QI (y:SI))). */
672 if (1
673 && (!WORD_REGISTER_OPERATIONS || precision >= BITS_PER_WORD)
674 && (GET_CODE (op) == PLUS
675 || GET_CODE (op) == MINUS
676 || GET_CODE (op) == MULT))
678 rtx op0 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0), op_mode);
679 if (op0)
681 rtx op1 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 1), op_mode);
682 if (op1)
683 return simplify_gen_binary (GET_CODE (op), mode, op0, op1);
687 /* Simplify (truncate:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C)) into
 688 (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
689 the outer subreg is effectively a truncation to the original mode. */
690 if ((GET_CODE (op) == LSHIFTRT
691 || GET_CODE (op) == ASHIFTRT)
692 /* Ensure that OP_MODE is at least twice as wide as MODE
693 to avoid the possibility that an outer LSHIFTRT shifts by more
694 than the sign extension's sign_bit_copies and introduces zeros
695 into the high bits of the result. */
696 && 2 * precision <= op_precision
697 && CONST_INT_P (XEXP (op, 1))
698 && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
699 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
700 && UINTVAL (XEXP (op, 1)) < precision)
701 return simplify_gen_binary (ASHIFTRT, mode,
702 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
704 /* Likewise (truncate:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C)) into
 705 (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
706 the outer subreg is effectively a truncation to the original mode. */
707 if ((GET_CODE (op) == LSHIFTRT
708 || GET_CODE (op) == ASHIFTRT)
709 && CONST_INT_P (XEXP (op, 1))
710 && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
711 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
712 && UINTVAL (XEXP (op, 1)) < precision)
713 return simplify_gen_binary (LSHIFTRT, mode,
714 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
716 /* Likewise (truncate:QI (ashift:SI (zero_extend:SI (x:QI)) C)) into
 717 (ashift:QI (x:QI) C), where C is a suitable small constant and
718 the outer subreg is effectively a truncation to the original mode. */
719 if (GET_CODE (op) == ASHIFT
720 && CONST_INT_P (XEXP (op, 1))
721 && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
722 || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
723 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
724 && UINTVAL (XEXP (op, 1)) < precision)
725 return simplify_gen_binary (ASHIFT, mode,
726 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
728 /* Likewise (truncate:QI (and:SI (lshiftrt:SI (x:SI) C) C2)) into
729 (and:QI (lshiftrt:QI (truncate:QI (x:SI)) C) C2) for suitable C
730 and C2. */
731 if (GET_CODE (op) == AND
732 && (GET_CODE (XEXP (op, 0)) == LSHIFTRT
733 || GET_CODE (XEXP (op, 0)) == ASHIFTRT)
734 && CONST_INT_P (XEXP (XEXP (op, 0), 1))
735 && CONST_INT_P (XEXP (op, 1)))
737 rtx op0 = (XEXP (XEXP (op, 0), 0));
738 rtx shift_op = XEXP (XEXP (op, 0), 1);
739 rtx mask_op = XEXP (op, 1);
740 unsigned HOST_WIDE_INT shift = UINTVAL (shift_op);
741 unsigned HOST_WIDE_INT mask = UINTVAL (mask_op);
743 if (shift < precision
744 /* If doing this transform works for an X with all bits set,
745 it works for any X. */
746 && ((GET_MODE_MASK (mode) >> shift) & mask)
747 == ((GET_MODE_MASK (op_mode) >> shift) & mask)
748 && (op0 = simplify_gen_unary (TRUNCATE, mode, op0, op_mode))
749 && (op0 = simplify_gen_binary (LSHIFTRT, mode, op0, shift_op)))
751 mask_op = GEN_INT (trunc_int_for_mode (mask, mode));
752 return simplify_gen_binary (AND, mode, op0, mask_op);
756 /* Turn (truncate:M1 (*_extract:M2 (reg:M2) (len) (pos))) into
757 (*_extract:M1 (truncate:M1 (reg:M2)) (len) (pos')) if possible without
758 changing len. */
759 if ((GET_CODE (op) == ZERO_EXTRACT || GET_CODE (op) == SIGN_EXTRACT)
760 && REG_P (XEXP (op, 0))
761 && GET_MODE (XEXP (op, 0)) == GET_MODE (op)
762 && CONST_INT_P (XEXP (op, 1))
763 && CONST_INT_P (XEXP (op, 2)))
765 rtx op0 = XEXP (op, 0);
766 unsigned HOST_WIDE_INT len = UINTVAL (XEXP (op, 1));
767 unsigned HOST_WIDE_INT pos = UINTVAL (XEXP (op, 2));
768 if (BITS_BIG_ENDIAN && pos >= op_precision - precision)
770 op0 = simplify_gen_unary (TRUNCATE, mode, op0, GET_MODE (op0));
771 if (op0)
773 pos -= op_precision - precision;
774 return simplify_gen_ternary (GET_CODE (op), mode, mode, op0,
775 XEXP (op, 1), GEN_INT (pos));
778 else if (!BITS_BIG_ENDIAN && precision >= len + pos)
780 op0 = simplify_gen_unary (TRUNCATE, mode, op0, GET_MODE (op0));
781 if (op0)
782 return simplify_gen_ternary (GET_CODE (op), mode, mode, op0,
783 XEXP (op, 1), XEXP (op, 2));
787 /* Recognize a word extraction from a multi-word subreg. */
788 if ((GET_CODE (op) == LSHIFTRT
789 || GET_CODE (op) == ASHIFTRT)
790 && SCALAR_INT_MODE_P (mode)
791 && SCALAR_INT_MODE_P (op_mode)
792 && precision >= BITS_PER_WORD
793 && 2 * precision <= op_precision
794 && CONST_INT_P (XEXP (op, 1))
795 && (INTVAL (XEXP (op, 1)) & (precision - 1)) == 0
796 && UINTVAL (XEXP (op, 1)) < op_precision)
798 int byte = subreg_lowpart_offset (mode, op_mode);
799 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
800 return simplify_gen_subreg (mode, XEXP (op, 0), op_mode,
801 (WORDS_BIG_ENDIAN
802 ? byte - shifted_bytes
803 : byte + shifted_bytes));
806 /* If we have a TRUNCATE of a right shift of MEM, make a new MEM
807 and try replacing the TRUNCATE and shift with it. Don't do this
808 if the MEM has a mode-dependent address. */
809 if ((GET_CODE (op) == LSHIFTRT
810 || GET_CODE (op) == ASHIFTRT)
811 && is_a <scalar_int_mode> (mode, &int_mode)
812 && is_a <scalar_int_mode> (op_mode, &int_op_mode)
813 && MEM_P (XEXP (op, 0))
814 && CONST_INT_P (XEXP (op, 1))
815 && INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (int_mode) == 0
816 && INTVAL (XEXP (op, 1)) > 0
817 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (int_op_mode)
818 && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0),
819 MEM_ADDR_SPACE (XEXP (op, 0)))
820 && ! MEM_VOLATILE_P (XEXP (op, 0))
821 && (GET_MODE_SIZE (int_mode) >= UNITS_PER_WORD
822 || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
824 int byte = subreg_lowpart_offset (int_mode, int_op_mode);
825 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
826 return adjust_address_nv (XEXP (op, 0), int_mode,
827 (WORDS_BIG_ENDIAN
828 ? byte - shifted_bytes
829 : byte + shifted_bytes));
832 /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
833 (OP:SI foo:SI) if OP is NEG or ABS. */
834 if ((GET_CODE (op) == ABS
835 || GET_CODE (op) == NEG)
836 && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
837 || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
838 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
839 return simplify_gen_unary (GET_CODE (op), mode,
840 XEXP (XEXP (op, 0), 0), mode);
842 /* (truncate:A (subreg:B (truncate:C X) 0)) is
843 (truncate:A X). */
844 if (GET_CODE (op) == SUBREG
845 && is_a <scalar_int_mode> (mode, &int_mode)
846 && SCALAR_INT_MODE_P (op_mode)
847 && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (op)), &subreg_mode)
848 && GET_CODE (SUBREG_REG (op)) == TRUNCATE
849 && subreg_lowpart_p (op))
851 rtx inner = XEXP (SUBREG_REG (op), 0);
852 if (GET_MODE_PRECISION (int_mode) <= GET_MODE_PRECISION (subreg_mode))
853 return simplify_gen_unary (TRUNCATE, int_mode, inner,
854 GET_MODE (inner));
855 else
856 /* If subreg above is paradoxical and C is narrower
857 than A, return (subreg:A (truncate:C X) 0). */
858 return simplify_gen_subreg (int_mode, SUBREG_REG (op), subreg_mode, 0);
861 /* (truncate:A (truncate:B X)) is (truncate:A X). */
862 if (GET_CODE (op) == TRUNCATE)
863 return simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0),
864 GET_MODE (XEXP (op, 0)));
 866 /* (truncate:A (ior X C)) is (const_int -1) if C, truncated to
 867 mode A, is already -1. */
868 if (GET_CODE (op) == IOR
869 && SCALAR_INT_MODE_P (mode)
870 && SCALAR_INT_MODE_P (op_mode)
871 && CONST_INT_P (XEXP (op, 1))
872 && trunc_int_for_mode (INTVAL (XEXP (op, 1)), mode) == -1)
873 return constm1_rtx;
875 return NULL_RTX;
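/* Example (editorial sketch): via the distribution rule above,
   (truncate:QI (plus:SI (reg:SI x) (reg:SI y))) can become
   (plus:QI (truncate:QI (reg:SI x)) (truncate:QI (reg:SI y)))
   on targets where the WORD_REGISTER_OPERATIONS check permits QImode
   arithmetic.  */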
878 /* Try to simplify a unary operation CODE whose output mode is to be
879 MODE with input operand OP whose mode was originally OP_MODE.
880 Return zero if no simplification can be made. */
882 simplify_unary_operation (enum rtx_code code, machine_mode mode,
883 rtx op, machine_mode op_mode)
885 rtx trueop, tem;
887 trueop = avoid_constant_pool_reference (op);
889 tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
890 if (tem)
891 return tem;
893 return simplify_unary_operation_1 (code, mode, op);
896 /* Return true if FLOAT or UNSIGNED_FLOAT operation OP is known
897 to be exact. */
899 static bool
900 exact_int_to_float_conversion_p (const_rtx op)
902 int out_bits = significand_size (GET_MODE_INNER (GET_MODE (op)));
903 machine_mode op0_mode = GET_MODE (XEXP (op, 0));
904 /* Constants shouldn't reach here. */
905 gcc_assert (op0_mode != VOIDmode);
906 int in_prec = GET_MODE_UNIT_PRECISION (op0_mode);
907 int in_bits = in_prec;
908 if (HWI_COMPUTABLE_MODE_P (op0_mode))
910 unsigned HOST_WIDE_INT nonzero = nonzero_bits (XEXP (op, 0), op0_mode);
911 if (GET_CODE (op) == FLOAT)
912 in_bits -= num_sign_bit_copies (XEXP (op, 0), op0_mode);
913 else if (GET_CODE (op) == UNSIGNED_FLOAT)
914 in_bits = wi::min_precision (wi::uhwi (nonzero, in_prec), UNSIGNED);
915 else
916 gcc_unreachable ();
917 in_bits -= wi::ctz (wi::uhwi (nonzero, in_prec));
919 return in_bits <= out_bits;
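/* Example (editorial sketch): (float:SF (reg:HI x)) is always exact,
   since any 16-bit integer fits in SFmode's 24-bit significand, whereas
   (float:SF (reg:DI x)) is exact only when nonzero_bits or
   num_sign_bit_copies prove that no more than 24 significant bits
   remain.  */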
922 /* Perform some simplifications we can do even if the operands
923 aren't constant. */
924 static rtx
925 simplify_unary_operation_1 (enum rtx_code code, machine_mode mode, rtx op)
927 enum rtx_code reversed;
928 rtx temp;
929 scalar_int_mode inner, int_mode, op_mode, op0_mode;
931 switch (code)
933 case NOT:
934 /* (not (not X)) == X. */
935 if (GET_CODE (op) == NOT)
936 return XEXP (op, 0);
938 /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
939 comparison is all ones. */
940 if (COMPARISON_P (op)
941 && (mode == BImode || STORE_FLAG_VALUE == -1)
942 && ((reversed = reversed_comparison_code (op, NULL)) != UNKNOWN))
943 return simplify_gen_relational (reversed, mode, VOIDmode,
944 XEXP (op, 0), XEXP (op, 1));
946 /* (not (plus X -1)) can become (neg X). */
947 if (GET_CODE (op) == PLUS
948 && XEXP (op, 1) == constm1_rtx)
949 return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
951 /* Similarly, (not (neg X)) is (plus X -1). Only do this for
952 modes that have CONSTM1_RTX, i.e. MODE_INT, MODE_PARTIAL_INT
953 and MODE_VECTOR_INT. */
954 if (GET_CODE (op) == NEG && CONSTM1_RTX (mode))
955 return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
956 CONSTM1_RTX (mode));
958 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
959 if (GET_CODE (op) == XOR
960 && CONST_INT_P (XEXP (op, 1))
961 && (temp = simplify_unary_operation (NOT, mode,
962 XEXP (op, 1), mode)) != 0)
963 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
965 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
966 if (GET_CODE (op) == PLUS
967 && CONST_INT_P (XEXP (op, 1))
968 && mode_signbit_p (mode, XEXP (op, 1))
969 && (temp = simplify_unary_operation (NOT, mode,
970 XEXP (op, 1), mode)) != 0)
971 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
974 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
975 operands other than 1, but that is not valid. We could do a
976 similar simplification for (not (lshiftrt C X)) where C is
977 just the sign bit, but this doesn't seem common enough to
978 bother with. */
979 if (GET_CODE (op) == ASHIFT
980 && XEXP (op, 0) == const1_rtx)
982 temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
983 return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
986 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
987 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
988 so we can perform the above simplification. */
989 if (STORE_FLAG_VALUE == -1
990 && is_a <scalar_int_mode> (mode, &int_mode)
991 && GET_CODE (op) == ASHIFTRT
992 && CONST_INT_P (XEXP (op, 1))
993 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (int_mode) - 1)
994 return simplify_gen_relational (GE, int_mode, VOIDmode,
995 XEXP (op, 0), const0_rtx);
998 if (partial_subreg_p (op)
999 && subreg_lowpart_p (op)
1000 && GET_CODE (SUBREG_REG (op)) == ASHIFT
1001 && XEXP (SUBREG_REG (op), 0) == const1_rtx)
1003 machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
1004 rtx x;
1006 x = gen_rtx_ROTATE (inner_mode,
1007 simplify_gen_unary (NOT, inner_mode, const1_rtx,
1008 inner_mode),
1009 XEXP (SUBREG_REG (op), 1));
1010 temp = rtl_hooks.gen_lowpart_no_emit (mode, x);
1011 if (temp)
1012 return temp;
1015 /* Apply De Morgan's laws to reduce number of patterns for machines
1016 with negating logical insns (and-not, nand, etc.). If result has
1017 only one NOT, put it first, since that is how the patterns are
1018 coded. */
1019 if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
1021 rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
1022 machine_mode op_mode;
1024 op_mode = GET_MODE (in1);
1025 in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);
1027 op_mode = GET_MODE (in2);
1028 if (op_mode == VOIDmode)
1029 op_mode = mode;
1030 in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);
1032 if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
1033 std::swap (in1, in2);
1035 return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
1036 mode, in1, in2);
1039 /* (not (bswap x)) -> (bswap (not x)). */
1040 if (GET_CODE (op) == BSWAP)
1042 rtx x = simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
1043 return simplify_gen_unary (BSWAP, mode, x, mode);
1045 break;
1047 case NEG:
1048 /* (neg (neg X)) == X. */
1049 if (GET_CODE (op) == NEG)
1050 return XEXP (op, 0);
1052 /* (neg (x ? (neg y) : y)) == !x ? (neg y) : y.
 1053 If the comparison is not reversible, use
1054 x ? y : (neg y). */
1055 if (GET_CODE (op) == IF_THEN_ELSE)
1057 rtx cond = XEXP (op, 0);
1058 rtx true_rtx = XEXP (op, 1);
1059 rtx false_rtx = XEXP (op, 2);
1061 if ((GET_CODE (true_rtx) == NEG
1062 && rtx_equal_p (XEXP (true_rtx, 0), false_rtx))
1063 || (GET_CODE (false_rtx) == NEG
1064 && rtx_equal_p (XEXP (false_rtx, 0), true_rtx)))
1066 if (reversed_comparison_code (cond, NULL) != UNKNOWN)
1067 temp = reversed_comparison (cond, mode);
1068 else
1070 temp = cond;
1071 std::swap (true_rtx, false_rtx);
1073 return simplify_gen_ternary (IF_THEN_ELSE, mode,
1074 mode, temp, true_rtx, false_rtx);
1078 /* (neg (plus X 1)) can become (not X). */
1079 if (GET_CODE (op) == PLUS
1080 && XEXP (op, 1) == const1_rtx)
1081 return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
1083 /* Similarly, (neg (not X)) is (plus X 1). */
1084 if (GET_CODE (op) == NOT)
1085 return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
1086 CONST1_RTX (mode));
1088 /* (neg (minus X Y)) can become (minus Y X). This transformation
1089 isn't safe for modes with signed zeros, since if X and Y are
1090 both +0, (minus Y X) is the same as (minus X Y). If the
1091 rounding mode is towards +infinity (or -infinity) then the two
1092 expressions will be rounded differently. */
1093 if (GET_CODE (op) == MINUS
1094 && !HONOR_SIGNED_ZEROS (mode)
1095 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1096 return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
1098 if (GET_CODE (op) == PLUS
1099 && !HONOR_SIGNED_ZEROS (mode)
1100 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1102 /* (neg (plus A C)) is simplified to (minus -C A). */
1103 if (CONST_SCALAR_INT_P (XEXP (op, 1))
1104 || CONST_DOUBLE_AS_FLOAT_P (XEXP (op, 1)))
1106 temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
1107 if (temp)
1108 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
1111 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
1112 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
1113 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
1116 /* (neg (mult A B)) becomes (mult A (neg B)).
1117 This works even for floating-point values. */
1118 if (GET_CODE (op) == MULT
1119 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1121 temp = simplify_gen_unary (NEG, mode, XEXP (op, 1), mode);
1122 return simplify_gen_binary (MULT, mode, XEXP (op, 0), temp);
1125 /* NEG commutes with ASHIFT since it is multiplication. Only do
1126 this if we can then eliminate the NEG (e.g., if the operand
1127 is a constant). */
1128 if (GET_CODE (op) == ASHIFT)
1130 temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
1131 if (temp)
1132 return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
1135 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
1136 C is equal to the width of MODE minus 1. */
1137 if (GET_CODE (op) == ASHIFTRT
1138 && CONST_INT_P (XEXP (op, 1))
1139 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
1140 return simplify_gen_binary (LSHIFTRT, mode,
1141 XEXP (op, 0), XEXP (op, 1));
1143 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
1144 C is equal to the width of MODE minus 1. */
1145 if (GET_CODE (op) == LSHIFTRT
1146 && CONST_INT_P (XEXP (op, 1))
1147 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
1148 return simplify_gen_binary (ASHIFTRT, mode,
1149 XEXP (op, 0), XEXP (op, 1));
1151 /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
1152 if (GET_CODE (op) == XOR
1153 && XEXP (op, 1) == const1_rtx
1154 && nonzero_bits (XEXP (op, 0), mode) == 1)
1155 return plus_constant (mode, XEXP (op, 0), -1);
1157 /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1. */
1158 /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1. */
1159 if (GET_CODE (op) == LT
1160 && XEXP (op, 1) == const0_rtx
1161 && is_a <scalar_int_mode> (GET_MODE (XEXP (op, 0)), &inner))
1163 int_mode = as_a <scalar_int_mode> (mode);
1164 int isize = GET_MODE_PRECISION (inner);
1165 if (STORE_FLAG_VALUE == 1)
1167 temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
1168 GEN_INT (isize - 1));
1169 if (int_mode == inner)
1170 return temp;
1171 if (GET_MODE_PRECISION (int_mode) > isize)
1172 return simplify_gen_unary (SIGN_EXTEND, int_mode, temp, inner);
1173 return simplify_gen_unary (TRUNCATE, int_mode, temp, inner);
1175 else if (STORE_FLAG_VALUE == -1)
1177 temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
1178 GEN_INT (isize - 1));
1179 if (int_mode == inner)
1180 return temp;
1181 if (GET_MODE_PRECISION (int_mode) > isize)
1182 return simplify_gen_unary (ZERO_EXTEND, int_mode, temp, inner);
1183 return simplify_gen_unary (TRUNCATE, int_mode, temp, inner);
1186 break;
1188 case TRUNCATE:
1189 /* Don't optimize (lshiftrt (mult ...)) as it would interfere
1190 with the umulXi3_highpart patterns. */
1191 if (GET_CODE (op) == LSHIFTRT
1192 && GET_CODE (XEXP (op, 0)) == MULT)
1193 break;
1195 if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
1197 if (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op)))
1199 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1200 if (temp)
1201 return temp;
1203 /* We can't handle truncation to a partial integer mode here
1204 because we don't know the real bitsize of the partial
1205 integer mode. */
1206 break;
1209 if (GET_MODE (op) != VOIDmode)
1211 temp = simplify_truncation (mode, op, GET_MODE (op));
1212 if (temp)
1213 return temp;
1216 /* If we know that the value is already truncated, we can
1217 replace the TRUNCATE with a SUBREG. */
1218 if (GET_MODE_NUNITS (mode) == 1
1219 && (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op))
1220 || truncated_to_mode (mode, op)))
1222 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1223 if (temp)
1224 return temp;
1227 /* A truncate of a comparison can be replaced with a subreg if
1228 STORE_FLAG_VALUE permits. This is like the previous test,
1229 but it works even if the comparison is done in a mode larger
1230 than HOST_BITS_PER_WIDE_INT. */
1231 if (HWI_COMPUTABLE_MODE_P (mode)
1232 && COMPARISON_P (op)
1233 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
1235 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1236 if (temp)
1237 return temp;
1240 /* A truncate of a memory is just loading the low part of the memory
1241 if we are not changing the meaning of the address. */
1242 if (GET_CODE (op) == MEM
1243 && !VECTOR_MODE_P (mode)
1244 && !MEM_VOLATILE_P (op)
1245 && !mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op)))
1247 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1248 if (temp)
1249 return temp;
1252 break;
1254 case FLOAT_TRUNCATE:
1255 if (DECIMAL_FLOAT_MODE_P (mode))
1256 break;
1258 /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */
1259 if (GET_CODE (op) == FLOAT_EXTEND
1260 && GET_MODE (XEXP (op, 0)) == mode)
1261 return XEXP (op, 0);
1263 /* (float_truncate:SF (float_truncate:DF foo:XF))
1264 = (float_truncate:SF foo:XF).
1265 This may eliminate double rounding, so it is unsafe.
1267 (float_truncate:SF (float_extend:XF foo:DF))
1268 = (float_truncate:SF foo:DF).
1270 (float_truncate:DF (float_extend:XF foo:SF))
1271 = (float_extend:DF foo:SF). */
1272 if ((GET_CODE (op) == FLOAT_TRUNCATE
1273 && flag_unsafe_math_optimizations)
1274 || GET_CODE (op) == FLOAT_EXTEND)
1275 return simplify_gen_unary (GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)))
1276 > GET_MODE_UNIT_SIZE (mode)
1277 ? FLOAT_TRUNCATE : FLOAT_EXTEND,
1278 mode,
1279 XEXP (op, 0), mode);
1281 /* (float_truncate (float x)) is (float x) */
1282 if ((GET_CODE (op) == FLOAT || GET_CODE (op) == UNSIGNED_FLOAT)
1283 && (flag_unsafe_math_optimizations
1284 || exact_int_to_float_conversion_p (op)))
1285 return simplify_gen_unary (GET_CODE (op), mode,
1286 XEXP (op, 0),
1287 GET_MODE (XEXP (op, 0)));
1289 /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
1290 (OP:SF foo:SF) if OP is NEG or ABS. */
1291 if ((GET_CODE (op) == ABS
1292 || GET_CODE (op) == NEG)
1293 && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
1294 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
1295 return simplify_gen_unary (GET_CODE (op), mode,
1296 XEXP (XEXP (op, 0), 0), mode);
1298 /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
1299 is (float_truncate:SF x). */
1300 if (GET_CODE (op) == SUBREG
1301 && subreg_lowpart_p (op)
1302 && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
1303 return SUBREG_REG (op);
1304 break;
1306 case FLOAT_EXTEND:
1307 if (DECIMAL_FLOAT_MODE_P (mode))
1308 break;
1310 /* (float_extend (float_extend x)) is (float_extend x)
1312 (float_extend (float x)) is (float x) assuming that double
 1313 rounding can't happen.  */
1315 if (GET_CODE (op) == FLOAT_EXTEND
1316 || ((GET_CODE (op) == FLOAT || GET_CODE (op) == UNSIGNED_FLOAT)
1317 && exact_int_to_float_conversion_p (op)))
1318 return simplify_gen_unary (GET_CODE (op), mode,
1319 XEXP (op, 0),
1320 GET_MODE (XEXP (op, 0)));
1322 break;
1324 case ABS:
1325 /* (abs (neg <foo>)) -> (abs <foo>) */
1326 if (GET_CODE (op) == NEG)
1327 return simplify_gen_unary (ABS, mode, XEXP (op, 0),
1328 GET_MODE (XEXP (op, 0)));
1330 /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
1331 do nothing. */
1332 if (GET_MODE (op) == VOIDmode)
1333 break;
1335 /* If operand is something known to be positive, ignore the ABS. */
1336 if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
1337 || val_signbit_known_clear_p (GET_MODE (op),
1338 nonzero_bits (op, GET_MODE (op))))
1339 return op;
1341 /* If operand is known to be only -1 or 0, convert ABS to NEG. */
1342 if (is_a <scalar_int_mode> (mode, &int_mode)
1343 && (num_sign_bit_copies (op, int_mode)
1344 == GET_MODE_PRECISION (int_mode)))
1345 return gen_rtx_NEG (int_mode, op);
1347 break;
1349 case FFS:
1350 /* (ffs (*_extend <X>)) = (ffs <X>) */
1351 if (GET_CODE (op) == SIGN_EXTEND
1352 || GET_CODE (op) == ZERO_EXTEND)
1353 return simplify_gen_unary (FFS, mode, XEXP (op, 0),
1354 GET_MODE (XEXP (op, 0)));
1355 break;
1357 case POPCOUNT:
1358 switch (GET_CODE (op))
1360 case BSWAP:
1361 case ZERO_EXTEND:
1362 /* (popcount (zero_extend <X>)) = (popcount <X>) */
1363 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1364 GET_MODE (XEXP (op, 0)));
1366 case ROTATE:
1367 case ROTATERT:
1368 /* Rotations don't affect popcount. */
1369 if (!side_effects_p (XEXP (op, 1)))
1370 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1371 GET_MODE (XEXP (op, 0)));
1372 break;
1374 default:
1375 break;
1377 break;
1379 case PARITY:
1380 switch (GET_CODE (op))
1382 case NOT:
1383 case BSWAP:
1384 case ZERO_EXTEND:
1385 case SIGN_EXTEND:
1386 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1387 GET_MODE (XEXP (op, 0)));
1389 case ROTATE:
1390 case ROTATERT:
1391 /* Rotations don't affect parity. */
1392 if (!side_effects_p (XEXP (op, 1)))
1393 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1394 GET_MODE (XEXP (op, 0)));
1395 break;
1397 default:
1398 break;
1400 break;
1402 case BSWAP:
1403 /* (bswap (bswap x)) -> x. */
1404 if (GET_CODE (op) == BSWAP)
1405 return XEXP (op, 0);
1406 break;
1408 case FLOAT:
1409 /* (float (sign_extend <X>)) = (float <X>). */
1410 if (GET_CODE (op) == SIGN_EXTEND)
1411 return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
1412 GET_MODE (XEXP (op, 0)));
1413 break;
1415 case SIGN_EXTEND:
1416 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
1417 becomes just the MINUS if its mode is MODE. This allows
1418 folding switch statements on machines using casesi (such as
1419 the VAX). */
1420 if (GET_CODE (op) == TRUNCATE
1421 && GET_MODE (XEXP (op, 0)) == mode
1422 && GET_CODE (XEXP (op, 0)) == MINUS
1423 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
1424 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
1425 return XEXP (op, 0);
1427 /* Extending a widening multiplication should be canonicalized to
1428 a wider widening multiplication. */
1429 if (GET_CODE (op) == MULT)
1431 rtx lhs = XEXP (op, 0);
1432 rtx rhs = XEXP (op, 1);
1433 enum rtx_code lcode = GET_CODE (lhs);
1434 enum rtx_code rcode = GET_CODE (rhs);
1436 /* Widening multiplies usually extend both operands, but sometimes
1437 they use a shift to extract a portion of a register. */
1438 if ((lcode == SIGN_EXTEND
1439 || (lcode == ASHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1440 && (rcode == SIGN_EXTEND
1441 || (rcode == ASHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1443 machine_mode lmode = GET_MODE (lhs);
1444 machine_mode rmode = GET_MODE (rhs);
1445 int bits;
1447 if (lcode == ASHIFTRT)
1448 /* Number of bits not shifted off the end. */
1449 bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
1450 else /* lcode == SIGN_EXTEND */
1451 /* Size of inner mode. */
1452 bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
1454 if (rcode == ASHIFTRT)
1455 bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
1456 else /* rcode == SIGN_EXTEND */
1457 bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
 1459 /* We can only widen multiplies if the result is mathematically
 1460 equivalent, i.e. if overflow was impossible. */
1461 if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
1462 return simplify_gen_binary
1463 (MULT, mode,
1464 simplify_gen_unary (SIGN_EXTEND, mode, lhs, lmode),
1465 simplify_gen_unary (SIGN_EXTEND, mode, rhs, rmode));
1469 /* Check for a sign extension of a subreg of a promoted
1470 variable, where the promotion is sign-extended, and the
1471 target mode is the same as the variable's promotion. */
1472 if (GET_CODE (op) == SUBREG
1473 && SUBREG_PROMOTED_VAR_P (op)
1474 && SUBREG_PROMOTED_SIGNED_P (op)
1475 && !paradoxical_subreg_p (mode, GET_MODE (SUBREG_REG (op))))
1477 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1478 if (temp)
1479 return temp;
1482 /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
1483 (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1484 if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
1486 gcc_assert (GET_MODE_PRECISION (mode)
1487 > GET_MODE_PRECISION (GET_MODE (op)));
1488 return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
1489 GET_MODE (XEXP (op, 0)));
1492 /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
 1493 is (sign_extend:M (subreg:O <X>)) if there is a mode with
1494 GET_MODE_BITSIZE (N) - I bits.
1495 (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1496 is similarly (zero_extend:M (subreg:O <X>)). */
1497 if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT)
1498 && GET_CODE (XEXP (op, 0)) == ASHIFT
1499 && is_a <scalar_int_mode> (mode, &int_mode)
1500 && CONST_INT_P (XEXP (op, 1))
1501 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1502 && (op_mode = as_a <scalar_int_mode> (GET_MODE (op)),
1503 GET_MODE_BITSIZE (op_mode) > INTVAL (XEXP (op, 1))))
1505 scalar_int_mode tmode;
1506 gcc_assert (GET_MODE_BITSIZE (int_mode)
1507 > GET_MODE_BITSIZE (op_mode));
1508 if (int_mode_for_size (GET_MODE_BITSIZE (op_mode)
1509 - INTVAL (XEXP (op, 1)), 1).exists (&tmode))
1511 rtx inner =
1512 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1513 if (inner)
1514 return simplify_gen_unary (GET_CODE (op) == ASHIFTRT
1515 ? SIGN_EXTEND : ZERO_EXTEND,
1516 int_mode, inner, tmode);
1520 /* (sign_extend:M (lshiftrt:N <X> (const_int I))) is better as
1521 (zero_extend:M (lshiftrt:N <X> (const_int I))) if I is not 0. */
1522 if (GET_CODE (op) == LSHIFTRT
1523 && CONST_INT_P (XEXP (op, 1))
1524 && XEXP (op, 1) != const0_rtx)
1525 return simplify_gen_unary (ZERO_EXTEND, mode, op, GET_MODE (op));
1527 #if defined(POINTERS_EXTEND_UNSIGNED)
1528 /* As we do not know which address space the pointer is referring to,
1529 we can do this only if the target does not support different pointer
1530 or address modes depending on the address space. */
1531 if (target_default_pointer_address_modes_p ()
1532 && ! POINTERS_EXTEND_UNSIGNED
1533 && mode == Pmode && GET_MODE (op) == ptr_mode
1534 && (CONSTANT_P (op)
1535 || (GET_CODE (op) == SUBREG
1536 && REG_P (SUBREG_REG (op))
1537 && REG_POINTER (SUBREG_REG (op))
1538 && GET_MODE (SUBREG_REG (op)) == Pmode))
1539 && !targetm.have_ptr_extend ())
1541 temp
1542 = convert_memory_address_addr_space_1 (Pmode, op,
1543 ADDR_SPACE_GENERIC, false,
1544 true);
1545 if (temp)
1546 return temp;
1548 #endif
1549 break;
1551 case ZERO_EXTEND:
1552 /* Check for a zero extension of a subreg of a promoted
1553 variable, where the promotion is zero-extended, and the
1554 target mode is the same as the variable's promotion. */
1555 if (GET_CODE (op) == SUBREG
1556 && SUBREG_PROMOTED_VAR_P (op)
1557 && SUBREG_PROMOTED_UNSIGNED_P (op)
1558 && !paradoxical_subreg_p (mode, GET_MODE (SUBREG_REG (op))))
1560 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1561 if (temp)
1562 return temp;
1565 /* Extending a widening multiplication should be canonicalized to
1566 a wider widening multiplication. */
1567 if (GET_CODE (op) == MULT)
1569 rtx lhs = XEXP (op, 0);
1570 rtx rhs = XEXP (op, 1);
1571 enum rtx_code lcode = GET_CODE (lhs);
1572 enum rtx_code rcode = GET_CODE (rhs);
1574 /* Widening multiplies usually extend both operands, but sometimes
1575 they use a shift to extract a portion of a register. */
1576 if ((lcode == ZERO_EXTEND
1577 || (lcode == LSHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1578 && (rcode == ZERO_EXTEND
1579 || (rcode == LSHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1581 machine_mode lmode = GET_MODE (lhs);
1582 machine_mode rmode = GET_MODE (rhs);
1583 int bits;
1585 if (lcode == LSHIFTRT)
1586 /* Number of bits not shifted off the end. */
1587 bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
1588 else /* lcode == ZERO_EXTEND */
1589 /* Size of inner mode. */
1590 bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
1592 if (rcode == LSHIFTRT)
1593 bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
1594 else /* rcode == ZERO_EXTEND */
1595 bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
 1597 /* We can only widen multiplies if the result is mathematically
 1598 equivalent, i.e. if overflow was impossible. */
1599 if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
1600 return simplify_gen_binary
1601 (MULT, mode,
1602 simplify_gen_unary (ZERO_EXTEND, mode, lhs, lmode),
1603 simplify_gen_unary (ZERO_EXTEND, mode, rhs, rmode));
1607 /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1608 if (GET_CODE (op) == ZERO_EXTEND)
1609 return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0),
1610 GET_MODE (XEXP (op, 0)));
1612 /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
 1613 is (zero_extend:M (subreg:O <X>)) if there is a mode with
1614 GET_MODE_PRECISION (N) - I bits. */
1615 if (GET_CODE (op) == LSHIFTRT
1616 && GET_CODE (XEXP (op, 0)) == ASHIFT
1617 && is_a <scalar_int_mode> (mode, &int_mode)
1618 && CONST_INT_P (XEXP (op, 1))
1619 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1620 && (op_mode = as_a <scalar_int_mode> (GET_MODE (op)),
1621 GET_MODE_PRECISION (op_mode) > INTVAL (XEXP (op, 1))))
1623 scalar_int_mode tmode;
1624 if (int_mode_for_size (GET_MODE_PRECISION (op_mode)
1625 - INTVAL (XEXP (op, 1)), 1).exists (&tmode))
1627 rtx inner =
1628 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1629 if (inner)
1630 return simplify_gen_unary (ZERO_EXTEND, int_mode,
1631 inner, tmode);
1635 /* (zero_extend:M (subreg:N <X:O>)) is <X:O> (for M == O) or
1636 (zero_extend:M <X:O>), if X doesn't have any non-zero bits outside
1637 of mode N. E.g.
1638 (zero_extend:SI (subreg:QI (and:SI (reg:SI) (const_int 63)) 0)) is
1639 (and:SI (reg:SI) (const_int 63)). */
1640 if (partial_subreg_p (op)
1641 && is_a <scalar_int_mode> (mode, &int_mode)
1642 && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (op)), &op0_mode)
1643 && GET_MODE_PRECISION (op0_mode) <= HOST_BITS_PER_WIDE_INT
1644 && GET_MODE_PRECISION (int_mode) >= GET_MODE_PRECISION (op0_mode)
1645 && subreg_lowpart_p (op)
1646 && (nonzero_bits (SUBREG_REG (op), op0_mode)
1647 & ~GET_MODE_MASK (GET_MODE (op))) == 0)
1649 if (GET_MODE_PRECISION (int_mode) == GET_MODE_PRECISION (op0_mode))
1650 return SUBREG_REG (op);
1651 return simplify_gen_unary (ZERO_EXTEND, int_mode, SUBREG_REG (op),
1652 op0_mode);
1655 #if defined(POINTERS_EXTEND_UNSIGNED)
1656 /* As we do not know which address space the pointer is referring to,
1657 we can do this only if the target does not support different pointer
1658 or address modes depending on the address space. */
1659 if (target_default_pointer_address_modes_p ()
1660 && POINTERS_EXTEND_UNSIGNED > 0
1661 && mode == Pmode && GET_MODE (op) == ptr_mode
1662 && (CONSTANT_P (op)
1663 || (GET_CODE (op) == SUBREG
1664 && REG_P (SUBREG_REG (op))
1665 && REG_POINTER (SUBREG_REG (op))
1666 && GET_MODE (SUBREG_REG (op)) == Pmode))
1667 && !targetm.have_ptr_extend ())
1669 temp
1670 = convert_memory_address_addr_space_1 (Pmode, op,
1671 ADDR_SPACE_GENERIC, false,
1672 true);
1673 if (temp)
1674 return temp;
1676 #endif
1677 break;
1679 default:
1680 break;
1683 return 0;
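/* Examples (editorial sketch): simplify_unary_operation_1 (NEG, SImode,
   (neg:SI (reg:SI x))) returns (reg:SI x), and
   simplify_unary_operation_1 (NOT, SImode,
   (plus:SI (reg:SI x) (const_int -1))) becomes (neg:SI (reg:SI x)),
   per the NEG and NOT cases above.  */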
1686 /* Try to compute the value of a unary operation CODE whose output mode is to
1687 be MODE with input operand OP whose mode was originally OP_MODE.
1688 Return zero if the value cannot be computed. */
1690 simplify_const_unary_operation (enum rtx_code code, machine_mode mode,
1691 rtx op, machine_mode op_mode)
1693 scalar_int_mode result_mode;
1695 if (code == VEC_DUPLICATE)
1697 gcc_assert (VECTOR_MODE_P (mode));
1698 if (GET_MODE (op) != VOIDmode)
1700 if (!VECTOR_MODE_P (GET_MODE (op)))
1701 gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
1702 else
1703 gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
1704 (GET_MODE (op)));
1706 if (CONST_SCALAR_INT_P (op) || CONST_DOUBLE_AS_FLOAT_P (op)
1707 || GET_CODE (op) == CONST_VECTOR)
1709 int elt_size = GET_MODE_UNIT_SIZE (mode);
1710 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1711 rtvec v = rtvec_alloc (n_elts);
1712 unsigned int i;
1714 if (GET_CODE (op) != CONST_VECTOR)
1715 for (i = 0; i < n_elts; i++)
1716 RTVEC_ELT (v, i) = op;
1717 else
1719 machine_mode inmode = GET_MODE (op);
1720 int in_elt_size = GET_MODE_UNIT_SIZE (inmode);
1721 unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
1723 gcc_assert (in_n_elts < n_elts);
1724 gcc_assert ((n_elts % in_n_elts) == 0);
1725 for (i = 0; i < n_elts; i++)
1726 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
1728 return gen_rtx_CONST_VECTOR (mode, v);
1732 if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
1734 int elt_size = GET_MODE_UNIT_SIZE (mode);
1735 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1736 machine_mode opmode = GET_MODE (op);
1737 int op_elt_size = GET_MODE_UNIT_SIZE (opmode);
1738 unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
1739 rtvec v = rtvec_alloc (n_elts);
1740 unsigned int i;
1742 gcc_assert (op_n_elts == n_elts);
1743 for (i = 0; i < n_elts; i++)
1745 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
1746 CONST_VECTOR_ELT (op, i),
1747 GET_MODE_INNER (opmode));
1748 if (!x)
1749 return 0;
1750 RTVEC_ELT (v, i) = x;
1752 return gen_rtx_CONST_VECTOR (mode, v);
1755 /* The order of these tests is critical so that, for example, we don't
1756 check the wrong mode (input vs. output) for a conversion operation,
1757 such as FIX. At some point, this should be simplified. */
1759 if (code == FLOAT && CONST_SCALAR_INT_P (op))
1761 REAL_VALUE_TYPE d;
1763 if (op_mode == VOIDmode)
 1765 /* CONST_INTs have VOIDmode as their mode.  We assume that all
 1766 the bits of the constant are significant, though this is
 1767 a dangerous assumption: CONST_INTs are often
 1768 created and used with garbage in the bits outside of the
 1769 precision of their implied mode. */
1770 op_mode = MAX_MODE_INT;
1773 real_from_integer (&d, mode, rtx_mode_t (op, op_mode), SIGNED);
1775 /* Avoid the folding if flag_signaling_nans is on and
1776 operand is a signaling NaN. */
1777 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1778 return 0;
1780 d = real_value_truncate (mode, d);
1781 return const_double_from_real_value (d, mode);
1783 else if (code == UNSIGNED_FLOAT && CONST_SCALAR_INT_P (op))
1785 REAL_VALUE_TYPE d;
1787 if (op_mode == VOIDmode)
1789 /* CONST_INTs have VOIDmode as their mode. We assume that all
1790 the bits of the constant are significant, though this is
1791 a dangerous assumption: CONST_INTs are often created and
1792 used with garbage in the bits outside of the precision of
1793 the implied mode of the const_int. */
1794 op_mode = MAX_MODE_INT;
1797 real_from_integer (&d, mode, rtx_mode_t (op, op_mode), UNSIGNED);
1799 /* Avoid the folding if flag_signaling_nans is on and
1800 operand is a signaling NaN. */
1801 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1802 return 0;
1804 d = real_value_truncate (mode, d);
1805 return const_double_from_real_value (d, mode);
1808 if (CONST_SCALAR_INT_P (op) && is_a <scalar_int_mode> (mode, &result_mode))
1810 unsigned int width = GET_MODE_PRECISION (result_mode);
1811 wide_int result;
1812 scalar_int_mode imode = (op_mode == VOIDmode
1813 ? result_mode
1814 : as_a <scalar_int_mode> (op_mode));
1815 rtx_mode_t op0 = rtx_mode_t (op, imode);
1816 int int_value;
1818 #if TARGET_SUPPORTS_WIDE_INT == 0
1819 /* This assert keeps the simplification from producing a result
1820 that cannot be represented in a CONST_DOUBLE, but a lot of
1821 upstream callers expect that this function never fails to
1822 simplify something, so if this were added to the test above
1823 the code would die later anyway. If this assert fires, you
1824 just need to make the port support wide int. */
1825 gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT);
1826 #endif
1828 switch (code)
1830 case NOT:
1831 result = wi::bit_not (op0);
1832 break;
1834 case NEG:
1835 result = wi::neg (op0);
1836 break;
1838 case ABS:
1839 result = wi::abs (op0);
1840 break;
1842 case FFS:
1843 result = wi::shwi (wi::ffs (op0), result_mode);
1844 break;
1846 case CLZ:
1847 if (wi::ne_p (op0, 0))
1848 int_value = wi::clz (op0);
1849 else if (! CLZ_DEFINED_VALUE_AT_ZERO (imode, int_value))
1850 int_value = GET_MODE_PRECISION (imode);
1851 result = wi::shwi (int_value, result_mode);
1852 break;
1854 case CLRSB:
1855 result = wi::shwi (wi::clrsb (op0), result_mode);
1856 break;
1858 case CTZ:
1859 if (wi::ne_p (op0, 0))
1860 int_value = wi::ctz (op0);
1861 else if (! CTZ_DEFINED_VALUE_AT_ZERO (imode, int_value))
1862 int_value = GET_MODE_PRECISION (imode);
1863 result = wi::shwi (int_value, result_mode);
1864 break;
1866 case POPCOUNT:
1867 result = wi::shwi (wi::popcount (op0), result_mode);
1868 break;
1870 case PARITY:
1871 result = wi::shwi (wi::parity (op0), result_mode);
1872 break;
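/* E.g., the BSWAP case below folds 0x12345678 in SImode to 0x78563412. */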
1874 case BSWAP:
1875 result = wide_int (op0).bswap ();
1876 break;
1878 case TRUNCATE:
1879 case ZERO_EXTEND:
1880 result = wide_int::from (op0, width, UNSIGNED);
1881 break;
1883 case SIGN_EXTEND:
1884 result = wide_int::from (op0, width, SIGNED);
1885 break;
1887 case SQRT:
1888 default:
1889 return 0;
1892 return immed_wide_int_const (result, result_mode);
1895 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1896 && SCALAR_FLOAT_MODE_P (mode)
1897 && SCALAR_FLOAT_MODE_P (GET_MODE (op)))
1899 REAL_VALUE_TYPE d = *CONST_DOUBLE_REAL_VALUE (op);
1900 switch (code)
1902 case SQRT:
1903 return 0;
1904 case ABS:
1905 d = real_value_abs (&d);
1906 break;
1907 case NEG:
1908 d = real_value_negate (&d);
1909 break;
1910 case FLOAT_TRUNCATE:
1911 /* Don't perform the operation if flag_signaling_nans is on
1912 and the operand is a signaling NaN. */
1913 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1914 return NULL_RTX;
1915 d = real_value_truncate (mode, d);
1916 break;
1917 case FLOAT_EXTEND:
1918 /* Don't perform the operation if flag_signaling_nans is on
1919 and the operand is a signaling NaN. */
1920 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1921 return NULL_RTX;
1922 /* All this does is change the mode, unless the mode
1923 class changes. */
1924 if (GET_MODE_CLASS (mode) != GET_MODE_CLASS (GET_MODE (op)))
1925 real_convert (&d, mode, &d);
1926 break;
1927 case FIX:
1928 /* Don't perform the operation if flag_signaling_nans is on
1929 and the operand is a signaling NaN. */
1930 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1931 return NULL_RTX;
1932 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
1933 break;
1934 case NOT:
1936 long tmp[4];
1937 int i;
1939 real_to_target (tmp, &d, GET_MODE (op));
1940 for (i = 0; i < 4; i++)
1941 tmp[i] = ~tmp[i];
1942 real_from_target (&d, tmp, mode);
1943 break;
1945 default:
1946 gcc_unreachable ();
1948 return const_double_from_real_value (d, mode);
1950 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1951 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1952 && is_int_mode (mode, &result_mode))
1954 unsigned int width = GET_MODE_PRECISION (result_mode);
1955 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1956 operators are intentionally left unspecified (to ease implementation
1957 by target backends), for consistency, this routine implements the
1958 same semantics for constant folding as used by the middle-end. */
1960 /* This was formerly used only for non-IEEE float.
1961 eggert@twinsun.com says it is safe for IEEE also. */
1962 REAL_VALUE_TYPE t;
1963 const REAL_VALUE_TYPE *x = CONST_DOUBLE_REAL_VALUE (op);
1964 wide_int wmax, wmin;
1965 /* This is part of the ABI of real_to_integer, but we check
1966 things before making this call. */
1967 bool fail;
1969 switch (code)
1971 case FIX:
1972 if (REAL_VALUE_ISNAN (*x))
1973 return const0_rtx;
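/* E.g., (fix:SI 3.0e9) exceeds the signed SImode range, so the
   bound checks below saturate the result to 0x7fffffff. */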
1975 /* Test against the signed upper bound. */
1976 wmax = wi::max_value (width, SIGNED);
1977 real_from_integer (&t, VOIDmode, wmax, SIGNED);
1978 if (real_less (&t, x))
1979 return immed_wide_int_const (wmax, mode);
1981 /* Test against the signed lower bound. */
1982 wmin = wi::min_value (width, SIGNED);
1983 real_from_integer (&t, VOIDmode, wmin, SIGNED);
1984 if (real_less (x, &t))
1985 return immed_wide_int_const (wmin, mode);
1987 return immed_wide_int_const (real_to_integer (x, &fail, width),
1988 mode);
1990 case UNSIGNED_FIX:
1991 if (REAL_VALUE_ISNAN (*x) || REAL_VALUE_NEGATIVE (*x))
1992 return const0_rtx;
1994 /* Test against the unsigned upper bound. */
1995 wmax = wi::max_value (width, UNSIGNED);
1996 real_from_integer (&t, VOIDmode, wmax, UNSIGNED);
1997 if (real_less (&t, x))
1998 return immed_wide_int_const (wmax, mode);
2000 return immed_wide_int_const (real_to_integer (x, &fail, width),
2001 mode);
2003 default:
2004 gcc_unreachable ();
2008 return NULL_RTX;
2011 /* Subroutine of simplify_binary_operation to simplify a binary operation
2012 CODE that can commute with byte swapping, with result mode MODE and
2013 operating on OP0 and OP1. CODE is currently one of AND, IOR or XOR.
2014 Return zero if no simplification or canonicalization is possible. */
2016 static rtx
2017 simplify_byte_swapping_operation (enum rtx_code code, machine_mode mode,
2018 rtx op0, rtx op1)
2020 rtx tem;
2022 /* (op (bswap x) C1) -> (bswap (op x C2)) with C2 swapped. */
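/* E.g., in SImode, (and (bswap x) C) with C == 0xff becomes
   (bswap (and x C')) where C' == 0xff000000, i.e. C byte-swapped. */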
2023 if (GET_CODE (op0) == BSWAP && CONST_SCALAR_INT_P (op1))
2025 tem = simplify_gen_binary (code, mode, XEXP (op0, 0),
2026 simplify_gen_unary (BSWAP, mode, op1, mode));
2027 return simplify_gen_unary (BSWAP, mode, tem, mode);
2030 /* (op (bswap x) (bswap y)) -> (bswap (op x y)). */
2031 if (GET_CODE (op0) == BSWAP && GET_CODE (op1) == BSWAP)
2033 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), XEXP (op1, 0));
2034 return simplify_gen_unary (BSWAP, mode, tem, mode);
2037 return NULL_RTX;
2040 /* Subroutine of simplify_binary_operation to simplify a commutative,
2041 associative binary operation CODE with result mode MODE, operating
2042 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
2043 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
2044 canonicalization is possible. */
2046 static rtx
2047 simplify_associative_operation (enum rtx_code code, machine_mode mode,
2048 rtx op0, rtx op1)
2050 rtx tem;
2052 /* Linearize the operator to the left. */
2053 if (GET_CODE (op1) == code)
2055 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)". */
2056 if (GET_CODE (op0) == code)
2058 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
2059 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
2062 /* "a op (b op c)" becomes "(b op c) op a". */
2063 if (! swap_commutative_operands_p (op1, op0))
2064 return simplify_gen_binary (code, mode, op1, op0);
2066 std::swap (op0, op1);
2069 if (GET_CODE (op0) == code)
2071 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
2072 if (swap_commutative_operands_p (XEXP (op0, 1), op1))
2074 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
2075 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
2078 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
2079 tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
2080 if (tem != 0)
2081 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
2083 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
2084 tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
2085 if (tem != 0)
2086 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
2089 return 0;
2093 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
2094 and OP1. Return 0 if no simplification is possible.
2096 Don't use this for relational operations such as EQ or LT.
2097 Use simplify_relational_operation instead. */
2099 simplify_binary_operation (enum rtx_code code, machine_mode mode,
2100 rtx op0, rtx op1)
2102 rtx trueop0, trueop1;
2103 rtx tem;
2105 /* Relational operations don't work here. We must know the mode
2106 of the operands in order to do the comparison correctly.
2107 Assuming a full word can give incorrect results.
2108 Consider comparing 128 with -128 in QImode. */
2109 gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
2110 gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
2112 /* Make sure the constant is second. */
2113 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
2114 && swap_commutative_operands_p (op0, op1))
2115 std::swap (op0, op1);
2117 trueop0 = avoid_constant_pool_reference (op0);
2118 trueop1 = avoid_constant_pool_reference (op1);
2120 tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
2121 if (tem)
2122 return tem;
2123 tem = simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
2125 if (tem)
2126 return tem;
2128 /* If the above steps did not result in a simplification and op0 or op1
2129 were constant pool references, use the referenced constants directly. */
2130 if (trueop0 != op0 || trueop1 != op1)
2131 return simplify_gen_binary (code, mode, trueop0, trueop1);
2133 return NULL_RTX;
2136 /* Subroutine of simplify_binary_operation. Simplify a binary operation
2137 CODE with result mode MODE, operating on OP0 and OP1. If OP0 and/or
2138 OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
2139 actual constants. */
2141 static rtx
2142 simplify_binary_operation_1 (enum rtx_code code, machine_mode mode,
2143 rtx op0, rtx op1, rtx trueop0, rtx trueop1)
2145 rtx tem, reversed, opleft, opright;
2146 HOST_WIDE_INT val;
2147 unsigned int width = GET_MODE_PRECISION (mode);
2148 scalar_int_mode int_mode, inner_mode;
2150 /* Even if we can't compute a constant result,
2151 there are some cases worth simplifying. */
2153 switch (code)
2155 case PLUS:
2156 /* Maybe simplify x + 0 to x. The two expressions are equivalent
2157 when x is NaN, infinite, or finite and nonzero. They aren't
2158 when x is -0 and the rounding mode is not towards -infinity,
2159 since (-0) + 0 is then 0. */
2160 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
2161 return op0;
2163 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
2164 transformations are safe even for IEEE. */
2165 if (GET_CODE (op0) == NEG)
2166 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
2167 else if (GET_CODE (op1) == NEG)
2168 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
2170 /* (~a) + 1 -> -a */
2171 if (INTEGRAL_MODE_P (mode)
2172 && GET_CODE (op0) == NOT
2173 && trueop1 == const1_rtx)
2174 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
2176 /* Handle both-operands-constant cases. We can only add
2177 CONST_INTs to constants since the sum of relocatable symbols
2178 can't be handled by most assemblers. Don't add CONST_INT
2179 to CONST_INT since overflow won't be computed properly if wider
2180 than HOST_BITS_PER_WIDE_INT. */
2182 if ((GET_CODE (op0) == CONST
2183 || GET_CODE (op0) == SYMBOL_REF
2184 || GET_CODE (op0) == LABEL_REF)
2185 && CONST_INT_P (op1))
2186 return plus_constant (mode, op0, INTVAL (op1));
2187 else if ((GET_CODE (op1) == CONST
2188 || GET_CODE (op1) == SYMBOL_REF
2189 || GET_CODE (op1) == LABEL_REF)
2190 && CONST_INT_P (op0))
2191 return plus_constant (mode, op1, INTVAL (op0));
2193 /* See if this is something like X * C - X or vice versa or
2194 if the multiplication is written as a shift. If so, we can
2195 distribute and make a new multiply, shift, or maybe just
2196 have X (if C is 2 in the example above). But don't make
2197 something more expensive than we had before. */
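/* E.g., (plus (mult x 3) x) can become (mult x 4), and
   (plus (ashift x 2) x) can become (mult x 5), provided the new
   form is not more expensive than the original. */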
2199 if (is_a <scalar_int_mode> (mode, &int_mode))
2201 rtx lhs = op0, rhs = op1;
2203 wide_int coeff0 = wi::one (GET_MODE_PRECISION (int_mode));
2204 wide_int coeff1 = wi::one (GET_MODE_PRECISION (int_mode));
2206 if (GET_CODE (lhs) == NEG)
2208 coeff0 = wi::minus_one (GET_MODE_PRECISION (int_mode));
2209 lhs = XEXP (lhs, 0);
2211 else if (GET_CODE (lhs) == MULT
2212 && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
2214 coeff0 = rtx_mode_t (XEXP (lhs, 1), int_mode);
2215 lhs = XEXP (lhs, 0);
2217 else if (GET_CODE (lhs) == ASHIFT
2218 && CONST_INT_P (XEXP (lhs, 1))
2219 && INTVAL (XEXP (lhs, 1)) >= 0
2220 && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (int_mode))
2222 coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
2223 GET_MODE_PRECISION (int_mode));
2224 lhs = XEXP (lhs, 0);
2227 if (GET_CODE (rhs) == NEG)
2229 coeff1 = wi::minus_one (GET_MODE_PRECISION (int_mode));
2230 rhs = XEXP (rhs, 0);
2232 else if (GET_CODE (rhs) == MULT
2233 && CONST_INT_P (XEXP (rhs, 1)))
2235 coeff1 = rtx_mode_t (XEXP (rhs, 1), int_mode);
2236 rhs = XEXP (rhs, 0);
2238 else if (GET_CODE (rhs) == ASHIFT
2239 && CONST_INT_P (XEXP (rhs, 1))
2240 && INTVAL (XEXP (rhs, 1)) >= 0
2241 && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (int_mode))
2243 coeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
2244 GET_MODE_PRECISION (int_mode));
2245 rhs = XEXP (rhs, 0);
2248 if (rtx_equal_p (lhs, rhs))
2250 rtx orig = gen_rtx_PLUS (int_mode, op0, op1);
2251 rtx coeff;
2252 bool speed = optimize_function_for_speed_p (cfun);
2254 coeff = immed_wide_int_const (coeff0 + coeff1, int_mode);
2256 tem = simplify_gen_binary (MULT, int_mode, lhs, coeff);
2257 return (set_src_cost (tem, int_mode, speed)
2258 <= set_src_cost (orig, int_mode, speed) ? tem : 0);
2262 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
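/* E.g., in QImode, adding 0x80 simply toggles the sign bit (any carry
   falls off the top), so (plus (xor x C1) 0x80) equals
   (xor x (C1 ^ 0x80)). */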
2263 if (CONST_SCALAR_INT_P (op1)
2264 && GET_CODE (op0) == XOR
2265 && CONST_SCALAR_INT_P (XEXP (op0, 1))
2266 && mode_signbit_p (mode, op1))
2267 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2268 simplify_gen_binary (XOR, mode, op1,
2269 XEXP (op0, 1)));
2271 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
2272 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2273 && GET_CODE (op0) == MULT
2274 && GET_CODE (XEXP (op0, 0)) == NEG)
2276 rtx in1, in2;
2278 in1 = XEXP (XEXP (op0, 0), 0);
2279 in2 = XEXP (op0, 1);
2280 return simplify_gen_binary (MINUS, mode, op1,
2281 simplify_gen_binary (MULT, mode,
2282 in1, in2));
2285 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
2286 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
2287 is 1. */
2288 if (COMPARISON_P (op0)
2289 && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
2290 || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
2291 && (reversed = reversed_comparison (op0, mode)))
2292 return
2293 simplify_gen_unary (NEG, mode, reversed, mode);
2295 /* If one of the operands is a PLUS or a MINUS, see if we can
2296 simplify this by the associative law.
2297 Don't use the associative law for floating point.
2298 The inaccuracy makes it nonassociative,
2299 and subtle programs can break if operations are associated. */
2301 if (INTEGRAL_MODE_P (mode)
2302 && (plus_minus_operand_p (op0)
2303 || plus_minus_operand_p (op1))
2304 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2305 return tem;
2307 /* Reassociate floating point addition only when the user
2308 specifies associative math operations. */
2309 if (FLOAT_MODE_P (mode)
2310 && flag_associative_math)
2312 tem = simplify_associative_operation (code, mode, op0, op1);
2313 if (tem)
2314 return tem;
2316 break;
2318 case COMPARE:
2319 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
2320 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
2321 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
2322 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
2324 rtx xop00 = XEXP (op0, 0);
2325 rtx xop10 = XEXP (op1, 0);
2327 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
2328 return xop00;
2330 if (REG_P (xop00) && REG_P (xop10)
2331 && REGNO (xop00) == REGNO (xop10)
2332 && GET_MODE (xop00) == mode
2333 && GET_MODE (xop10) == mode
2334 && GET_MODE_CLASS (mode) == MODE_CC)
2335 return xop00;
2337 break;
2339 case MINUS:
2340 /* We can't assume x-x is 0 even with non-IEEE floating point,
2341 but since it is zero except in very strange circumstances, we
2342 will treat it as zero with -ffinite-math-only. */
2343 if (rtx_equal_p (trueop0, trueop1)
2344 && ! side_effects_p (op0)
2345 && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
2346 return CONST0_RTX (mode);
2348 /* Change subtraction from zero into negation. (0 - x) is the
2349 same as -x when x is NaN, infinite, or finite and nonzero.
2350 But if the mode has signed zeros, and does not round towards
2351 -infinity, then 0 - 0 is 0, not -0. */
2352 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
2353 return simplify_gen_unary (NEG, mode, op1, mode);
2355 /* (-1 - a) is ~a, unless the expression contains symbolic
2356 constants, in which case not retaining additions and
2357 subtractions could cause invalid assembly to be produced. */
2358 if (trueop0 == constm1_rtx
2359 && !contains_symbolic_reference_p (op1))
2360 return simplify_gen_unary (NOT, mode, op1, mode);
2362 /* Subtracting 0 has no effect unless the mode has signed zeros
2363 and supports rounding towards -infinity. In such a case,
2364 0 - 0 is -0. */
2365 if (!(HONOR_SIGNED_ZEROS (mode)
2366 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
2367 && trueop1 == CONST0_RTX (mode))
2368 return op0;
2370 /* See if this is something like X * C - X or vice versa or
2371 if the multiplication is written as a shift. If so, we can
2372 distribute and make a new multiply, shift, or maybe just
2373 have X (if C is 2 in the example above). But don't make
2374 something more expensive than we had before. */
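/* E.g., (minus (mult x 3) x) can become (mult x 2), subject to the
   same cost check as above. */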
2376 if (is_a <scalar_int_mode> (mode, &int_mode))
2378 rtx lhs = op0, rhs = op1;
2380 wide_int coeff0 = wi::one (GET_MODE_PRECISION (int_mode));
2381 wide_int negcoeff1 = wi::minus_one (GET_MODE_PRECISION (int_mode));
2383 if (GET_CODE (lhs) == NEG)
2385 coeff0 = wi::minus_one (GET_MODE_PRECISION (int_mode));
2386 lhs = XEXP (lhs, 0);
2388 else if (GET_CODE (lhs) == MULT
2389 && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
2391 coeff0 = rtx_mode_t (XEXP (lhs, 1), int_mode);
2392 lhs = XEXP (lhs, 0);
2394 else if (GET_CODE (lhs) == ASHIFT
2395 && CONST_INT_P (XEXP (lhs, 1))
2396 && INTVAL (XEXP (lhs, 1)) >= 0
2397 && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (int_mode))
2399 coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
2400 GET_MODE_PRECISION (int_mode));
2401 lhs = XEXP (lhs, 0);
2404 if (GET_CODE (rhs) == NEG)
2406 negcoeff1 = wi::one (GET_MODE_PRECISION (int_mode));
2407 rhs = XEXP (rhs, 0);
2409 else if (GET_CODE (rhs) == MULT
2410 && CONST_INT_P (XEXP (rhs, 1)))
2412 negcoeff1 = wi::neg (rtx_mode_t (XEXP (rhs, 1), int_mode));
2413 rhs = XEXP (rhs, 0);
2415 else if (GET_CODE (rhs) == ASHIFT
2416 && CONST_INT_P (XEXP (rhs, 1))
2417 && INTVAL (XEXP (rhs, 1)) >= 0
2418 && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (int_mode))
2420 negcoeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
2421 GET_MODE_PRECISION (int_mode));
2422 negcoeff1 = -negcoeff1;
2423 rhs = XEXP (rhs, 0);
2426 if (rtx_equal_p (lhs, rhs))
2428 rtx orig = gen_rtx_MINUS (int_mode, op0, op1);
2429 rtx coeff;
2430 bool speed = optimize_function_for_speed_p (cfun);
2432 coeff = immed_wide_int_const (coeff0 + negcoeff1, int_mode);
2434 tem = simplify_gen_binary (MULT, int_mode, lhs, coeff);
2435 return (set_src_cost (tem, int_mode, speed)
2436 <= set_src_cost (orig, int_mode, speed) ? tem : 0);
2440 /* (a - (-b)) -> (a + b). True even for IEEE. */
2441 if (GET_CODE (op1) == NEG)
2442 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
2444 /* (-x - c) may be simplified as (-c - x). */
2445 if (GET_CODE (op0) == NEG
2446 && (CONST_SCALAR_INT_P (op1) || CONST_DOUBLE_AS_FLOAT_P (op1)))
2448 tem = simplify_unary_operation (NEG, mode, op1, mode);
2449 if (tem)
2450 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
2453 /* Don't let a relocatable value get a negative coeff. */
2454 if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode)
2455 return simplify_gen_binary (PLUS, mode,
2456 op0,
2457 neg_const_int (mode, op1));
2459 /* (x - (x & y)) -> (x & ~y) */
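/* E.g., with x = 0b1100 and y = 0b1010: x & y = 0b1000 and
   x - 0b1000 = 0b0100, which is x & ~y. The subtraction cannot
   borrow because x & y only contains bits already set in x. */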
2460 if (INTEGRAL_MODE_P (mode) && GET_CODE (op1) == AND)
2462 if (rtx_equal_p (op0, XEXP (op1, 0)))
2464 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
2465 GET_MODE (XEXP (op1, 1)));
2466 return simplify_gen_binary (AND, mode, op0, tem);
2468 if (rtx_equal_p (op0, XEXP (op1, 1)))
2470 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
2471 GET_MODE (XEXP (op1, 0)));
2472 return simplify_gen_binary (AND, mode, op0, tem);
2476 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
2477 by reversing the comparison code if valid. */
2478 if (STORE_FLAG_VALUE == 1
2479 && trueop0 == const1_rtx
2480 && COMPARISON_P (op1)
2481 && (reversed = reversed_comparison (op1, mode)))
2482 return reversed;
2484 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
2485 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2486 && GET_CODE (op1) == MULT
2487 && GET_CODE (XEXP (op1, 0)) == NEG)
2489 rtx in1, in2;
2491 in1 = XEXP (XEXP (op1, 0), 0);
2492 in2 = XEXP (op1, 1);
2493 return simplify_gen_binary (PLUS, mode,
2494 simplify_gen_binary (MULT, mode,
2495 in1, in2),
2496 op0);
2499 /* Canonicalize (minus (neg A) (mult B C)) to
2500 (minus (mult (neg B) C) A). */
2501 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2502 && GET_CODE (op1) == MULT
2503 && GET_CODE (op0) == NEG)
2505 rtx in1, in2;
2507 in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
2508 in2 = XEXP (op1, 1);
2509 return simplify_gen_binary (MINUS, mode,
2510 simplify_gen_binary (MULT, mode,
2511 in1, in2),
2512 XEXP (op0, 0));
2515 /* If one of the operands is a PLUS or a MINUS, see if we can
2516 simplify this by the associative law. This will, for example,
2517 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
2518 Don't use the associative law for floating point.
2519 The inaccuracy makes it nonassociative,
2520 and subtle programs can break if operations are associated. */
2522 if (INTEGRAL_MODE_P (mode)
2523 && (plus_minus_operand_p (op0)
2524 || plus_minus_operand_p (op1))
2525 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2526 return tem;
2527 break;
2529 case MULT:
2530 if (trueop1 == constm1_rtx)
2531 return simplify_gen_unary (NEG, mode, op0, mode);
2533 if (GET_CODE (op0) == NEG)
2535 rtx temp = simplify_unary_operation (NEG, mode, op1, mode);
2536 /* If op1 is a MULT as well and simplify_unary_operation
2537 just moved the NEG to the second operand, simplify_gen_binary
2538 below could, through simplify_associative_operation, move
2539 the NEG around again and recurse endlessly. */
2540 if (temp
2541 && GET_CODE (op1) == MULT
2542 && GET_CODE (temp) == MULT
2543 && XEXP (op1, 0) == XEXP (temp, 0)
2544 && GET_CODE (XEXP (temp, 1)) == NEG
2545 && XEXP (op1, 1) == XEXP (XEXP (temp, 1), 0))
2546 temp = NULL_RTX;
2547 if (temp)
2548 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), temp);
2550 if (GET_CODE (op1) == NEG)
2552 rtx temp = simplify_unary_operation (NEG, mode, op0, mode);
2553 /* If op0 is a MULT as well and simplify_unary_operation
2554 just moved the NEG to the second operand, simplify_gen_binary
2555 below could, through simplify_associative_operation, move
2556 the NEG around again and recurse endlessly. */
2557 if (temp
2558 && GET_CODE (op0) == MULT
2559 && GET_CODE (temp) == MULT
2560 && XEXP (op0, 0) == XEXP (temp, 0)
2561 && GET_CODE (XEXP (temp, 1)) == NEG
2562 && XEXP (op0, 1) == XEXP (XEXP (temp, 1), 0))
2563 temp = NULL_RTX;
2564 if (temp)
2565 return simplify_gen_binary (MULT, mode, temp, XEXP (op1, 0));
2568 /* Maybe simplify x * 0 to 0. The reduction is not valid if
2569 x is NaN, since x * 0 is then also NaN. Nor is it valid
2570 when the mode has signed zeros, since multiplying a negative
2571 number by 0 will give -0, not 0. */
2572 if (!HONOR_NANS (mode)
2573 && !HONOR_SIGNED_ZEROS (mode)
2574 && trueop1 == CONST0_RTX (mode)
2575 && ! side_effects_p (op0))
2576 return op1;
2578 /* In IEEE floating point, x*1 is not equivalent to x for
2579 signalling NaNs. */
2580 if (!HONOR_SNANS (mode)
2581 && trueop1 == CONST1_RTX (mode))
2582 return op0;
2584 /* Convert multiply by constant power of two into shift. */
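/* E.g., (mult x (const_int 8)) becomes (ashift x (const_int 3)). */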
2585 if (CONST_SCALAR_INT_P (trueop1))
2587 val = wi::exact_log2 (rtx_mode_t (trueop1, mode));
2588 if (val >= 0)
2589 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
2592 /* x*2 is x+x and x*(-1) is -x */
2593 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
2594 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
2595 && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
2596 && GET_MODE (op0) == mode)
2598 const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);
2600 if (real_equal (d1, &dconst2))
2601 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
2603 if (!HONOR_SNANS (mode)
2604 && real_equal (d1, &dconstm1))
2605 return simplify_gen_unary (NEG, mode, op0, mode);
2608 /* Optimize -x * -x as x * x. */
2609 if (FLOAT_MODE_P (mode)
2610 && GET_CODE (op0) == NEG
2611 && GET_CODE (op1) == NEG
2612 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2613 && !side_effects_p (XEXP (op0, 0)))
2614 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2616 /* Likewise, optimize abs(x) * abs(x) as x * x. */
2617 if (SCALAR_FLOAT_MODE_P (mode)
2618 && GET_CODE (op0) == ABS
2619 && GET_CODE (op1) == ABS
2620 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2621 && !side_effects_p (XEXP (op0, 0)))
2622 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2624 /* Reassociate multiplication, but for floating point MULTs
2625 only when the user specifies unsafe math optimizations. */
2626 if (! FLOAT_MODE_P (mode)
2627 || flag_unsafe_math_optimizations)
2629 tem = simplify_associative_operation (code, mode, op0, op1);
2630 if (tem)
2631 return tem;
2633 break;
2635 case IOR:
2636 if (trueop1 == CONST0_RTX (mode))
2637 return op0;
2638 if (INTEGRAL_MODE_P (mode)
2639 && trueop1 == CONSTM1_RTX (mode)
2640 && !side_effects_p (op0))
2641 return op1;
2642 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2643 return op0;
2644 /* A | (~A) -> -1 */
2645 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2646 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2647 && ! side_effects_p (op0)
2648 && SCALAR_INT_MODE_P (mode))
2649 return constm1_rtx;
2651 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
2652 if (CONST_INT_P (op1)
2653 && HWI_COMPUTABLE_MODE_P (mode)
2654 && (nonzero_bits (op0, mode) & ~UINTVAL (op1)) == 0
2655 && !side_effects_p (op0))
2656 return op1;
2658 /* Canonicalize (X & C1) | C2. */
2659 if (GET_CODE (op0) == AND
2660 && CONST_INT_P (trueop1)
2661 && CONST_INT_P (XEXP (op0, 1)))
2663 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
2664 HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
2665 HOST_WIDE_INT c2 = INTVAL (trueop1);
2667 /* If (C1&C2) == C1, then (X&C1)|C2 becomes C2. */
2668 if ((c1 & c2) == c1
2669 && !side_effects_p (XEXP (op0, 0)))
2670 return trueop1;
2672 /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
2673 if (((c1|c2) & mask) == mask)
2674 return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);
2677 /* Convert (A & B) | A to A. */
2678 if (GET_CODE (op0) == AND
2679 && (rtx_equal_p (XEXP (op0, 0), op1)
2680 || rtx_equal_p (XEXP (op0, 1), op1))
2681 && ! side_effects_p (XEXP (op0, 0))
2682 && ! side_effects_p (XEXP (op0, 1)))
2683 return op1;
2685 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2686 mode size to (rotate A CX). */
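/* E.g., in SImode, (ior (ashift a (const_int 8)) (lshiftrt a (const_int 24)))
   becomes (rotate a (const_int 8)), since 8 + 24 == 32. */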
2688 if (GET_CODE (op1) == ASHIFT
2689 || GET_CODE (op1) == SUBREG)
2691 opleft = op1;
2692 opright = op0;
2694 else
2696 opright = op1;
2697 opleft = op0;
2700 if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
2701 && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
2702 && CONST_INT_P (XEXP (opleft, 1))
2703 && CONST_INT_P (XEXP (opright, 1))
2704 && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
2705 == GET_MODE_PRECISION (mode)))
2706 return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
2708 /* Same, but for ashift that has been "simplified" to a wider mode
2709 by simplify_shift_const. */
2711 if (GET_CODE (opleft) == SUBREG
2712 && is_a <scalar_int_mode> (mode, &int_mode)
2713 && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (opleft)),
2714 &inner_mode)
2715 && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
2716 && GET_CODE (opright) == LSHIFTRT
2717 && GET_CODE (XEXP (opright, 0)) == SUBREG
2718 && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
2719 && GET_MODE_SIZE (int_mode) < GET_MODE_SIZE (inner_mode)
2720 && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
2721 SUBREG_REG (XEXP (opright, 0)))
2722 && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
2723 && CONST_INT_P (XEXP (opright, 1))
2724 && (INTVAL (XEXP (SUBREG_REG (opleft), 1))
2725 + INTVAL (XEXP (opright, 1))
2726 == GET_MODE_PRECISION (int_mode)))
2727 return gen_rtx_ROTATE (int_mode, XEXP (opright, 0),
2728 XEXP (SUBREG_REG (opleft), 1));
2730 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2731 a (sign_extend (plus ...)). If OP1 is a CONST_INT and
2732 the PLUS does not affect any of the bits in OP1, we can do
2733 the IOR as a PLUS and then associate. This is valid if OP1
2734 can be safely shifted left C bits. */
2735 if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
2736 && GET_CODE (XEXP (op0, 0)) == PLUS
2737 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
2738 && CONST_INT_P (XEXP (op0, 1))
2739 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
2741 int count = INTVAL (XEXP (op0, 1));
2742 HOST_WIDE_INT mask = UINTVAL (trueop1) << count;
2744 if (mask >> count == INTVAL (trueop1)
2745 && trunc_int_for_mode (mask, mode) == mask
2746 && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
2747 return simplify_gen_binary (ASHIFTRT, mode,
2748 plus_constant (mode, XEXP (op0, 0),
2749 mask),
2750 XEXP (op0, 1));
2753 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
2754 if (tem)
2755 return tem;
2757 tem = simplify_associative_operation (code, mode, op0, op1);
2758 if (tem)
2759 return tem;
2760 break;
2762 case XOR:
2763 if (trueop1 == CONST0_RTX (mode))
2764 return op0;
2765 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2766 return simplify_gen_unary (NOT, mode, op0, mode);
2767 if (rtx_equal_p (trueop0, trueop1)
2768 && ! side_effects_p (op0)
2769 && GET_MODE_CLASS (mode) != MODE_CC)
2770 return CONST0_RTX (mode);
2772 /* Canonicalize XOR of the most significant bit to PLUS. */
2773 if (CONST_SCALAR_INT_P (op1)
2774 && mode_signbit_p (mode, op1))
2775 return simplify_gen_binary (PLUS, mode, op0, op1);
2776 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2777 if (CONST_SCALAR_INT_P (op1)
2778 && GET_CODE (op0) == PLUS
2779 && CONST_SCALAR_INT_P (XEXP (op0, 1))
2780 && mode_signbit_p (mode, XEXP (op0, 1)))
2781 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2782 simplify_gen_binary (XOR, mode, op1,
2783 XEXP (op0, 1)));
2785 /* If we are XORing two things that have no bits in common,
2786 convert them into an IOR. This helps to detect rotation encoded
2787 using those methods and possibly other simplifications. */
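/* E.g., if nonzero_bits shows op0 can only have bits in the low byte
   and op1 only in the higher bytes, every result bit comes from exactly
   one operand, so XOR and IOR compute the same value. */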
2789 if (HWI_COMPUTABLE_MODE_P (mode)
2790 && (nonzero_bits (op0, mode)
2791 & nonzero_bits (op1, mode)) == 0)
2792 return (simplify_gen_binary (IOR, mode, op0, op1));
2794 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2795 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2796 (NOT y). */
2798 int num_negated = 0;
2800 if (GET_CODE (op0) == NOT)
2801 num_negated++, op0 = XEXP (op0, 0);
2802 if (GET_CODE (op1) == NOT)
2803 num_negated++, op1 = XEXP (op1, 0);
2805 if (num_negated == 2)
2806 return simplify_gen_binary (XOR, mode, op0, op1);
2807 else if (num_negated == 1)
2808 return simplify_gen_unary (NOT, mode,
2809 simplify_gen_binary (XOR, mode, op0, op1),
2810 mode);
2813 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2814 correspond to a machine insn or result in further simplifications
2815 if B is a constant. */
2817 if (GET_CODE (op0) == AND
2818 && rtx_equal_p (XEXP (op0, 1), op1)
2819 && ! side_effects_p (op1))
2820 return simplify_gen_binary (AND, mode,
2821 simplify_gen_unary (NOT, mode,
2822 XEXP (op0, 0), mode),
2823 op1);
2825 else if (GET_CODE (op0) == AND
2826 && rtx_equal_p (XEXP (op0, 0), op1)
2827 && ! side_effects_p (op1))
2828 return simplify_gen_binary (AND, mode,
2829 simplify_gen_unary (NOT, mode,
2830 XEXP (op0, 1), mode),
2831 op1);
2833 /* Given (xor (ior (xor A B) C) D), where B, C and D are
2834 constants, simplify to (xor (ior A C) (B&~C)^D), canceling
2835 out bits inverted twice and not set by C. Similarly, given
2836 (xor (and (xor A B) C) D), simplify without inverting C in
2837 the xor operand: (xor (and A C) (B&C)^D). */
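/* A quick bitwise check of the IOR form: where a bit of C is 1, both
   sides reduce to 1 ^ D; where it is 0, both reduce to A ^ B ^ D. */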
2839 else if ((GET_CODE (op0) == IOR || GET_CODE (op0) == AND)
2840 && GET_CODE (XEXP (op0, 0)) == XOR
2841 && CONST_INT_P (op1)
2842 && CONST_INT_P (XEXP (op0, 1))
2843 && CONST_INT_P (XEXP (XEXP (op0, 0), 1)))
2845 enum rtx_code op = GET_CODE (op0);
2846 rtx a = XEXP (XEXP (op0, 0), 0);
2847 rtx b = XEXP (XEXP (op0, 0), 1);
2848 rtx c = XEXP (op0, 1);
2849 rtx d = op1;
2850 HOST_WIDE_INT bval = INTVAL (b);
2851 HOST_WIDE_INT cval = INTVAL (c);
2852 HOST_WIDE_INT dval = INTVAL (d);
2853 HOST_WIDE_INT xcval;
2855 if (op == IOR)
2856 xcval = ~cval;
2857 else
2858 xcval = cval;
2860 return simplify_gen_binary (XOR, mode,
2861 simplify_gen_binary (op, mode, a, c),
2862 gen_int_mode ((bval & xcval) ^ dval,
2863 mode));
2866 /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
2867 we can transform like this:
2868 (A&B)^C == ~(A&B)&C | ~C&(A&B)
2869 == (~A|~B)&C | ~C&(A&B) * DeMorgan's Law
2870 == ~A&C | ~B&C | A&(~C&B) * Distribute and re-order
2871 Attempt a few simplifications when B and C are both constants. */
2872 if (GET_CODE (op0) == AND
2873 && CONST_INT_P (op1)
2874 && CONST_INT_P (XEXP (op0, 1)))
2876 rtx a = XEXP (op0, 0);
2877 rtx b = XEXP (op0, 1);
2878 rtx c = op1;
2879 HOST_WIDE_INT bval = INTVAL (b);
2880 HOST_WIDE_INT cval = INTVAL (c);
2882 /* Instead of computing ~A&C, we compute its negated value,
2883 ~(A|~C). If it yields -1, ~A&C is zero, so we can
2884 optimize for sure. If it does not simplify, we still try
2885 to compute ~A&C below, but since that always allocates
2886 RTL, we don't try that before committing to returning a
2887 simplified expression. */
2888 rtx n_na_c = simplify_binary_operation (IOR, mode, a,
2889 GEN_INT (~cval));
2891 if ((~cval & bval) == 0)
2893 rtx na_c = NULL_RTX;
2894 if (n_na_c)
2895 na_c = simplify_gen_unary (NOT, mode, n_na_c, mode);
2896 else
2898 /* If ~A does not simplify, don't bother: we don't
2899 want to simplify 2 operations into 3, and if na_c
2900 were to simplify with na, n_na_c would have
2901 simplified as well. */
2902 rtx na = simplify_unary_operation (NOT, mode, a, mode);
2903 if (na)
2904 na_c = simplify_gen_binary (AND, mode, na, c);
2907 /* Try to simplify ~A&C | ~B&C. */
2908 if (na_c != NULL_RTX)
2909 return simplify_gen_binary (IOR, mode, na_c,
2910 gen_int_mode (~bval & cval, mode));
2912 else
2914 /* If ~A&C is zero, simplify A&(~C&B) | ~B&C. */
2915 if (n_na_c == CONSTM1_RTX (mode))
2917 rtx a_nc_b = simplify_gen_binary (AND, mode, a,
2918 gen_int_mode (~cval & bval,
2919 mode));
2920 return simplify_gen_binary (IOR, mode, a_nc_b,
2921 gen_int_mode (~bval & cval,
2922 mode));
2927 /* If we have (xor (and (xor A B) C) A) with C a constant, we can instead
2928 do (ior (and A ~C) (and B C)), which is a machine instruction on some
2929 machines and also has a shorter instruction path length. */
2930 if (GET_CODE (op0) == AND
2931 && GET_CODE (XEXP (op0, 0)) == XOR
2932 && CONST_INT_P (XEXP (op0, 1))
2933 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), trueop1))
2935 rtx a = trueop1;
2936 rtx b = XEXP (XEXP (op0, 0), 1);
2937 rtx c = XEXP (op0, 1);
2938 rtx nc = simplify_gen_unary (NOT, mode, c, mode);
2939 rtx a_nc = simplify_gen_binary (AND, mode, a, nc);
2940 rtx bc = simplify_gen_binary (AND, mode, b, c);
2941 return simplify_gen_binary (IOR, mode, a_nc, bc);
2943 /* Similarly, (xor (and (xor A B) C) B) as (ior (and A C) (and B ~C)) */
2944 else if (GET_CODE (op0) == AND
2945 && GET_CODE (XEXP (op0, 0)) == XOR
2946 && CONST_INT_P (XEXP (op0, 1))
2947 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), trueop1))
2949 rtx a = XEXP (XEXP (op0, 0), 0);
2950 rtx b = trueop1;
2951 rtx c = XEXP (op0, 1);
2952 rtx nc = simplify_gen_unary (NOT, mode, c, mode);
2953 rtx b_nc = simplify_gen_binary (AND, mode, b, nc);
2954 rtx ac = simplify_gen_binary (AND, mode, a, c);
2955 return simplify_gen_binary (IOR, mode, ac, b_nc);
2958 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2959 comparison if STORE_FLAG_VALUE is 1. */
2960 if (STORE_FLAG_VALUE == 1
2961 && trueop1 == const1_rtx
2962 && COMPARISON_P (op0)
2963 && (reversed = reversed_comparison (op0, mode)))
2964 return reversed;
2966 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2967 is (lt foo (const_int 0)), so we can perform the above
2968 simplification if STORE_FLAG_VALUE is 1. */
2970 if (is_a <scalar_int_mode> (mode, &int_mode)
2971 && STORE_FLAG_VALUE == 1
2972 && trueop1 == const1_rtx
2973 && GET_CODE (op0) == LSHIFTRT
2974 && CONST_INT_P (XEXP (op0, 1))
2975 && INTVAL (XEXP (op0, 1)) == GET_MODE_PRECISION (int_mode) - 1)
2976 return gen_rtx_GE (int_mode, XEXP (op0, 0), const0_rtx);
2978 /* (xor (comparison foo bar) (const_int sign-bit))
2979 when STORE_FLAG_VALUE is the sign bit. */
2980 if (is_a <scalar_int_mode> (mode, &int_mode)
2981 && val_signbit_p (int_mode, STORE_FLAG_VALUE)
2982 && trueop1 == const_true_rtx
2983 && COMPARISON_P (op0)
2984 && (reversed = reversed_comparison (op0, int_mode)))
2985 return reversed;
2987 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
2988 if (tem)
2989 return tem;
2991 tem = simplify_associative_operation (code, mode, op0, op1);
2992 if (tem)
2993 return tem;
2994 break;
2996 case AND:
2997 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2998 return trueop1;
2999 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
3000 return op0;
3001 if (HWI_COMPUTABLE_MODE_P (mode))
3003 HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
3004 HOST_WIDE_INT nzop1;
3005 if (CONST_INT_P (trueop1))
3007 HOST_WIDE_INT val1 = INTVAL (trueop1);
3008 /* If we are turning off bits already known off in OP0, we need
3009 not do an AND. */
3010 if ((nzop0 & ~val1) == 0)
3011 return op0;
3013 nzop1 = nonzero_bits (trueop1, mode);
3014 /* If we are clearing all the nonzero bits, the result is zero. */
3015 if ((nzop1 & nzop0) == 0
3016 && !side_effects_p (op0) && !side_effects_p (op1))
3017 return CONST0_RTX (mode);
3019 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
3020 && GET_MODE_CLASS (mode) != MODE_CC)
3021 return op0;
3022 /* A & (~A) -> 0 */
3023 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
3024 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
3025 && ! side_effects_p (op0)
3026 && GET_MODE_CLASS (mode) != MODE_CC)
3027 return CONST0_RTX (mode);
3029 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
3030 there are no nonzero bits of C outside of X's mode. */
3031 if ((GET_CODE (op0) == SIGN_EXTEND
3032 || GET_CODE (op0) == ZERO_EXTEND)
3033 && CONST_INT_P (trueop1)
3034 && HWI_COMPUTABLE_MODE_P (mode)
3035 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
3036 & UINTVAL (trueop1)) == 0)
3038 machine_mode imode = GET_MODE (XEXP (op0, 0));
3039 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
3040 gen_int_mode (INTVAL (trueop1),
3041 imode));
3042 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
3045 /* Transform (and (truncate X) C) into (truncate (and X C)). This way
3046 we might be able to further simplify the AND with X and potentially
3047 remove the truncation altogether. */
3048 if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
3050 rtx x = XEXP (op0, 0);
3051 machine_mode xmode = GET_MODE (x);
3052 tem = simplify_gen_binary (AND, xmode, x,
3053 gen_int_mode (INTVAL (trueop1), xmode));
3054 return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
3057 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
3058 if (GET_CODE (op0) == IOR
3059 && CONST_INT_P (trueop1)
3060 && CONST_INT_P (XEXP (op0, 1)))
3062 HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
3063 return simplify_gen_binary (IOR, mode,
3064 simplify_gen_binary (AND, mode,
3065 XEXP (op0, 0), op1),
3066 gen_int_mode (tmp, mode));
3069 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
3070 insn (and may simplify more). */
3071 if (GET_CODE (op0) == XOR
3072 && rtx_equal_p (XEXP (op0, 0), op1)
3073 && ! side_effects_p (op1))
3074 return simplify_gen_binary (AND, mode,
3075 simplify_gen_unary (NOT, mode,
3076 XEXP (op0, 1), mode),
3077 op1);
3079 if (GET_CODE (op0) == XOR
3080 && rtx_equal_p (XEXP (op0, 1), op1)
3081 && ! side_effects_p (op1))
3082 return simplify_gen_binary (AND, mode,
3083 simplify_gen_unary (NOT, mode,
3084 XEXP (op0, 0), mode),
3085 op1);
3087 /* Similarly for (~(A ^ B)) & A. */
3088 if (GET_CODE (op0) == NOT
3089 && GET_CODE (XEXP (op0, 0)) == XOR
3090 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
3091 && ! side_effects_p (op1))
3092 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
3094 if (GET_CODE (op0) == NOT
3095 && GET_CODE (XEXP (op0, 0)) == XOR
3096 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
3097 && ! side_effects_p (op1))
3098 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
3100 /* Convert (A | B) & A to A. */
3101 if (GET_CODE (op0) == IOR
3102 && (rtx_equal_p (XEXP (op0, 0), op1)
3103 || rtx_equal_p (XEXP (op0, 1), op1))
3104 && ! side_effects_p (XEXP (op0, 0))
3105 && ! side_effects_p (XEXP (op0, 1)))
3106 return op1;
3108 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
3109 ((A & N) + B) & M -> (A + B) & M
3110 Similarly if (N & M) == 0,
3111 ((A | N) + B) & M -> (A + B) & M
3112 and for - instead of + and/or ^ instead of |.
3113 Also, if (N & M) == 0, then
3114 (A +- N) & M -> A & M. */
3115 if (CONST_INT_P (trueop1)
3116 && HWI_COMPUTABLE_MODE_P (mode)
3117 && ~UINTVAL (trueop1)
3118 && (UINTVAL (trueop1) & (UINTVAL (trueop1) + 1)) == 0
3119 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
3121 rtx pmop[2];
3122 int which;
3124 pmop[0] = XEXP (op0, 0);
3125 pmop[1] = XEXP (op0, 1);
3127 if (CONST_INT_P (pmop[1])
3128 && (UINTVAL (pmop[1]) & UINTVAL (trueop1)) == 0)
3129 return simplify_gen_binary (AND, mode, pmop[0], op1);
3131 for (which = 0; which < 2; which++)
3133 tem = pmop[which];
3134 switch (GET_CODE (tem))
3136 case AND:
3137 if (CONST_INT_P (XEXP (tem, 1))
3138 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1))
3139 == UINTVAL (trueop1))
3140 pmop[which] = XEXP (tem, 0);
3141 break;
3142 case IOR:
3143 case XOR:
3144 if (CONST_INT_P (XEXP (tem, 1))
3145 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1)) == 0)
3146 pmop[which] = XEXP (tem, 0);
3147 break;
3148 default:
3149 break;
3153 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
3155 tem = simplify_gen_binary (GET_CODE (op0), mode,
3156 pmop[0], pmop[1]);
3157 return simplify_gen_binary (code, mode, tem, op1);
3161 /* (and X (ior (not X) Y) -> (and X Y) */
3162 if (GET_CODE (op1) == IOR
3163 && GET_CODE (XEXP (op1, 0)) == NOT
3164 && rtx_equal_p (op0, XEXP (XEXP (op1, 0), 0)))
3165 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));
3167 /* (and (ior (not X) Y) X) -> (and X Y) */
3168 if (GET_CODE (op0) == IOR
3169 && GET_CODE (XEXP (op0, 0)) == NOT
3170 && rtx_equal_p (op1, XEXP (XEXP (op0, 0), 0)))
3171 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));
3173 /* (and X (ior Y (not X)) -> (and X Y) */
3174 if (GET_CODE (op1) == IOR
3175 && GET_CODE (XEXP (op1, 1)) == NOT
3176 && rtx_equal_p (op0, XEXP (XEXP (op1, 1), 0)))
3177 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 0));
3179 /* (and (ior Y (not X)) X) -> (and X Y) */
3180 if (GET_CODE (op0) == IOR
3181 && GET_CODE (XEXP (op0, 1)) == NOT
3182 && rtx_equal_p (op1, XEXP (XEXP (op0, 1), 0)))
3183 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 0));
3185 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
3186 if (tem)
3187 return tem;
3189 tem = simplify_associative_operation (code, mode, op0, op1);
3190 if (tem)
3191 return tem;
3192 break;
3194 case UDIV:
3195 /* 0/x is 0 (or x&0 if x has side-effects). */
3196 if (trueop0 == CONST0_RTX (mode)
3197 && !cfun->can_throw_non_call_exceptions)
3199 if (side_effects_p (op1))
3200 return simplify_gen_binary (AND, mode, op1, trueop0);
3201 return trueop0;
3203 /* x/1 is x. */
3204 if (trueop1 == CONST1_RTX (mode))
3206 tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3207 if (tem)
3208 return tem;
3210 /* Convert divide by power of two into shift. */
3211 if (CONST_INT_P (trueop1)
3212 && (val = exact_log2 (UINTVAL (trueop1))) > 0)
3213 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
3214 break;
3216 case DIV:
3217 /* Handle floating point and integers separately. */
3218 if (SCALAR_FLOAT_MODE_P (mode))
3220 /* Maybe change 0.0 / x to 0.0. This transformation isn't
3221 safe for modes with NaNs, since 0.0 / 0.0 will then be
3222 NaN rather than 0.0. Nor is it safe for modes with signed
3223 zeros, since dividing 0 by a negative number gives -0.0 */
3224 if (trueop0 == CONST0_RTX (mode)
3225 && !HONOR_NANS (mode)
3226 && !HONOR_SIGNED_ZEROS (mode)
3227 && ! side_effects_p (op1))
3228 return op0;
3229 /* x/1.0 is x. */
3230 if (trueop1 == CONST1_RTX (mode)
3231 && !HONOR_SNANS (mode))
3232 return op0;
3234 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
3235 && trueop1 != CONST0_RTX (mode))
3237 const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);
3239 /* x/-1.0 is -x. */
3240 if (real_equal (d1, &dconstm1)
3241 && !HONOR_SNANS (mode))
3242 return simplify_gen_unary (NEG, mode, op0, mode);
3244 /* Change FP division by a constant into multiplication.
3245 Only do this with -freciprocal-math. */
3246 if (flag_reciprocal_math
3247 && !real_equal (d1, &dconst0))
3249 REAL_VALUE_TYPE d;
3250 real_arithmetic (&d, RDIV_EXPR, &dconst1, d1);
3251 tem = const_double_from_real_value (d, mode);
3252 return simplify_gen_binary (MULT, mode, op0, tem);
3256 else if (SCALAR_INT_MODE_P (mode))
3258 /* 0/x is 0 (or x&0 if x has side-effects). */
3259 if (trueop0 == CONST0_RTX (mode)
3260 && !cfun->can_throw_non_call_exceptions)
3262 if (side_effects_p (op1))
3263 return simplify_gen_binary (AND, mode, op1, trueop0);
3264 return trueop0;
3266 /* x/1 is x. */
3267 if (trueop1 == CONST1_RTX (mode))
3269 tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3270 if (tem)
3271 return tem;
3273 /* x/-1 is -x. */
3274 if (trueop1 == constm1_rtx)
3276 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3277 if (x)
3278 return simplify_gen_unary (NEG, mode, x, mode);
3281 break;
3283 case UMOD:
3284 /* 0%x is 0 (or x&0 if x has side-effects). */
3285 if (trueop0 == CONST0_RTX (mode))
3287 if (side_effects_p (op1))
3288 return simplify_gen_binary (AND, mode, op1, trueop0);
3289 return trueop0;
3291 /* x%1 is 0 (or x&0 if x has side-effects). */
3292 if (trueop1 == CONST1_RTX (mode))
3294 if (side_effects_p (op0))
3295 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3296 return CONST0_RTX (mode);
3298 /* Implement modulus by power of two as AND. */
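/* E.g., (umod x (const_int 8)) becomes (and x (const_int 7)). */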
3299 if (CONST_INT_P (trueop1)
3300 && exact_log2 (UINTVAL (trueop1)) > 0)
3301 return simplify_gen_binary (AND, mode, op0,
3302 gen_int_mode (INTVAL (op1) - 1, mode));
3303 break;
3305 case MOD:
3306 /* 0%x is 0 (or x&0 if x has side-effects). */
3307 if (trueop0 == CONST0_RTX (mode))
3309 if (side_effects_p (op1))
3310 return simplify_gen_binary (AND, mode, op1, trueop0);
3311 return trueop0;
3313 /* x%1 and x%-1 are 0 (or x&0 if x has side-effects). */
3314 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
3316 if (side_effects_p (op0))
3317 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3318 return CONST0_RTX (mode);
3320 break;
3322 case ROTATERT:
3323 case ROTATE:
3324 /* Canonicalize rotates by constant amount. If op1 is bitsize / 2,
3325 prefer left rotation; if op1 is from bitsize / 2 + 1 to
3326 bitsize - 1, use the other direction of rotate with an amount
3327 of 1 .. bitsize / 2 - 1 instead. */
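/* E.g., when the target has both rotate patterns, (rotate x (const_int 24))
   in SImode is rewritten as (rotatert x (const_int 8)). */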
3328 #if defined(HAVE_rotate) && defined(HAVE_rotatert)
3329 if (CONST_INT_P (trueop1)
3330 && IN_RANGE (INTVAL (trueop1),
3331 GET_MODE_PRECISION (mode) / 2 + (code == ROTATE),
3332 GET_MODE_PRECISION (mode) - 1))
3333 return simplify_gen_binary (code == ROTATE ? ROTATERT : ROTATE,
3334 mode, op0, GEN_INT (GET_MODE_PRECISION (mode)
3335 - INTVAL (trueop1)));
3336 #endif
3337 /* FALLTHRU */
3338 case ASHIFTRT:
3339 if (trueop1 == CONST0_RTX (mode))
3340 return op0;
3341 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3342 return op0;
3343 /* Rotating ~0 always results in ~0. */
3344 if (CONST_INT_P (trueop0) && width <= HOST_BITS_PER_WIDE_INT
3345 && UINTVAL (trueop0) == GET_MODE_MASK (mode)
3346 && ! side_effects_p (op1))
3347 return op0;
3349 canonicalize_shift:
3350 /* Given:
3351 scalar modes M1, M2
3352 scalar constants c1, c2
3353 size (M2) > size (M1)
3354 c1 == size (M2) - size (M1)
3355 optimize:
3356 ([a|l]shiftrt:M1 (subreg:M1 (lshiftrt:M2 (reg:M2) (const_int <c1>))
3357 <low_part>)
3358 (const_int <c2>))
3360 (subreg:M1 ([a|l]shiftrt:M2 (reg:M2) (const_int <c1 + c2>))
3361 <low_part>). */
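/* E.g., with M2 == DImode and M1 == SImode (so c1 == 32),
   ([a|l]shiftrt:SI (subreg:SI (lshiftrt:DI r (const_int 32)) <low_part>)
   (const_int 5))
   becomes
   (subreg:SI ([a|l]shiftrt:DI r (const_int 37)) <low_part>). */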
3362 if ((code == ASHIFTRT || code == LSHIFTRT)
3363 && is_a <scalar_int_mode> (mode, &int_mode)
3364 && SUBREG_P (op0)
3365 && CONST_INT_P (op1)
3366 && GET_CODE (SUBREG_REG (op0)) == LSHIFTRT
3367 && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (op0)),
3368 &inner_mode)
3369 && CONST_INT_P (XEXP (SUBREG_REG (op0), 1))
3370 && GET_MODE_BITSIZE (inner_mode) > GET_MODE_BITSIZE (int_mode)
3371 && (INTVAL (XEXP (SUBREG_REG (op0), 1))
3372 == GET_MODE_BITSIZE (inner_mode) - GET_MODE_BITSIZE (int_mode))
3373 && subreg_lowpart_p (op0))
3375 rtx tmp = GEN_INT (INTVAL (XEXP (SUBREG_REG (op0), 1))
3376 + INTVAL (op1));
3377 tmp = simplify_gen_binary (code, inner_mode,
3378 XEXP (SUBREG_REG (op0), 0),
3379 tmp);
3380 return lowpart_subreg (int_mode, tmp, inner_mode);
3383 if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
3385 val = INTVAL (op1) & (GET_MODE_PRECISION (mode) - 1);
3386 if (val != INTVAL (op1))
3387 return simplify_gen_binary (code, mode, op0, GEN_INT (val));
3389 break;
3391 case ASHIFT:
3392 case SS_ASHIFT:
3393 case US_ASHIFT:
3394 if (trueop1 == CONST0_RTX (mode))
3395 return op0;
3396 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3397 return op0;
3398 goto canonicalize_shift;
3400 case LSHIFTRT:
3401 if (trueop1 == CONST0_RTX (mode))
3402 return op0;
3403 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3404 return op0;
3405 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
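/* E.g., if CLZ_DEFINED_VALUE_AT_ZERO yields 32 for SImode, then
   (lshiftrt (clz:SI x) (const_int 5)) is 1 exactly when the clz is 32,
   i.e. when x is zero, so it becomes (eq x (const_int 0)). */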
3406 if (GET_CODE (op0) == CLZ
3407 && is_a <scalar_int_mode> (GET_MODE (XEXP (op0, 0)), &inner_mode)
3408 && CONST_INT_P (trueop1)
3409 && STORE_FLAG_VALUE == 1
3410 && INTVAL (trueop1) < (HOST_WIDE_INT)width)
3412 unsigned HOST_WIDE_INT zero_val = 0;
3414 if (CLZ_DEFINED_VALUE_AT_ZERO (inner_mode, zero_val)
3415 && zero_val == GET_MODE_PRECISION (inner_mode)
3416 && INTVAL (trueop1) == exact_log2 (zero_val))
3417 return simplify_gen_relational (EQ, mode, inner_mode,
3418 XEXP (op0, 0), const0_rtx);
3420 goto canonicalize_shift;
3422 case SMIN:
3423 if (width <= HOST_BITS_PER_WIDE_INT
3424 && mode_signbit_p (mode, trueop1)
3425 && ! side_effects_p (op0))
3426 return op1;
3427 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3428 return op0;
3429 tem = simplify_associative_operation (code, mode, op0, op1);
3430 if (tem)
3431 return tem;
3432 break;
3434 case SMAX:
3435 if (width <= HOST_BITS_PER_WIDE_INT
3436 && CONST_INT_P (trueop1)
3437 && (UINTVAL (trueop1) == GET_MODE_MASK (mode) >> 1)
3438 && ! side_effects_p (op0))
3439 return op1;
3440 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3441 return op0;
3442 tem = simplify_associative_operation (code, mode, op0, op1);
3443 if (tem)
3444 return tem;
3445 break;
3447 case UMIN:
3448 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
3449 return op1;
3450 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3451 return op0;
3452 tem = simplify_associative_operation (code, mode, op0, op1);
3453 if (tem)
3454 return tem;
3455 break;
3457 case UMAX:
3458 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
3459 return op1;
3460 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3461 return op0;
3462 tem = simplify_associative_operation (code, mode, op0, op1);
3463 if (tem)
3464 return tem;
3465 break;
3467 case SS_PLUS:
3468 case US_PLUS:
3469 case SS_MINUS:
3470 case US_MINUS:
3471 case SS_MULT:
3472 case US_MULT:
3473 case SS_DIV:
3474 case US_DIV:
3475 /* ??? There are simplifications that can be done. */
3476 return 0;
3478 case VEC_SELECT:
3479 if (!VECTOR_MODE_P (mode))
3481 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3482 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
3483 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3484 gcc_assert (XVECLEN (trueop1, 0) == 1);
3485 gcc_assert (CONST_INT_P (XVECEXP (trueop1, 0, 0)));
3487 if (GET_CODE (trueop0) == CONST_VECTOR)
3488 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
3489 (trueop1, 0, 0)));
3491 /* Extract a scalar element from a nested VEC_SELECT expression
3492 (with an optional nested VEC_CONCAT expression). Some targets
3493 (i386) extract a scalar element from a vector using a chain of
3494 nested VEC_SELECT expressions. When the input operand is a memory
3495 operand, this operation can be simplified to a simple scalar
3496 load from an offset memory address. */
3497 if (GET_CODE (trueop0) == VEC_SELECT)
3499 rtx op0 = XEXP (trueop0, 0);
3500 rtx op1 = XEXP (trueop0, 1);
3502 machine_mode opmode = GET_MODE (op0);
3503 int elt_size = GET_MODE_UNIT_SIZE (opmode);
3504 int n_elts = GET_MODE_SIZE (opmode) / elt_size;
3506 int i = INTVAL (XVECEXP (trueop1, 0, 0));
3507 int elem;
3509 rtvec vec;
3510 rtx tmp_op, tmp;
3512 gcc_assert (GET_CODE (op1) == PARALLEL);
3513 gcc_assert (i < n_elts);
3515 /* Select the element pointed to by the nested selector. */
3516 elem = INTVAL (XVECEXP (op1, 0, i));
3518 /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */
3519 if (GET_CODE (op0) == VEC_CONCAT)
3521 rtx op00 = XEXP (op0, 0);
3522 rtx op01 = XEXP (op0, 1);
3524 machine_mode mode00, mode01;
3525 int n_elts00, n_elts01;
3527 mode00 = GET_MODE (op00);
3528 mode01 = GET_MODE (op01);
3530 /* Find out number of elements of each operand. */
3531 if (VECTOR_MODE_P (mode00))
3533 elt_size = GET_MODE_UNIT_SIZE (mode00);
3534 n_elts00 = GET_MODE_SIZE (mode00) / elt_size;
3536 else
3537 n_elts00 = 1;
3539 if (VECTOR_MODE_P (mode01))
3541 elt_size = GET_MODE_UNIT_SIZE (mode01);
3542 n_elts01 = GET_MODE_SIZE (mode01) / elt_size;
3544 else
3545 n_elts01 = 1;
3547 gcc_assert (n_elts == n_elts00 + n_elts01);
3549 /* Select correct operand of VEC_CONCAT
3550 and adjust selector. */
3551 if (elem < n_elts01)
3552 tmp_op = op00;
3553 else
3555 tmp_op = op01;
3556 elem -= n_elts00;
3559 else
3560 tmp_op = op0;
3562 vec = rtvec_alloc (1);
3563 RTVEC_ELT (vec, 0) = GEN_INT (elem);
3565 tmp = gen_rtx_fmt_ee (code, mode,
3566 tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
3567 return tmp;
3569 if (GET_CODE (trueop0) == VEC_DUPLICATE
3570 && GET_MODE (XEXP (trueop0, 0)) == mode)
3571 return XEXP (trueop0, 0);
3573 else
3575 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3576 gcc_assert (GET_MODE_INNER (mode)
3577 == GET_MODE_INNER (GET_MODE (trueop0)));
3578 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3580 if (GET_CODE (trueop0) == CONST_VECTOR)
3582 int elt_size = GET_MODE_UNIT_SIZE (mode);
3583 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3584 rtvec v = rtvec_alloc (n_elts);
3585 unsigned int i;
3587 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
3588 for (i = 0; i < n_elts; i++)
3590 rtx x = XVECEXP (trueop1, 0, i);
3592 gcc_assert (CONST_INT_P (x));
3593 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
3594 INTVAL (x));
3597 return gen_rtx_CONST_VECTOR (mode, v);
3600 /* Recognize the identity. */
3601 if (GET_MODE (trueop0) == mode)
3603 bool maybe_ident = true;
3604 for (int i = 0; i < XVECLEN (trueop1, 0); i++)
3606 rtx j = XVECEXP (trueop1, 0, i);
3607 if (!CONST_INT_P (j) || INTVAL (j) != i)
3609 maybe_ident = false;
3610 break;
3613 if (maybe_ident)
3614 return trueop0;
3617 /* If we build {a,b} then permute it, build the result directly. */
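/* Illustrative example (hypothetical V4SF operands): selecting with
   (parallel [3 0]) from
     (vec_concat:V4SF (vec_concat:V2SF a b) (vec_concat:V2SF c d))
   yields (vec_concat:V2SF d a) directly, without going through the
   intermediate permutation.  */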
3618 if (XVECLEN (trueop1, 0) == 2
3619 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3620 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3621 && GET_CODE (trueop0) == VEC_CONCAT
3622 && GET_CODE (XEXP (trueop0, 0)) == VEC_CONCAT
3623 && GET_MODE (XEXP (trueop0, 0)) == mode
3624 && GET_CODE (XEXP (trueop0, 1)) == VEC_CONCAT
3625 && GET_MODE (XEXP (trueop0, 1)) == mode)
3627 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3628 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3629 rtx subop0, subop1;
3631 gcc_assert (i0 < 4 && i1 < 4);
3632 subop0 = XEXP (XEXP (trueop0, i0 / 2), i0 % 2);
3633 subop1 = XEXP (XEXP (trueop0, i1 / 2), i1 % 2);
3635 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3638 if (XVECLEN (trueop1, 0) == 2
3639 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3640 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3641 && GET_CODE (trueop0) == VEC_CONCAT
3642 && GET_MODE (trueop0) == mode)
3644 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3645 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3646 rtx subop0, subop1;
3648 gcc_assert (i0 < 2 && i1 < 2);
3649 subop0 = XEXP (trueop0, i0);
3650 subop1 = XEXP (trueop0, i1);
3652 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3655 /* If we select one half of a vec_concat, return that. */
3656 if (GET_CODE (trueop0) == VEC_CONCAT
3657 && CONST_INT_P (XVECEXP (trueop1, 0, 0)))
3659 rtx subop0 = XEXP (trueop0, 0);
3660 rtx subop1 = XEXP (trueop0, 1);
3661 machine_mode mode0 = GET_MODE (subop0);
3662 machine_mode mode1 = GET_MODE (subop1);
3663 int li = GET_MODE_UNIT_SIZE (mode0);
3664 int l0 = GET_MODE_SIZE (mode0) / li;
3665 int l1 = GET_MODE_SIZE (mode1) / li;
3666 int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3667 if (i0 == 0 && !side_effects_p (op1) && mode == mode0)
3669 bool success = true;
3670 for (int i = 1; i < l0; ++i)
3672 rtx j = XVECEXP (trueop1, 0, i);
3673 if (!CONST_INT_P (j) || INTVAL (j) != i)
3675 success = false;
3676 break;
3679 if (success)
3680 return subop0;
3682 if (i0 == l0 && !side_effects_p (op0) && mode == mode1)
3684 bool success = true;
3685 for (int i = 1; i < l1; ++i)
3687 rtx j = XVECEXP (trueop1, 0, i);
3688 if (!CONST_INT_P (j) || INTVAL (j) != i0 + i)
3690 success = false;
3691 break;
3694 if (success)
3695 return subop1;
3700 if (XVECLEN (trueop1, 0) == 1
3701 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3702 && GET_CODE (trueop0) == VEC_CONCAT)
3704 rtx vec = trueop0;
3705 int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
3707 /* Try to find the element in the VEC_CONCAT. */
3708 while (GET_MODE (vec) != mode
3709 && GET_CODE (vec) == VEC_CONCAT)
3711 HOST_WIDE_INT vec_size;
3713 if (CONST_INT_P (XEXP (vec, 0)))
3715 /* vec_concat of two const_ints doesn't make sense with
3716 respect to modes. */
3717 if (CONST_INT_P (XEXP (vec, 1)))
3718 return 0;
3720 vec_size = GET_MODE_SIZE (GET_MODE (trueop0))
3721 - GET_MODE_SIZE (GET_MODE (XEXP (vec, 1)));
3723 else
3724 vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
3726 if (offset < vec_size)
3727 vec = XEXP (vec, 0);
3728 else
3730 offset -= vec_size;
3731 vec = XEXP (vec, 1);
3733 vec = avoid_constant_pool_reference (vec);
3736 if (GET_MODE (vec) == mode)
3737 return vec;
3740 /* If we select elements in a vec_merge that all come from the same
3741 operand, select from that operand directly. */
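/* Illustrative example (hypothetical operands): with the mask
   (const_int 5) routing elements 0 and 2 from the first operand,
     (vec_select:V2SI (vec_merge:V4SI a b (const_int 5)) (parallel [0 2]))
   reads only elements of A and becomes
     (vec_select:V2SI a (parallel [0 2])).  */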
3742 if (GET_CODE (op0) == VEC_MERGE)
3744 rtx trueop02 = avoid_constant_pool_reference (XEXP (op0, 2));
3745 if (CONST_INT_P (trueop02))
3747 unsigned HOST_WIDE_INT sel = UINTVAL (trueop02);
3748 bool all_operand0 = true;
3749 bool all_operand1 = true;
3750 for (int i = 0; i < XVECLEN (trueop1, 0); i++)
3752 rtx j = XVECEXP (trueop1, 0, i);
3753 if (sel & (HOST_WIDE_INT_1U << UINTVAL (j)))
3754 all_operand1 = false;
3755 else
3756 all_operand0 = false;
3758 if (all_operand0 && !side_effects_p (XEXP (op0, 1)))
3759 return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 0), op1);
3760 if (all_operand1 && !side_effects_p (XEXP (op0, 0)))
3761 return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 1), op1);
3765 /* If we have two nested selects that are inverses of each
3766 other, replace them with the source operand. */
3767 if (GET_CODE (trueop0) == VEC_SELECT
3768 && GET_MODE (XEXP (trueop0, 0)) == mode)
3770 rtx op0_subop1 = XEXP (trueop0, 1);
3771 gcc_assert (GET_CODE (op0_subop1) == PARALLEL);
3772 gcc_assert (XVECLEN (trueop1, 0) == GET_MODE_NUNITS (mode));
3774 /* Apply the outer ordering vector to the inner one. (The inner
3775 ordering vector is expressly permitted to be of a different
3776 length than the outer one.) If the result is { 0, 1, ..., n-1 }
3777 then the two VEC_SELECTs cancel. */
3778 for (int i = 0; i < XVECLEN (trueop1, 0); ++i)
3780 rtx x = XVECEXP (trueop1, 0, i);
3781 if (!CONST_INT_P (x))
3782 return 0;
3783 rtx y = XVECEXP (op0_subop1, 0, INTVAL (x));
3784 if (!CONST_INT_P (y) || i != INTVAL (y))
3785 return 0;
3787 return XEXP (trueop0, 0);
3790 return 0;
3791 case VEC_CONCAT:
3793 machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
3794 ? GET_MODE (trueop0)
3795 : GET_MODE_INNER (mode));
3796 machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
3797 ? GET_MODE (trueop1)
3798 : GET_MODE_INNER (mode));
3800 gcc_assert (VECTOR_MODE_P (mode));
3801 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
3802 == GET_MODE_SIZE (mode));
3804 if (VECTOR_MODE_P (op0_mode))
3805 gcc_assert (GET_MODE_INNER (mode)
3806 == GET_MODE_INNER (op0_mode));
3807 else
3808 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
3810 if (VECTOR_MODE_P (op1_mode))
3811 gcc_assert (GET_MODE_INNER (mode)
3812 == GET_MODE_INNER (op1_mode));
3813 else
3814 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
3816 if ((GET_CODE (trueop0) == CONST_VECTOR
3817 || CONST_SCALAR_INT_P (trueop0)
3818 || CONST_DOUBLE_AS_FLOAT_P (trueop0))
3819 && (GET_CODE (trueop1) == CONST_VECTOR
3820 || CONST_SCALAR_INT_P (trueop1)
3821 || CONST_DOUBLE_AS_FLOAT_P (trueop1)))
3823 int elt_size = GET_MODE_UNIT_SIZE (mode);
3824 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3825 rtvec v = rtvec_alloc (n_elts);
3826 unsigned int i;
3827 unsigned in_n_elts = 1;
3829 if (VECTOR_MODE_P (op0_mode))
3830 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
3831 for (i = 0; i < n_elts; i++)
3833 if (i < in_n_elts)
3835 if (!VECTOR_MODE_P (op0_mode))
3836 RTVEC_ELT (v, i) = trueop0;
3837 else
3838 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
3840 else
3842 if (!VECTOR_MODE_P (op1_mode))
3843 RTVEC_ELT (v, i) = trueop1;
3844 else
3845 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
3846 i - in_n_elts);
3850 return gen_rtx_CONST_VECTOR (mode, v);
3853 /* Try to merge two VEC_SELECTs from the same vector into a single one.
3854 Restrict the transformation to avoid generating a VEC_SELECT with a
3855 mode unrelated to its operand. */
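/* Illustrative example (hypothetical V4SI operands): with X of mode V4SI,
     (vec_concat:V4SI (vec_select:V2SI x (parallel [0 2]))
                      (vec_select:V2SI x (parallel [1 3])))
   becomes (vec_select:V4SI x (parallel [0 2 1 3])).  */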
3856 if (GET_CODE (trueop0) == VEC_SELECT
3857 && GET_CODE (trueop1) == VEC_SELECT
3858 && rtx_equal_p (XEXP (trueop0, 0), XEXP (trueop1, 0))
3859 && GET_MODE (XEXP (trueop0, 0)) == mode)
3861 rtx par0 = XEXP (trueop0, 1);
3862 rtx par1 = XEXP (trueop1, 1);
3863 int len0 = XVECLEN (par0, 0);
3864 int len1 = XVECLEN (par1, 0);
3865 rtvec vec = rtvec_alloc (len0 + len1);
3866 for (int i = 0; i < len0; i++)
3867 RTVEC_ELT (vec, i) = XVECEXP (par0, 0, i);
3868 for (int i = 0; i < len1; i++)
3869 RTVEC_ELT (vec, len0 + i) = XVECEXP (par1, 0, i);
3870 return simplify_gen_binary (VEC_SELECT, mode, XEXP (trueop0, 0),
3871 gen_rtx_PARALLEL (VOIDmode, vec));
3874 return 0;
3876 default:
3877 gcc_unreachable ();
3880 return 0;
3884 simplify_const_binary_operation (enum rtx_code code, machine_mode mode,
3885 rtx op0, rtx op1)
3887 if (VECTOR_MODE_P (mode)
3888 && code != VEC_CONCAT
3889 && GET_CODE (op0) == CONST_VECTOR
3890 && GET_CODE (op1) == CONST_VECTOR)
3892 unsigned n_elts = GET_MODE_NUNITS (mode);
3893 machine_mode op0mode = GET_MODE (op0);
3894 unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
3895 machine_mode op1mode = GET_MODE (op1);
3896 unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
3897 rtvec v = rtvec_alloc (n_elts);
3898 unsigned int i;
3900 gcc_assert (op0_n_elts == n_elts);
3901 gcc_assert (op1_n_elts == n_elts);
3902 for (i = 0; i < n_elts; i++)
3904 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
3905 CONST_VECTOR_ELT (op0, i),
3906 CONST_VECTOR_ELT (op1, i));
3907 if (!x)
3908 return 0;
3909 RTVEC_ELT (v, i) = x;
3912 return gen_rtx_CONST_VECTOR (mode, v);
3915 if (VECTOR_MODE_P (mode)
3916 && code == VEC_CONCAT
3917 && (CONST_SCALAR_INT_P (op0)
3918 || GET_CODE (op0) == CONST_FIXED
3919 || CONST_DOUBLE_AS_FLOAT_P (op0))
3920 && (CONST_SCALAR_INT_P (op1)
3921 || CONST_DOUBLE_AS_FLOAT_P (op1)
3922 || GET_CODE (op1) == CONST_FIXED))
3924 unsigned n_elts = GET_MODE_NUNITS (mode);
3925 rtvec v = rtvec_alloc (n_elts);
3927 gcc_assert (n_elts >= 2);
3928 if (n_elts == 2)
3930 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
3931 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
3933 RTVEC_ELT (v, 0) = op0;
3934 RTVEC_ELT (v, 1) = op1;
3936 else
3938 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
3939 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
3940 unsigned i;
3942 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
3943 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
3944 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
3946 for (i = 0; i < op0_n_elts; ++i)
3947 RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
3948 for (i = 0; i < op1_n_elts; ++i)
3949 RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
3952 return gen_rtx_CONST_VECTOR (mode, v);
3955 if (SCALAR_FLOAT_MODE_P (mode)
3956 && CONST_DOUBLE_AS_FLOAT_P (op0)
3957 && CONST_DOUBLE_AS_FLOAT_P (op1)
3958 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
3960 if (code == AND
3961 || code == IOR
3962 || code == XOR)
3964 long tmp0[4];
3965 long tmp1[4];
3966 REAL_VALUE_TYPE r;
3967 int i;
3969 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
3970 GET_MODE (op0));
3971 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
3972 GET_MODE (op1));
3973 for (i = 0; i < 4; i++)
3975 switch (code)
3977 case AND:
3978 tmp0[i] &= tmp1[i];
3979 break;
3980 case IOR:
3981 tmp0[i] |= tmp1[i];
3982 break;
3983 case XOR:
3984 tmp0[i] ^= tmp1[i];
3985 break;
3986 default:
3987 gcc_unreachable ();
3990 real_from_target (&r, tmp0, mode);
3991 return const_double_from_real_value (r, mode);
3993 else
3995 REAL_VALUE_TYPE f0, f1, value, result;
3996 const REAL_VALUE_TYPE *opr0, *opr1;
3997 bool inexact;
3999 opr0 = CONST_DOUBLE_REAL_VALUE (op0);
4000 opr1 = CONST_DOUBLE_REAL_VALUE (op1);
4002 if (HONOR_SNANS (mode)
4003 && (REAL_VALUE_ISSIGNALING_NAN (*opr0)
4004 || REAL_VALUE_ISSIGNALING_NAN (*opr1)))
4005 return 0;
4007 real_convert (&f0, mode, opr0);
4008 real_convert (&f1, mode, opr1);
4010 if (code == DIV
4011 && real_equal (&f1, &dconst0)
4012 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
4013 return 0;
4015 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
4016 && flag_trapping_math
4017 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
4019 int s0 = REAL_VALUE_NEGATIVE (f0);
4020 int s1 = REAL_VALUE_NEGATIVE (f1);
4022 switch (code)
4024 case PLUS:
4025 /* Inf + -Inf = NaN plus exception. */
4026 if (s0 != s1)
4027 return 0;
4028 break;
4029 case MINUS:
4030 /* Inf - Inf = NaN plus exception. */
4031 if (s0 == s1)
4032 return 0;
4033 break;
4034 case DIV:
4035 /* Inf / Inf = NaN plus exception. */
4036 return 0;
4037 default:
4038 break;
4042 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
4043 && flag_trapping_math
4044 && ((REAL_VALUE_ISINF (f0) && real_equal (&f1, &dconst0))
4045 || (REAL_VALUE_ISINF (f1)
4046 && real_equal (&f0, &dconst0))))
4047 /* Inf * 0 = NaN plus exception. */
4048 return 0;
4050 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
4051 &f0, &f1);
4052 real_convert (&result, mode, &value);
4054 /* Don't constant fold this floating point operation if
4055 the result has overflowed and flag_trapping_math is set. */
4057 if (flag_trapping_math
4058 && MODE_HAS_INFINITIES (mode)
4059 && REAL_VALUE_ISINF (result)
4060 && !REAL_VALUE_ISINF (f0)
4061 && !REAL_VALUE_ISINF (f1))
4062 /* Overflow plus exception. */
4063 return 0;
4065 /* Don't constant fold this floating point operation if the
4066 result may depend upon the run-time rounding mode and
4067 flag_rounding_math is set, or if GCC's software emulation
4068 is unable to accurately represent the result. */
4070 if ((flag_rounding_math
4071 || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
4072 && (inexact || !real_identical (&result, &value)))
4073 return NULL_RTX;
4075 return const_double_from_real_value (result, mode);
4079 /* We can fold some multi-word operations. */
4080 scalar_int_mode int_mode;
4081 if (is_a <scalar_int_mode> (mode, &int_mode)
4082 && CONST_SCALAR_INT_P (op0)
4083 && CONST_SCALAR_INT_P (op1))
4085 wide_int result;
4086 bool overflow;
4087 rtx_mode_t pop0 = rtx_mode_t (op0, int_mode);
4088 rtx_mode_t pop1 = rtx_mode_t (op1, int_mode);
4090 #if TARGET_SUPPORTS_WIDE_INT == 0
4091 /* This assert keeps the simplification from producing a result
4092 that cannot be represented in a CONST_DOUBLE, but a lot of
4093 upstream callers expect that this function never fails to
4094 simplify something, so if you added this to the test
4095 above the code would die later anyway. If this assert
4096 happens, you just need to make the port support wide int. */
4097 gcc_assert (GET_MODE_PRECISION (int_mode) <= HOST_BITS_PER_DOUBLE_INT);
4098 #endif
4099 switch (code)
4101 case MINUS:
4102 result = wi::sub (pop0, pop1);
4103 break;
4105 case PLUS:
4106 result = wi::add (pop0, pop1);
4107 break;
4109 case MULT:
4110 result = wi::mul (pop0, pop1);
4111 break;
4113 case DIV:
4114 result = wi::div_trunc (pop0, pop1, SIGNED, &overflow);
4115 if (overflow)
4116 return NULL_RTX;
4117 break;
4119 case MOD:
4120 result = wi::mod_trunc (pop0, pop1, SIGNED, &overflow);
4121 if (overflow)
4122 return NULL_RTX;
4123 break;
4125 case UDIV:
4126 result = wi::div_trunc (pop0, pop1, UNSIGNED, &overflow);
4127 if (overflow)
4128 return NULL_RTX;
4129 break;
4131 case UMOD:
4132 result = wi::mod_trunc (pop0, pop1, UNSIGNED, &overflow);
4133 if (overflow)
4134 return NULL_RTX;
4135 break;
4137 case AND:
4138 result = wi::bit_and (pop0, pop1);
4139 break;
4141 case IOR:
4142 result = wi::bit_or (pop0, pop1);
4143 break;
4145 case XOR:
4146 result = wi::bit_xor (pop0, pop1);
4147 break;
4149 case SMIN:
4150 result = wi::smin (pop0, pop1);
4151 break;
4153 case SMAX:
4154 result = wi::smax (pop0, pop1);
4155 break;
4157 case UMIN:
4158 result = wi::umin (pop0, pop1);
4159 break;
4161 case UMAX:
4162 result = wi::umax (pop0, pop1);
4163 break;
4165 case LSHIFTRT:
4166 case ASHIFTRT:
4167 case ASHIFT:
4169 wide_int wop1 = pop1;
4170 if (SHIFT_COUNT_TRUNCATED)
4171 wop1 = wi::umod_trunc (wop1, GET_MODE_PRECISION (int_mode));
4172 else if (wi::geu_p (wop1, GET_MODE_PRECISION (int_mode)))
4173 return NULL_RTX;
4175 switch (code)
4177 case LSHIFTRT:
4178 result = wi::lrshift (pop0, wop1);
4179 break;
4181 case ASHIFTRT:
4182 result = wi::arshift (pop0, wop1);
4183 break;
4185 case ASHIFT:
4186 result = wi::lshift (pop0, wop1);
4187 break;
4189 default:
4190 gcc_unreachable ();
4192 break;
4194 case ROTATE:
4195 case ROTATERT:
4197 if (wi::neg_p (pop1))
4198 return NULL_RTX;
4200 switch (code)
4202 case ROTATE:
4203 result = wi::lrotate (pop0, pop1);
4204 break;
4206 case ROTATERT:
4207 result = wi::rrotate (pop0, pop1);
4208 break;
4210 default:
4211 gcc_unreachable ();
4213 break;
4215 default:
4216 return NULL_RTX;
4218 return immed_wide_int_const (result, int_mode);
4221 return NULL_RTX;
4226 /* Return a positive integer if X should sort after Y. The value
4227 returned is 1 if and only if X and Y are both regs. */
4229 static int
4230 simplify_plus_minus_op_data_cmp (rtx x, rtx y)
4232 int result;
4234 result = (commutative_operand_precedence (y)
4235 - commutative_operand_precedence (x));
4236 if (result)
4237 return result + result;
4239 /* Group together equal REGs to do more simplification. */
4240 if (REG_P (x) && REG_P (y))
4241 return REGNO (x) > REGNO (y);
4243 return 0;
4246 /* Simplify and canonicalize a PLUS or MINUS, at least one of whose
4247 operands may be another PLUS or MINUS.
4249 Rather than testing for specific cases, we do this by a brute-force method
4250 and do all possible simplifications until no more changes occur. Then
4251 we rebuild the operation.
4253 May return NULL_RTX when no changes were made. */
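/* A small worked example (hypothetical operands, added for exposition):
   simplify_plus_minus (MINUS, SImode, (plus (reg A) (const_int 4)),
   (const_int 1)) expands the inputs into the ops array as
   { A, (const_int 4), (const_int -1) }, folds the two constants in the
   combination loop, and rebuilds (plus (reg A) (const_int 3)).  */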
4255 static rtx
4256 simplify_plus_minus (enum rtx_code code, machine_mode mode, rtx op0,
4257 rtx op1)
4259 struct simplify_plus_minus_op_data
4261 rtx op;
4262 short neg;
4263 } ops[16];
4264 rtx result, tem;
4265 int n_ops = 2;
4266 int changed, n_constants, canonicalized = 0;
4267 int i, j;
4269 memset (ops, 0, sizeof ops);
4271 /* Set up the two operands and then expand them until nothing has been
4272 changed. If we run out of room in our array, give up; this should
4273 almost never happen. */
4275 ops[0].op = op0;
4276 ops[0].neg = 0;
4277 ops[1].op = op1;
4278 ops[1].neg = (code == MINUS);
4282 changed = 0;
4283 n_constants = 0;
4285 for (i = 0; i < n_ops; i++)
4287 rtx this_op = ops[i].op;
4288 int this_neg = ops[i].neg;
4289 enum rtx_code this_code = GET_CODE (this_op);
4291 switch (this_code)
4293 case PLUS:
4294 case MINUS:
4295 if (n_ops == ARRAY_SIZE (ops))
4296 return NULL_RTX;
4298 ops[n_ops].op = XEXP (this_op, 1);
4299 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
4300 n_ops++;
4302 ops[i].op = XEXP (this_op, 0);
4303 changed = 1;
4304 /* If this operand was negated then we will potentially
4305 canonicalize the expression. Similarly, if we don't
4306 place the operands adjacent, we're re-ordering the
4307 expression and thus might be performing a
4308 canonicalization. Ignore register re-ordering.
4309 ??? It might be better to shuffle the ops array here,
4310 but then (plus (plus (A, B), plus (C, D))) wouldn't
4311 be seen as non-canonical. */
4312 if (this_neg
4313 || (i != n_ops - 2
4314 && !(REG_P (ops[i].op) && REG_P (ops[n_ops - 1].op))))
4315 canonicalized = 1;
4316 break;
4318 case NEG:
4319 ops[i].op = XEXP (this_op, 0);
4320 ops[i].neg = ! this_neg;
4321 changed = 1;
4322 canonicalized = 1;
4323 break;
4325 case CONST:
4326 if (n_ops != ARRAY_SIZE (ops)
4327 && GET_CODE (XEXP (this_op, 0)) == PLUS
4328 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
4329 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
4331 ops[i].op = XEXP (XEXP (this_op, 0), 0);
4332 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
4333 ops[n_ops].neg = this_neg;
4334 n_ops++;
4335 changed = 1;
4336 canonicalized = 1;
4338 break;
4340 case NOT:
4341 /* ~a -> (-a - 1) */
4342 if (n_ops != ARRAY_SIZE (ops))
4344 ops[n_ops].op = CONSTM1_RTX (mode);
4345 ops[n_ops++].neg = this_neg;
4346 ops[i].op = XEXP (this_op, 0);
4347 ops[i].neg = !this_neg;
4348 changed = 1;
4349 canonicalized = 1;
4351 break;
4353 case CONST_INT:
4354 n_constants++;
4355 if (this_neg)
4357 ops[i].op = neg_const_int (mode, this_op);
4358 ops[i].neg = 0;
4359 changed = 1;
4360 canonicalized = 1;
4362 break;
4364 default:
4365 break;
4369 while (changed);
4371 if (n_constants > 1)
4372 canonicalized = 1;
4374 gcc_assert (n_ops >= 2);
4376 /* If we only have two operands, we can avoid the loops. */
4377 if (n_ops == 2)
4379 enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
4380 rtx lhs, rhs;
4382 /* Get the two operands. Be careful with the order, especially for
4383 the cases where code == MINUS. */
4384 if (ops[0].neg && ops[1].neg)
4386 lhs = gen_rtx_NEG (mode, ops[0].op);
4387 rhs = ops[1].op;
4389 else if (ops[0].neg)
4391 lhs = ops[1].op;
4392 rhs = ops[0].op;
4394 else
4396 lhs = ops[0].op;
4397 rhs = ops[1].op;
4400 return simplify_const_binary_operation (code, mode, lhs, rhs);
4403 /* Now simplify each pair of operands until nothing changes. */
4404 while (1)
4406 /* Insertion sort is good enough for a small array. */
4407 for (i = 1; i < n_ops; i++)
4409 struct simplify_plus_minus_op_data save;
4410 int cmp;
4412 j = i - 1;
4413 cmp = simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op);
4414 if (cmp <= 0)
4415 continue;
4416 /* Just swapping registers doesn't count as canonicalization. */
4417 if (cmp != 1)
4418 canonicalized = 1;
4420 save = ops[i];
4422 ops[j + 1] = ops[j];
4423 while (j--
4424 && simplify_plus_minus_op_data_cmp (ops[j].op, save.op) > 0);
4425 ops[j + 1] = save;
4428 changed = 0;
4429 for (i = n_ops - 1; i > 0; i--)
4430 for (j = i - 1; j >= 0; j--)
4432 rtx lhs = ops[j].op, rhs = ops[i].op;
4433 int lneg = ops[j].neg, rneg = ops[i].neg;
4435 if (lhs != 0 && rhs != 0)
4437 enum rtx_code ncode = PLUS;
4439 if (lneg != rneg)
4441 ncode = MINUS;
4442 if (lneg)
4443 std::swap (lhs, rhs);
4445 else if (swap_commutative_operands_p (lhs, rhs))
4446 std::swap (lhs, rhs);
4448 if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
4449 && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
4451 rtx tem_lhs, tem_rhs;
4453 tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
4454 tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
4455 tem = simplify_binary_operation (ncode, mode, tem_lhs,
4456 tem_rhs);
4458 if (tem && !CONSTANT_P (tem))
4459 tem = gen_rtx_CONST (GET_MODE (tem), tem);
4461 else
4462 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
4464 if (tem)
4466 /* Reject "simplifications" that just wrap the two
4467 arguments in a CONST. Failure to do so can result
4468 in infinite recursion with simplify_binary_operation
4469 when it calls us to simplify CONST operations.
4470 Also, if we find such a simplification, don't try
4471 any more combinations with this rhs: We must have
4472 something like symbol+offset, i.e. one of the
4473 trivial CONST expressions we handle later. */
4474 if (GET_CODE (tem) == CONST
4475 && GET_CODE (XEXP (tem, 0)) == ncode
4476 && XEXP (XEXP (tem, 0), 0) == lhs
4477 && XEXP (XEXP (tem, 0), 1) == rhs)
4478 break;
4479 lneg &= rneg;
4480 if (GET_CODE (tem) == NEG)
4481 tem = XEXP (tem, 0), lneg = !lneg;
4482 if (CONST_INT_P (tem) && lneg)
4483 tem = neg_const_int (mode, tem), lneg = 0;
4485 ops[i].op = tem;
4486 ops[i].neg = lneg;
4487 ops[j].op = NULL_RTX;
4488 changed = 1;
4489 canonicalized = 1;
4494 if (!changed)
4495 break;
4497 /* Pack all the operands to the lower-numbered entries. */
4498 for (i = 0, j = 0; j < n_ops; j++)
4499 if (ops[j].op)
4501 ops[i] = ops[j];
4502 i++;
4504 n_ops = i;
4507 /* If nothing changed, check that rematerialization of rtl instructions
4508 is still required. */
4509 if (!canonicalized)
4511 /* Perform rematerialization only if all operands are registers and
4512 all operations are PLUS. */
4513 /* ??? Also disallow (non-global, non-frame) fixed registers to work
4514 around rs6000 and how it uses the CA register. See PR67145. */
4515 for (i = 0; i < n_ops; i++)
4516 if (ops[i].neg
4517 || !REG_P (ops[i].op)
4518 || (REGNO (ops[i].op) < FIRST_PSEUDO_REGISTER
4519 && fixed_regs[REGNO (ops[i].op)]
4520 && !global_regs[REGNO (ops[i].op)]
4521 && ops[i].op != frame_pointer_rtx
4522 && ops[i].op != arg_pointer_rtx
4523 && ops[i].op != stack_pointer_rtx))
4524 return NULL_RTX;
4525 goto gen_result;
4528 /* Create (minus -C X) instead of (neg (const (plus X C))). */
4529 if (n_ops == 2
4530 && CONST_INT_P (ops[1].op)
4531 && CONSTANT_P (ops[0].op)
4532 && ops[0].neg)
4533 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
4535 /* We suppressed creation of trivial CONST expressions in the
4536 combination loop to avoid recursion. Create one manually now.
4537 The combination loop should have ensured that there is exactly
4538 one CONST_INT, and the sort will have ensured that it is last
4539 in the array and that any other constant will be next-to-last. */
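/* For example (hypothetical operands): if the array ends as
   { (reg R), (symbol_ref S), (const_int 8) }, the two trailing constants
   are folded by plus_constant into
   (const (plus (symbol_ref S) (const_int 8))) and the final result is
   the two-operand PLUS of that with (reg R).  */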
4541 if (n_ops > 1
4542 && CONST_INT_P (ops[n_ops - 1].op)
4543 && CONSTANT_P (ops[n_ops - 2].op))
4545 rtx value = ops[n_ops - 1].op;
4546 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
4547 value = neg_const_int (mode, value);
4548 if (CONST_INT_P (value))
4550 ops[n_ops - 2].op = plus_constant (mode, ops[n_ops - 2].op,
4551 INTVAL (value));
4552 n_ops--;
4556 /* Put a non-negated operand first, if possible. */
4558 for (i = 0; i < n_ops && ops[i].neg; i++)
4559 continue;
4560 if (i == n_ops)
4561 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
4562 else if (i != 0)
4564 tem = ops[0].op;
4565 ops[0] = ops[i];
4566 ops[i].op = tem;
4567 ops[i].neg = 1;
4570 /* Now make the result by performing the requested operations. */
4571 gen_result:
4572 result = ops[0].op;
4573 for (i = 1; i < n_ops; i++)
4574 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
4575 mode, result, ops[i].op);
4577 return result;
4580 /* Check whether an operand is suitable for calling simplify_plus_minus. */
4581 static bool
4582 plus_minus_operand_p (const_rtx x)
4584 return GET_CODE (x) == PLUS
4585 || GET_CODE (x) == MINUS
4586 || (GET_CODE (x) == CONST
4587 && GET_CODE (XEXP (x, 0)) == PLUS
4588 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
4589 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
4592 /* Like simplify_binary_operation except used for relational operators.
4593 MODE is the mode of the result. If MODE is VOIDmode, both operands must
4594 not also be VOIDmode.
4596 CMP_MODE specifies the mode in which the comparison is done, so it is
4597 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
4598 the operands or, if both are VOIDmode, the operands are compared in
4599 "infinite precision". */
4601 simplify_relational_operation (enum rtx_code code, machine_mode mode,
4602 machine_mode cmp_mode, rtx op0, rtx op1)
4604 rtx tem, trueop0, trueop1;
4606 if (cmp_mode == VOIDmode)
4607 cmp_mode = GET_MODE (op0);
4608 if (cmp_mode == VOIDmode)
4609 cmp_mode = GET_MODE (op1);
4611 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
4612 if (tem)
4614 if (SCALAR_FLOAT_MODE_P (mode))
4616 if (tem == const0_rtx)
4617 return CONST0_RTX (mode);
4618 #ifdef FLOAT_STORE_FLAG_VALUE
4620 REAL_VALUE_TYPE val;
4621 val = FLOAT_STORE_FLAG_VALUE (mode);
4622 return const_double_from_real_value (val, mode);
4624 #else
4625 return NULL_RTX;
4626 #endif
4628 if (VECTOR_MODE_P (mode))
4630 if (tem == const0_rtx)
4631 return CONST0_RTX (mode);
4632 #ifdef VECTOR_STORE_FLAG_VALUE
4634 int i, units;
4635 rtvec v;
4637 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
4638 if (val == NULL_RTX)
4639 return NULL_RTX;
4640 if (val == const1_rtx)
4641 return CONST1_RTX (mode);
4643 units = GET_MODE_NUNITS (mode);
4644 v = rtvec_alloc (units);
4645 for (i = 0; i < units; i++)
4646 RTVEC_ELT (v, i) = val;
4647 return gen_rtx_raw_CONST_VECTOR (mode, v);
4649 #else
4650 return NULL_RTX;
4651 #endif
4654 return tem;
4657 /* For the following tests, ensure const0_rtx is op1. */
4658 if (swap_commutative_operands_p (op0, op1)
4659 || (op0 == const0_rtx && op1 != const0_rtx))
4660 std::swap (op0, op1), code = swap_condition (code);
4662 /* If op0 is a compare, extract the comparison arguments from it. */
4663 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4664 return simplify_gen_relational (code, mode, VOIDmode,
4665 XEXP (op0, 0), XEXP (op0, 1));
4667 if (GET_MODE_CLASS (cmp_mode) == MODE_CC
4668 || CC0_P (op0))
4669 return NULL_RTX;
4671 trueop0 = avoid_constant_pool_reference (op0);
4672 trueop1 = avoid_constant_pool_reference (op1);
4673 return simplify_relational_operation_1 (code, mode, cmp_mode,
4674 trueop0, trueop1);
4677 /* This part of simplify_relational_operation is only used when CMP_MODE
4678 is not in class MODE_CC (i.e. it is a real comparison).
4680 MODE is the mode of the result, while CMP_MODE specifies the mode
4681 in which the comparison is done, so it is the mode of the operands. */
4683 static rtx
4684 simplify_relational_operation_1 (enum rtx_code code, machine_mode mode,
4685 machine_mode cmp_mode, rtx op0, rtx op1)
4687 enum rtx_code op0code = GET_CODE (op0);
4689 if (op1 == const0_rtx && COMPARISON_P (op0))
4691 /* If op0 is a comparison, extract the comparison arguments
4692 from it. */
4693 if (code == NE)
4695 if (GET_MODE (op0) == mode)
4696 return simplify_rtx (op0);
4697 else
4698 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
4699 XEXP (op0, 0), XEXP (op0, 1));
4701 else if (code == EQ)
4703 enum rtx_code new_code = reversed_comparison_code (op0, NULL);
4704 if (new_code != UNKNOWN)
4705 return simplify_gen_relational (new_code, mode, VOIDmode,
4706 XEXP (op0, 0), XEXP (op0, 1));
4710 /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
4711 (GEU/LTU a -C). Likewise for (LTU/GEU (PLUS a C) a). */
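/* A brief justification, added for exposition: with N-bit unsigned
   arithmetic and C nonzero, (plus a C) wraps around exactly when
   a >= 2^N - C, and only a wrapped sum can compare below C; hence
   (ltu (plus a C) C) is equivalent to (geu a (neg C)), and the GEU
   form of the test is simply its negation.  */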
4712 if ((code == LTU || code == GEU)
4713 && GET_CODE (op0) == PLUS
4714 && CONST_INT_P (XEXP (op0, 1))
4715 && (rtx_equal_p (op1, XEXP (op0, 0))
4716 || rtx_equal_p (op1, XEXP (op0, 1)))
4717 /* (LTU/GEU (PLUS a 0) 0) is not the same as (GEU/LTU a 0). */
4718 && XEXP (op0, 1) != const0_rtx)
4720 rtx new_cmp
4721 = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
4722 return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
4723 cmp_mode, XEXP (op0, 0), new_cmp);
4726 /* (GTU (PLUS a C) (C - 1)) where C is a non-zero constant can be
4727 transformed into (LTU a -C). */
4728 if (code == GTU && GET_CODE (op0) == PLUS && CONST_INT_P (op1)
4729 && CONST_INT_P (XEXP (op0, 1))
4730 && (UINTVAL (op1) == UINTVAL (XEXP (op0, 1)) - 1)
4731 && XEXP (op0, 1) != const0_rtx)
4733 rtx new_cmp
4734 = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
4735 return simplify_gen_relational (LTU, mode, cmp_mode,
4736 XEXP (op0, 0), new_cmp);
4739 /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a). */
4740 if ((code == LTU || code == GEU)
4741 && GET_CODE (op0) == PLUS
4742 && rtx_equal_p (op1, XEXP (op0, 1))
4743 /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b). */
4744 && !rtx_equal_p (op1, XEXP (op0, 0)))
4745 return simplify_gen_relational (code, mode, cmp_mode, op0,
4746 copy_rtx (XEXP (op0, 0)));
4748 if (op1 == const0_rtx)
4750 /* Canonicalize (GTU x 0) as (NE x 0). */
4751 if (code == GTU)
4752 return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
4753 /* Canonicalize (LEU x 0) as (EQ x 0). */
4754 if (code == LEU)
4755 return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
4757 else if (op1 == const1_rtx)
4759 switch (code)
4761 case GE:
4762 /* Canonicalize (GE x 1) as (GT x 0). */
4763 return simplify_gen_relational (GT, mode, cmp_mode,
4764 op0, const0_rtx);
4765 case GEU:
4766 /* Canonicalize (GEU x 1) as (NE x 0). */
4767 return simplify_gen_relational (NE, mode, cmp_mode,
4768 op0, const0_rtx);
4769 case LT:
4770 /* Canonicalize (LT x 1) as (LE x 0). */
4771 return simplify_gen_relational (LE, mode, cmp_mode,
4772 op0, const0_rtx);
4773 case LTU:
4774 /* Canonicalize (LTU x 1) as (EQ x 0). */
4775 return simplify_gen_relational (EQ, mode, cmp_mode,
4776 op0, const0_rtx);
4777 default:
4778 break;
4781 else if (op1 == constm1_rtx)
4783 /* Canonicalize (LE x -1) as (LT x 0). */
4784 if (code == LE)
4785 return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
4786 /* Canonicalize (GT x -1) as (GE x 0). */
4787 if (code == GT)
4788 return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
4791 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
4792 if ((code == EQ || code == NE)
4793 && (op0code == PLUS || op0code == MINUS)
4794 && CONSTANT_P (op1)
4795 && CONSTANT_P (XEXP (op0, 1))
4796 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
4798 rtx x = XEXP (op0, 0);
4799 rtx c = XEXP (op0, 1);
4800 enum rtx_code invcode = op0code == PLUS ? MINUS : PLUS;
4801 rtx tem = simplify_gen_binary (invcode, cmp_mode, op1, c);
4803 /* Detect an infinitely recursive condition, where we oscillate at this
4804 simplification case between:
4805 A + B == C <---> C - B == A,
4806 where A, B, and C are all constants with non-simplifiable expressions,
4807 usually SYMBOL_REFs. */
4808 if (GET_CODE (tem) == invcode
4809 && CONSTANT_P (x)
4810 && rtx_equal_p (c, XEXP (tem, 1)))
4811 return NULL_RTX;
4813 return simplify_gen_relational (code, mode, cmp_mode, x, tem);
4816 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0))) is
4817 the same as (zero_extract:SI FOO (const_int 1) BAR). */
4818 scalar_int_mode int_mode, int_cmp_mode;
4819 if (code == NE
4820 && op1 == const0_rtx
4821 && is_int_mode (mode, &int_mode)
4822 && is_a <scalar_int_mode> (cmp_mode, &int_cmp_mode)
4823 /* ??? Work-around BImode bugs in the ia64 backend. */
4824 && int_mode != BImode
4825 && int_cmp_mode != BImode
4826 && nonzero_bits (op0, int_cmp_mode) == 1
4827 && STORE_FLAG_VALUE == 1)
4828 return GET_MODE_SIZE (int_mode) > GET_MODE_SIZE (int_cmp_mode)
4829 ? simplify_gen_unary (ZERO_EXTEND, int_mode, op0, int_cmp_mode)
4830 : lowpart_subreg (int_mode, op0, int_cmp_mode);
4832 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
4833 if ((code == EQ || code == NE)
4834 && op1 == const0_rtx
4835 && op0code == XOR)
4836 return simplify_gen_relational (code, mode, cmp_mode,
4837 XEXP (op0, 0), XEXP (op0, 1));
4839 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
4840 if ((code == EQ || code == NE)
4841 && op0code == XOR
4842 && rtx_equal_p (XEXP (op0, 0), op1)
4843 && !side_effects_p (XEXP (op0, 0)))
4844 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 1),
4845 CONST0_RTX (mode));
4847 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
4848 if ((code == EQ || code == NE)
4849 && op0code == XOR
4850 && rtx_equal_p (XEXP (op0, 1), op1)
4851 && !side_effects_p (XEXP (op0, 1)))
4852 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4853 CONST0_RTX (mode));
4855 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
4856 if ((code == EQ || code == NE)
4857 && op0code == XOR
4858 && CONST_SCALAR_INT_P (op1)
4859 && CONST_SCALAR_INT_P (XEXP (op0, 1)))
4860 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4861 simplify_gen_binary (XOR, cmp_mode,
4862 XEXP (op0, 1), op1));
4864 /* (eq/ne (and x y) x) simplifies to (eq/ne (and (not y) x) 0), which
4865 can be implemented with a BICS instruction on some targets, or
4866 constant-folded if y is a constant. */
4867 if ((code == EQ || code == NE)
4868 && op0code == AND
4869 && rtx_equal_p (XEXP (op0, 0), op1)
4870 && !side_effects_p (op1)
4871 && op1 != CONST0_RTX (cmp_mode))
4873 rtx not_y = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 1), cmp_mode);
4874 rtx lhs = simplify_gen_binary (AND, cmp_mode, not_y, XEXP (op0, 0));
4876 return simplify_gen_relational (code, mode, cmp_mode, lhs,
4877 CONST0_RTX (cmp_mode));
4880 /* Likewise for (eq/ne (and x y) y). */
4881 if ((code == EQ || code == NE)
4882 && op0code == AND
4883 && rtx_equal_p (XEXP (op0, 1), op1)
4884 && !side_effects_p (op1)
4885 && op1 != CONST0_RTX (cmp_mode))
4887 rtx not_x = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 0), cmp_mode);
4888 rtx lhs = simplify_gen_binary (AND, cmp_mode, not_x, XEXP (op0, 1));
4890 return simplify_gen_relational (code, mode, cmp_mode, lhs,
4891 CONST0_RTX (cmp_mode));
4894 /* (eq/ne (bswap x) C1) simplifies to (eq/ne x C2) with C2 swapped. */
4895 if ((code == EQ || code == NE)
4896 && GET_CODE (op0) == BSWAP
4897 && CONST_SCALAR_INT_P (op1))
4898 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4899 simplify_gen_unary (BSWAP, cmp_mode,
4900 op1, cmp_mode));
4902 /* (eq/ne (bswap x) (bswap y)) simplifies to (eq/ne x y). */
4903 if ((code == EQ || code == NE)
4904 && GET_CODE (op0) == BSWAP
4905 && GET_CODE (op1) == BSWAP)
4906 return simplify_gen_relational (code, mode, cmp_mode,
4907 XEXP (op0, 0), XEXP (op1, 0));
4909 if (op0code == POPCOUNT && op1 == const0_rtx)
4910 switch (code)
4912 case EQ:
4913 case LE:
4914 case LEU:
4915 /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */
4916 return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
4917 XEXP (op0, 0), const0_rtx);
4919 case NE:
4920 case GT:
4921 case GTU:
4922 /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */
4923 return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
4924 XEXP (op0, 0), const0_rtx);
4926 default:
4927 break;
4930 return NULL_RTX;
4933 enum
4935 CMP_EQ = 1,
4936 CMP_LT = 2,
4937 CMP_GT = 4,
4938 CMP_LTU = 8,
4939 CMP_GTU = 16
4943 /* Convert the known results for EQ, LT, GT, LTU, GTU contained in
4944 KNOWN_RESULTS to a CONST_INT, based on the requested comparison CODE.
4945 For KNOWN_RESULTS to make sense it should be either CMP_EQ, or the
4946 logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
4947 For floating-point comparisons, assume that the operands were ordered. */
4949 static rtx
4950 comparison_result (enum rtx_code code, int known_results)
4952 switch (code)
4954 case EQ:
4955 case UNEQ:
4956 return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
4957 case NE:
4958 case LTGT:
4959 return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;
4961 case LT:
4962 case UNLT:
4963 return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
4964 case GE:
4965 case UNGE:
4966 return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;
4968 case GT:
4969 case UNGT:
4970 return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
4971 case LE:
4972 case UNLE:
4973 return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;
4975 case LTU:
4976 return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
4977 case GEU:
4978 return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;
4980 case GTU:
4981 return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
4982 case LEU:
4983 return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;
4985 case ORDERED:
4986 return const_true_rtx;
4987 case UNORDERED:
4988 return const0_rtx;
4989 default:
4990 gcc_unreachable ();
4994 /* Check if the given comparison (done in the given MODE) is actually
4995 a tautology or a contradiction. If the mode is VOIDmode, the
4996 comparison is done in "infinite precision". If no simplification
4997 is possible, this function returns zero. Otherwise, it returns
4998 either const_true_rtx or const0_rtx. */
5001 simplify_const_relational_operation (enum rtx_code code,
5002 machine_mode mode,
5003 rtx op0, rtx op1)
5005 rtx tem;
5006 rtx trueop0;
5007 rtx trueop1;
5009 gcc_assert (mode != VOIDmode
5010 || (GET_MODE (op0) == VOIDmode
5011 && GET_MODE (op1) == VOIDmode));
5013 /* If op0 is a compare, extract the comparison arguments from it. */
5014 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
5016 op1 = XEXP (op0, 1);
5017 op0 = XEXP (op0, 0);
5019 if (GET_MODE (op0) != VOIDmode)
5020 mode = GET_MODE (op0);
5021 else if (GET_MODE (op1) != VOIDmode)
5022 mode = GET_MODE (op1);
5023 else
5024 return 0;
5027 /* We can't simplify MODE_CC values since we don't know what the
5028 actual comparison is. */
5029 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
5030 return 0;
5032 /* Make sure the constant is second. */
5033 if (swap_commutative_operands_p (op0, op1))
5035 std::swap (op0, op1);
5036 code = swap_condition (code);
5039 trueop0 = avoid_constant_pool_reference (op0);
5040 trueop1 = avoid_constant_pool_reference (op1);
5042 /* For integer comparisons of A and B, we may be able to simplify A - B
5043 and then simplify a comparison of that with zero. If A and B are both
5044 either a register or a CONST_INT, this can't help; testing for these
5045 cases will prevent infinite recursion here and speed things up.
5047 We can only do this for EQ and NE comparisons, as otherwise we may
5048 lose or introduce overflow that we cannot disregard as undefined, since
5049 we do not know the signedness of the operation on either the left or
5050 the right hand side of the comparison. */
5052 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
5053 && (code == EQ || code == NE)
5054 && ! ((REG_P (op0) || CONST_INT_P (trueop0))
5055 && (REG_P (op1) || CONST_INT_P (trueop1)))
5056 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
5057 /* We cannot do this if tem is a nonzero address. */
5058 && ! nonzero_address_p (tem))
5059 return simplify_const_relational_operation (signed_condition (code),
5060 mode, tem, const0_rtx);
5062 if (! HONOR_NANS (mode) && code == ORDERED)
5063 return const_true_rtx;
5065 if (! HONOR_NANS (mode) && code == UNORDERED)
5066 return const0_rtx;
5068 /* For modes without NaNs, if the two operands are equal, we know the
5069 result except if they have side-effects. Even with NaNs we know
5070 the result of unordered comparisons and, if signaling NaNs are
5071 irrelevant, also the result of LT/GT/LTGT. */
5072 if ((! HONOR_NANS (trueop0)
5073 || code == UNEQ || code == UNLE || code == UNGE
5074 || ((code == LT || code == GT || code == LTGT)
5075 && ! HONOR_SNANS (trueop0)))
5076 && rtx_equal_p (trueop0, trueop1)
5077 && ! side_effects_p (trueop0))
5078 return comparison_result (code, CMP_EQ);
5080 /* If the operands are floating-point constants, see if we can fold
5081 the result. */
5082 if (CONST_DOUBLE_AS_FLOAT_P (trueop0)
5083 && CONST_DOUBLE_AS_FLOAT_P (trueop1)
5084 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
5086 const REAL_VALUE_TYPE *d0 = CONST_DOUBLE_REAL_VALUE (trueop0);
5087 const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);
5089 /* Comparisons are unordered iff at least one of the values is NaN. */
5090 if (REAL_VALUE_ISNAN (*d0) || REAL_VALUE_ISNAN (*d1))
5091 switch (code)
5093 case UNEQ:
5094 case UNLT:
5095 case UNGT:
5096 case UNLE:
5097 case UNGE:
5098 case NE:
5099 case UNORDERED:
5100 return const_true_rtx;
5101 case EQ:
5102 case LT:
5103 case GT:
5104 case LE:
5105 case GE:
5106 case LTGT:
5107 case ORDERED:
5108 return const0_rtx;
5109 default:
5110 return 0;
5113 return comparison_result (code,
5114 (real_equal (d0, d1) ? CMP_EQ :
5115 real_less (d0, d1) ? CMP_LT : CMP_GT));
5118 /* Otherwise, see if the operands are both integers. */
5119 if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
5120 && CONST_SCALAR_INT_P (trueop0) && CONST_SCALAR_INT_P (trueop1))
5122 /* It would be nice if we really had a mode here. However, the
5123 largest int representable on the target is as good as
5124 infinite. */
5125 machine_mode cmode = (mode == VOIDmode) ? MAX_MODE_INT : mode;
5126 rtx_mode_t ptrueop0 = rtx_mode_t (trueop0, cmode);
5127 rtx_mode_t ptrueop1 = rtx_mode_t (trueop1, cmode);
5129 if (wi::eq_p (ptrueop0, ptrueop1))
5130 return comparison_result (code, CMP_EQ);
5131 else
5133 int cr = wi::lts_p (ptrueop0, ptrueop1) ? CMP_LT : CMP_GT;
5134 cr |= wi::ltu_p (ptrueop0, ptrueop1) ? CMP_LTU : CMP_GTU;
5135 return comparison_result (code, cr);
5139 /* Optimize comparisons with upper and lower bounds. */
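/* A sketch of the strategy below, added for exposition: derive
   conservative bounds [MMIN, MMAX] for TRUEOP0 from nonzero_bits and
   num_sign_bit_copies, then fold the comparison whenever the constant
   TRUEOP1 lies outside the range in which the result could still vary
   (e.g. (gtu x C) is known false once C >= MMAX).  */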
5140 scalar_int_mode int_mode;
5141 if (CONST_INT_P (trueop1)
5142 && is_a <scalar_int_mode> (mode, &int_mode)
5143 && HWI_COMPUTABLE_MODE_P (int_mode)
5144 && !side_effects_p (trueop0))
5146 int sign;
5147 unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, int_mode);
5148 HOST_WIDE_INT val = INTVAL (trueop1);
5149 HOST_WIDE_INT mmin, mmax;
5151 if (code == GEU
5152 || code == LEU
5153 || code == GTU
5154 || code == LTU)
5155 sign = 0;
5156 else
5157 sign = 1;
5159 /* Get a reduced range if the sign bit is zero. */
5160 if (nonzero <= (GET_MODE_MASK (int_mode) >> 1))
5162 mmin = 0;
5163 mmax = nonzero;
5165 else
5167 rtx mmin_rtx, mmax_rtx;
5168 get_mode_bounds (int_mode, sign, int_mode, &mmin_rtx, &mmax_rtx);
5170 mmin = INTVAL (mmin_rtx);
5171 mmax = INTVAL (mmax_rtx);
5172 if (sign)
5174 unsigned int sign_copies
5175 = num_sign_bit_copies (trueop0, int_mode);
5177 mmin >>= (sign_copies - 1);
5178 mmax >>= (sign_copies - 1);
5182 switch (code)
5184 /* x >= y is always true for y <= mmin, always false for y > mmax. */
5185 case GEU:
5186 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
5187 return const_true_rtx;
5188 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
5189 return const0_rtx;
5190 break;
5191 case GE:
5192 if (val <= mmin)
5193 return const_true_rtx;
5194 if (val > mmax)
5195 return const0_rtx;
5196 break;
5198 /* x <= y is always true for y >= mmax, always false for y < mmin. */
5199 case LEU:
5200 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
5201 return const_true_rtx;
5202 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
5203 return const0_rtx;
5204 break;
5205 case LE:
5206 if (val >= mmax)
5207 return const_true_rtx;
5208 if (val < mmin)
5209 return const0_rtx;
5210 break;
5212 case EQ:
5213 /* x == y is always false for y out of range. */
5214 if (val < mmin || val > mmax)
5215 return const0_rtx;
5216 break;
5218 /* x > y is always false for y >= mmax, always true for y < mmin. */
5219 case GTU:
5220 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
5221 return const0_rtx;
5222 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
5223 return const_true_rtx;
5224 break;
5225 case GT:
5226 if (val >= mmax)
5227 return const0_rtx;
5228 if (val < mmin)
5229 return const_true_rtx;
5230 break;
5232 /* x < y is always false for y <= mmin, always true for y > mmax. */
5233 case LTU:
5234 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
5235 return const0_rtx;
5236 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
5237 return const_true_rtx;
5238 break;
5239 case LT:
5240 if (val <= mmin)
5241 return const0_rtx;
5242 if (val > mmax)
5243 return const_true_rtx;
5244 break;
5246 case NE:
5247 /* x != y is always true for y out of range. */
5248 if (val < mmin || val > mmax)
5249 return const_true_rtx;
5250 break;
5252 default:
5253 break;
5257 /* Optimize integer comparisons with zero. */
5258 if (is_a <scalar_int_mode> (mode, &int_mode)
5259 && trueop1 == const0_rtx
5260 && !side_effects_p (trueop0))
5262 /* Some addresses are known to be nonzero. We don't know
5263 their sign, but equality comparisons are known. */
5264 if (nonzero_address_p (trueop0))
5266 if (code == EQ || code == LEU)
5267 return const0_rtx;
5268 if (code == NE || code == GTU)
5269 return const_true_rtx;
5272 /* See if the first operand is an IOR with a constant. If so, we
5273 may be able to determine the result of this comparison. */
5274 if (GET_CODE (op0) == IOR)
5276 rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
5277 if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
5279 int sign_bitnum = GET_MODE_PRECISION (int_mode) - 1;
5280 int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
5281 && (UINTVAL (inner_const)
5282 & (HOST_WIDE_INT_1U
5283 << sign_bitnum)));
5285 switch (code)
5287 case EQ:
5288 case LEU:
5289 return const0_rtx;
5290 case NE:
5291 case GTU:
5292 return const_true_rtx;
5293 case LT:
5294 case LE:
5295 if (has_sign)
5296 return const_true_rtx;
5297 break;
5298 case GT:
5299 case GE:
5300 if (has_sign)
5301 return const0_rtx;
5302 break;
5303 default:
5304 break;
5310 /* Optimize comparison of ABS with zero. */
5311 if (trueop1 == CONST0_RTX (mode) && !side_effects_p (trueop0)
5312 && (GET_CODE (trueop0) == ABS
5313 || (GET_CODE (trueop0) == FLOAT_EXTEND
5314 && GET_CODE (XEXP (trueop0, 0)) == ABS)))
5316 switch (code)
5318 case LT:
5319 /* Optimize abs(x) < 0.0. */
5320 if (!INTEGRAL_MODE_P (mode) && !HONOR_SNANS (mode))
5321 return const0_rtx;
5322 break;
5324 case GE:
5325 /* Optimize abs(x) >= 0.0. */
5326 if (!INTEGRAL_MODE_P (mode) && !HONOR_NANS (mode))
5327 return const_true_rtx;
5328 break;
5330 case UNGE:
5331 /* Optimize ! (abs(x) < 0.0). */
5332 return const_true_rtx;
5334 default:
5335 break;
5339 return 0;
5342 /* Recognize expressions of the form (X CMP 0) ? VAL : OP (X),
5343 where OP is CLZ or CTZ and VAL is the value from CLZ_DEFINED_VALUE_AT_ZERO
5344 or CTZ_DEFINED_VALUE_AT_ZERO respectively; return OP (X) if the expression
5345 can be simplified to that, or NULL_RTX if not.
5346 Assume X is compared against zero with CMP_CODE, and that the true
5347 arm is TRUE_VAL and the false arm is FALSE_VAL. */
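/* Illustrative example (assuming a hypothetical target where
   CLZ_DEFINED_VALUE_AT_ZERO reports a defined value of 32 for SImode):
     (if_then_else (eq x (const_int 0)) (const_int 32) (clz:SI x))
   simplifies to (clz:SI x), because the CLZ value at zero already
   matches the constant arm.  */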
5349 static rtx
5350 simplify_cond_clz_ctz (rtx x, rtx_code cmp_code, rtx true_val, rtx false_val)
5352 if (cmp_code != EQ && cmp_code != NE)
5353 return NULL_RTX;
5355 /* Result on X == 0 and X != 0, respectively. */
5356 rtx on_zero, on_nonzero;
5357 if (cmp_code == EQ)
5359 on_zero = true_val;
5360 on_nonzero = false_val;
5362 else
5364 on_zero = false_val;
5365 on_nonzero = true_val;
5368 rtx_code op_code = GET_CODE (on_nonzero);
5369 if ((op_code != CLZ && op_code != CTZ)
5370 || !rtx_equal_p (XEXP (on_nonzero, 0), x)
5371 || !CONST_INT_P (on_zero))
5372 return NULL_RTX;
5374 HOST_WIDE_INT op_val;
5375 scalar_int_mode mode ATTRIBUTE_UNUSED
5376 = as_a <scalar_int_mode> (GET_MODE (XEXP (on_nonzero, 0)));
5377 if (((op_code == CLZ && CLZ_DEFINED_VALUE_AT_ZERO (mode, op_val))
5378 || (op_code == CTZ && CTZ_DEFINED_VALUE_AT_ZERO (mode, op_val)))
5379 && op_val == INTVAL (on_zero))
5380 return on_nonzero;
5382 return NULL_RTX;
5386 /* Simplify CODE, an operation with result mode MODE and three operands,
5387 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
5388 a constant. Return 0 if no simplification is possible. */
5391 simplify_ternary_operation (enum rtx_code code, machine_mode mode,
5392 machine_mode op0_mode, rtx op0, rtx op1,
5393 rtx op2)
5395 bool any_change = false;
5396 rtx tem, trueop2;
5397 scalar_int_mode int_mode, int_op0_mode;
5399 switch (code)
5401 case FMA:
5402 /* Simplify negations around the multiplication. */
5403 /* -a * -b + c => a * b + c. */
5404 if (GET_CODE (op0) == NEG)
5406 tem = simplify_unary_operation (NEG, mode, op1, mode);
5407 if (tem)
5408 op1 = tem, op0 = XEXP (op0, 0), any_change = true;
5410 else if (GET_CODE (op1) == NEG)
5412 tem = simplify_unary_operation (NEG, mode, op0, mode);
5413 if (tem)
5414 op0 = tem, op1 = XEXP (op1, 0), any_change = true;
5417 /* Canonicalize the two multiplication operands. */
5418 /* a * -b + c => -b * a + c. */
5419 if (swap_commutative_operands_p (op0, op1))
5420 std::swap (op0, op1), any_change = true;
5422 if (any_change)
5423 return gen_rtx_FMA (mode, op0, op1, op2);
5424 return NULL_RTX;
5426 case SIGN_EXTRACT:
5427 case ZERO_EXTRACT:
5428 if (CONST_INT_P (op0)
5429 && CONST_INT_P (op1)
5430 && CONST_INT_P (op2)
5431 && is_a <scalar_int_mode> (mode, &int_mode)
5432 && INTVAL (op1) + INTVAL (op2) <= GET_MODE_PRECISION (int_mode)
5433 && HWI_COMPUTABLE_MODE_P (int_mode))
5435 /* Extracting a bit-field from a constant */
5436 unsigned HOST_WIDE_INT val = UINTVAL (op0);
5437 HOST_WIDE_INT op1val = INTVAL (op1);
5438 HOST_WIDE_INT op2val = INTVAL (op2);
5439 if (!BITS_BIG_ENDIAN)
5440 val >>= op2val;
5441 else if (is_a <scalar_int_mode> (op0_mode, &int_op0_mode))
5442 val >>= GET_MODE_PRECISION (int_op0_mode) - op2val - op1val;
5443 else
5444 /* Not enough information to calculate the bit position. */
5445 break;
5447 if (HOST_BITS_PER_WIDE_INT != op1val)
5449 /* First zero-extend. */
5450 val &= (HOST_WIDE_INT_1U << op1val) - 1;
5451 /* If desired, propagate sign bit. */
5452 if (code == SIGN_EXTRACT
5453 && (val & (HOST_WIDE_INT_1U << (op1val - 1)))
5454 != 0)
5455 val |= ~ ((HOST_WIDE_INT_1U << op1val) - 1);
5458 return gen_int_mode (val, int_mode);
5460 break;
5462 case IF_THEN_ELSE:
5463 if (CONST_INT_P (op0))
5464 return op0 != const0_rtx ? op1 : op2;
5466 /* Convert c ? a : a into "a". */
5467 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
5468 return op1;
5470 /* Convert a != b ? a : b into "a". */
5471 if (GET_CODE (op0) == NE
5472 && ! side_effects_p (op0)
5473 && ! HONOR_NANS (mode)
5474 && ! HONOR_SIGNED_ZEROS (mode)
5475 && ((rtx_equal_p (XEXP (op0, 0), op1)
5476 && rtx_equal_p (XEXP (op0, 1), op2))
5477 || (rtx_equal_p (XEXP (op0, 0), op2)
5478 && rtx_equal_p (XEXP (op0, 1), op1))))
5479 return op1;
5481 /* Convert a == b ? a : b into "b". */
5482 if (GET_CODE (op0) == EQ
5483 && ! side_effects_p (op0)
5484 && ! HONOR_NANS (mode)
5485 && ! HONOR_SIGNED_ZEROS (mode)
5486 && ((rtx_equal_p (XEXP (op0, 0), op1)
5487 && rtx_equal_p (XEXP (op0, 1), op2))
5488 || (rtx_equal_p (XEXP (op0, 0), op2)
5489 && rtx_equal_p (XEXP (op0, 1), op1))))
5490 return op2;
5492 /* Convert (!c) != {0,...,0} ? a : b into
5493 c != {0,...,0} ? b : a for vector modes. */
5494 if (VECTOR_MODE_P (GET_MODE (op1))
5495 && GET_CODE (op0) == NE
5496 && GET_CODE (XEXP (op0, 0)) == NOT
5497 && GET_CODE (XEXP (op0, 1)) == CONST_VECTOR)
5499 rtx cv = XEXP (op0, 1);
5500 int nunits = CONST_VECTOR_NUNITS (cv);
5501 bool ok = true;
5502 for (int i = 0; i < nunits; ++i)
5503 if (CONST_VECTOR_ELT (cv, i) != const0_rtx)
5505 ok = false;
5506 break;
5508 if (ok)
5510 rtx new_op0 = gen_rtx_NE (GET_MODE (op0),
5511 XEXP (XEXP (op0, 0), 0),
5512 XEXP (op0, 1));
5513 rtx retval = gen_rtx_IF_THEN_ELSE (mode, new_op0, op2, op1);
5514 return retval;
5518 /* Convert x == 0 ? N : clz (x) into clz (x) when
5519 CLZ_DEFINED_VALUE_AT_ZERO is defined to N for the mode of x.
5520 Similarly for ctz (x). */
5521 if (COMPARISON_P (op0) && !side_effects_p (op0)
5522 && XEXP (op0, 1) == const0_rtx)
5524 rtx simplified
5525 = simplify_cond_clz_ctz (XEXP (op0, 0), GET_CODE (op0),
5526 op1, op2);
5527 if (simplified)
5528 return simplified;
5531 if (COMPARISON_P (op0) && ! side_effects_p (op0))
5533 machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
5534 ? GET_MODE (XEXP (op0, 1))
5535 : GET_MODE (XEXP (op0, 0)));
5536 rtx temp;
5538 /* Look for happy constants in op1 and op2. */
5539 if (CONST_INT_P (op1) && CONST_INT_P (op2))
5541 HOST_WIDE_INT t = INTVAL (op1);
5542 HOST_WIDE_INT f = INTVAL (op2);
5544 if (t == STORE_FLAG_VALUE && f == 0)
5545 code = GET_CODE (op0);
5546 else if (t == 0 && f == STORE_FLAG_VALUE)
5548 enum rtx_code tmp;
5549 tmp = reversed_comparison_code (op0, NULL);
5550 if (tmp == UNKNOWN)
5551 break;
5552 code = tmp;
5554 else
5555 break;
5557 return simplify_gen_relational (code, mode, cmp_mode,
5558 XEXP (op0, 0), XEXP (op0, 1));
5561 if (cmp_mode == VOIDmode)
5562 cmp_mode = op0_mode;
5563 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
5564 cmp_mode, XEXP (op0, 0),
5565 XEXP (op0, 1));
5567 /* See if any simplifications were possible. */
5568 if (temp)
5570 if (CONST_INT_P (temp))
5571 return temp == const0_rtx ? op2 : op1;
5572 else if (temp)
5573 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
5576 break;
5578 case VEC_MERGE:
5579 gcc_assert (GET_MODE (op0) == mode);
5580 gcc_assert (GET_MODE (op1) == mode);
5581 gcc_assert (VECTOR_MODE_P (mode));
5582 trueop2 = avoid_constant_pool_reference (op2);
5583 if (CONST_INT_P (trueop2))
5585 int elt_size = GET_MODE_UNIT_SIZE (mode);
5586 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
5587 unsigned HOST_WIDE_INT sel = UINTVAL (trueop2);
5588 unsigned HOST_WIDE_INT mask;
5589 if (n_elts == HOST_BITS_PER_WIDE_INT)
5590 mask = -1;
5591 else
5592 mask = (HOST_WIDE_INT_1U << n_elts) - 1;
5594 if (!(sel & mask) && !side_effects_p (op0))
5595 return op1;
5596 if ((sel & mask) == mask && !side_effects_p (op1))
5597 return op0;
5599 rtx trueop0 = avoid_constant_pool_reference (op0);
5600 rtx trueop1 = avoid_constant_pool_reference (op1);
5601 if (GET_CODE (trueop0) == CONST_VECTOR
5602 && GET_CODE (trueop1) == CONST_VECTOR)
5604 rtvec v = rtvec_alloc (n_elts);
5605 unsigned int i;
5607 for (i = 0; i < n_elts; i++)
5608 RTVEC_ELT (v, i) = ((sel & (HOST_WIDE_INT_1U << i))
5609 ? CONST_VECTOR_ELT (trueop0, i)
5610 : CONST_VECTOR_ELT (trueop1, i));
5611 return gen_rtx_CONST_VECTOR (mode, v);
5614 /* Replace (vec_merge (vec_merge a b m) c n) with (vec_merge b c n)
5615 if no element from a appears in the result. */
5616 if (GET_CODE (op0) == VEC_MERGE)
5618 tem = avoid_constant_pool_reference (XEXP (op0, 2));
5619 if (CONST_INT_P (tem))
5621 unsigned HOST_WIDE_INT sel0 = UINTVAL (tem);
5622 if (!(sel & sel0 & mask) && !side_effects_p (XEXP (op0, 0)))
5623 return simplify_gen_ternary (code, mode, mode,
5624 XEXP (op0, 1), op1, op2);
5625 if (!(sel & ~sel0 & mask) && !side_effects_p (XEXP (op0, 1)))
5626 return simplify_gen_ternary (code, mode, mode,
5627 XEXP (op0, 0), op1, op2);
5630 if (GET_CODE (op1) == VEC_MERGE)
5632 tem = avoid_constant_pool_reference (XEXP (op1, 2));
5633 if (CONST_INT_P (tem))
5635 unsigned HOST_WIDE_INT sel1 = UINTVAL (tem);
5636 if (!(~sel & sel1 & mask) && !side_effects_p (XEXP (op1, 0)))
5637 return simplify_gen_ternary (code, mode, mode,
5638 op0, XEXP (op1, 1), op2);
5639 if (!(~sel & ~sel1 & mask) && !side_effects_p (XEXP (op1, 1)))
5640 return simplify_gen_ternary (code, mode, mode,
5641 op0, XEXP (op1, 0), op2);
5645 /* Replace (vec_merge (vec_duplicate (vec_select a parallel (i))) a 1 << i)
5646 with a. */
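	  /* E.g. (vec_merge:V4SI
		    (vec_duplicate:V4SI
		      (vec_select:SI (reg:V4SI a) (parallel [(const_int 2)])))
		    (reg:V4SI a)
		    (const_int 4))
	     writes element 2 of A back into lane 2 of A, so it is just A.  */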
5647 if (GET_CODE (op0) == VEC_DUPLICATE
5648 && GET_CODE (XEXP (op0, 0)) == VEC_SELECT
5649 && GET_CODE (XEXP (XEXP (op0, 0), 1)) == PARALLEL
5650 && mode_nunits[GET_MODE (XEXP (op0, 0))] == 1)
5652 tem = XVECEXP ((XEXP (XEXP (op0, 0), 1)), 0, 0);
5653 if (CONST_INT_P (tem) && CONST_INT_P (op2))
5655 if (XEXP (XEXP (op0, 0), 0) == op1
5656 && UINTVAL (op2) == HOST_WIDE_INT_1U << UINTVAL (tem))
5657 return op1;
5662 if (rtx_equal_p (op0, op1)
5663 && !side_effects_p (op2) && !side_effects_p (op1))
5664 return op0;
5666 break;
5668 default:
5669 gcc_unreachable ();
5672 return 0;
5675 /* Evaluate a SUBREG of a CONST_INT or CONST_WIDE_INT or CONST_DOUBLE
5676 or CONST_FIXED or CONST_VECTOR, returning another CONST_INT or
5677 CONST_WIDE_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.
5679 Works by unpacking OP into a collection of 8-bit values
5680 represented as a little-endian array of 'unsigned char', selecting by BYTE,
5681 and then repacking them again for OUTERMODE. */
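/* As an illustration, evaluating (subreg:HI (const_int 0x12345678) 0) with
   SImode as INNERMODE on a little-endian target unpacks the constant into
   the byte array {0x78, 0x56, 0x34, 0x12}, starts at byte 0, and repacks the
   first two bytes into (const_int 0x5678); on a fully big-endian target,
   BYTE 0 names the most significant end and the result is
   (const_int 0x1234).  */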
5683 static rtx
5684 simplify_immed_subreg (machine_mode outermode, rtx op,
5685 machine_mode innermode, unsigned int byte)
5687 enum {
5688 value_bit = 8,
5689 value_mask = (1 << value_bit) - 1
5691 unsigned char value[MAX_BITSIZE_MODE_ANY_MODE / value_bit];
5692 int value_start;
5693 int i;
5694 int elem;
5696 int num_elem;
5697 rtx * elems;
5698 int elem_bitsize;
5699 rtx result_s = NULL;
5700 rtvec result_v = NULL;
5701 enum mode_class outer_class;
5702 scalar_mode outer_submode;
5703 int max_bitsize;
5705 /* Some ports misuse CCmode. */
5706 if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
5707 return op;
5709 /* We have no way to represent a complex constant at the rtl level. */
5710 if (COMPLEX_MODE_P (outermode))
5711 return NULL_RTX;
5713 /* We support any size mode. */
5714 max_bitsize = MAX (GET_MODE_BITSIZE (outermode),
5715 GET_MODE_BITSIZE (innermode));
5717 /* Unpack the value. */
5719 if (GET_CODE (op) == CONST_VECTOR)
5721 num_elem = CONST_VECTOR_NUNITS (op);
5722 elems = &CONST_VECTOR_ELT (op, 0);
5723 elem_bitsize = GET_MODE_UNIT_BITSIZE (innermode);
5725 else
5727 num_elem = 1;
5728 elems = &op;
5729 elem_bitsize = max_bitsize;
5731 /* If this assertion fails, it is too complicated; reducing value_bit may help. */
5732 gcc_assert (BITS_PER_UNIT % value_bit == 0);
5733 /* I don't know how to handle endianness of sub-units. */
5734 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
5736 for (elem = 0; elem < num_elem; elem++)
5738 unsigned char * vp;
5739 rtx el = elems[elem];
5741 /* Vectors are kept in target memory order. (This is probably
5742 a mistake.) */
5744 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5745 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5746 / BITS_PER_UNIT);
5747 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5748 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5749 unsigned bytele = (subword_byte % UNITS_PER_WORD
5750 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5751 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
5754 switch (GET_CODE (el))
5756 case CONST_INT:
5757 for (i = 0;
5758 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5759 i += value_bit)
5760 *vp++ = INTVAL (el) >> i;
5761 /* CONST_INTs are always logically sign-extended. */
5762 for (; i < elem_bitsize; i += value_bit)
5763 *vp++ = INTVAL (el) < 0 ? -1 : 0;
5764 break;
5766 case CONST_WIDE_INT:
5768 rtx_mode_t val = rtx_mode_t (el, GET_MODE_INNER (innermode));
5769 unsigned char extend = wi::sign_mask (val);
5770 int prec = wi::get_precision (val);
5772 for (i = 0; i < prec && i < elem_bitsize; i += value_bit)
5773 *vp++ = wi::extract_uhwi (val, i, value_bit);
5774 for (; i < elem_bitsize; i += value_bit)
5775 *vp++ = extend;
5777 break;
5779 case CONST_DOUBLE:
5780 if (TARGET_SUPPORTS_WIDE_INT == 0 && GET_MODE (el) == VOIDmode)
5782 unsigned char extend = 0;
5783 /* If this triggers, someone should have generated a
5784 CONST_INT instead. */
5785 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
5787 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5788 *vp++ = CONST_DOUBLE_LOW (el) >> i;
5789 while (i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize)
5791 *vp++
5792 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
5793 i += value_bit;
5796 if (CONST_DOUBLE_HIGH (el) >> (HOST_BITS_PER_WIDE_INT - 1))
5797 extend = -1;
5798 for (; i < elem_bitsize; i += value_bit)
5799 *vp++ = extend;
5801 else
5803 /* This is big enough for anything on the platform. */
5804 long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32];
5805 scalar_float_mode el_mode;
5807 el_mode = as_a <scalar_float_mode> (GET_MODE (el));
5808 int bitsize = GET_MODE_BITSIZE (el_mode);
5810 gcc_assert (bitsize <= elem_bitsize);
5811 gcc_assert (bitsize % value_bit == 0);
5813 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
5814 GET_MODE (el));
5816 /* real_to_target produces its result in words affected by
5817 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5818 and use WORDS_BIG_ENDIAN instead; see the documentation
5819 of SUBREG in rtl.texi. */
5820 for (i = 0; i < bitsize; i += value_bit)
5822 int ibase;
5823 if (WORDS_BIG_ENDIAN)
5824 ibase = bitsize - 1 - i;
5825 else
5826 ibase = i;
5827 *vp++ = tmp[ibase / 32] >> i % 32;
5830 /* It shouldn't matter what's done here, so fill it with
5831 zero. */
5832 for (; i < elem_bitsize; i += value_bit)
5833 *vp++ = 0;
5835 break;
5837 case CONST_FIXED:
5838 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
5840 for (i = 0; i < elem_bitsize; i += value_bit)
5841 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5843 else
5845 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5846 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5847 for (; i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize;
5848 i += value_bit)
5849 *vp++ = CONST_FIXED_VALUE_HIGH (el)
5850 >> (i - HOST_BITS_PER_WIDE_INT);
5851 for (; i < elem_bitsize; i += value_bit)
5852 *vp++ = 0;
5854 break;
5856 default:
5857 gcc_unreachable ();
5861 /* Now, pick the right byte to start with. */
5862 /* Renumber BYTE so that the least-significant byte is byte 0. A special
5863 case is paradoxical SUBREGs, which shouldn't be adjusted since they
5864 will already have offset 0. */
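  /* For instance, on a fully big-endian target, BYTE 2 of an SImode value
     taken in HImode addresses the least significant half, and the
     renumbering below maps it to 0.  */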
5865 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
5867 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
5868 - byte);
5869 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5870 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5871 byte = (subword_byte % UNITS_PER_WORD
5872 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5875 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
5876 so if it's become negative it will instead be very large.) */
5877 gcc_assert (byte < GET_MODE_SIZE (innermode));
5879 /* Convert from bytes to chunks of size value_bit. */
5880 value_start = byte * (BITS_PER_UNIT / value_bit);
5882 /* Re-pack the value. */
5883 num_elem = GET_MODE_NUNITS (outermode);
5885 if (VECTOR_MODE_P (outermode))
5887 result_v = rtvec_alloc (num_elem);
5888 elems = &RTVEC_ELT (result_v, 0);
5890 else
5891 elems = &result_s;
5893 outer_submode = GET_MODE_INNER (outermode);
5894 outer_class = GET_MODE_CLASS (outer_submode);
5895 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
5897 gcc_assert (elem_bitsize % value_bit == 0);
5898 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
5900 for (elem = 0; elem < num_elem; elem++)
5902 unsigned char *vp;
5904 /* Vectors are stored in target memory order. (This is probably
5905 a mistake.) */
5907 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5908 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5909 / BITS_PER_UNIT);
5910 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5911 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5912 unsigned bytele = (subword_byte % UNITS_PER_WORD
5913 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5914 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
5917 switch (outer_class)
5919 case MODE_INT:
5920 case MODE_PARTIAL_INT:
5922 int u;
5923 int base = 0;
5924 int units
5925 = (GET_MODE_BITSIZE (outer_submode) + HOST_BITS_PER_WIDE_INT - 1)
5926 / HOST_BITS_PER_WIDE_INT;
5927 HOST_WIDE_INT tmp[MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_WIDE_INT];
5928 wide_int r;
5930 if (GET_MODE_PRECISION (outer_submode) > MAX_BITSIZE_MODE_ANY_INT)
5931 return NULL_RTX;
5932 for (u = 0; u < units; u++)
5934 unsigned HOST_WIDE_INT buf = 0;
5935 for (i = 0;
5936 i < HOST_BITS_PER_WIDE_INT && base + i < elem_bitsize;
5937 i += value_bit)
5938 buf |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5940 tmp[u] = buf;
5941 base += HOST_BITS_PER_WIDE_INT;
5943 r = wide_int::from_array (tmp, units,
5944 GET_MODE_PRECISION (outer_submode));
5945 #if TARGET_SUPPORTS_WIDE_INT == 0
5946 /* Make sure r will fit into CONST_INT or CONST_DOUBLE. */
5947 if (wi::min_precision (r, SIGNED) > HOST_BITS_PER_DOUBLE_INT)
5948 return NULL_RTX;
5949 #endif
5950 elems[elem] = immed_wide_int_const (r, outer_submode);
5952 break;
5954 case MODE_FLOAT:
5955 case MODE_DECIMAL_FLOAT:
5957 REAL_VALUE_TYPE r;
5958 long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32] = { 0 };
5960 /* real_from_target wants its input in words affected by
5961 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5962 and use WORDS_BIG_ENDIAN instead; see the documentation
5963 of SUBREG in rtl.texi. */
5964 for (i = 0; i < elem_bitsize; i += value_bit)
5966 int ibase;
5967 if (WORDS_BIG_ENDIAN)
5968 ibase = elem_bitsize - 1 - i;
5969 else
5970 ibase = i;
5971 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
5974 real_from_target (&r, tmp, outer_submode);
5975 elems[elem] = const_double_from_real_value (r, outer_submode);
5977 break;
5979 case MODE_FRACT:
5980 case MODE_UFRACT:
5981 case MODE_ACCUM:
5982 case MODE_UACCUM:
5984 FIXED_VALUE_TYPE f;
5985 f.data.low = 0;
5986 f.data.high = 0;
5987 f.mode = outer_submode;
5989 for (i = 0;
5990 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5991 i += value_bit)
5992 f.data.low |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5993 for (; i < elem_bitsize; i += value_bit)
5994 f.data.high |= ((unsigned HOST_WIDE_INT)(*vp++ & value_mask)
5995 << (i - HOST_BITS_PER_WIDE_INT));
5997 elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
5999 break;
6001 default:
6002 gcc_unreachable ();
6005 if (VECTOR_MODE_P (outermode))
6006 return gen_rtx_CONST_VECTOR (outermode, result_v);
6007 else
6008 return result_s;
6011 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
6012 Return 0 if no simplifications are possible. */
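/* For example, on a little-endian target,
   simplify_subreg (QImode, GEN_INT (0x1234), HImode, 0) returns
   (const_int 0x34), while an expression that cannot be reduced makes the
   function return NULL_RTX.  */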
6014 simplify_subreg (machine_mode outermode, rtx op,
6015 machine_mode innermode, unsigned int byte)
6017 /* Little bit of sanity checking. */
6018 gcc_assert (innermode != VOIDmode);
6019 gcc_assert (outermode != VOIDmode);
6020 gcc_assert (innermode != BLKmode);
6021 gcc_assert (outermode != BLKmode);
6023 gcc_assert (GET_MODE (op) == innermode
6024 || GET_MODE (op) == VOIDmode);
6026 if ((byte % GET_MODE_SIZE (outermode)) != 0)
6027 return NULL_RTX;
6029 if (byte >= GET_MODE_SIZE (innermode))
6030 return NULL_RTX;
6032 if (outermode == innermode && !byte)
6033 return op;
6035 if (CONST_SCALAR_INT_P (op)
6036 || CONST_DOUBLE_AS_FLOAT_P (op)
6037 || GET_CODE (op) == CONST_FIXED
6038 || GET_CODE (op) == CONST_VECTOR)
6039 return simplify_immed_subreg (outermode, op, innermode, byte);
6041 /* Changing mode twice with SUBREG => just change it once,
6042 or not at all if changing back to OP's starting mode. */
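  /* For example, (subreg:SI (subreg:HI (reg:SI x) 0) 0) changes the mode back
     to where it started and collapses to (reg:SI x), while on a little-endian
     target (subreg:QI (subreg:HI (reg:SI x) 0) 0) folds to the single
     (subreg:QI (reg:SI x) 0).  */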
6043 if (GET_CODE (op) == SUBREG)
6045 machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
6046 rtx newx;
6048 if (outermode == innermostmode
6049 && byte == 0 && SUBREG_BYTE (op) == 0)
6050 return SUBREG_REG (op);
6052 /* Work out the memory offset of the final OUTERMODE value relative
6053 to the inner value of OP. */
6054 HOST_WIDE_INT mem_offset = subreg_memory_offset (outermode,
6055 innermode, byte);
6056 HOST_WIDE_INT op_mem_offset = subreg_memory_offset (op);
6057 HOST_WIDE_INT final_offset = mem_offset + op_mem_offset;
6059 /* See whether resulting subreg will be paradoxical. */
6060 if (!paradoxical_subreg_p (outermode, innermostmode))
6062 /* In nonparadoxical subregs we can't handle negative offsets. */
6063 if (final_offset < 0)
6064 return NULL_RTX;
6065 /* Bail out in case resulting subreg would be incorrect. */
6066 if (final_offset % GET_MODE_SIZE (outermode)
6067 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
6068 return NULL_RTX;
6070 else
6072 HOST_WIDE_INT required_offset
6073 = subreg_memory_offset (outermode, innermostmode, 0);
6074 if (final_offset != required_offset)
6075 return NULL_RTX;
6076 /* Paradoxical subregs always have byte offset 0. */
6077 final_offset = 0;
6080 /* Recurse for further possible simplifications. */
6081 newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
6082 final_offset);
6083 if (newx)
6084 return newx;
6085 if (validate_subreg (outermode, innermostmode,
6086 SUBREG_REG (op), final_offset))
6088 newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
6089 if (SUBREG_PROMOTED_VAR_P (op)
6090 && SUBREG_PROMOTED_SIGN (op) >= 0
6091 && GET_MODE_CLASS (outermode) == MODE_INT
6092 && IN_RANGE (GET_MODE_SIZE (outermode),
6093 GET_MODE_SIZE (innermode),
6094 GET_MODE_SIZE (innermostmode))
6095 && subreg_lowpart_p (newx))
6097 SUBREG_PROMOTED_VAR_P (newx) = 1;
6098 SUBREG_PROMOTED_SET (newx, SUBREG_PROMOTED_GET (op));
6100 return newx;
6102 return NULL_RTX;
6105 /* SUBREG of a hard register => just change the register number
6106 and/or mode. If the hard register is not valid in that mode,
6107 suppress this simplification. If the hard register is the stack,
6108 frame, or argument pointer, leave this as a SUBREG. */
6110 if (REG_P (op) && HARD_REGISTER_P (op))
6112 unsigned int regno, final_regno;
6114 regno = REGNO (op);
6115 final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
6116 if (HARD_REGISTER_NUM_P (final_regno))
6118 rtx x = gen_rtx_REG_offset (op, outermode, final_regno,
6119 subreg_memory_offset (outermode,
6120 innermode, byte));
6122 /* Propagate the original regno. We don't have any way to specify
6123 the offset inside the original regno, so do this only for the lowpart.
6124 The information is used only by alias analysis, which cannot
6125 grok partial registers anyway. */
6127 if (subreg_lowpart_offset (outermode, innermode) == byte)
6128 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
6129 return x;
6133 /* If we have a SUBREG of a register that we are replacing and we are
6134 replacing it with a MEM, make a new MEM and try replacing the
6135 SUBREG with it. Don't do this if the MEM has a mode-dependent address
6136 or if we would be widening it. */
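  /* For instance, (subreg:QI (mem:SI (reg:DI ptr)) 3) on a target with
     64-bit addresses, a non-mode-dependent address and a non-volatile MEM,
     becomes a QImode MEM at address ptr + 3.  */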
6138 if (MEM_P (op)
6139 && ! mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op))
6140 /* Allow splitting of volatile memory references in case we don't
6141 have an instruction to move the whole thing. */
6142 && (! MEM_VOLATILE_P (op)
6143 || ! have_insn_for (SET, innermode))
6144 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
6145 return adjust_address_nv (op, outermode, byte);
6147 /* Handle complex or vector values represented as CONCAT or VEC_CONCAT
6148 of two parts. */
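  /* For example, assuming a 4-byte SFmode, (subreg:SF (concat:SC re im) 4)
     selects the second part and simplifies to IM, while
     (subreg:SF (concat:SC re im) 0) simplifies to RE.  */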
6149 if (GET_CODE (op) == CONCAT
6150 || GET_CODE (op) == VEC_CONCAT)
6152 unsigned int part_size, final_offset;
6153 rtx part, res;
6155 machine_mode part_mode = GET_MODE (XEXP (op, 0));
6156 if (part_mode == VOIDmode)
6157 part_mode = GET_MODE_INNER (GET_MODE (op));
6158 part_size = GET_MODE_SIZE (part_mode);
6159 if (byte < part_size)
6161 part = XEXP (op, 0);
6162 final_offset = byte;
6164 else
6166 part = XEXP (op, 1);
6167 final_offset = byte - part_size;
6170 if (final_offset + GET_MODE_SIZE (outermode) > part_size)
6171 return NULL_RTX;
6173 part_mode = GET_MODE (part);
6174 if (part_mode == VOIDmode)
6175 part_mode = GET_MODE_INNER (GET_MODE (op));
6176 res = simplify_subreg (outermode, part, part_mode, final_offset);
6177 if (res)
6178 return res;
6179 if (validate_subreg (outermode, part_mode, part, final_offset))
6180 return gen_rtx_SUBREG (outermode, part, final_offset);
6181 return NULL_RTX;
6184 /* A SUBREG resulting from a zero extension may fold to zero if
6185 it extracts higher bits than the ZERO_EXTEND's source bits. */
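  /* For instance, on a little-endian target,
     (subreg:SI (zero_extend:DI (reg:SI x)) 4) reads only bits the extension
     set to zero, so it folds to (const_int 0).  */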
6186 if (GET_CODE (op) == ZERO_EXTEND && SCALAR_INT_MODE_P (innermode))
6188 unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
6189 if (bitpos >= GET_MODE_PRECISION (GET_MODE (XEXP (op, 0))))
6190 return CONST0_RTX (outermode);
6193 scalar_int_mode int_outermode, int_innermode;
6194 if (is_a <scalar_int_mode> (outermode, &int_outermode)
6195 && is_a <scalar_int_mode> (innermode, &int_innermode)
6196 && (GET_MODE_PRECISION (int_outermode)
6197 < GET_MODE_PRECISION (int_innermode))
6198 && byte == subreg_lowpart_offset (int_outermode, int_innermode))
6200 rtx tem = simplify_truncation (int_outermode, op, int_innermode);
6201 if (tem)
6202 return tem;
6205 return NULL_RTX;
6208 /* Make a SUBREG operation or equivalent if it folds. */
6211 simplify_gen_subreg (machine_mode outermode, rtx op,
6212 machine_mode innermode, unsigned int byte)
6214 rtx newx;
6216 newx = simplify_subreg (outermode, op, innermode, byte);
6217 if (newx)
6218 return newx;
6220 if (GET_CODE (op) == SUBREG
6221 || GET_CODE (op) == CONCAT
6222 || GET_MODE (op) == VOIDmode)
6223 return NULL_RTX;
6225 if (validate_subreg (outermode, innermode, op, byte))
6226 return gen_rtx_SUBREG (outermode, op, byte);
6228 return NULL_RTX;
6231 /* Generate a SUBREG that extracts the least significant part of EXPR (in mode
6232 INNER_MODE) as a value of mode OUTER_MODE. */
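/* For example, lowpart_subreg (QImode, x, SImode) uses byte offset 0 on a
   little-endian target and byte offset 3 on a fully big-endian one, so the
   result always refers to the least significant byte of X.  */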
6235 lowpart_subreg (machine_mode outer_mode, rtx expr,
6236 machine_mode inner_mode)
6238 return simplify_gen_subreg (outer_mode, expr, inner_mode,
6239 subreg_lowpart_offset (outer_mode, inner_mode));
6242 /* Simplify X, an rtx expression.
6244 Return the simplified expression or NULL if no simplifications
6245 were possible.
6247 This is the preferred entry point into the simplification routines;
6248 however, we still allow passes to call the more specific routines.
6250 Right now GCC has three (yes, three) major bodies of RTL simplification
6251 code that need to be unified.
6253 1. fold_rtx in cse.c. This code uses various CSE specific
6254 information to aid in RTL simplification.
6256 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
6257 it uses combine specific information to aid in RTL
6258 simplification.
6260 3. The routines in this file.
6263 Long term we want to only have one body of simplification code; to
6264 get to that state I recommend the following steps:
6266 1. Pore over fold_rtx & simplify_rtx and move any simplifications
6267 which do not depend on pass-specific state into these routines.
6269 2. As code is moved by #1, change fold_rtx & simplify_rtx to
6270 use this routine whenever possible.
6272 3. Allow for pass dependent state to be provided to these
6273 routines and add simplifications based on the pass dependent
6274 state. Remove code from cse.c & combine.c that becomes
6275 redundant/dead.
6277 It will take time, but ultimately the compiler will be easier to
6278 maintain and improve. It's totally silly that when we add a
6279 simplification it needs to be added to 4 places (3 for RTL
6280 simplification and 1 for tree simplification). */
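/* For instance, simplify_rtx applied to (plus:SI (const_int 2) (const_int 3))
   yields (const_int 5), while an expression with no applicable simplification
   yields NULL.  */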
6283 simplify_rtx (const_rtx x)
6285 const enum rtx_code code = GET_CODE (x);
6286 const machine_mode mode = GET_MODE (x);
6288 switch (GET_RTX_CLASS (code))
6290 case RTX_UNARY:
6291 return simplify_unary_operation (code, mode,
6292 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
6293 case RTX_COMM_ARITH:
6294 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
6295 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
6297 /* Fall through. */
6299 case RTX_BIN_ARITH:
6300 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
6302 case RTX_TERNARY:
6303 case RTX_BITFIELD_OPS:
6304 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
6305 XEXP (x, 0), XEXP (x, 1),
6306 XEXP (x, 2));
6308 case RTX_COMPARE:
6309 case RTX_COMM_COMPARE:
6310 return simplify_relational_operation (code, mode,
6311 ((GET_MODE (XEXP (x, 0))
6312 != VOIDmode)
6313 ? GET_MODE (XEXP (x, 0))
6314 : GET_MODE (XEXP (x, 1))),
6315 XEXP (x, 0),
6316 XEXP (x, 1));
6318 case RTX_EXTRA:
6319 if (code == SUBREG)
6320 return simplify_subreg (mode, SUBREG_REG (x),
6321 GET_MODE (SUBREG_REG (x)),
6322 SUBREG_BYTE (x));
6323 break;
6325 case RTX_OBJ:
6326 if (code == LO_SUM)
6328 /* Convert (lo_sum (high FOO) FOO) to FOO. */
6329 if (GET_CODE (XEXP (x, 0)) == HIGH
6330 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
6331 return XEXP (x, 1);
6333 break;
6335 default:
6336 break;
6338 return NULL;