[26/77] Use is_a <scalar_int_mode> in subreg/extract simplifications
[official-gcc.git] / gcc / simplify-rtx.c
blob 8473190b7a046ea7af3f5a2459501b94b149e614
1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987-2017 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "backend.h"
25 #include "target.h"
26 #include "rtl.h"
27 #include "tree.h"
28 #include "predict.h"
29 #include "memmodel.h"
30 #include "optabs.h"
31 #include "emit-rtl.h"
32 #include "recog.h"
33 #include "diagnostic-core.h"
34 #include "varasm.h"
35 #include "flags.h"
37 /* Simplification and canonicalization of RTL. */
39 /* Much code operates on (low, high) pairs; the low value is an
40 unsigned wide int, the high value a signed wide int. We
41 occasionally need to sign extend from low to high as if low were a
42 signed wide int. */
43 #define HWI_SIGN_EXTEND(low) \
44 ((((HOST_WIDE_INT) low) < 0) ? HOST_WIDE_INT_M1 : HOST_WIDE_INT_0)
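/* Illustrative note, not part of the original source: HWI_SIGN_EXTEND
   only produces the high half of such a pair -- all ones when LOW's
   sign bit is set, zero otherwise -- mirroring two's-complement sign
   extension of LOW into the high word.  */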
46 static rtx neg_const_int (machine_mode, const_rtx);
47 static bool plus_minus_operand_p (const_rtx);
48 static rtx simplify_plus_minus (enum rtx_code, machine_mode, rtx, rtx);
49 static rtx simplify_immed_subreg (machine_mode, rtx, machine_mode,
50 unsigned int);
51 static rtx simplify_associative_operation (enum rtx_code, machine_mode,
52 rtx, rtx);
53 static rtx simplify_relational_operation_1 (enum rtx_code, machine_mode,
54 machine_mode, rtx, rtx);
55 static rtx simplify_unary_operation_1 (enum rtx_code, machine_mode, rtx);
56 static rtx simplify_binary_operation_1 (enum rtx_code, machine_mode,
57 rtx, rtx, rtx, rtx);
59 /* Negate a CONST_INT rtx. */
60 static rtx
61 neg_const_int (machine_mode mode, const_rtx i)
63 unsigned HOST_WIDE_INT val = -UINTVAL (i);
65 if (GET_MODE_PRECISION (mode) > HOST_BITS_PER_WIDE_INT
66 && val == UINTVAL (i))
67 return simplify_const_unary_operation (NEG, mode, CONST_CAST_RTX (i),
68 mode);
69 return gen_int_mode (val, mode);
72 /* Test whether expression, X, is an immediate constant that represents
73 the most significant bit of machine mode MODE. */
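/* Illustrative example (added): in QImode the only value that satisfies
   this test is 0x80, and in SImode only 0x80000000.  */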
75 bool
76 mode_signbit_p (machine_mode mode, const_rtx x)
78 unsigned HOST_WIDE_INT val;
79 unsigned int width;
80 scalar_int_mode int_mode;
82 if (!is_int_mode (mode, &int_mode))
83 return false;
85 width = GET_MODE_PRECISION (int_mode);
86 if (width == 0)
87 return false;
89 if (width <= HOST_BITS_PER_WIDE_INT
90 && CONST_INT_P (x))
91 val = INTVAL (x);
92 #if TARGET_SUPPORTS_WIDE_INT
93 else if (CONST_WIDE_INT_P (x))
95 unsigned int i;
96 unsigned int elts = CONST_WIDE_INT_NUNITS (x);
97 if (elts != (width + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT)
98 return false;
99 for (i = 0; i < elts - 1; i++)
100 if (CONST_WIDE_INT_ELT (x, i) != 0)
101 return false;
102 val = CONST_WIDE_INT_ELT (x, elts - 1);
103 width %= HOST_BITS_PER_WIDE_INT;
104 if (width == 0)
105 width = HOST_BITS_PER_WIDE_INT;
107 #else
108 else if (width <= HOST_BITS_PER_DOUBLE_INT
109 && CONST_DOUBLE_AS_INT_P (x)
110 && CONST_DOUBLE_LOW (x) == 0)
112 val = CONST_DOUBLE_HIGH (x);
113 width -= HOST_BITS_PER_WIDE_INT;
115 #endif
116 else
117 /* X is not an integer constant. */
118 return false;
120 if (width < HOST_BITS_PER_WIDE_INT)
121 val &= (HOST_WIDE_INT_1U << width) - 1;
122 return val == (HOST_WIDE_INT_1U << (width - 1));
125 /* Test whether VAL is equal to the most significant bit of mode MODE
126 (after masking with the mode mask of MODE). Returns false if the
127 precision of MODE is too large to handle. */
129 bool
130 val_signbit_p (machine_mode mode, unsigned HOST_WIDE_INT val)
132 unsigned int width;
133 scalar_int_mode int_mode;
135 if (!is_int_mode (mode, &int_mode))
136 return false;
138 width = GET_MODE_PRECISION (int_mode);
139 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
140 return false;
142 val &= GET_MODE_MASK (int_mode);
143 return val == (HOST_WIDE_INT_1U << (width - 1));
146 /* Test whether the most significant bit of mode MODE is set in VAL.
147 Returns false if the precision of MODE is too large to handle. */
148 bool
149 val_signbit_known_set_p (machine_mode mode, unsigned HOST_WIDE_INT val)
151 unsigned int width;
153 scalar_int_mode int_mode;
154 if (!is_int_mode (mode, &int_mode))
155 return false;
157 width = GET_MODE_PRECISION (int_mode);
158 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
159 return false;
161 val &= HOST_WIDE_INT_1U << (width - 1);
162 return val != 0;
165 /* Test whether the most significant bit of mode MODE is clear in VAL.
166 Returns false if the precision of MODE is too large to handle. */
167 bool
168 val_signbit_known_clear_p (machine_mode mode, unsigned HOST_WIDE_INT val)
170 unsigned int width;
172 scalar_int_mode int_mode;
173 if (!is_int_mode (mode, &int_mode))
174 return false;
176 width = GET_MODE_PRECISION (int_mode);
177 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
178 return false;
180 val &= HOST_WIDE_INT_1U << (width - 1);
181 return val == 0;
184 /* Make a binary operation by properly ordering the operands and
185 seeing if the expression folds. */
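/* Usage sketch (illustrative, not in the original): simplify_gen_binary
   (PLUS, SImode, x, const0_rtx) should fold straight to X, while operands
   that do not fold yield a freshly built (plus:SI x y).  */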
188 simplify_gen_binary (enum rtx_code code, machine_mode mode, rtx op0,
189 rtx op1)
191 rtx tem;
193 /* If this simplifies, do it. */
194 tem = simplify_binary_operation (code, mode, op0, op1);
195 if (tem)
196 return tem;
198 /* Put complex operands first and constants second if commutative. */
199 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
200 && swap_commutative_operands_p (op0, op1))
201 std::swap (op0, op1);
203 return gen_rtx_fmt_ee (code, mode, op0, op1);
206 /* If X is a MEM referencing the constant pool, return the real value.
207 Otherwise return X. */
209 avoid_constant_pool_reference (rtx x)
211 rtx c, tmp, addr;
212 machine_mode cmode;
213 HOST_WIDE_INT offset = 0;
215 switch (GET_CODE (x))
217 case MEM:
218 break;
220 case FLOAT_EXTEND:
221 /* Handle float extensions of constant pool references. */
222 tmp = XEXP (x, 0);
223 c = avoid_constant_pool_reference (tmp);
224 if (c != tmp && CONST_DOUBLE_AS_FLOAT_P (c))
225 return const_double_from_real_value (*CONST_DOUBLE_REAL_VALUE (c),
226 GET_MODE (x));
227 return x;
229 default:
230 return x;
233 if (GET_MODE (x) == BLKmode)
234 return x;
236 addr = XEXP (x, 0);
238 /* Call target hook to avoid the effects of -fpic etc.... */
239 addr = targetm.delegitimize_address (addr);
241 /* Split the address into a base and integer offset. */
242 if (GET_CODE (addr) == CONST
243 && GET_CODE (XEXP (addr, 0)) == PLUS
244 && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
246 offset = INTVAL (XEXP (XEXP (addr, 0), 1));
247 addr = XEXP (XEXP (addr, 0), 0);
250 if (GET_CODE (addr) == LO_SUM)
251 addr = XEXP (addr, 1);
253 /* If this is a constant pool reference, we can turn it into its
254 constant and hope that simplifications happen. */
255 if (GET_CODE (addr) == SYMBOL_REF
256 && CONSTANT_POOL_ADDRESS_P (addr))
258 c = get_pool_constant (addr);
259 cmode = get_pool_mode (addr);
261 /* If we're accessing the constant in a different mode than it was
262 originally stored, attempt to fix that up via subreg simplifications.
263 If that fails we have no choice but to return the original memory. */
264 if (offset == 0 && cmode == GET_MODE (x))
265 return c;
266 else if (offset >= 0 && offset < GET_MODE_SIZE (cmode))
268 rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
269 if (tem && CONSTANT_P (tem))
270 return tem;
274 return x;
277 /* Simplify a MEM based on its attributes. This is the default
278 delegitimize_address target hook, and it's recommended that every
279 overrider call it. */
282 delegitimize_mem_from_attrs (rtx x)
284 /* MEMs without MEM_OFFSETs may have been offset, so we can't just
285 use their base addresses as equivalent. */
286 if (MEM_P (x)
287 && MEM_EXPR (x)
288 && MEM_OFFSET_KNOWN_P (x))
290 tree decl = MEM_EXPR (x);
291 machine_mode mode = GET_MODE (x);
292 HOST_WIDE_INT offset = 0;
294 switch (TREE_CODE (decl))
296 default:
297 decl = NULL;
298 break;
300 case VAR_DECL:
301 break;
303 case ARRAY_REF:
304 case ARRAY_RANGE_REF:
305 case COMPONENT_REF:
306 case BIT_FIELD_REF:
307 case REALPART_EXPR:
308 case IMAGPART_EXPR:
309 case VIEW_CONVERT_EXPR:
311 HOST_WIDE_INT bitsize, bitpos;
312 tree toffset;
313 int unsignedp, reversep, volatilep = 0;
315 decl
316 = get_inner_reference (decl, &bitsize, &bitpos, &toffset, &mode,
317 &unsignedp, &reversep, &volatilep);
318 if (bitsize != GET_MODE_BITSIZE (mode)
319 || (bitpos % BITS_PER_UNIT)
320 || (toffset && !tree_fits_shwi_p (toffset)))
321 decl = NULL;
322 else
324 offset += bitpos / BITS_PER_UNIT;
325 if (toffset)
326 offset += tree_to_shwi (toffset);
328 break;
332 if (decl
333 && mode == GET_MODE (x)
334 && VAR_P (decl)
335 && (TREE_STATIC (decl)
336 || DECL_THREAD_LOCAL_P (decl))
337 && DECL_RTL_SET_P (decl)
338 && MEM_P (DECL_RTL (decl)))
340 rtx newx;
342 offset += MEM_OFFSET (x);
344 newx = DECL_RTL (decl);
346 if (MEM_P (newx))
348 rtx n = XEXP (newx, 0), o = XEXP (x, 0);
350 /* Avoid creating a new MEM needlessly if we already had
351 the same address. We do if there's no OFFSET and the
352 old address X is identical to NEWX, or if X is of the
353 form (plus NEWX OFFSET), or the NEWX is of the form
354 (plus Y (const_int Z)) and X is that with the offset
355 added: (plus Y (const_int Z+OFFSET)). */
356 if (!((offset == 0
357 || (GET_CODE (o) == PLUS
358 && GET_CODE (XEXP (o, 1)) == CONST_INT
359 && (offset == INTVAL (XEXP (o, 1))
360 || (GET_CODE (n) == PLUS
361 && GET_CODE (XEXP (n, 1)) == CONST_INT
362 && (INTVAL (XEXP (n, 1)) + offset
363 == INTVAL (XEXP (o, 1)))
364 && (n = XEXP (n, 0))))
365 && (o = XEXP (o, 0))))
366 && rtx_equal_p (o, n)))
367 x = adjust_address_nv (newx, mode, offset);
369 else if (GET_MODE (x) == GET_MODE (newx)
370 && offset == 0)
371 x = newx;
375 return x;
378 /* Make a unary operation by first seeing if it folds and otherwise making
379 the specified operation. */
382 simplify_gen_unary (enum rtx_code code, machine_mode mode, rtx op,
383 machine_mode op_mode)
385 rtx tem;
387 /* If this simplifies, use it. */
388 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
389 return tem;
391 return gen_rtx_fmt_e (code, mode, op);
394 /* Likewise for ternary operations. */
397 simplify_gen_ternary (enum rtx_code code, machine_mode mode,
398 machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
400 rtx tem;
402 /* If this simplifies, use it. */
403 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
404 op0, op1, op2)))
405 return tem;
407 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
410 /* Likewise, for relational operations.
411 CMP_MODE specifies mode comparison is done in. */
414 simplify_gen_relational (enum rtx_code code, machine_mode mode,
415 machine_mode cmp_mode, rtx op0, rtx op1)
417 rtx tem;
419 if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
420 op0, op1)))
421 return tem;
423 return gen_rtx_fmt_ee (code, mode, op0, op1);
426 /* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
427 and simplify the result. If FN is non-NULL, call this callback on each
428 X, if it returns non-NULL, replace X with its return value and simplify the
429 result. */
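/* Usage sketch (illustrative): simplify_replace_rtx, defined below, is the
   common entry point; it passes NEW_RTX as DATA with FN == NULL, so every
   occurrence of OLD_RTX in X is replaced by a copy of NEW_RTX and the
   result re-simplified.  */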
432 simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
433 rtx (*fn) (rtx, const_rtx, void *), void *data)
435 enum rtx_code code = GET_CODE (x);
436 machine_mode mode = GET_MODE (x);
437 machine_mode op_mode;
438 const char *fmt;
439 rtx op0, op1, op2, newx, op;
440 rtvec vec, newvec;
441 int i, j;
443 if (__builtin_expect (fn != NULL, 0))
445 newx = fn (x, old_rtx, data);
446 if (newx)
447 return newx;
449 else if (rtx_equal_p (x, old_rtx))
450 return copy_rtx ((rtx) data);
452 switch (GET_RTX_CLASS (code))
454 case RTX_UNARY:
455 op0 = XEXP (x, 0);
456 op_mode = GET_MODE (op0);
457 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
458 if (op0 == XEXP (x, 0))
459 return x;
460 return simplify_gen_unary (code, mode, op0, op_mode);
462 case RTX_BIN_ARITH:
463 case RTX_COMM_ARITH:
464 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
465 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
466 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
467 return x;
468 return simplify_gen_binary (code, mode, op0, op1);
470 case RTX_COMPARE:
471 case RTX_COMM_COMPARE:
472 op0 = XEXP (x, 0);
473 op1 = XEXP (x, 1);
474 op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
475 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
476 op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
477 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
478 return x;
479 return simplify_gen_relational (code, mode, op_mode, op0, op1);
481 case RTX_TERNARY:
482 case RTX_BITFIELD_OPS:
483 op0 = XEXP (x, 0);
484 op_mode = GET_MODE (op0);
485 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
486 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
487 op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
488 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
489 return x;
490 if (op_mode == VOIDmode)
491 op_mode = GET_MODE (op0);
492 return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
494 case RTX_EXTRA:
495 if (code == SUBREG)
497 op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
498 if (op0 == SUBREG_REG (x))
499 return x;
500 op0 = simplify_gen_subreg (GET_MODE (x), op0,
501 GET_MODE (SUBREG_REG (x)),
502 SUBREG_BYTE (x));
503 return op0 ? op0 : x;
505 break;
507 case RTX_OBJ:
508 if (code == MEM)
510 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
511 if (op0 == XEXP (x, 0))
512 return x;
513 return replace_equiv_address_nv (x, op0);
515 else if (code == LO_SUM)
517 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
518 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
520 /* (lo_sum (high x) y) -> y where x and y have the same base. */
521 if (GET_CODE (op0) == HIGH)
523 rtx base0, base1, offset0, offset1;
524 split_const (XEXP (op0, 0), &base0, &offset0);
525 split_const (op1, &base1, &offset1);
526 if (rtx_equal_p (base0, base1))
527 return op1;
530 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
531 return x;
532 return gen_rtx_LO_SUM (mode, op0, op1);
534 break;
536 default:
537 break;
540 newx = x;
541 fmt = GET_RTX_FORMAT (code);
542 for (i = 0; fmt[i]; i++)
543 switch (fmt[i])
545 case 'E':
546 vec = XVEC (x, i);
547 newvec = XVEC (newx, i);
548 for (j = 0; j < GET_NUM_ELEM (vec); j++)
550 op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
551 old_rtx, fn, data);
552 if (op != RTVEC_ELT (vec, j))
554 if (newvec == vec)
556 newvec = shallow_copy_rtvec (vec);
557 if (x == newx)
558 newx = shallow_copy_rtx (x);
559 XVEC (newx, i) = newvec;
561 RTVEC_ELT (newvec, j) = op;
564 break;
566 case 'e':
567 if (XEXP (x, i))
569 op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
570 if (op != XEXP (x, i))
572 if (x == newx)
573 newx = shallow_copy_rtx (x);
574 XEXP (newx, i) = op;
577 break;
579 return newx;
582 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
583 resulting RTX. Return a new RTX which is as simplified as possible. */
586 simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
588 return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
591 /* Try to simplify a MODE truncation of OP, which has OP_MODE.
592 Only handle cases where the truncated value is inherently an rvalue.
594 RTL provides two ways of truncating a value:
596 1. a lowpart subreg. This form is only a truncation when both
597 the outer and inner modes (here MODE and OP_MODE respectively)
598 are scalar integers, and only then when the subreg is used as
599 an rvalue.
601 It is only valid to form such truncating subregs if the
602 truncation requires no action by the target. The onus for
603 proving this is on the creator of the subreg -- e.g. the
604 caller to simplify_subreg or simplify_gen_subreg -- and typically
605 involves either TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode.
607 2. a TRUNCATE. This form handles both scalar and compound integers.
609 The first form is preferred where valid. However, the TRUNCATE
610 handling in simplify_unary_operation turns the second form into the
611 first form when TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode allow,
612 so it is generally safe to form rvalue truncations using:
614 simplify_gen_unary (TRUNCATE, ...)
616 and leave simplify_unary_operation to work out which representation
617 should be used.
619 Because of the proof requirements on (1), simplify_truncation must
620 also use simplify_gen_unary (TRUNCATE, ...) to truncate parts of OP,
621 regardless of whether the outer truncation came from a SUBREG or a
622 TRUNCATE. For example, if the caller has proven that an SImode
623 truncation of:
625 (and:DI X Y)
627 is a no-op and can be represented as a subreg, it does not follow
628 that SImode truncations of X and Y are also no-ops. On a target
629 like 64-bit MIPS that requires SImode values to be stored in
630 sign-extended form, an SImode truncation of:
632 (and:DI (reg:DI X) (const_int 63))
634 is trivially a no-op because only the lower 6 bits can be set.
635 However, X is still an arbitrary 64-bit number and so we cannot
636 assume that truncating it too is a no-op. */
638 static rtx
639 simplify_truncation (machine_mode mode, rtx op,
640 machine_mode op_mode)
642 unsigned int precision = GET_MODE_UNIT_PRECISION (mode);
643 unsigned int op_precision = GET_MODE_UNIT_PRECISION (op_mode);
644 scalar_int_mode int_mode, int_op_mode, subreg_mode;
646 gcc_assert (precision <= op_precision);
648 /* Optimize truncations of zero and sign extended values. */
649 if (GET_CODE (op) == ZERO_EXTEND
650 || GET_CODE (op) == SIGN_EXTEND)
652 /* There are three possibilities. If MODE is the same as the
653 origmode, we can omit both the extension and the subreg.
654 If MODE is not larger than the origmode, we can apply the
655 truncation without the extension. Finally, if the outermode
656 is larger than the origmode, we can just extend to the appropriate
657 mode. */
658 machine_mode origmode = GET_MODE (XEXP (op, 0));
659 if (mode == origmode)
660 return XEXP (op, 0);
661 else if (precision <= GET_MODE_UNIT_PRECISION (origmode))
662 return simplify_gen_unary (TRUNCATE, mode,
663 XEXP (op, 0), origmode);
664 else
665 return simplify_gen_unary (GET_CODE (op), mode,
666 XEXP (op, 0), origmode);
669 /* If the machine can perform operations in the truncated mode, distribute
670 the truncation, i.e. simplify (truncate:QI (op:SI (x:SI) (y:SI))) into
671 (op:QI (truncate:QI (x:SI)) (truncate:QI (y:SI))). */
672 if (1
673 && (!WORD_REGISTER_OPERATIONS || precision >= BITS_PER_WORD)
674 && (GET_CODE (op) == PLUS
675 || GET_CODE (op) == MINUS
676 || GET_CODE (op) == MULT))
678 rtx op0 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0), op_mode);
679 if (op0)
681 rtx op1 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 1), op_mode);
682 if (op1)
683 return simplify_gen_binary (GET_CODE (op), mode, op0, op1);
687 /* Simplify (truncate:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C)) into
688 (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
689 the outer subreg is effectively a truncation to the original mode. */
690 if ((GET_CODE (op) == LSHIFTRT
691 || GET_CODE (op) == ASHIFTRT)
692 /* Ensure that OP_MODE is at least twice as wide as MODE
693 to avoid the possibility that an outer LSHIFTRT shifts by more
694 than the sign extension's sign_bit_copies and introduces zeros
695 into the high bits of the result. */
696 && 2 * precision <= op_precision
697 && CONST_INT_P (XEXP (op, 1))
698 && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
699 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
700 && UINTVAL (XEXP (op, 1)) < precision)
701 return simplify_gen_binary (ASHIFTRT, mode,
702 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
704 /* Likewise (truncate:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C)) into
705 (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
706 the outer subreg is effectively a truncation to the original mode. */
707 if ((GET_CODE (op) == LSHIFTRT
708 || GET_CODE (op) == ASHIFTRT)
709 && CONST_INT_P (XEXP (op, 1))
710 && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
711 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
712 && UINTVAL (XEXP (op, 1)) < precision)
713 return simplify_gen_binary (LSHIFTRT, mode,
714 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
716 /* Likewise (truncate:QI (ashift:SI (zero_extend:SI (x:QI)) C)) into
717 (ashift:QI (x:QI) C), where C is a suitable small constant and
718 the outer subreg is effectively a truncation to the original mode. */
719 if (GET_CODE (op) == ASHIFT
720 && CONST_INT_P (XEXP (op, 1))
721 && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
722 || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
723 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
724 && UINTVAL (XEXP (op, 1)) < precision)
725 return simplify_gen_binary (ASHIFT, mode,
726 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
728 /* Likewise (truncate:QI (and:SI (lshiftrt:SI (x:SI) C) C2)) into
729 (and:QI (lshiftrt:QI (truncate:QI (x:SI)) C) C2) for suitable C
730 and C2. */
731 if (GET_CODE (op) == AND
732 && (GET_CODE (XEXP (op, 0)) == LSHIFTRT
733 || GET_CODE (XEXP (op, 0)) == ASHIFTRT)
734 && CONST_INT_P (XEXP (XEXP (op, 0), 1))
735 && CONST_INT_P (XEXP (op, 1)))
737 rtx op0 = (XEXP (XEXP (op, 0), 0));
738 rtx shift_op = XEXP (XEXP (op, 0), 1);
739 rtx mask_op = XEXP (op, 1);
740 unsigned HOST_WIDE_INT shift = UINTVAL (shift_op);
741 unsigned HOST_WIDE_INT mask = UINTVAL (mask_op);
743 if (shift < precision
744 /* If doing this transform works for an X with all bits set,
745 it works for any X. */
746 && ((GET_MODE_MASK (mode) >> shift) & mask)
747 == ((GET_MODE_MASK (op_mode) >> shift) & mask)
748 && (op0 = simplify_gen_unary (TRUNCATE, mode, op0, op_mode))
749 && (op0 = simplify_gen_binary (LSHIFTRT, mode, op0, shift_op)))
751 mask_op = GEN_INT (trunc_int_for_mode (mask, mode));
752 return simplify_gen_binary (AND, mode, op0, mask_op);
756 /* Turn (truncate:M1 (*_extract:M2 (reg:M2) (len) (pos))) into
757 (*_extract:M1 (truncate:M1 (reg:M2)) (len) (pos')) if possible without
758 changing len. */
759 if ((GET_CODE (op) == ZERO_EXTRACT || GET_CODE (op) == SIGN_EXTRACT)
760 && REG_P (XEXP (op, 0))
761 && GET_MODE (XEXP (op, 0)) == GET_MODE (op)
762 && CONST_INT_P (XEXP (op, 1))
763 && CONST_INT_P (XEXP (op, 2)))
765 rtx op0 = XEXP (op, 0);
766 unsigned HOST_WIDE_INT len = UINTVAL (XEXP (op, 1));
767 unsigned HOST_WIDE_INT pos = UINTVAL (XEXP (op, 2));
768 if (BITS_BIG_ENDIAN && pos >= op_precision - precision)
770 op0 = simplify_gen_unary (TRUNCATE, mode, op0, GET_MODE (op0));
771 if (op0)
773 pos -= op_precision - precision;
774 return simplify_gen_ternary (GET_CODE (op), mode, mode, op0,
775 XEXP (op, 1), GEN_INT (pos));
778 else if (!BITS_BIG_ENDIAN && precision >= len + pos)
780 op0 = simplify_gen_unary (TRUNCATE, mode, op0, GET_MODE (op0));
781 if (op0)
782 return simplify_gen_ternary (GET_CODE (op), mode, mode, op0,
783 XEXP (op, 1), XEXP (op, 2));
787 /* Recognize a word extraction from a multi-word subreg. */
788 if ((GET_CODE (op) == LSHIFTRT
789 || GET_CODE (op) == ASHIFTRT)
790 && SCALAR_INT_MODE_P (mode)
791 && SCALAR_INT_MODE_P (op_mode)
792 && precision >= BITS_PER_WORD
793 && 2 * precision <= op_precision
794 && CONST_INT_P (XEXP (op, 1))
795 && (INTVAL (XEXP (op, 1)) & (precision - 1)) == 0
796 && UINTVAL (XEXP (op, 1)) < op_precision)
798 int byte = subreg_lowpart_offset (mode, op_mode);
799 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
800 return simplify_gen_subreg (mode, XEXP (op, 0), op_mode,
801 (WORDS_BIG_ENDIAN
802 ? byte - shifted_bytes
803 : byte + shifted_bytes));
806 /* If we have a TRUNCATE of a right shift of MEM, make a new MEM
807 and try replacing the TRUNCATE and shift with it. Don't do this
808 if the MEM has a mode-dependent address. */
809 if ((GET_CODE (op) == LSHIFTRT
810 || GET_CODE (op) == ASHIFTRT)
811 && is_a <scalar_int_mode> (op_mode, &int_op_mode)
812 && MEM_P (XEXP (op, 0))
813 && CONST_INT_P (XEXP (op, 1))
814 && (INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (mode)) == 0
815 && INTVAL (XEXP (op, 1)) > 0
816 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (int_op_mode)
817 && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0),
818 MEM_ADDR_SPACE (XEXP (op, 0)))
819 && ! MEM_VOLATILE_P (XEXP (op, 0))
820 && (GET_MODE_SIZE (mode) >= UNITS_PER_WORD
821 || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
823 int byte = subreg_lowpart_offset (mode, int_op_mode);
824 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
825 return adjust_address_nv (XEXP (op, 0), mode,
826 (WORDS_BIG_ENDIAN
827 ? byte - shifted_bytes
828 : byte + shifted_bytes));
831 /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
832 (OP:SI foo:SI) if OP is NEG or ABS. */
833 if ((GET_CODE (op) == ABS
834 || GET_CODE (op) == NEG)
835 && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
836 || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
837 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
838 return simplify_gen_unary (GET_CODE (op), mode,
839 XEXP (XEXP (op, 0), 0), mode);
841 /* (truncate:A (subreg:B (truncate:C X) 0)) is
842 (truncate:A X). */
843 if (GET_CODE (op) == SUBREG
844 && is_a <scalar_int_mode> (mode, &int_mode)
845 && SCALAR_INT_MODE_P (op_mode)
846 && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (op)), &subreg_mode)
847 && GET_CODE (SUBREG_REG (op)) == TRUNCATE
848 && subreg_lowpart_p (op))
850 rtx inner = XEXP (SUBREG_REG (op), 0);
851 if (GET_MODE_PRECISION (int_mode) <= GET_MODE_PRECISION (subreg_mode))
852 return simplify_gen_unary (TRUNCATE, int_mode, inner,
853 GET_MODE (inner));
854 else
855 /* If subreg above is paradoxical and C is narrower
856 than A, return (subreg:A (truncate:C X) 0). */
857 return simplify_gen_subreg (int_mode, SUBREG_REG (op), subreg_mode, 0);
860 /* (truncate:A (truncate:B X)) is (truncate:A X). */
861 if (GET_CODE (op) == TRUNCATE)
862 return simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0),
863 GET_MODE (XEXP (op, 0)));
865 /* (truncate:A (ior X C)) is (const_int -1) if C is equal to that already,
866 in mode A. */
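/* Illustrative instance (added): (truncate:QI (ior:SI X (const_int 255)))
   folds to (const_int -1), since 255 already covers every QImode bit.  */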
867 if (GET_CODE (op) == IOR
868 && SCALAR_INT_MODE_P (mode)
869 && SCALAR_INT_MODE_P (op_mode)
870 && CONST_INT_P (XEXP (op, 1))
871 && trunc_int_for_mode (INTVAL (XEXP (op, 1)), mode) == -1)
872 return constm1_rtx;
874 return NULL_RTX;
877 /* Try to simplify a unary operation CODE whose output mode is to be
878 MODE with input operand OP whose mode was originally OP_MODE.
879 Return zero if no simplification can be made. */
881 simplify_unary_operation (enum rtx_code code, machine_mode mode,
882 rtx op, machine_mode op_mode)
884 rtx trueop, tem;
886 trueop = avoid_constant_pool_reference (op);
888 tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
889 if (tem)
890 return tem;
892 return simplify_unary_operation_1 (code, mode, op);
895 /* Return true if FLOAT or UNSIGNED_FLOAT operation OP is known
896 to be exact. */
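/* Illustrative example (added): (float:SF (reg:HI X)) is always exact,
   because SFmode has a 24-bit significand while HImode supplies at most
   16 significant bits.  */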
898 static bool
899 exact_int_to_float_conversion_p (const_rtx op)
901 int out_bits = significand_size (GET_MODE_INNER (GET_MODE (op)));
902 machine_mode op0_mode = GET_MODE (XEXP (op, 0));
903 /* Constants shouldn't reach here. */
904 gcc_assert (op0_mode != VOIDmode);
905 int in_prec = GET_MODE_UNIT_PRECISION (op0_mode);
906 int in_bits = in_prec;
907 if (HWI_COMPUTABLE_MODE_P (op0_mode))
909 unsigned HOST_WIDE_INT nonzero = nonzero_bits (XEXP (op, 0), op0_mode);
910 if (GET_CODE (op) == FLOAT)
911 in_bits -= num_sign_bit_copies (XEXP (op, 0), op0_mode);
912 else if (GET_CODE (op) == UNSIGNED_FLOAT)
913 in_bits = wi::min_precision (wi::uhwi (nonzero, in_prec), UNSIGNED);
914 else
915 gcc_unreachable ();
916 in_bits -= wi::ctz (wi::uhwi (nonzero, in_prec));
918 return in_bits <= out_bits;
921 /* Perform some simplifications we can do even if the operands
922 aren't constant. */
923 static rtx
924 simplify_unary_operation_1 (enum rtx_code code, machine_mode mode, rtx op)
926 enum rtx_code reversed;
927 rtx temp;
928 scalar_int_mode inner, int_mode, op0_mode;
930 switch (code)
932 case NOT:
933 /* (not (not X)) == X. */
934 if (GET_CODE (op) == NOT)
935 return XEXP (op, 0);
937 /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
938 comparison is all ones. */
939 if (COMPARISON_P (op)
940 && (mode == BImode || STORE_FLAG_VALUE == -1)
941 && ((reversed = reversed_comparison_code (op, NULL)) != UNKNOWN))
942 return simplify_gen_relational (reversed, mode, VOIDmode,
943 XEXP (op, 0), XEXP (op, 1));
945 /* (not (plus X -1)) can become (neg X). */
946 if (GET_CODE (op) == PLUS
947 && XEXP (op, 1) == constm1_rtx)
948 return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
950 /* Similarly, (not (neg X)) is (plus X -1). Only do this for
951 modes that have CONSTM1_RTX, i.e. MODE_INT, MODE_PARTIAL_INT
952 and MODE_VECTOR_INT. */
953 if (GET_CODE (op) == NEG && CONSTM1_RTX (mode))
954 return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
955 CONSTM1_RTX (mode));
957 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
958 if (GET_CODE (op) == XOR
959 && CONST_INT_P (XEXP (op, 1))
960 && (temp = simplify_unary_operation (NOT, mode,
961 XEXP (op, 1), mode)) != 0)
962 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
964 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
965 if (GET_CODE (op) == PLUS
966 && CONST_INT_P (XEXP (op, 1))
967 && mode_signbit_p (mode, XEXP (op, 1))
968 && (temp = simplify_unary_operation (NOT, mode,
969 XEXP (op, 1), mode)) != 0)
970 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
973 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
974 operands other than 1, but that is not valid. We could do a
975 similar simplification for (not (lshiftrt C X)) where C is
976 just the sign bit, but this doesn't seem common enough to
977 bother with. */
978 if (GET_CODE (op) == ASHIFT
979 && XEXP (op, 0) == const1_rtx)
981 temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
982 return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
985 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
986 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
987 so we can perform the above simplification. */
988 if (STORE_FLAG_VALUE == -1
989 && is_a <scalar_int_mode> (mode, &int_mode)
990 && GET_CODE (op) == ASHIFTRT
991 && CONST_INT_P (XEXP (op, 1))
992 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (int_mode) - 1)
993 return simplify_gen_relational (GE, int_mode, VOIDmode,
994 XEXP (op, 0), const0_rtx);
997 if (GET_CODE (op) == SUBREG
998 && subreg_lowpart_p (op)
999 && (GET_MODE_SIZE (GET_MODE (op))
1000 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
1001 && GET_CODE (SUBREG_REG (op)) == ASHIFT
1002 && XEXP (SUBREG_REG (op), 0) == const1_rtx)
1004 machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
1005 rtx x;
1007 x = gen_rtx_ROTATE (inner_mode,
1008 simplify_gen_unary (NOT, inner_mode, const1_rtx,
1009 inner_mode),
1010 XEXP (SUBREG_REG (op), 1));
1011 temp = rtl_hooks.gen_lowpart_no_emit (mode, x);
1012 if (temp)
1013 return temp;
1016 /* Apply De Morgan's laws to reduce number of patterns for machines
1017 with negating logical insns (and-not, nand, etc.). If result has
1018 only one NOT, put it first, since that is how the patterns are
1019 coded. */
1020 if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
1022 rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
1023 machine_mode op_mode;
1025 op_mode = GET_MODE (in1);
1026 in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);
1028 op_mode = GET_MODE (in2);
1029 if (op_mode == VOIDmode)
1030 op_mode = mode;
1031 in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);
1033 if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
1034 std::swap (in1, in2);
1036 return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
1037 mode, in1, in2);
1040 /* (not (bswap x)) -> (bswap (not x)). */
1041 if (GET_CODE (op) == BSWAP)
1043 rtx x = simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
1044 return simplify_gen_unary (BSWAP, mode, x, mode);
1046 break;
1048 case NEG:
1049 /* (neg (neg X)) == X. */
1050 if (GET_CODE (op) == NEG)
1051 return XEXP (op, 0);
1053 /* (neg (x ? (neg y) : y)) == !x ? (neg y) : y.
1054 If comparison is not reversible use
1055 x ? y : (neg y). */
1056 if (GET_CODE (op) == IF_THEN_ELSE)
1058 rtx cond = XEXP (op, 0);
1059 rtx true_rtx = XEXP (op, 1);
1060 rtx false_rtx = XEXP (op, 2);
1062 if ((GET_CODE (true_rtx) == NEG
1063 && rtx_equal_p (XEXP (true_rtx, 0), false_rtx))
1064 || (GET_CODE (false_rtx) == NEG
1065 && rtx_equal_p (XEXP (false_rtx, 0), true_rtx)))
1067 if (reversed_comparison_code (cond, NULL) != UNKNOWN)
1068 temp = reversed_comparison (cond, mode);
1069 else
1071 temp = cond;
1072 std::swap (true_rtx, false_rtx);
1074 return simplify_gen_ternary (IF_THEN_ELSE, mode,
1075 mode, temp, true_rtx, false_rtx);
1079 /* (neg (plus X 1)) can become (not X). */
1080 if (GET_CODE (op) == PLUS
1081 && XEXP (op, 1) == const1_rtx)
1082 return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
1084 /* Similarly, (neg (not X)) is (plus X 1). */
1085 if (GET_CODE (op) == NOT)
1086 return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
1087 CONST1_RTX (mode));
1089 /* (neg (minus X Y)) can become (minus Y X). This transformation
1090 isn't safe for modes with signed zeros, since if X and Y are
1091 both +0, (minus Y X) is the same as (minus X Y). If the
1092 rounding mode is towards +infinity (or -infinity) then the two
1093 expressions will be rounded differently. */
1094 if (GET_CODE (op) == MINUS
1095 && !HONOR_SIGNED_ZEROS (mode)
1096 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1097 return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
1099 if (GET_CODE (op) == PLUS
1100 && !HONOR_SIGNED_ZEROS (mode)
1101 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1103 /* (neg (plus A C)) is simplified to (minus -C A). */
1104 if (CONST_SCALAR_INT_P (XEXP (op, 1))
1105 || CONST_DOUBLE_AS_FLOAT_P (XEXP (op, 1)))
1107 temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
1108 if (temp)
1109 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
1112 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
1113 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
1114 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
1117 /* (neg (mult A B)) becomes (mult A (neg B)).
1118 This works even for floating-point values. */
1119 if (GET_CODE (op) == MULT
1120 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1122 temp = simplify_gen_unary (NEG, mode, XEXP (op, 1), mode);
1123 return simplify_gen_binary (MULT, mode, XEXP (op, 0), temp);
1126 /* NEG commutes with ASHIFT since it is multiplication. Only do
1127 this if we can then eliminate the NEG (e.g., if the operand
1128 is a constant). */
1129 if (GET_CODE (op) == ASHIFT)
1131 temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
1132 if (temp)
1133 return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
1136 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
1137 C is equal to the width of MODE minus 1. */
1138 if (GET_CODE (op) == ASHIFTRT
1139 && CONST_INT_P (XEXP (op, 1))
1140 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
1141 return simplify_gen_binary (LSHIFTRT, mode,
1142 XEXP (op, 0), XEXP (op, 1));
1144 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
1145 C is equal to the width of MODE minus 1. */
1146 if (GET_CODE (op) == LSHIFTRT
1147 && CONST_INT_P (XEXP (op, 1))
1148 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
1149 return simplify_gen_binary (ASHIFTRT, mode,
1150 XEXP (op, 0), XEXP (op, 1));
1152 /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
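/* (Added illustration: with A in {0, 1}, A ^ 1 flips the bit, so
   -(A ^ 1) equals A - 1 in both cases.)  */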
1153 if (GET_CODE (op) == XOR
1154 && XEXP (op, 1) == const1_rtx
1155 && nonzero_bits (XEXP (op, 0), mode) == 1)
1156 return plus_constant (mode, XEXP (op, 0), -1);
1158 /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1. */
1159 /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1. */
1160 if (GET_CODE (op) == LT
1161 && XEXP (op, 1) == const0_rtx
1162 && is_a <scalar_int_mode> (GET_MODE (XEXP (op, 0)), &inner))
1164 int isize = GET_MODE_PRECISION (inner);
1165 if (STORE_FLAG_VALUE == 1)
1167 temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
1168 GEN_INT (isize - 1));
1169 if (mode == inner)
1170 return temp;
1171 if (GET_MODE_PRECISION (mode) > isize)
1172 return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
1173 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
1175 else if (STORE_FLAG_VALUE == -1)
1177 temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
1178 GEN_INT (isize - 1));
1179 if (mode == inner)
1180 return temp;
1181 if (GET_MODE_PRECISION (mode) > isize)
1182 return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
1183 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
1186 break;
1188 case TRUNCATE:
1189 /* Don't optimize (lshiftrt (mult ...)) as it would interfere
1190 with the umulXi3_highpart patterns. */
1191 if (GET_CODE (op) == LSHIFTRT
1192 && GET_CODE (XEXP (op, 0)) == MULT)
1193 break;
1195 if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
1197 if (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op)))
1199 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1200 if (temp)
1201 return temp;
1203 /* We can't handle truncation to a partial integer mode here
1204 because we don't know the real bitsize of the partial
1205 integer mode. */
1206 break;
1209 if (GET_MODE (op) != VOIDmode)
1211 temp = simplify_truncation (mode, op, GET_MODE (op));
1212 if (temp)
1213 return temp;
1216 /* If we know that the value is already truncated, we can
1217 replace the TRUNCATE with a SUBREG. */
1218 if (GET_MODE_NUNITS (mode) == 1
1219 && (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op))
1220 || truncated_to_mode (mode, op)))
1222 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1223 if (temp)
1224 return temp;
1227 /* A truncate of a comparison can be replaced with a subreg if
1228 STORE_FLAG_VALUE permits. This is like the previous test,
1229 but it works even if the comparison is done in a mode larger
1230 than HOST_BITS_PER_WIDE_INT. */
1231 if (HWI_COMPUTABLE_MODE_P (mode)
1232 && COMPARISON_P (op)
1233 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
1235 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1236 if (temp)
1237 return temp;
1240 /* A truncate of a memory is just loading the low part of the memory
1241 if we are not changing the meaning of the address. */
1242 if (GET_CODE (op) == MEM
1243 && !VECTOR_MODE_P (mode)
1244 && !MEM_VOLATILE_P (op)
1245 && !mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op)))
1247 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1248 if (temp)
1249 return temp;
1252 break;
1254 case FLOAT_TRUNCATE:
1255 if (DECIMAL_FLOAT_MODE_P (mode))
1256 break;
1258 /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */
1259 if (GET_CODE (op) == FLOAT_EXTEND
1260 && GET_MODE (XEXP (op, 0)) == mode)
1261 return XEXP (op, 0);
1263 /* (float_truncate:SF (float_truncate:DF foo:XF))
1264 = (float_truncate:SF foo:XF).
1265 This may eliminate double rounding, so it is unsafe.
1267 (float_truncate:SF (float_extend:XF foo:DF))
1268 = (float_truncate:SF foo:DF).
1270 (float_truncate:DF (float_extend:XF foo:SF))
1271 = (float_extend:DF foo:SF). */
1272 if ((GET_CODE (op) == FLOAT_TRUNCATE
1273 && flag_unsafe_math_optimizations)
1274 || GET_CODE (op) == FLOAT_EXTEND)
1275 return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op,
1276 0)))
1277 > GET_MODE_SIZE (mode)
1278 ? FLOAT_TRUNCATE : FLOAT_EXTEND,
1279 mode,
1280 XEXP (op, 0), mode);
1282 /* (float_truncate (float x)) is (float x) */
1283 if ((GET_CODE (op) == FLOAT || GET_CODE (op) == UNSIGNED_FLOAT)
1284 && (flag_unsafe_math_optimizations
1285 || exact_int_to_float_conversion_p (op)))
1286 return simplify_gen_unary (GET_CODE (op), mode,
1287 XEXP (op, 0),
1288 GET_MODE (XEXP (op, 0)));
1290 /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
1291 (OP:SF foo:SF) if OP is NEG or ABS. */
1292 if ((GET_CODE (op) == ABS
1293 || GET_CODE (op) == NEG)
1294 && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
1295 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
1296 return simplify_gen_unary (GET_CODE (op), mode,
1297 XEXP (XEXP (op, 0), 0), mode);
1299 /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
1300 is (float_truncate:SF x). */
1301 if (GET_CODE (op) == SUBREG
1302 && subreg_lowpart_p (op)
1303 && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
1304 return SUBREG_REG (op);
1305 break;
1307 case FLOAT_EXTEND:
1308 if (DECIMAL_FLOAT_MODE_P (mode))
1309 break;
1311 /* (float_extend (float_extend x)) is (float_extend x)
1313 (float_extend (float x)) is (float x) assuming that double
1314 rounding can't happen.  */
1316 if (GET_CODE (op) == FLOAT_EXTEND
1317 || ((GET_CODE (op) == FLOAT || GET_CODE (op) == UNSIGNED_FLOAT)
1318 && exact_int_to_float_conversion_p (op)))
1319 return simplify_gen_unary (GET_CODE (op), mode,
1320 XEXP (op, 0),
1321 GET_MODE (XEXP (op, 0)));
1323 break;
1325 case ABS:
1326 /* (abs (neg <foo>)) -> (abs <foo>) */
1327 if (GET_CODE (op) == NEG)
1328 return simplify_gen_unary (ABS, mode, XEXP (op, 0),
1329 GET_MODE (XEXP (op, 0)));
1331 /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
1332 do nothing. */
1333 if (GET_MODE (op) == VOIDmode)
1334 break;
1336 /* If operand is something known to be positive, ignore the ABS. */
1337 if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
1338 || val_signbit_known_clear_p (GET_MODE (op),
1339 nonzero_bits (op, GET_MODE (op))))
1340 return op;
1342 /* If operand is known to be only -1 or 0, convert ABS to NEG. */
1343 if (is_a <scalar_int_mode> (mode, &int_mode)
1344 && (num_sign_bit_copies (op, int_mode)
1345 == GET_MODE_PRECISION (int_mode)))
1346 return gen_rtx_NEG (int_mode, op);
1348 break;
1350 case FFS:
1351 /* (ffs (*_extend <X>)) = (ffs <X>) */
1352 if (GET_CODE (op) == SIGN_EXTEND
1353 || GET_CODE (op) == ZERO_EXTEND)
1354 return simplify_gen_unary (FFS, mode, XEXP (op, 0),
1355 GET_MODE (XEXP (op, 0)));
1356 break;
1358 case POPCOUNT:
1359 switch (GET_CODE (op))
1361 case BSWAP:
1362 case ZERO_EXTEND:
1363 /* (popcount (zero_extend <X>)) = (popcount <X>) */
1364 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1365 GET_MODE (XEXP (op, 0)));
1367 case ROTATE:
1368 case ROTATERT:
1369 /* Rotations don't affect popcount. */
1370 if (!side_effects_p (XEXP (op, 1)))
1371 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1372 GET_MODE (XEXP (op, 0)));
1373 break;
1375 default:
1376 break;
1378 break;
1380 case PARITY:
1381 switch (GET_CODE (op))
1383 case NOT:
1384 case BSWAP:
1385 case ZERO_EXTEND:
1386 case SIGN_EXTEND:
1387 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1388 GET_MODE (XEXP (op, 0)));
1390 case ROTATE:
1391 case ROTATERT:
1392 /* Rotations don't affect parity. */
1393 if (!side_effects_p (XEXP (op, 1)))
1394 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1395 GET_MODE (XEXP (op, 0)));
1396 break;
1398 default:
1399 break;
1401 break;
1403 case BSWAP:
1404 /* (bswap (bswap x)) -> x. */
1405 if (GET_CODE (op) == BSWAP)
1406 return XEXP (op, 0);
1407 break;
1409 case FLOAT:
1410 /* (float (sign_extend <X>)) = (float <X>). */
1411 if (GET_CODE (op) == SIGN_EXTEND)
1412 return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
1413 GET_MODE (XEXP (op, 0)));
1414 break;
1416 case SIGN_EXTEND:
1417 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
1418 becomes just the MINUS if its mode is MODE. This allows
1419 folding switch statements on machines using casesi (such as
1420 the VAX). */
1421 if (GET_CODE (op) == TRUNCATE
1422 && GET_MODE (XEXP (op, 0)) == mode
1423 && GET_CODE (XEXP (op, 0)) == MINUS
1424 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
1425 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
1426 return XEXP (op, 0);
1428 /* Extending a widening multiplication should be canonicalized to
1429 a wider widening multiplication. */
1430 if (GET_CODE (op) == MULT)
1432 rtx lhs = XEXP (op, 0);
1433 rtx rhs = XEXP (op, 1);
1434 enum rtx_code lcode = GET_CODE (lhs);
1435 enum rtx_code rcode = GET_CODE (rhs);
1437 /* Widening multiplies usually extend both operands, but sometimes
1438 they use a shift to extract a portion of a register. */
1439 if ((lcode == SIGN_EXTEND
1440 || (lcode == ASHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1441 && (rcode == SIGN_EXTEND
1442 || (rcode == ASHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1444 machine_mode lmode = GET_MODE (lhs);
1445 machine_mode rmode = GET_MODE (rhs);
1446 int bits;
1448 if (lcode == ASHIFTRT)
1449 /* Number of bits not shifted off the end. */
1450 bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
1451 else /* lcode == SIGN_EXTEND */
1452 /* Size of inner mode. */
1453 bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
1455 if (rcode == ASHIFTRT)
1456 bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
1457 else /* rcode == SIGN_EXTEND */
1458 bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
1460 /* We can only widen multiplies if the result is mathematically
1461 equivalent. I.e. if overflow was impossible. */
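/* Worked instance (added, illustrative): for
   (mult:SI (sign_extend:SI (reg:HI a)) (sign_extend:SI (reg:HI b)))
   BITS is 16 + 16 = 32, which fits SImode, so the product can be
   rewritten as a wider widening multiply of the HImode operands.  */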
1462 if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
1463 return simplify_gen_binary
1464 (MULT, mode,
1465 simplify_gen_unary (SIGN_EXTEND, mode, lhs, lmode),
1466 simplify_gen_unary (SIGN_EXTEND, mode, rhs, rmode));
1470 /* Check for a sign extension of a subreg of a promoted
1471 variable, where the promotion is sign-extended, and the
1472 target mode is the same as the variable's promotion. */
1473 if (GET_CODE (op) == SUBREG
1474 && SUBREG_PROMOTED_VAR_P (op)
1475 && SUBREG_PROMOTED_SIGNED_P (op)
1476 && !paradoxical_subreg_p (mode, GET_MODE (SUBREG_REG (op))))
1478 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1479 if (temp)
1480 return temp;
1483 /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
1484 (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1485 if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
1487 gcc_assert (GET_MODE_PRECISION (mode)
1488 > GET_MODE_PRECISION (GET_MODE (op)));
1489 return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
1490 GET_MODE (XEXP (op, 0)));
1493 /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
1494 is (sign_extend:M (subreg:O <X>)) if there is mode with
1495 GET_MODE_BITSIZE (N) - I bits.
1496 (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1497 is similarly (zero_extend:M (subreg:O <X>)). */
1498 if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT)
1499 && GET_CODE (XEXP (op, 0)) == ASHIFT
1500 && is_a <scalar_int_mode> (mode, &int_mode)
1501 && CONST_INT_P (XEXP (op, 1))
1502 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1503 && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
1505 scalar_int_mode tmode;
1506 gcc_assert (GET_MODE_BITSIZE (int_mode)
1507 > GET_MODE_BITSIZE (GET_MODE (op)));
1508 if (int_mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
1509 - INTVAL (XEXP (op, 1)), 1).exists (&tmode))
1511 rtx inner =
1512 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1513 if (inner)
1514 return simplify_gen_unary (GET_CODE (op) == ASHIFTRT
1515 ? SIGN_EXTEND : ZERO_EXTEND,
1516 int_mode, inner, tmode);
1520 /* (sign_extend:M (lshiftrt:N <X> (const_int I))) is better as
1521 (zero_extend:M (lshiftrt:N <X> (const_int I))) if I is not 0. */
1522 if (GET_CODE (op) == LSHIFTRT
1523 && CONST_INT_P (XEXP (op, 1))
1524 && XEXP (op, 1) != const0_rtx)
1525 return simplify_gen_unary (ZERO_EXTEND, mode, op, GET_MODE (op));
1527 #if defined(POINTERS_EXTEND_UNSIGNED)
1528 /* As we do not know which address space the pointer is referring to,
1529 we can do this only if the target does not support different pointer
1530 or address modes depending on the address space. */
1531 if (target_default_pointer_address_modes_p ()
1532 && ! POINTERS_EXTEND_UNSIGNED
1533 && mode == Pmode && GET_MODE (op) == ptr_mode
1534 && (CONSTANT_P (op)
1535 || (GET_CODE (op) == SUBREG
1536 && REG_P (SUBREG_REG (op))
1537 && REG_POINTER (SUBREG_REG (op))
1538 && GET_MODE (SUBREG_REG (op)) == Pmode))
1539 && !targetm.have_ptr_extend ())
1541 temp
1542 = convert_memory_address_addr_space_1 (Pmode, op,
1543 ADDR_SPACE_GENERIC, false,
1544 true);
1545 if (temp)
1546 return temp;
1548 #endif
1549 break;
1551 case ZERO_EXTEND:
1552 /* Check for a zero extension of a subreg of a promoted
1553 variable, where the promotion is zero-extended, and the
1554 target mode is the same as the variable's promotion. */
1555 if (GET_CODE (op) == SUBREG
1556 && SUBREG_PROMOTED_VAR_P (op)
1557 && SUBREG_PROMOTED_UNSIGNED_P (op)
1558 && !paradoxical_subreg_p (mode, GET_MODE (SUBREG_REG (op))))
1560 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1561 if (temp)
1562 return temp;
1565 /* Extending a widening multiplication should be canonicalized to
1566 a wider widening multiplication. */
1567 if (GET_CODE (op) == MULT)
1569 rtx lhs = XEXP (op, 0);
1570 rtx rhs = XEXP (op, 1);
1571 enum rtx_code lcode = GET_CODE (lhs);
1572 enum rtx_code rcode = GET_CODE (rhs);
1574 /* Widening multiplies usually extend both operands, but sometimes
1575 they use a shift to extract a portion of a register. */
1576 if ((lcode == ZERO_EXTEND
1577 || (lcode == LSHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1578 && (rcode == ZERO_EXTEND
1579 || (rcode == LSHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1581 machine_mode lmode = GET_MODE (lhs);
1582 machine_mode rmode = GET_MODE (rhs);
1583 int bits;
1585 if (lcode == LSHIFTRT)
1586 /* Number of bits not shifted off the end. */
1587 bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
1588 else /* lcode == ZERO_EXTEND */
1589 /* Size of inner mode. */
1590 bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
1592 if (rcode == LSHIFTRT)
1593 bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
1594 else /* rcode == ZERO_EXTEND */
1595 bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
1597 /* We can only widen multiplies if the result is mathematically
1598 equivalent. I.e. if overflow was impossible. */
1599 if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
1600 return simplify_gen_binary
1601 (MULT, mode,
1602 simplify_gen_unary (ZERO_EXTEND, mode, lhs, lmode),
1603 simplify_gen_unary (ZERO_EXTEND, mode, rhs, rmode));
1607 /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1608 if (GET_CODE (op) == ZERO_EXTEND)
1609 return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0),
1610 GET_MODE (XEXP (op, 0)));
1612 /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1613 is (zero_extend:M (subreg:O <X>)) if there is mode with
1614 GET_MODE_PRECISION (N) - I bits. */
1615 if (GET_CODE (op) == LSHIFTRT
1616 && GET_CODE (XEXP (op, 0)) == ASHIFT
1617 && is_a <scalar_int_mode> (mode, &int_mode)
1618 && CONST_INT_P (XEXP (op, 1))
1619 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1620 && GET_MODE_PRECISION (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
1622 scalar_int_mode tmode;
1623 if (int_mode_for_size (GET_MODE_PRECISION (GET_MODE (op))
1624 - INTVAL (XEXP (op, 1)), 1).exists (&tmode))
1626 rtx inner =
1627 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1628 if (inner)
1629 return simplify_gen_unary (ZERO_EXTEND, int_mode,
1630 inner, tmode);
1634 /* (zero_extend:M (subreg:N <X:O>)) is <X:O> (for M == O) or
1635 (zero_extend:M <X:O>), if X doesn't have any non-zero bits outside
1636 of mode N. E.g.
1637 (zero_extend:SI (subreg:QI (and:SI (reg:SI) (const_int 63)) 0)) is
1638 (and:SI (reg:SI) (const_int 63)). */
1639 if (GET_CODE (op) == SUBREG
1640 && is_a <scalar_int_mode> (mode, &int_mode)
1641 && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (op)), &op0_mode)
1642 && GET_MODE_PRECISION (GET_MODE (op)) < GET_MODE_PRECISION (op0_mode)
1643 && GET_MODE_PRECISION (op0_mode) <= HOST_BITS_PER_WIDE_INT
1644 && GET_MODE_PRECISION (int_mode) >= GET_MODE_PRECISION (op0_mode)
1645 && subreg_lowpart_p (op)
1646 && (nonzero_bits (SUBREG_REG (op), op0_mode)
1647 & ~GET_MODE_MASK (GET_MODE (op))) == 0)
1649 if (GET_MODE_PRECISION (int_mode) == GET_MODE_PRECISION (op0_mode))
1650 return SUBREG_REG (op);
1651 return simplify_gen_unary (ZERO_EXTEND, int_mode, SUBREG_REG (op),
1652 op0_mode);
1655 #if defined(POINTERS_EXTEND_UNSIGNED)
1656 /* As we do not know which address space the pointer is referring to,
1657 we can do this only if the target does not support different pointer
1658 or address modes depending on the address space. */
1659 if (target_default_pointer_address_modes_p ()
1660 && POINTERS_EXTEND_UNSIGNED > 0
1661 && mode == Pmode && GET_MODE (op) == ptr_mode
1662 && (CONSTANT_P (op)
1663 || (GET_CODE (op) == SUBREG
1664 && REG_P (SUBREG_REG (op))
1665 && REG_POINTER (SUBREG_REG (op))
1666 && GET_MODE (SUBREG_REG (op)) == Pmode))
1667 && !targetm.have_ptr_extend ())
1669 temp
1670 = convert_memory_address_addr_space_1 (Pmode, op,
1671 ADDR_SPACE_GENERIC, false,
1672 true);
1673 if (temp)
1674 return temp;
1676 #endif
1677 break;
1679 default:
1680 break;
1683 return 0;
1686 /* Try to compute the value of a unary operation CODE whose output mode is to
1687 be MODE with input operand OP whose mode was originally OP_MODE.
1688 Return zero if the value cannot be computed. */
1690 simplify_const_unary_operation (enum rtx_code code, machine_mode mode,
1691 rtx op, machine_mode op_mode)
1693 unsigned int width = GET_MODE_PRECISION (mode);
1695 if (code == VEC_DUPLICATE)
1697 gcc_assert (VECTOR_MODE_P (mode));
1698 if (GET_MODE (op) != VOIDmode)
1700 if (!VECTOR_MODE_P (GET_MODE (op)))
1701 gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
1702 else
1703 gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
1704 (GET_MODE (op)));
1706 if (CONST_SCALAR_INT_P (op) || CONST_DOUBLE_AS_FLOAT_P (op)
1707 || GET_CODE (op) == CONST_VECTOR)
1709 int elt_size = GET_MODE_UNIT_SIZE (mode);
1710 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1711 rtvec v = rtvec_alloc (n_elts);
1712 unsigned int i;
1714 if (GET_CODE (op) != CONST_VECTOR)
1715 for (i = 0; i < n_elts; i++)
1716 RTVEC_ELT (v, i) = op;
1717 else
1719 machine_mode inmode = GET_MODE (op);
1720 int in_elt_size = GET_MODE_UNIT_SIZE (inmode);
1721 unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
1723 gcc_assert (in_n_elts < n_elts);
1724 gcc_assert ((n_elts % in_n_elts) == 0);
1725 for (i = 0; i < n_elts; i++)
1726 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
1728 return gen_rtx_CONST_VECTOR (mode, v);
1732 if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
1734 int elt_size = GET_MODE_UNIT_SIZE (mode);
1735 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1736 machine_mode opmode = GET_MODE (op);
1737 int op_elt_size = GET_MODE_UNIT_SIZE (opmode);
1738 unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
1739 rtvec v = rtvec_alloc (n_elts);
1740 unsigned int i;
1742 gcc_assert (op_n_elts == n_elts);
1743 for (i = 0; i < n_elts; i++)
1745 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
1746 CONST_VECTOR_ELT (op, i),
1747 GET_MODE_INNER (opmode));
1748 if (!x)
1749 return 0;
1750 RTVEC_ELT (v, i) = x;
1752 return gen_rtx_CONST_VECTOR (mode, v);
1755 /* The order of these tests is critical so that, for example, we don't
1756 check the wrong mode (input vs. output) for a conversion operation,
1757 such as FIX. At some point, this should be simplified. */
1759 if (code == FLOAT && CONST_SCALAR_INT_P (op))
1761 REAL_VALUE_TYPE d;
1763 if (op_mode == VOIDmode)
1765 /* CONST_INT have VOIDmode as the mode. We assume that all
1766 the bits of the constant are significant, though, this is
1767 a dangerous assumption as many times CONST_INTs are
1768 created and used with garbage in the bits outside of the
1769 precision of the implied mode of the const_int. */
1770 op_mode = MAX_MODE_INT;
1773 real_from_integer (&d, mode, rtx_mode_t (op, op_mode), SIGNED);
1775 /* Avoid the folding if flag_signaling_nans is on and
1776 operand is a signaling NaN. */
1777 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1778 return 0;
1780 d = real_value_truncate (mode, d);
1781 return const_double_from_real_value (d, mode);
1783 else if (code == UNSIGNED_FLOAT && CONST_SCALAR_INT_P (op))
1785 REAL_VALUE_TYPE d;
1787 if (op_mode == VOIDmode)
1789 /* CONST_INTs have VOIDmode as their mode.  We assume that all
1790 the bits of the constant are significant, though this is a
1791 dangerous assumption, as CONST_INTs are often created and
1792 used with garbage in the bits outside of the precision of
1793 the implied mode of the const_int.  */
1794 op_mode = MAX_MODE_INT;
1797 real_from_integer (&d, mode, rtx_mode_t (op, op_mode), UNSIGNED);
1799 /* Avoid the folding if flag_signaling_nans is on and
1800 operand is a signaling NaN. */
1801 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1802 return 0;
1804 d = real_value_truncate (mode, d);
1805 return const_double_from_real_value (d, mode);
1808 if (CONST_SCALAR_INT_P (op) && width > 0)
1810 wide_int result;
1811 machine_mode imode = op_mode == VOIDmode ? mode : op_mode;
1812 rtx_mode_t op0 = rtx_mode_t (op, imode);
1813 int int_value;
1815 #if TARGET_SUPPORTS_WIDE_INT == 0
1816 /* This assert keeps the simplification from producing a result
1817 that cannot be represented in a CONST_DOUBLE, but a lot of
1818 upstream callers expect that this function never fails to
1819 simplify something, so if you added this to the test
1820 above, the code would die later anyway.  If this assert
1821 happens, you just need to make the port support wide int.  */
1822 gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT);
1823 #endif
1825 switch (code)
1827 case NOT:
1828 result = wi::bit_not (op0);
1829 break;
1831 case NEG:
1832 result = wi::neg (op0);
1833 break;
1835 case ABS:
1836 result = wi::abs (op0);
1837 break;
1839 case FFS:
1840 result = wi::shwi (wi::ffs (op0), mode);
1841 break;
1843 case CLZ:
1844 if (wi::ne_p (op0, 0))
1845 int_value = wi::clz (op0);
1846 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, int_value))
1847 int_value = GET_MODE_PRECISION (mode);
1848 result = wi::shwi (int_value, mode);
1849 break;
1851 case CLRSB:
1852 result = wi::shwi (wi::clrsb (op0), mode);
1853 break;
1855 case CTZ:
1856 if (wi::ne_p (op0, 0))
1857 int_value = wi::ctz (op0);
1858 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, int_value))
1859 int_value = GET_MODE_PRECISION (mode);
1860 result = wi::shwi (int_value, mode);
1861 break;
1863 case POPCOUNT:
1864 result = wi::shwi (wi::popcount (op0), mode);
1865 break;
1867 case PARITY:
1868 result = wi::shwi (wi::parity (op0), mode);
1869 break;
1871 case BSWAP:
1872 result = wide_int (op0).bswap ();
1873 break;
1875 case TRUNCATE:
1876 case ZERO_EXTEND:
1877 result = wide_int::from (op0, width, UNSIGNED);
1878 break;
1880 case SIGN_EXTEND:
1881 result = wide_int::from (op0, width, SIGNED);
1882 break;
1884 case SQRT:
1885 default:
1886 return 0;
1889 return immed_wide_int_const (result, mode);
1892 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1893 && SCALAR_FLOAT_MODE_P (mode)
1894 && SCALAR_FLOAT_MODE_P (GET_MODE (op)))
1896 REAL_VALUE_TYPE d = *CONST_DOUBLE_REAL_VALUE (op);
1897 switch (code)
1899 case SQRT:
1900 return 0;
1901 case ABS:
1902 d = real_value_abs (&d);
1903 break;
1904 case NEG:
1905 d = real_value_negate (&d);
1906 break;
1907 case FLOAT_TRUNCATE:
1908 /* Don't perform the operation if flag_signaling_nans is on
1909 and the operand is a signaling NaN. */
1910 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1911 return NULL_RTX;
1912 d = real_value_truncate (mode, d);
1913 break;
1914 case FLOAT_EXTEND:
1915 /* Don't perform the operation if flag_signaling_nans is on
1916 and the operand is a signaling NaN. */
1917 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1918 return NULL_RTX;
1919 /* All this does is change the mode, unless changing
1920 mode class. */
1921 if (GET_MODE_CLASS (mode) != GET_MODE_CLASS (GET_MODE (op)))
1922 real_convert (&d, mode, &d);
1923 break;
1924 case FIX:
1925 /* Don't perform the operation if flag_signaling_nans is on
1926 and the operand is a signaling NaN. */
1927 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1928 return NULL_RTX;
1929 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
1930 break;
1931 case NOT:
1933 long tmp[4];
1934 int i;
1936 real_to_target (tmp, &d, GET_MODE (op));
1937 for (i = 0; i < 4; i++)
1938 tmp[i] = ~tmp[i];
1939 real_from_target (&d, tmp, mode);
1940 break;
1942 default:
1943 gcc_unreachable ();
1945 return const_double_from_real_value (d, mode);
1947 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1948 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1949 && GET_MODE_CLASS (mode) == MODE_INT
1950 && width > 0)
1952 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1953 operators are intentionally left unspecified (to ease implementation
1954 by target backends), for consistency, this routine implements the
1955 same semantics for constant folding as used by the middle-end. */
1957 /* This was formerly used only for non-IEEE float.
1958 eggert@twinsun.com says it is safe for IEEE also. */
1959 REAL_VALUE_TYPE t;
1960 const REAL_VALUE_TYPE *x = CONST_DOUBLE_REAL_VALUE (op);
1961 wide_int wmax, wmin;
1962 /* This is part of the ABI of real_to_integer, but we check
1963 things before making this call.  */
1964 bool fail;
1966 switch (code)
1968 case FIX:
1969 if (REAL_VALUE_ISNAN (*x))
1970 return const0_rtx;
1972 /* Test against the signed upper bound. */
1973 wmax = wi::max_value (width, SIGNED);
1974 real_from_integer (&t, VOIDmode, wmax, SIGNED);
1975 if (real_less (&t, x))
1976 return immed_wide_int_const (wmax, mode);
1978 /* Test against the signed lower bound. */
1979 wmin = wi::min_value (width, SIGNED);
1980 real_from_integer (&t, VOIDmode, wmin, SIGNED);
1981 if (real_less (x, &t))
1982 return immed_wide_int_const (wmin, mode);
1984 return immed_wide_int_const (real_to_integer (x, &fail, width),
1985 mode);
1987 case UNSIGNED_FIX:
1988 if (REAL_VALUE_ISNAN (*x) || REAL_VALUE_NEGATIVE (*x))
1989 return const0_rtx;
1991 /* Test against the unsigned upper bound. */
1992 wmax = wi::max_value (width, UNSIGNED);
1993 real_from_integer (&t, VOIDmode, wmax, UNSIGNED);
1994 if (real_less (&t, x))
1995 return immed_wide_int_const (wmax, mode);
1997 return immed_wide_int_const (real_to_integer (x, &fail, width),
1998 mode);
2000 default:
2001 gcc_unreachable ();
2005 return NULL_RTX;
2008 /* Subroutine of simplify_binary_operation to simplify a binary operation
2009 CODE that can commute with byte swapping, with result mode MODE and
2010 operating on OP0 and OP1. CODE is currently one of AND, IOR or XOR.
2011 Return zero if no simplification or canonicalization is possible. */
2013 static rtx
2014 simplify_byte_swapping_operation (enum rtx_code code, machine_mode mode,
2015 rtx op0, rtx op1)
2017 rtx tem;
2019 /* (op (bswap x) C1) -> (bswap (op x C2)) with C2 swapped.  */
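/* For example, in SImode (ior (bswap x) (const_int 0xff)) can be rewritten
as (bswap (ior x (const_int 0xff000000))), since byte-swapping 0xff000000
yields 0xff again (illustrative values).  */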
2020 if (GET_CODE (op0) == BSWAP && CONST_SCALAR_INT_P (op1))
2022 tem = simplify_gen_binary (code, mode, XEXP (op0, 0),
2023 simplify_gen_unary (BSWAP, mode, op1, mode));
2024 return simplify_gen_unary (BSWAP, mode, tem, mode);
2027 /* (op (bswap x) (bswap y)) -> (bswap (op x y)). */
2028 if (GET_CODE (op0) == BSWAP && GET_CODE (op1) == BSWAP)
2030 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), XEXP (op1, 0));
2031 return simplify_gen_unary (BSWAP, mode, tem, mode);
2034 return NULL_RTX;
2037 /* Subroutine of simplify_binary_operation to simplify a commutative,
2038 associative binary operation CODE with result mode MODE, operating
2039 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
2040 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
2041 canonicalization is possible. */
2043 static rtx
2044 simplify_associative_operation (enum rtx_code code, machine_mode mode,
2045 rtx op0, rtx op1)
2047 rtx tem;
2049 /* Linearize the operator to the left. */
2050 if (GET_CODE (op1) == code)
2052 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)". */
2053 if (GET_CODE (op0) == code)
2055 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
2056 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
2059 /* "a op (b op c)" becomes "(b op c) op a". */
2060 if (! swap_commutative_operands_p (op1, op0))
2061 return simplify_gen_binary (code, mode, op1, op0);
2063 std::swap (op0, op1);
2066 if (GET_CODE (op0) == code)
2068 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
2069 if (swap_commutative_operands_p (XEXP (op0, 1), op1))
2071 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
2072 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
2075 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
2076 tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
2077 if (tem != 0)
2078 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
2080 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
2081 tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
2082 if (tem != 0)
2083 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
2086 return 0;
2090 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
2091 and OP1. Return 0 if no simplification is possible.
2093 Don't use this for relational operations such as EQ or LT.
2094 Use simplify_relational_operation instead. */
2095 rtx
2096 simplify_binary_operation (enum rtx_code code, machine_mode mode,
2097 rtx op0, rtx op1)
2099 rtx trueop0, trueop1;
2100 rtx tem;
2102 /* Relational operations don't work here. We must know the mode
2103 of the operands in order to do the comparison correctly.
2104 Assuming a full word can give incorrect results.
2105 Consider comparing 128 with -128 in QImode. */
2106 gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
2107 gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
2109 /* Make sure the constant is second. */
2110 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
2111 && swap_commutative_operands_p (op0, op1))
2112 std::swap (op0, op1);
2114 trueop0 = avoid_constant_pool_reference (op0);
2115 trueop1 = avoid_constant_pool_reference (op1);
2117 tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
2118 if (tem)
2119 return tem;
2120 tem = simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
2122 if (tem)
2123 return tem;
2125 /* If the above steps did not result in a simplification and op0 or op1
2126 were constant pool references, use the referenced constants directly. */
2127 if (trueop0 != op0 || trueop1 != op1)
2128 return simplify_gen_binary (code, mode, trueop0, trueop1);
2130 return NULL_RTX;
2133 /* Subroutine of simplify_binary_operation. Simplify a binary operation
2134 CODE with result mode MODE, operating on OP0 and OP1. If OP0 and/or
2135 OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
2136 actual constants. */
2138 static rtx
2139 simplify_binary_operation_1 (enum rtx_code code, machine_mode mode,
2140 rtx op0, rtx op1, rtx trueop0, rtx trueop1)
2142 rtx tem, reversed, opleft, opright;
2143 HOST_WIDE_INT val;
2144 unsigned int width = GET_MODE_PRECISION (mode);
2145 scalar_int_mode int_mode, inner_mode;
2147 /* Even if we can't compute a constant result,
2148 there are some cases worth simplifying. */
2150 switch (code)
2152 case PLUS:
2153 /* Maybe simplify x + 0 to x. The two expressions are equivalent
2154 when x is NaN, infinite, or finite and nonzero. They aren't
2155 when x is -0 and the rounding mode is not towards -infinity,
2156 since (-0) + 0 is then 0. */
2157 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
2158 return op0;
2160 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
2161 transformations are safe even for IEEE. */
2162 if (GET_CODE (op0) == NEG)
2163 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
2164 else if (GET_CODE (op1) == NEG)
2165 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
2167 /* (~a) + 1 -> -a */
2168 if (INTEGRAL_MODE_P (mode)
2169 && GET_CODE (op0) == NOT
2170 && trueop1 == const1_rtx)
2171 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
2173 /* Handle both-operands-constant cases. We can only add
2174 CONST_INTs to constants since the sum of relocatable symbols
2175 can't be handled by most assemblers. Don't add CONST_INT
2176 to CONST_INT since overflow won't be computed properly if wider
2177 than HOST_BITS_PER_WIDE_INT. */
2179 if ((GET_CODE (op0) == CONST
2180 || GET_CODE (op0) == SYMBOL_REF
2181 || GET_CODE (op0) == LABEL_REF)
2182 && CONST_INT_P (op1))
2183 return plus_constant (mode, op0, INTVAL (op1));
2184 else if ((GET_CODE (op1) == CONST
2185 || GET_CODE (op1) == SYMBOL_REF
2186 || GET_CODE (op1) == LABEL_REF)
2187 && CONST_INT_P (op0))
2188 return plus_constant (mode, op1, INTVAL (op0));
2190 /* See if this is something like X * C - X or vice versa or
2191 if the multiplication is written as a shift. If so, we can
2192 distribute and make a new multiply, shift, or maybe just
2193 have X (if C is 2 in the example above). But don't make
2194 something more expensive than we had before. */
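/* For example, (plus (mult x (const_int 3)) x) can become
(mult x (const_int 4)) when the new form is no more costly than the
original (illustrative values).  */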
2196 if (is_a <scalar_int_mode> (mode, &int_mode))
2198 rtx lhs = op0, rhs = op1;
2200 wide_int coeff0 = wi::one (GET_MODE_PRECISION (int_mode));
2201 wide_int coeff1 = wi::one (GET_MODE_PRECISION (int_mode));
2203 if (GET_CODE (lhs) == NEG)
2205 coeff0 = wi::minus_one (GET_MODE_PRECISION (int_mode));
2206 lhs = XEXP (lhs, 0);
2208 else if (GET_CODE (lhs) == MULT
2209 && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
2211 coeff0 = rtx_mode_t (XEXP (lhs, 1), int_mode);
2212 lhs = XEXP (lhs, 0);
2214 else if (GET_CODE (lhs) == ASHIFT
2215 && CONST_INT_P (XEXP (lhs, 1))
2216 && INTVAL (XEXP (lhs, 1)) >= 0
2217 && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (int_mode))
2219 coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
2220 GET_MODE_PRECISION (int_mode));
2221 lhs = XEXP (lhs, 0);
2224 if (GET_CODE (rhs) == NEG)
2226 coeff1 = wi::minus_one (GET_MODE_PRECISION (int_mode));
2227 rhs = XEXP (rhs, 0);
2229 else if (GET_CODE (rhs) == MULT
2230 && CONST_INT_P (XEXP (rhs, 1)))
2232 coeff1 = rtx_mode_t (XEXP (rhs, 1), int_mode);
2233 rhs = XEXP (rhs, 0);
2235 else if (GET_CODE (rhs) == ASHIFT
2236 && CONST_INT_P (XEXP (rhs, 1))
2237 && INTVAL (XEXP (rhs, 1)) >= 0
2238 && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (int_mode))
2240 coeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
2241 GET_MODE_PRECISION (int_mode));
2242 rhs = XEXP (rhs, 0);
2245 if (rtx_equal_p (lhs, rhs))
2247 rtx orig = gen_rtx_PLUS (int_mode, op0, op1);
2248 rtx coeff;
2249 bool speed = optimize_function_for_speed_p (cfun);
2251 coeff = immed_wide_int_const (coeff0 + coeff1, int_mode);
2253 tem = simplify_gen_binary (MULT, int_mode, lhs, coeff);
2254 return (set_src_cost (tem, int_mode, speed)
2255 <= set_src_cost (orig, int_mode, speed) ? tem : 0);
2259 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
2260 if (CONST_SCALAR_INT_P (op1)
2261 && GET_CODE (op0) == XOR
2262 && CONST_SCALAR_INT_P (XEXP (op0, 1))
2263 && mode_signbit_p (mode, op1))
2264 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2265 simplify_gen_binary (XOR, mode, op1,
2266 XEXP (op0, 1)));
2268 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
2269 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2270 && GET_CODE (op0) == MULT
2271 && GET_CODE (XEXP (op0, 0)) == NEG)
2273 rtx in1, in2;
2275 in1 = XEXP (XEXP (op0, 0), 0);
2276 in2 = XEXP (op0, 1);
2277 return simplify_gen_binary (MINUS, mode, op1,
2278 simplify_gen_binary (MULT, mode,
2279 in1, in2));
2282 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
2283 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
2284 is 1. */
2285 if (COMPARISON_P (op0)
2286 && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
2287 || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
2288 && (reversed = reversed_comparison (op0, mode)))
2289 return
2290 simplify_gen_unary (NEG, mode, reversed, mode);
2292 /* If one of the operands is a PLUS or a MINUS, see if we can
2293 simplify this by the associative law.
2294 Don't use the associative law for floating point.
2295 The inaccuracy makes it nonassociative,
2296 and subtle programs can break if operations are associated. */
2298 if (INTEGRAL_MODE_P (mode)
2299 && (plus_minus_operand_p (op0)
2300 || plus_minus_operand_p (op1))
2301 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2302 return tem;
2304 /* Reassociate floating point addition only when the user
2305 specifies associative math operations. */
2306 if (FLOAT_MODE_P (mode)
2307 && flag_associative_math)
2309 tem = simplify_associative_operation (code, mode, op0, op1);
2310 if (tem)
2311 return tem;
2313 break;
2315 case COMPARE:
2316 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
2317 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
2318 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
2319 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
2321 rtx xop00 = XEXP (op0, 0);
2322 rtx xop10 = XEXP (op1, 0);
2324 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
2325 return xop00;
2327 if (REG_P (xop00) && REG_P (xop10)
2328 && REGNO (xop00) == REGNO (xop10)
2329 && GET_MODE (xop00) == mode
2330 && GET_MODE (xop10) == mode
2331 && GET_MODE_CLASS (mode) == MODE_CC)
2332 return xop00;
2334 break;
2336 case MINUS:
2337 /* We can't assume x-x is 0 even with non-IEEE floating point,
2338 but since it is zero except in very strange circumstances, we
2339 will treat it as zero with -ffinite-math-only. */
2340 if (rtx_equal_p (trueop0, trueop1)
2341 && ! side_effects_p (op0)
2342 && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
2343 return CONST0_RTX (mode);
2345 /* Change subtraction from zero into negation. (0 - x) is the
2346 same as -x when x is NaN, infinite, or finite and nonzero.
2347 But if the mode has signed zeros, and does not round towards
2348 -infinity, then 0 - 0 is 0, not -0. */
2349 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
2350 return simplify_gen_unary (NEG, mode, op1, mode);
2352 /* (-1 - a) is ~a, unless the expression contains symbolic
2353 constants, in which case not retaining additions and
2354 subtractions could cause invalid assembly to be produced. */
2355 if (trueop0 == constm1_rtx
2356 && !contains_symbolic_reference_p (op1))
2357 return simplify_gen_unary (NOT, mode, op1, mode);
2359 /* Subtracting 0 has no effect unless the mode has signed zeros
2360 and supports rounding towards -infinity. In such a case,
2361 0 - 0 is -0. */
2362 if (!(HONOR_SIGNED_ZEROS (mode)
2363 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
2364 && trueop1 == CONST0_RTX (mode))
2365 return op0;
2367 /* See if this is something like X * C - X or vice versa or
2368 if the multiplication is written as a shift. If so, we can
2369 distribute and make a new multiply, shift, or maybe just
2370 have X (if C is 2 in the example above). But don't make
2371 something more expensive than we had before. */
2373 if (is_a <scalar_int_mode> (mode, &int_mode))
2375 rtx lhs = op0, rhs = op1;
2377 wide_int coeff0 = wi::one (GET_MODE_PRECISION (int_mode));
2378 wide_int negcoeff1 = wi::minus_one (GET_MODE_PRECISION (int_mode));
2380 if (GET_CODE (lhs) == NEG)
2382 coeff0 = wi::minus_one (GET_MODE_PRECISION (int_mode));
2383 lhs = XEXP (lhs, 0);
2385 else if (GET_CODE (lhs) == MULT
2386 && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
2388 coeff0 = rtx_mode_t (XEXP (lhs, 1), int_mode);
2389 lhs = XEXP (lhs, 0);
2391 else if (GET_CODE (lhs) == ASHIFT
2392 && CONST_INT_P (XEXP (lhs, 1))
2393 && INTVAL (XEXP (lhs, 1)) >= 0
2394 && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (int_mode))
2396 coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
2397 GET_MODE_PRECISION (int_mode));
2398 lhs = XEXP (lhs, 0);
2401 if (GET_CODE (rhs) == NEG)
2403 negcoeff1 = wi::one (GET_MODE_PRECISION (int_mode));
2404 rhs = XEXP (rhs, 0);
2406 else if (GET_CODE (rhs) == MULT
2407 && CONST_INT_P (XEXP (rhs, 1)))
2409 negcoeff1 = wi::neg (rtx_mode_t (XEXP (rhs, 1), int_mode));
2410 rhs = XEXP (rhs, 0);
2412 else if (GET_CODE (rhs) == ASHIFT
2413 && CONST_INT_P (XEXP (rhs, 1))
2414 && INTVAL (XEXP (rhs, 1)) >= 0
2415 && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (int_mode))
2417 negcoeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
2418 GET_MODE_PRECISION (int_mode));
2419 negcoeff1 = -negcoeff1;
2420 rhs = XEXP (rhs, 0);
2423 if (rtx_equal_p (lhs, rhs))
2425 rtx orig = gen_rtx_MINUS (int_mode, op0, op1);
2426 rtx coeff;
2427 bool speed = optimize_function_for_speed_p (cfun);
2429 coeff = immed_wide_int_const (coeff0 + negcoeff1, int_mode);
2431 tem = simplify_gen_binary (MULT, int_mode, lhs, coeff);
2432 return (set_src_cost (tem, int_mode, speed)
2433 <= set_src_cost (orig, int_mode, speed) ? tem : 0);
2437 /* (a - (-b)) -> (a + b). True even for IEEE. */
2438 if (GET_CODE (op1) == NEG)
2439 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
2441 /* (-x - c) may be simplified as (-c - x). */
2442 if (GET_CODE (op0) == NEG
2443 && (CONST_SCALAR_INT_P (op1) || CONST_DOUBLE_AS_FLOAT_P (op1)))
2445 tem = simplify_unary_operation (NEG, mode, op1, mode);
2446 if (tem)
2447 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
2450 /* Don't let a relocatable value get a negative coeff. */
2451 if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode)
2452 return simplify_gen_binary (PLUS, mode,
2453 op0,
2454 neg_const_int (mode, op1));
2456 /* (x - (x & y)) -> (x & ~y) */
2457 if (INTEGRAL_MODE_P (mode) && GET_CODE (op1) == AND)
2459 if (rtx_equal_p (op0, XEXP (op1, 0)))
2461 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
2462 GET_MODE (XEXP (op1, 1)));
2463 return simplify_gen_binary (AND, mode, op0, tem);
2465 if (rtx_equal_p (op0, XEXP (op1, 1)))
2467 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
2468 GET_MODE (XEXP (op1, 0)));
2469 return simplify_gen_binary (AND, mode, op0, tem);
2473 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
2474 by reversing the comparison code if valid. */
2475 if (STORE_FLAG_VALUE == 1
2476 && trueop0 == const1_rtx
2477 && COMPARISON_P (op1)
2478 && (reversed = reversed_comparison (op1, mode)))
2479 return reversed;
2481 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
2482 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2483 && GET_CODE (op1) == MULT
2484 && GET_CODE (XEXP (op1, 0)) == NEG)
2486 rtx in1, in2;
2488 in1 = XEXP (XEXP (op1, 0), 0);
2489 in2 = XEXP (op1, 1);
2490 return simplify_gen_binary (PLUS, mode,
2491 simplify_gen_binary (MULT, mode,
2492 in1, in2),
2493 op0);
2496 /* Canonicalize (minus (neg A) (mult B C)) to
2497 (minus (mult (neg B) C) A). */
2498 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2499 && GET_CODE (op1) == MULT
2500 && GET_CODE (op0) == NEG)
2502 rtx in1, in2;
2504 in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
2505 in2 = XEXP (op1, 1);
2506 return simplify_gen_binary (MINUS, mode,
2507 simplify_gen_binary (MULT, mode,
2508 in1, in2),
2509 XEXP (op0, 0));
2512 /* If one of the operands is a PLUS or a MINUS, see if we can
2513 simplify this by the associative law. This will, for example,
2514 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
2515 Don't use the associative law for floating point.
2516 The inaccuracy makes it nonassociative,
2517 and subtle programs can break if operations are associated. */
2519 if (INTEGRAL_MODE_P (mode)
2520 && (plus_minus_operand_p (op0)
2521 || plus_minus_operand_p (op1))
2522 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2523 return tem;
2524 break;
2526 case MULT:
2527 if (trueop1 == constm1_rtx)
2528 return simplify_gen_unary (NEG, mode, op0, mode);
2530 if (GET_CODE (op0) == NEG)
2532 rtx temp = simplify_unary_operation (NEG, mode, op1, mode);
2533 /* If op1 is a MULT as well and simplify_unary_operation
2534 just moved the NEG to the second operand, simplify_gen_binary
2535 below could, through simplify_associative_operation, move
2536 the NEG around again and recurse endlessly.  */
2537 if (temp
2538 && GET_CODE (op1) == MULT
2539 && GET_CODE (temp) == MULT
2540 && XEXP (op1, 0) == XEXP (temp, 0)
2541 && GET_CODE (XEXP (temp, 1)) == NEG
2542 && XEXP (op1, 1) == XEXP (XEXP (temp, 1), 0))
2543 temp = NULL_RTX;
2544 if (temp)
2545 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), temp);
2547 if (GET_CODE (op1) == NEG)
2549 rtx temp = simplify_unary_operation (NEG, mode, op0, mode);
2550 /* If op0 is a MULT as well and simplify_unary_operation
2551 just moved the NEG to the second operand, simplify_gen_binary
2552 below could, through simplify_associative_operation, move
2553 the NEG around again and recurse endlessly.  */
2554 if (temp
2555 && GET_CODE (op0) == MULT
2556 && GET_CODE (temp) == MULT
2557 && XEXP (op0, 0) == XEXP (temp, 0)
2558 && GET_CODE (XEXP (temp, 1)) == NEG
2559 && XEXP (op0, 1) == XEXP (XEXP (temp, 1), 0))
2560 temp = NULL_RTX;
2561 if (temp)
2562 return simplify_gen_binary (MULT, mode, temp, XEXP (op1, 0));
2565 /* Maybe simplify x * 0 to 0. The reduction is not valid if
2566 x is NaN, since x * 0 is then also NaN. Nor is it valid
2567 when the mode has signed zeros, since multiplying a negative
2568 number by 0 will give -0, not 0. */
2569 if (!HONOR_NANS (mode)
2570 && !HONOR_SIGNED_ZEROS (mode)
2571 && trueop1 == CONST0_RTX (mode)
2572 && ! side_effects_p (op0))
2573 return op1;
2575 /* In IEEE floating point, x*1 is not equivalent to x for
2576 signaling NaNs.  */
2577 if (!HONOR_SNANS (mode)
2578 && trueop1 == CONST1_RTX (mode))
2579 return op0;
2581 /* Convert multiply by constant power of two into shift. */
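/* For example, (mult x (const_int 8)) becomes (ashift x (const_int 3)).  */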
2582 if (CONST_SCALAR_INT_P (trueop1))
2584 val = wi::exact_log2 (rtx_mode_t (trueop1, mode));
2585 if (val >= 0)
2586 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
2589 /* x*2 is x+x and x*(-1) is -x */
2590 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
2591 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
2592 && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
2593 && GET_MODE (op0) == mode)
2595 const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);
2597 if (real_equal (d1, &dconst2))
2598 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
2600 if (!HONOR_SNANS (mode)
2601 && real_equal (d1, &dconstm1))
2602 return simplify_gen_unary (NEG, mode, op0, mode);
2605 /* Optimize -x * -x as x * x. */
2606 if (FLOAT_MODE_P (mode)
2607 && GET_CODE (op0) == NEG
2608 && GET_CODE (op1) == NEG
2609 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2610 && !side_effects_p (XEXP (op0, 0)))
2611 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2613 /* Likewise, optimize abs(x) * abs(x) as x * x. */
2614 if (SCALAR_FLOAT_MODE_P (mode)
2615 && GET_CODE (op0) == ABS
2616 && GET_CODE (op1) == ABS
2617 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2618 && !side_effects_p (XEXP (op0, 0)))
2619 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2621 /* Reassociate multiplication, but for floating point MULTs
2622 only when the user specifies unsafe math optimizations. */
2623 if (! FLOAT_MODE_P (mode)
2624 || flag_unsafe_math_optimizations)
2626 tem = simplify_associative_operation (code, mode, op0, op1);
2627 if (tem)
2628 return tem;
2630 break;
2632 case IOR:
2633 if (trueop1 == CONST0_RTX (mode))
2634 return op0;
2635 if (INTEGRAL_MODE_P (mode)
2636 && trueop1 == CONSTM1_RTX (mode)
2637 && !side_effects_p (op0))
2638 return op1;
2639 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2640 return op0;
2641 /* A | (~A) -> -1 */
2642 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2643 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2644 && ! side_effects_p (op0)
2645 && SCALAR_INT_MODE_P (mode))
2646 return constm1_rtx;
2648 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
2649 if (CONST_INT_P (op1)
2650 && HWI_COMPUTABLE_MODE_P (mode)
2651 && (nonzero_bits (op0, mode) & ~UINTVAL (op1)) == 0
2652 && !side_effects_p (op0))
2653 return op1;
2655 /* Canonicalize (X & C1) | C2. */
2656 if (GET_CODE (op0) == AND
2657 && CONST_INT_P (trueop1)
2658 && CONST_INT_P (XEXP (op0, 1)))
2660 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
2661 HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
2662 HOST_WIDE_INT c2 = INTVAL (trueop1);
2664 /* If (C1&C2) == C1, then (X&C1)|C2 becomes X. */
2665 if ((c1 & c2) == c1
2666 && !side_effects_p (XEXP (op0, 0)))
2667 return trueop1;
2669 /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
2670 if (((c1|c2) & mask) == mask)
2671 return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);
2673 /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2. */
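/* For example, (ior (and x (const_int 0xff)) (const_int 0x0f)) becomes
(ior (and x (const_int 0xf0)) (const_int 0x0f)): whatever the AND leaves in
the low nibble is forced to ones by the IOR anyway (illustrative values).  */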
2674 if (((c1 & ~c2) & mask) != (c1 & mask))
2676 tem = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2677 gen_int_mode (c1 & ~c2, mode));
2678 return simplify_gen_binary (IOR, mode, tem, op1);
2682 /* Convert (A & B) | A to A. */
2683 if (GET_CODE (op0) == AND
2684 && (rtx_equal_p (XEXP (op0, 0), op1)
2685 || rtx_equal_p (XEXP (op0, 1), op1))
2686 && ! side_effects_p (XEXP (op0, 0))
2687 && ! side_effects_p (XEXP (op0, 1)))
2688 return op1;
2690 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2691 mode size to (rotate A CX). */
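/* For example, in SImode (ior (ashift x (const_int 8))
(lshiftrt x (const_int 24))) becomes (rotate x (const_int 8)),
since 8 + 24 equals the mode precision.  */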
2693 if (GET_CODE (op1) == ASHIFT
2694 || GET_CODE (op1) == SUBREG)
2696 opleft = op1;
2697 opright = op0;
2699 else
2701 opright = op1;
2702 opleft = op0;
2705 if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
2706 && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
2707 && CONST_INT_P (XEXP (opleft, 1))
2708 && CONST_INT_P (XEXP (opright, 1))
2709 && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
2710 == GET_MODE_PRECISION (mode)))
2711 return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
2713 /* Same, but for ashift that has been "simplified" to a wider mode
2714 by simplify_shift_const. */
2716 if (GET_CODE (opleft) == SUBREG
2717 && is_a <scalar_int_mode> (mode, &int_mode)
2718 && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (opleft)),
2719 &inner_mode)
2720 && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
2721 && GET_CODE (opright) == LSHIFTRT
2722 && GET_CODE (XEXP (opright, 0)) == SUBREG
2723 && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
2724 && GET_MODE_SIZE (int_mode) < GET_MODE_SIZE (inner_mode)
2725 && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
2726 SUBREG_REG (XEXP (opright, 0)))
2727 && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
2728 && CONST_INT_P (XEXP (opright, 1))
2729 && (INTVAL (XEXP (SUBREG_REG (opleft), 1))
2730 + INTVAL (XEXP (opright, 1))
2731 == GET_MODE_PRECISION (int_mode)))
2732 return gen_rtx_ROTATE (int_mode, XEXP (opright, 0),
2733 XEXP (SUBREG_REG (opleft), 1));
2735 /* If we have (ior (and X C1) C2), simplify this by making
2736 C1 as small as possible if C1 actually changes.  */
2737 if (CONST_INT_P (op1)
2738 && (HWI_COMPUTABLE_MODE_P (mode)
2739 || INTVAL (op1) > 0)
2740 && GET_CODE (op0) == AND
2741 && CONST_INT_P (XEXP (op0, 1))
2742 && CONST_INT_P (op1)
2743 && (UINTVAL (XEXP (op0, 1)) & UINTVAL (op1)) != 0)
2745 rtx tmp = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2746 gen_int_mode (UINTVAL (XEXP (op0, 1))
2747 & ~UINTVAL (op1),
2748 mode));
2749 return simplify_gen_binary (IOR, mode, tmp, op1);
2752 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2753 a (sign_extend (plus ...)). Then check if OP1 is a CONST_INT and
2754 the PLUS does not affect any of the bits in OP1: then we can do
2755 the IOR as a PLUS and we can associate. This is valid if OP1
2756 can be safely shifted left C bits. */
2757 if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
2758 && GET_CODE (XEXP (op0, 0)) == PLUS
2759 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
2760 && CONST_INT_P (XEXP (op0, 1))
2761 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
2763 int count = INTVAL (XEXP (op0, 1));
2764 HOST_WIDE_INT mask = UINTVAL (trueop1) << count;
2766 if (mask >> count == INTVAL (trueop1)
2767 && trunc_int_for_mode (mask, mode) == mask
2768 && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
2769 return simplify_gen_binary (ASHIFTRT, mode,
2770 plus_constant (mode, XEXP (op0, 0),
2771 mask),
2772 XEXP (op0, 1));
2775 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
2776 if (tem)
2777 return tem;
2779 tem = simplify_associative_operation (code, mode, op0, op1);
2780 if (tem)
2781 return tem;
2782 break;
2784 case XOR:
2785 if (trueop1 == CONST0_RTX (mode))
2786 return op0;
2787 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2788 return simplify_gen_unary (NOT, mode, op0, mode);
2789 if (rtx_equal_p (trueop0, trueop1)
2790 && ! side_effects_p (op0)
2791 && GET_MODE_CLASS (mode) != MODE_CC)
2792 return CONST0_RTX (mode);
2794 /* Canonicalize XOR of the most significant bit to PLUS. */
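/* E.g. in QImode, xoring with the sign bit 0x80 and adding 0x80 are
equivalent, because the carry out of the most significant bit is
discarded.  */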
2795 if (CONST_SCALAR_INT_P (op1)
2796 && mode_signbit_p (mode, op1))
2797 return simplify_gen_binary (PLUS, mode, op0, op1);
2798 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2799 if (CONST_SCALAR_INT_P (op1)
2800 && GET_CODE (op0) == PLUS
2801 && CONST_SCALAR_INT_P (XEXP (op0, 1))
2802 && mode_signbit_p (mode, XEXP (op0, 1)))
2803 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2804 simplify_gen_binary (XOR, mode, op1,
2805 XEXP (op0, 1)));
2807 /* If we are XORing two things that have no bits in common,
2808 convert them into an IOR. This helps to detect rotation encoded
2809 using those methods and possibly other simplifications. */
2811 if (HWI_COMPUTABLE_MODE_P (mode)
2812 && (nonzero_bits (op0, mode)
2813 & nonzero_bits (op1, mode)) == 0)
2814 return (simplify_gen_binary (IOR, mode, op0, op1));
2816 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2817 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2818 (NOT y). */
2820 int num_negated = 0;
2822 if (GET_CODE (op0) == NOT)
2823 num_negated++, op0 = XEXP (op0, 0);
2824 if (GET_CODE (op1) == NOT)
2825 num_negated++, op1 = XEXP (op1, 0);
2827 if (num_negated == 2)
2828 return simplify_gen_binary (XOR, mode, op0, op1);
2829 else if (num_negated == 1)
2830 return simplify_gen_unary (NOT, mode,
2831 simplify_gen_binary (XOR, mode, op0, op1),
2832 mode);
2835 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2836 correspond to a machine insn or result in further simplifications
2837 if B is a constant. */
2839 if (GET_CODE (op0) == AND
2840 && rtx_equal_p (XEXP (op0, 1), op1)
2841 && ! side_effects_p (op1))
2842 return simplify_gen_binary (AND, mode,
2843 simplify_gen_unary (NOT, mode,
2844 XEXP (op0, 0), mode),
2845 op1);
2847 else if (GET_CODE (op0) == AND
2848 && rtx_equal_p (XEXP (op0, 0), op1)
2849 && ! side_effects_p (op1))
2850 return simplify_gen_binary (AND, mode,
2851 simplify_gen_unary (NOT, mode,
2852 XEXP (op0, 1), mode),
2853 op1);
2855 /* Given (xor (ior (xor A B) C) D), where B, C and D are
2856 constants, simplify to (xor (ior A C) (B&~C)^D), canceling
2857 out bits inverted twice and not set by C. Similarly, given
2858 (xor (and (xor A B) C) D), simplify without inverting C in
2859 the xor operand: (xor (and A C) (B&C)^D).  */
2861 else if ((GET_CODE (op0) == IOR || GET_CODE (op0) == AND)
2862 && GET_CODE (XEXP (op0, 0)) == XOR
2863 && CONST_INT_P (op1)
2864 && CONST_INT_P (XEXP (op0, 1))
2865 && CONST_INT_P (XEXP (XEXP (op0, 0), 1)))
2867 enum rtx_code op = GET_CODE (op0);
2868 rtx a = XEXP (XEXP (op0, 0), 0);
2869 rtx b = XEXP (XEXP (op0, 0), 1);
2870 rtx c = XEXP (op0, 1);
2871 rtx d = op1;
2872 HOST_WIDE_INT bval = INTVAL (b);
2873 HOST_WIDE_INT cval = INTVAL (c);
2874 HOST_WIDE_INT dval = INTVAL (d);
2875 HOST_WIDE_INT xcval;
2877 if (op == IOR)
2878 xcval = ~cval;
2879 else
2880 xcval = cval;
2882 return simplify_gen_binary (XOR, mode,
2883 simplify_gen_binary (op, mode, a, c),
2884 gen_int_mode ((bval & xcval) ^ dval,
2885 mode));
2888 /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
2889 we can transform like this:
2890 (A&B)^C == ~(A&B)&C | ~C&(A&B)
2891 == (~A|~B)&C | ~C&(A&B) * DeMorgan's Law
2892 == ~A&C | ~B&C | A&(~C&B) * Distribute and re-order
2893 Attempt a few simplifications when B and C are both constants. */
2894 if (GET_CODE (op0) == AND
2895 && CONST_INT_P (op1)
2896 && CONST_INT_P (XEXP (op0, 1)))
2898 rtx a = XEXP (op0, 0);
2899 rtx b = XEXP (op0, 1);
2900 rtx c = op1;
2901 HOST_WIDE_INT bval = INTVAL (b);
2902 HOST_WIDE_INT cval = INTVAL (c);
2904 /* Instead of computing ~A&C, we compute its negated value,
2905 ~(A|~C). If it yields -1, ~A&C is zero, so we can
2906 optimize for sure. If it does not simplify, we still try
2907 to compute ~A&C below, but since that always allocates
2908 RTL, we don't try that before committing to returning a
2909 simplified expression. */
2910 rtx n_na_c = simplify_binary_operation (IOR, mode, a,
2911 GEN_INT (~cval));
2913 if ((~cval & bval) == 0)
2915 rtx na_c = NULL_RTX;
2916 if (n_na_c)
2917 na_c = simplify_gen_unary (NOT, mode, n_na_c, mode);
2918 else
2920 /* If ~A does not simplify, don't bother: we don't
2921 want to simplify 2 operations into 3, and if na_c
2922 were to simplify with na, n_na_c would have
2923 simplified as well. */
2924 rtx na = simplify_unary_operation (NOT, mode, a, mode);
2925 if (na)
2926 na_c = simplify_gen_binary (AND, mode, na, c);
2929 /* Try to simplify ~A&C | ~B&C. */
2930 if (na_c != NULL_RTX)
2931 return simplify_gen_binary (IOR, mode, na_c,
2932 gen_int_mode (~bval & cval, mode));
2934 else
2936 /* If ~A&C is zero, simplify A&(~C&B) | ~B&C. */
2937 if (n_na_c == CONSTM1_RTX (mode))
2939 rtx a_nc_b = simplify_gen_binary (AND, mode, a,
2940 gen_int_mode (~cval & bval,
2941 mode));
2942 return simplify_gen_binary (IOR, mode, a_nc_b,
2943 gen_int_mode (~bval & cval,
2944 mode));
2949 /* If we have (xor (and (xor A B) C) A) with C a constant we can instead
2950 do (ior (and A ~C) (and B C)) which is a machine instruction on some
2951 machines, and also has shorter instruction path length. */
2952 if (GET_CODE (op0) == AND
2953 && GET_CODE (XEXP (op0, 0)) == XOR
2954 && CONST_INT_P (XEXP (op0, 1))
2955 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), trueop1))
2957 rtx a = trueop1;
2958 rtx b = XEXP (XEXP (op0, 0), 1);
2959 rtx c = XEXP (op0, 1);
2960 rtx nc = simplify_gen_unary (NOT, mode, c, mode);
2961 rtx a_nc = simplify_gen_binary (AND, mode, a, nc);
2962 rtx bc = simplify_gen_binary (AND, mode, b, c);
2963 return simplify_gen_binary (IOR, mode, a_nc, bc);
2965 /* Similarly, (xor (and (xor A B) C) B) as (ior (and A C) (and B ~C)) */
2966 else if (GET_CODE (op0) == AND
2967 && GET_CODE (XEXP (op0, 0)) == XOR
2968 && CONST_INT_P (XEXP (op0, 1))
2969 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), trueop1))
2971 rtx a = XEXP (XEXP (op0, 0), 0);
2972 rtx b = trueop1;
2973 rtx c = XEXP (op0, 1);
2974 rtx nc = simplify_gen_unary (NOT, mode, c, mode);
2975 rtx b_nc = simplify_gen_binary (AND, mode, b, nc);
2976 rtx ac = simplify_gen_binary (AND, mode, a, c);
2977 return simplify_gen_binary (IOR, mode, ac, b_nc);
2980 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2981 comparison if STORE_FLAG_VALUE is 1. */
2982 if (STORE_FLAG_VALUE == 1
2983 && trueop1 == const1_rtx
2984 && COMPARISON_P (op0)
2985 && (reversed = reversed_comparison (op0, mode)))
2986 return reversed;
2988 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2989 is (lt foo (const_int 0)), so we can perform the above
2990 simplification if STORE_FLAG_VALUE is 1. */
2992 if (STORE_FLAG_VALUE == 1
2993 && trueop1 == const1_rtx
2994 && GET_CODE (op0) == LSHIFTRT
2995 && CONST_INT_P (XEXP (op0, 1))
2996 && INTVAL (XEXP (op0, 1)) == GET_MODE_PRECISION (mode) - 1)
2997 return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);
2999 /* (xor (comparison foo bar) (const_int sign-bit)) can become the
3000 reversed comparison when STORE_FLAG_VALUE is the sign bit.  */
3001 if (val_signbit_p (mode, STORE_FLAG_VALUE)
3002 && trueop1 == const_true_rtx
3003 && COMPARISON_P (op0)
3004 && (reversed = reversed_comparison (op0, mode)))
3005 return reversed;
3007 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
3008 if (tem)
3009 return tem;
3011 tem = simplify_associative_operation (code, mode, op0, op1);
3012 if (tem)
3013 return tem;
3014 break;
3016 case AND:
3017 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
3018 return trueop1;
3019 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
3020 return op0;
3021 if (HWI_COMPUTABLE_MODE_P (mode))
3023 HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
3024 HOST_WIDE_INT nzop1;
3025 if (CONST_INT_P (trueop1))
3027 HOST_WIDE_INT val1 = INTVAL (trueop1);
3028 /* If we are turning off bits already known off in OP0, we need
3029 not do an AND. */
3030 if ((nzop0 & ~val1) == 0)
3031 return op0;
3033 nzop1 = nonzero_bits (trueop1, mode);
3034 /* If we are clearing all the nonzero bits, the result is zero. */
3035 if ((nzop1 & nzop0) == 0
3036 && !side_effects_p (op0) && !side_effects_p (op1))
3037 return CONST0_RTX (mode);
3039 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
3040 && GET_MODE_CLASS (mode) != MODE_CC)
3041 return op0;
3042 /* A & (~A) -> 0 */
3043 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
3044 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
3045 && ! side_effects_p (op0)
3046 && GET_MODE_CLASS (mode) != MODE_CC)
3047 return CONST0_RTX (mode);
3049 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
3050 there are no nonzero bits of C outside of X's mode. */
3051 if ((GET_CODE (op0) == SIGN_EXTEND
3052 || GET_CODE (op0) == ZERO_EXTEND)
3053 && CONST_INT_P (trueop1)
3054 && HWI_COMPUTABLE_MODE_P (mode)
3055 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
3056 & UINTVAL (trueop1)) == 0)
3058 machine_mode imode = GET_MODE (XEXP (op0, 0));
3059 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
3060 gen_int_mode (INTVAL (trueop1),
3061 imode));
3062 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
3065 /* Transform (and (truncate X) C) into (truncate (and X C)). This way
3066 we might be able to further simplify the AND with X and potentially
3067 remove the truncation altogether. */
3068 if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
3070 rtx x = XEXP (op0, 0);
3071 machine_mode xmode = GET_MODE (x);
3072 tem = simplify_gen_binary (AND, xmode, x,
3073 gen_int_mode (INTVAL (trueop1), xmode));
3074 return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
3077 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
3078 if (GET_CODE (op0) == IOR
3079 && CONST_INT_P (trueop1)
3080 && CONST_INT_P (XEXP (op0, 1)))
3082 HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
3083 return simplify_gen_binary (IOR, mode,
3084 simplify_gen_binary (AND, mode,
3085 XEXP (op0, 0), op1),
3086 gen_int_mode (tmp, mode));
3089 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
3090 insn (and may simplify more). */
3091 if (GET_CODE (op0) == XOR
3092 && rtx_equal_p (XEXP (op0, 0), op1)
3093 && ! side_effects_p (op1))
3094 return simplify_gen_binary (AND, mode,
3095 simplify_gen_unary (NOT, mode,
3096 XEXP (op0, 1), mode),
3097 op1);
3099 if (GET_CODE (op0) == XOR
3100 && rtx_equal_p (XEXP (op0, 1), op1)
3101 && ! side_effects_p (op1))
3102 return simplify_gen_binary (AND, mode,
3103 simplify_gen_unary (NOT, mode,
3104 XEXP (op0, 0), mode),
3105 op1);
3107 /* Similarly for (~(A ^ B)) & A. */
3108 if (GET_CODE (op0) == NOT
3109 && GET_CODE (XEXP (op0, 0)) == XOR
3110 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
3111 && ! side_effects_p (op1))
3112 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
3114 if (GET_CODE (op0) == NOT
3115 && GET_CODE (XEXP (op0, 0)) == XOR
3116 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
3117 && ! side_effects_p (op1))
3118 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
3120 /* Convert (A | B) & A to A. */
3121 if (GET_CODE (op0) == IOR
3122 && (rtx_equal_p (XEXP (op0, 0), op1)
3123 || rtx_equal_p (XEXP (op0, 1), op1))
3124 && ! side_effects_p (XEXP (op0, 0))
3125 && ! side_effects_p (XEXP (op0, 1)))
3126 return op1;
3128 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
3129 ((A & N) + B) & M -> (A + B) & M
3130 Similarly if (N & M) == 0,
3131 ((A | N) + B) & M -> (A + B) & M
3132 and for - instead of + and/or ^ instead of |.
3133 Also, if (N & M) == 0, then
3134 (A +- N) & M -> A & M. */
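/* For example, with M == 0xff and N == 0xffff,
((A & 0xffff) + B) & 0xff becomes (A + B) & 0xff: the low eight bits of
the sum depend only on the low eight bits of the operands
(illustrative values).  */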
3135 if (CONST_INT_P (trueop1)
3136 && HWI_COMPUTABLE_MODE_P (mode)
3137 && ~UINTVAL (trueop1)
3138 && (UINTVAL (trueop1) & (UINTVAL (trueop1) + 1)) == 0
3139 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
3141 rtx pmop[2];
3142 int which;
3144 pmop[0] = XEXP (op0, 0);
3145 pmop[1] = XEXP (op0, 1);
3147 if (CONST_INT_P (pmop[1])
3148 && (UINTVAL (pmop[1]) & UINTVAL (trueop1)) == 0)
3149 return simplify_gen_binary (AND, mode, pmop[0], op1);
3151 for (which = 0; which < 2; which++)
3153 tem = pmop[which];
3154 switch (GET_CODE (tem))
3156 case AND:
3157 if (CONST_INT_P (XEXP (tem, 1))
3158 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1))
3159 == UINTVAL (trueop1))
3160 pmop[which] = XEXP (tem, 0);
3161 break;
3162 case IOR:
3163 case XOR:
3164 if (CONST_INT_P (XEXP (tem, 1))
3165 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1)) == 0)
3166 pmop[which] = XEXP (tem, 0);
3167 break;
3168 default:
3169 break;
3173 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
3175 tem = simplify_gen_binary (GET_CODE (op0), mode,
3176 pmop[0], pmop[1]);
3177 return simplify_gen_binary (code, mode, tem, op1);
3182 /* (and X (ior (not X) Y)) -> (and X Y) */
3182 if (GET_CODE (op1) == IOR
3183 && GET_CODE (XEXP (op1, 0)) == NOT
3184 && rtx_equal_p (op0, XEXP (XEXP (op1, 0), 0)))
3185 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));
3187 /* (and (ior (not X) Y) X) -> (and X Y) */
3188 if (GET_CODE (op0) == IOR
3189 && GET_CODE (XEXP (op0, 0)) == NOT
3190 && rtx_equal_p (op1, XEXP (XEXP (op0, 0), 0)))
3191 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));
3193 /* (and X (ior Y (not X))) -> (and X Y) */
3194 if (GET_CODE (op1) == IOR
3195 && GET_CODE (XEXP (op1, 1)) == NOT
3196 && rtx_equal_p (op0, XEXP (XEXP (op1, 1), 0)))
3197 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 0));
3199 /* (and (ior Y (not X)) X) -> (and X Y) */
3200 if (GET_CODE (op0) == IOR
3201 && GET_CODE (XEXP (op0, 1)) == NOT
3202 && rtx_equal_p (op1, XEXP (XEXP (op0, 1), 0)))
3203 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 0));
3205 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
3206 if (tem)
3207 return tem;
3209 tem = simplify_associative_operation (code, mode, op0, op1);
3210 if (tem)
3211 return tem;
3212 break;
3214 case UDIV:
3215 /* 0/x is 0 (or x&0 if x has side-effects). */
3216 if (trueop0 == CONST0_RTX (mode)
3217 && !cfun->can_throw_non_call_exceptions)
3219 if (side_effects_p (op1))
3220 return simplify_gen_binary (AND, mode, op1, trueop0);
3221 return trueop0;
3223 /* x/1 is x. */
3224 if (trueop1 == CONST1_RTX (mode))
3226 tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3227 if (tem)
3228 return tem;
3230 /* Convert divide by power of two into shift. */
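/* For example, (udiv x (const_int 8)) becomes (lshiftrt x (const_int 3)).  */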
3231 if (CONST_INT_P (trueop1)
3232 && (val = exact_log2 (UINTVAL (trueop1))) > 0)
3233 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
3234 break;
3236 case DIV:
3237 /* Handle floating point and integers separately. */
3238 if (SCALAR_FLOAT_MODE_P (mode))
3240 /* Maybe change 0.0 / x to 0.0. This transformation isn't
3241 safe for modes with NaNs, since 0.0 / 0.0 will then be
3242 NaN rather than 0.0. Nor is it safe for modes with signed
3243 zeros, since dividing 0 by a negative number gives -0.0.  */
3244 if (trueop0 == CONST0_RTX (mode)
3245 && !HONOR_NANS (mode)
3246 && !HONOR_SIGNED_ZEROS (mode)
3247 && ! side_effects_p (op1))
3248 return op0;
3249 /* x/1.0 is x. */
3250 if (trueop1 == CONST1_RTX (mode)
3251 && !HONOR_SNANS (mode))
3252 return op0;
3254 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
3255 && trueop1 != CONST0_RTX (mode))
3257 const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);
3259 /* x/-1.0 is -x. */
3260 if (real_equal (d1, &dconstm1)
3261 && !HONOR_SNANS (mode))
3262 return simplify_gen_unary (NEG, mode, op0, mode);
3264 /* Change FP division by a constant into multiplication.
3265 Only do this with -freciprocal-math. */
3266 if (flag_reciprocal_math
3267 && !real_equal (d1, &dconst0))
3269 REAL_VALUE_TYPE d;
3270 real_arithmetic (&d, RDIV_EXPR, &dconst1, d1);
3271 tem = const_double_from_real_value (d, mode);
3272 return simplify_gen_binary (MULT, mode, op0, tem);
3276 else if (SCALAR_INT_MODE_P (mode))
3278 /* 0/x is 0 (or x&0 if x has side-effects). */
3279 if (trueop0 == CONST0_RTX (mode)
3280 && !cfun->can_throw_non_call_exceptions)
3282 if (side_effects_p (op1))
3283 return simplify_gen_binary (AND, mode, op1, trueop0);
3284 return trueop0;
3286 /* x/1 is x. */
3287 if (trueop1 == CONST1_RTX (mode))
3289 tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3290 if (tem)
3291 return tem;
3293 /* x/-1 is -x. */
3294 if (trueop1 == constm1_rtx)
3296 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3297 if (x)
3298 return simplify_gen_unary (NEG, mode, x, mode);
3301 break;
3303 case UMOD:
3304 /* 0%x is 0 (or x&0 if x has side-effects). */
3305 if (trueop0 == CONST0_RTX (mode))
3307 if (side_effects_p (op1))
3308 return simplify_gen_binary (AND, mode, op1, trueop0);
3309 return trueop0;
3311 /* x%1 is 0 (or x&0 if x has side-effects).  */
3312 if (trueop1 == CONST1_RTX (mode))
3314 if (side_effects_p (op0))
3315 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3316 return CONST0_RTX (mode);
3318 /* Implement modulus by power of two as AND. */
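/* For example, (umod x (const_int 16)) becomes (and x (const_int 15)).  */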
3319 if (CONST_INT_P (trueop1)
3320 && exact_log2 (UINTVAL (trueop1)) > 0)
3321 return simplify_gen_binary (AND, mode, op0,
3322 gen_int_mode (INTVAL (op1) - 1, mode));
3323 break;
3325 case MOD:
3326 /* 0%x is 0 (or x&0 if x has side-effects). */
3327 if (trueop0 == CONST0_RTX (mode))
3329 if (side_effects_p (op1))
3330 return simplify_gen_binary (AND, mode, op1, trueop0);
3331 return trueop0;
3333 /* x%1 and x%-1 are 0 (or x&0 if x has side-effects).  */
3334 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
3336 if (side_effects_p (op0))
3337 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3338 return CONST0_RTX (mode);
3340 break;
3342 case ROTATERT:
3343 case ROTATE:
3344 /* Canonicalize rotates by constant amount.  If op1 is bitsize / 2,
3345 prefer left rotation; if op1 is from bitsize / 2 + 1 to
3346 bitsize - 1, use the other direction of rotate with a
3347 1 .. bitsize / 2 - 1 amount instead.  */
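/* For example, in SImode (rotate x (const_int 24)) is canonicalized to
(rotatert x (const_int 8)).  */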
3348 #if defined(HAVE_rotate) && defined(HAVE_rotatert)
3349 if (CONST_INT_P (trueop1)
3350 && IN_RANGE (INTVAL (trueop1),
3351 GET_MODE_PRECISION (mode) / 2 + (code == ROTATE),
3352 GET_MODE_PRECISION (mode) - 1))
3353 return simplify_gen_binary (code == ROTATE ? ROTATERT : ROTATE,
3354 mode, op0, GEN_INT (GET_MODE_PRECISION (mode)
3355 - INTVAL (trueop1)));
3356 #endif
3357 /* FALLTHRU */
3358 case ASHIFTRT:
3359 if (trueop1 == CONST0_RTX (mode))
3360 return op0;
3361 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3362 return op0;
3363 /* Rotating ~0 always results in ~0. */
3364 if (CONST_INT_P (trueop0) && width <= HOST_BITS_PER_WIDE_INT
3365 && UINTVAL (trueop0) == GET_MODE_MASK (mode)
3366 && ! side_effects_p (op1))
3367 return op0;
3369 canonicalize_shift:
3370 /* Given:
3371 scalar modes M1, M2
3372 scalar constants c1, c2
3373 size (M2) > size (M1)
3374 c1 == size (M2) - size (M1)
3375 optimize:
3376 ([a|l]shiftrt:M1 (subreg:M1 (lshiftrt:M2 (reg:M2) (const_int <c1>))
3377 <low_part>)
3378 (const_int <c2>))
3379 to:
3380 (subreg:M1 ([a|l]shiftrt:M2 (reg:M2) (const_int <c1 + c2>))
3381 <low_part>). */
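/* As a concrete instance, with M2 == DImode and M1 == SImode (so c1 == 32)
on a little-endian target (illustrative modes, counts and subreg byte),
(lshiftrt:SI (subreg:SI (lshiftrt:DI (reg:DI) (const_int 32)) 0)
(const_int 5)) becomes
(subreg:SI (lshiftrt:DI (reg:DI) (const_int 37)) 0).  */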
3382 if ((code == ASHIFTRT || code == LSHIFTRT)
3383 && is_a <scalar_int_mode> (mode, &int_mode)
3384 && SUBREG_P (op0)
3385 && CONST_INT_P (op1)
3386 && GET_CODE (SUBREG_REG (op0)) == LSHIFTRT
3387 && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (op0)),
3388 &inner_mode)
3389 && CONST_INT_P (XEXP (SUBREG_REG (op0), 1))
3390 && GET_MODE_BITSIZE (inner_mode) > GET_MODE_BITSIZE (int_mode)
3391 && (INTVAL (XEXP (SUBREG_REG (op0), 1))
3392 == GET_MODE_BITSIZE (inner_mode) - GET_MODE_BITSIZE (int_mode))
3393 && subreg_lowpart_p (op0))
3395 rtx tmp = GEN_INT (INTVAL (XEXP (SUBREG_REG (op0), 1))
3396 + INTVAL (op1));
3397 tmp = simplify_gen_binary (code, inner_mode,
3398 XEXP (SUBREG_REG (op0), 0),
3399 tmp);
3400 return lowpart_subreg (int_mode, tmp, inner_mode);
3403 if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
3405 val = INTVAL (op1) & (GET_MODE_PRECISION (mode) - 1);
3406 if (val != INTVAL (op1))
3407 return simplify_gen_binary (code, mode, op0, GEN_INT (val));
3409 break;
3411 case ASHIFT:
3412 case SS_ASHIFT:
3413 case US_ASHIFT:
3414 if (trueop1 == CONST0_RTX (mode))
3415 return op0;
3416 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3417 return op0;
3418 goto canonicalize_shift;
3420 case LSHIFTRT:
3421 if (trueop1 == CONST0_RTX (mode))
3422 return op0;
3423 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3424 return op0;
3425 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
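/* For example, in SImode on a target where CLZ of zero is defined to be 32,
(lshiftrt (clz:SI x) (const_int 5)) is 1 exactly when x is zero, so it
becomes (eq x (const_int 0)) when STORE_FLAG_VALUE is 1
(illustrative mode).  */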
3426 if (GET_CODE (op0) == CLZ
3427 && CONST_INT_P (trueop1)
3428 && STORE_FLAG_VALUE == 1
3429 && INTVAL (trueop1) < (HOST_WIDE_INT)width)
3431 machine_mode imode = GET_MODE (XEXP (op0, 0));
3432 unsigned HOST_WIDE_INT zero_val = 0;
3434 if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
3435 && zero_val == GET_MODE_PRECISION (imode)
3436 && INTVAL (trueop1) == exact_log2 (zero_val))
3437 return simplify_gen_relational (EQ, mode, imode,
3438 XEXP (op0, 0), const0_rtx);
3440 goto canonicalize_shift;
3442 case SMIN:
3443 if (width <= HOST_BITS_PER_WIDE_INT
3444 && mode_signbit_p (mode, trueop1)
3445 && ! side_effects_p (op0))
3446 return op1;
3447 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3448 return op0;
3449 tem = simplify_associative_operation (code, mode, op0, op1);
3450 if (tem)
3451 return tem;
3452 break;
3454 case SMAX:
3455 if (width <= HOST_BITS_PER_WIDE_INT
3456 && CONST_INT_P (trueop1)
3457 && (UINTVAL (trueop1) == GET_MODE_MASK (mode) >> 1)
3458 && ! side_effects_p (op0))
3459 return op1;
3460 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3461 return op0;
3462 tem = simplify_associative_operation (code, mode, op0, op1);
3463 if (tem)
3464 return tem;
3465 break;
3467 case UMIN:
3468 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
3469 return op1;
3470 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3471 return op0;
3472 tem = simplify_associative_operation (code, mode, op0, op1);
3473 if (tem)
3474 return tem;
3475 break;
3477 case UMAX:
3478 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
3479 return op1;
3480 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3481 return op0;
3482 tem = simplify_associative_operation (code, mode, op0, op1);
3483 if (tem)
3484 return tem;
3485 break;
3487 case SS_PLUS:
3488 case US_PLUS:
3489 case SS_MINUS:
3490 case US_MINUS:
3491 case SS_MULT:
3492 case US_MULT:
3493 case SS_DIV:
3494 case US_DIV:
3495 /* ??? There are simplifications that can be done. */
3496 return 0;
3498 case VEC_SELECT:
3499 if (!VECTOR_MODE_P (mode))
3501 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3502 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
3503 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3504 gcc_assert (XVECLEN (trueop1, 0) == 1);
3505 gcc_assert (CONST_INT_P (XVECEXP (trueop1, 0, 0)));
3507 if (GET_CODE (trueop0) == CONST_VECTOR)
3508 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
3509 (trueop1, 0, 0)));
3511 /* Extract a scalar element from a nested VEC_SELECT expression
3512 (with an optional nested VEC_CONCAT expression).  Some targets
3513 (i386) extract a scalar element from a vector using a chain of
3514 nested VEC_SELECT expressions.  When the input operand is a memory
3515 operand, this operation can be simplified to a simple scalar
3516 load from an offset memory address.  */
3517 if (GET_CODE (trueop0) == VEC_SELECT)
3519 rtx op0 = XEXP (trueop0, 0);
3520 rtx op1 = XEXP (trueop0, 1);
3522 machine_mode opmode = GET_MODE (op0);
3523 int elt_size = GET_MODE_UNIT_SIZE (opmode);
3524 int n_elts = GET_MODE_SIZE (opmode) / elt_size;
3526 int i = INTVAL (XVECEXP (trueop1, 0, 0));
3527 int elem;
3529 rtvec vec;
3530 rtx tmp_op, tmp;
3532 gcc_assert (GET_CODE (op1) == PARALLEL);
3533 gcc_assert (i < n_elts);
3535 /* Select the element pointed to by the nested selector.  */
3536 elem = INTVAL (XVECEXP (op1, 0, i));
3538 /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */
3539 if (GET_CODE (op0) == VEC_CONCAT)
3541 rtx op00 = XEXP (op0, 0);
3542 rtx op01 = XEXP (op0, 1);
3544 machine_mode mode00, mode01;
3545 int n_elts00, n_elts01;
3547 mode00 = GET_MODE (op00);
3548 mode01 = GET_MODE (op01);
3550 /* Find out number of elements of each operand. */
3551 if (VECTOR_MODE_P (mode00))
3553 elt_size = GET_MODE_UNIT_SIZE (mode00);
3554 n_elts00 = GET_MODE_SIZE (mode00) / elt_size;
3556 else
3557 n_elts00 = 1;
3559 if (VECTOR_MODE_P (mode01))
3561 elt_size = GET_MODE_UNIT_SIZE (mode01);
3562 n_elts01 = GET_MODE_SIZE (mode01) / elt_size;
3564 else
3565 n_elts01 = 1;
3567 gcc_assert (n_elts == n_elts00 + n_elts01);
3569 /* Select the correct operand of the VEC_CONCAT
3570 and adjust the selector. */
3571 if (elem < n_elts01)
3572 tmp_op = op00;
3573 else
3575 tmp_op = op01;
3576 elem -= n_elts00;
3579 else
3580 tmp_op = op0;
3582 vec = rtvec_alloc (1);
3583 RTVEC_ELT (vec, 0) = GEN_INT (elem);
3585 tmp = gen_rtx_fmt_ee (code, mode,
3586 tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
3587 return tmp;
3589 if (GET_CODE (trueop0) == VEC_DUPLICATE
3590 && GET_MODE (XEXP (trueop0, 0)) == mode)
3591 return XEXP (trueop0, 0);
3593 else
3595 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3596 gcc_assert (GET_MODE_INNER (mode)
3597 == GET_MODE_INNER (GET_MODE (trueop0)));
3598 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3600 if (GET_CODE (trueop0) == CONST_VECTOR)
3602 int elt_size = GET_MODE_UNIT_SIZE (mode);
3603 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3604 rtvec v = rtvec_alloc (n_elts);
3605 unsigned int i;
3607 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
3608 for (i = 0; i < n_elts; i++)
3610 rtx x = XVECEXP (trueop1, 0, i);
3612 gcc_assert (CONST_INT_P (x));
3613 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
3614 INTVAL (x));
3617 return gen_rtx_CONST_VECTOR (mode, v);
3620 /* Recognize the identity. */
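/* E.g. (vec_select:V4SI X (parallel [0 1 2 3])) is just X when X
   already has mode V4SI.  */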
3621 if (GET_MODE (trueop0) == mode)
3623 bool maybe_ident = true;
3624 for (int i = 0; i < XVECLEN (trueop1, 0); i++)
3626 rtx j = XVECEXP (trueop1, 0, i);
3627 if (!CONST_INT_P (j) || INTVAL (j) != i)
3629 maybe_ident = false;
3630 break;
3633 if (maybe_ident)
3634 return trueop0;
3637 /* If we build {a,b} then permute it, build the result directly. */
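/* E.g. (vec_select:V2DF (vec_concat:V4DF (vec_concat:V2DF A B)
                                          (vec_concat:V2DF C D))
                         (parallel [0 3]))
   becomes (vec_concat:V2DF A D).  */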
3638 if (XVECLEN (trueop1, 0) == 2
3639 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3640 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3641 && GET_CODE (trueop0) == VEC_CONCAT
3642 && GET_CODE (XEXP (trueop0, 0)) == VEC_CONCAT
3643 && GET_MODE (XEXP (trueop0, 0)) == mode
3644 && GET_CODE (XEXP (trueop0, 1)) == VEC_CONCAT
3645 && GET_MODE (XEXP (trueop0, 1)) == mode)
3647 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3648 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3649 rtx subop0, subop1;
3651 gcc_assert (i0 < 4 && i1 < 4);
3652 subop0 = XEXP (XEXP (trueop0, i0 / 2), i0 % 2);
3653 subop1 = XEXP (XEXP (trueop0, i1 / 2), i1 % 2);
3655 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3658 if (XVECLEN (trueop1, 0) == 2
3659 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3660 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3661 && GET_CODE (trueop0) == VEC_CONCAT
3662 && GET_MODE (trueop0) == mode)
3664 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3665 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3666 rtx subop0, subop1;
3668 gcc_assert (i0 < 2 && i1 < 2);
3669 subop0 = XEXP (trueop0, i0);
3670 subop1 = XEXP (trueop0, i1);
3672 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3675 /* If we select one half of a vec_concat, return that. */
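/* E.g. (vec_select:V2SI (vec_concat:V4SI X Y) (parallel [2 3]))
   is simply Y when X and Y are both V2SI.  */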
3676 if (GET_CODE (trueop0) == VEC_CONCAT
3677 && CONST_INT_P (XVECEXP (trueop1, 0, 0)))
3679 rtx subop0 = XEXP (trueop0, 0);
3680 rtx subop1 = XEXP (trueop0, 1);
3681 machine_mode mode0 = GET_MODE (subop0);
3682 machine_mode mode1 = GET_MODE (subop1);
3683 int li = GET_MODE_UNIT_SIZE (mode0);
3684 int l0 = GET_MODE_SIZE (mode0) / li;
3685 int l1 = GET_MODE_SIZE (mode1) / li;
3686 int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3687 if (i0 == 0 && !side_effects_p (op1) && mode == mode0)
3689 bool success = true;
3690 for (int i = 1; i < l0; ++i)
3692 rtx j = XVECEXP (trueop1, 0, i);
3693 if (!CONST_INT_P (j) || INTVAL (j) != i)
3695 success = false;
3696 break;
3699 if (success)
3700 return subop0;
3702 if (i0 == l0 && !side_effects_p (op0) && mode == mode1)
3704 bool success = true;
3705 for (int i = 1; i < l1; ++i)
3707 rtx j = XVECEXP (trueop1, 0, i);
3708 if (!CONST_INT_P (j) || INTVAL (j) != i0 + i)
3710 success = false;
3711 break;
3714 if (success)
3715 return subop1;
3720 if (XVECLEN (trueop1, 0) == 1
3721 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3722 && GET_CODE (trueop0) == VEC_CONCAT)
3724 rtx vec = trueop0;
3725 int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
3727 /* Try to find the element in the VEC_CONCAT. */
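/* E.g. selecting element 2 (in SImode) of
   (vec_concat:V4SI (vec_concat:V2SI A B) (vec_concat:V2SI C D))
   starts with a byte offset of 8, descends into the second inner
   concat, and ends up returning C.  */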
3728 while (GET_MODE (vec) != mode
3729 && GET_CODE (vec) == VEC_CONCAT)
3731 HOST_WIDE_INT vec_size;
3733 if (CONST_INT_P (XEXP (vec, 0)))
3735 /* vec_concat of two const_ints doesn't make sense with
3736 respect to modes. */
3737 if (CONST_INT_P (XEXP (vec, 1)))
3738 return 0;
3740 vec_size = GET_MODE_SIZE (GET_MODE (trueop0))
3741 - GET_MODE_SIZE (GET_MODE (XEXP (vec, 1)));
3743 else
3744 vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
3746 if (offset < vec_size)
3747 vec = XEXP (vec, 0);
3748 else
3750 offset -= vec_size;
3751 vec = XEXP (vec, 1);
3753 vec = avoid_constant_pool_reference (vec);
3756 if (GET_MODE (vec) == mode)
3757 return vec;
3760 /* If we select elements in a vec_merge that all come from the same
3761 operand, select from that operand directly. */
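/* E.g. with a two-bit selector of 3 (both elements taken from the first
   vec_merge operand),
   (vec_select (vec_merge A B (const_int 3)) (parallel [0 1]))
   becomes (vec_select A (parallel [0 1])).  */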
3762 if (GET_CODE (op0) == VEC_MERGE)
3764 rtx trueop02 = avoid_constant_pool_reference (XEXP (op0, 2));
3765 if (CONST_INT_P (trueop02))
3767 unsigned HOST_WIDE_INT sel = UINTVAL (trueop02);
3768 bool all_operand0 = true;
3769 bool all_operand1 = true;
3770 for (int i = 0; i < XVECLEN (trueop1, 0); i++)
3772 rtx j = XVECEXP (trueop1, 0, i);
3773 if (sel & (HOST_WIDE_INT_1U << UINTVAL (j)))
3774 all_operand1 = false;
3775 else
3776 all_operand0 = false;
3778 if (all_operand0 && !side_effects_p (XEXP (op0, 1)))
3779 return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 0), op1);
3780 if (all_operand1 && !side_effects_p (XEXP (op0, 0)))
3781 return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 1), op1);
3785 /* If we have two nested selects that are inverses of each
3786 other, replace them with the source operand. */
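/* E.g. reversing a V4SI vector twice,
   (vec_select:V4SI (vec_select:V4SI X (parallel [3 2 1 0]))
                    (parallel [3 2 1 0])),
   yields the original X.  */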
3787 if (GET_CODE (trueop0) == VEC_SELECT
3788 && GET_MODE (XEXP (trueop0, 0)) == mode)
3790 rtx op0_subop1 = XEXP (trueop0, 1);
3791 gcc_assert (GET_CODE (op0_subop1) == PARALLEL);
3792 gcc_assert (XVECLEN (trueop1, 0) == GET_MODE_NUNITS (mode));
3794 /* Apply the outer ordering vector to the inner one. (The inner
3795 ordering vector is expressly permitted to be of a different
3796 length than the outer one.) If the result is { 0, 1, ..., n-1 }
3797 then the two VEC_SELECTs cancel. */
3798 for (int i = 0; i < XVECLEN (trueop1, 0); ++i)
3800 rtx x = XVECEXP (trueop1, 0, i);
3801 if (!CONST_INT_P (x))
3802 return 0;
3803 rtx y = XVECEXP (op0_subop1, 0, INTVAL (x));
3804 if (!CONST_INT_P (y) || i != INTVAL (y))
3805 return 0;
3807 return XEXP (trueop0, 0);
3810 return 0;
3811 case VEC_CONCAT:
3813 machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
3814 ? GET_MODE (trueop0)
3815 : GET_MODE_INNER (mode));
3816 machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
3817 ? GET_MODE (trueop1)
3818 : GET_MODE_INNER (mode));
3820 gcc_assert (VECTOR_MODE_P (mode));
3821 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
3822 == GET_MODE_SIZE (mode));
3824 if (VECTOR_MODE_P (op0_mode))
3825 gcc_assert (GET_MODE_INNER (mode)
3826 == GET_MODE_INNER (op0_mode));
3827 else
3828 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
3830 if (VECTOR_MODE_P (op1_mode))
3831 gcc_assert (GET_MODE_INNER (mode)
3832 == GET_MODE_INNER (op1_mode));
3833 else
3834 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
3836 if ((GET_CODE (trueop0) == CONST_VECTOR
3837 || CONST_SCALAR_INT_P (trueop0)
3838 || CONST_DOUBLE_AS_FLOAT_P (trueop0))
3839 && (GET_CODE (trueop1) == CONST_VECTOR
3840 || CONST_SCALAR_INT_P (trueop1)
3841 || CONST_DOUBLE_AS_FLOAT_P (trueop1)))
3843 int elt_size = GET_MODE_UNIT_SIZE (mode);
3844 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3845 rtvec v = rtvec_alloc (n_elts);
3846 unsigned int i;
3847 unsigned in_n_elts = 1;
3849 if (VECTOR_MODE_P (op0_mode))
3850 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
3851 for (i = 0; i < n_elts; i++)
3853 if (i < in_n_elts)
3855 if (!VECTOR_MODE_P (op0_mode))
3856 RTVEC_ELT (v, i) = trueop0;
3857 else
3858 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
3860 else
3862 if (!VECTOR_MODE_P (op1_mode))
3863 RTVEC_ELT (v, i) = trueop1;
3864 else
3865 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
3866 i - in_n_elts);
3870 return gen_rtx_CONST_VECTOR (mode, v);
3873 /* Try to merge two VEC_SELECTs from the same vector into a single one.
3874 Restrict the transformation to avoid generating a VEC_SELECT with a
3875 mode unrelated to its operand. */
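/* E.g. (vec_concat:V4SI (vec_select:V2SI X (parallel [0 1]))
                         (vec_select:V2SI X (parallel [2 3])))
   with X of mode V4SI becomes (vec_select:V4SI X (parallel [0 1 2 3])),
   which further simplification can reduce to X itself.  */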
3876 if (GET_CODE (trueop0) == VEC_SELECT
3877 && GET_CODE (trueop1) == VEC_SELECT
3878 && rtx_equal_p (XEXP (trueop0, 0), XEXP (trueop1, 0))
3879 && GET_MODE (XEXP (trueop0, 0)) == mode)
3881 rtx par0 = XEXP (trueop0, 1);
3882 rtx par1 = XEXP (trueop1, 1);
3883 int len0 = XVECLEN (par0, 0);
3884 int len1 = XVECLEN (par1, 0);
3885 rtvec vec = rtvec_alloc (len0 + len1);
3886 for (int i = 0; i < len0; i++)
3887 RTVEC_ELT (vec, i) = XVECEXP (par0, 0, i);
3888 for (int i = 0; i < len1; i++)
3889 RTVEC_ELT (vec, len0 + i) = XVECEXP (par1, 0, i);
3890 return simplify_gen_binary (VEC_SELECT, mode, XEXP (trueop0, 0),
3891 gen_rtx_PARALLEL (VOIDmode, vec));
3894 return 0;
3896 default:
3897 gcc_unreachable ();
3900 return 0;
3904 simplify_const_binary_operation (enum rtx_code code, machine_mode mode,
3905 rtx op0, rtx op1)
3907 if (VECTOR_MODE_P (mode)
3908 && code != VEC_CONCAT
3909 && GET_CODE (op0) == CONST_VECTOR
3910 && GET_CODE (op1) == CONST_VECTOR)
3912 unsigned n_elts = GET_MODE_NUNITS (mode);
3913 machine_mode op0mode = GET_MODE (op0);
3914 unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
3915 machine_mode op1mode = GET_MODE (op1);
3916 unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
3917 rtvec v = rtvec_alloc (n_elts);
3918 unsigned int i;
3920 gcc_assert (op0_n_elts == n_elts);
3921 gcc_assert (op1_n_elts == n_elts);
3922 for (i = 0; i < n_elts; i++)
3924 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
3925 CONST_VECTOR_ELT (op0, i),
3926 CONST_VECTOR_ELT (op1, i));
3927 if (!x)
3928 return 0;
3929 RTVEC_ELT (v, i) = x;
3932 return gen_rtx_CONST_VECTOR (mode, v);
3935 if (VECTOR_MODE_P (mode)
3936 && code == VEC_CONCAT
3937 && (CONST_SCALAR_INT_P (op0)
3938 || GET_CODE (op0) == CONST_FIXED
3939 || CONST_DOUBLE_AS_FLOAT_P (op0))
3940 && (CONST_SCALAR_INT_P (op1)
3941 || CONST_DOUBLE_AS_FLOAT_P (op1)
3942 || GET_CODE (op1) == CONST_FIXED))
3944 unsigned n_elts = GET_MODE_NUNITS (mode);
3945 rtvec v = rtvec_alloc (n_elts);
3947 gcc_assert (n_elts >= 2);
3948 if (n_elts == 2)
3950 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
3951 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
3953 RTVEC_ELT (v, 0) = op0;
3954 RTVEC_ELT (v, 1) = op1;
3956 else
3958 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
3959 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
3960 unsigned i;
3962 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
3963 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
3964 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
3966 for (i = 0; i < op0_n_elts; ++i)
3967 RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
3968 for (i = 0; i < op1_n_elts; ++i)
3969 RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
3972 return gen_rtx_CONST_VECTOR (mode, v);
3975 if (SCALAR_FLOAT_MODE_P (mode)
3976 && CONST_DOUBLE_AS_FLOAT_P (op0)
3977 && CONST_DOUBLE_AS_FLOAT_P (op1)
3978 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
3980 if (code == AND
3981 || code == IOR
3982 || code == XOR)
3984 long tmp0[4];
3985 long tmp1[4];
3986 REAL_VALUE_TYPE r;
3987 int i;
3989 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
3990 GET_MODE (op0));
3991 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
3992 GET_MODE (op1));
3993 for (i = 0; i < 4; i++)
3995 switch (code)
3997 case AND:
3998 tmp0[i] &= tmp1[i];
3999 break;
4000 case IOR:
4001 tmp0[i] |= tmp1[i];
4002 break;
4003 case XOR:
4004 tmp0[i] ^= tmp1[i];
4005 break;
4006 default:
4007 gcc_unreachable ();
4010 real_from_target (&r, tmp0, mode);
4011 return const_double_from_real_value (r, mode);
4013 else
4015 REAL_VALUE_TYPE f0, f1, value, result;
4016 const REAL_VALUE_TYPE *opr0, *opr1;
4017 bool inexact;
4019 opr0 = CONST_DOUBLE_REAL_VALUE (op0);
4020 opr1 = CONST_DOUBLE_REAL_VALUE (op1);
4022 if (HONOR_SNANS (mode)
4023 && (REAL_VALUE_ISSIGNALING_NAN (*opr0)
4024 || REAL_VALUE_ISSIGNALING_NAN (*opr1)))
4025 return 0;
4027 real_convert (&f0, mode, opr0);
4028 real_convert (&f1, mode, opr1);
4030 if (code == DIV
4031 && real_equal (&f1, &dconst0)
4032 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
4033 return 0;
4035 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
4036 && flag_trapping_math
4037 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
4039 int s0 = REAL_VALUE_NEGATIVE (f0);
4040 int s1 = REAL_VALUE_NEGATIVE (f1);
4042 switch (code)
4044 case PLUS:
4045 /* Inf + -Inf = NaN plus exception. */
4046 if (s0 != s1)
4047 return 0;
4048 break;
4049 case MINUS:
4050 /* Inf - Inf = NaN plus exception. */
4051 if (s0 == s1)
4052 return 0;
4053 break;
4054 case DIV:
4055 /* Inf / Inf = NaN plus exception. */
4056 return 0;
4057 default:
4058 break;
4062 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
4063 && flag_trapping_math
4064 && ((REAL_VALUE_ISINF (f0) && real_equal (&f1, &dconst0))
4065 || (REAL_VALUE_ISINF (f1)
4066 && real_equal (&f0, &dconst0))))
4067 /* Inf * 0 = NaN plus exception. */
4068 return 0;
4070 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
4071 &f0, &f1);
4072 real_convert (&result, mode, &value);
4074 /* Don't constant fold this floating point operation if
4075 the result has overflowed and flag_trapping_math is set. */
4077 if (flag_trapping_math
4078 && MODE_HAS_INFINITIES (mode)
4079 && REAL_VALUE_ISINF (result)
4080 && !REAL_VALUE_ISINF (f0)
4081 && !REAL_VALUE_ISINF (f1))
4082 /* Overflow plus exception. */
4083 return 0;
4085 /* Don't constant fold this floating point operation if the
4086 result may depend upon the run-time rounding mode and
4087 flag_rounding_math is set, or if GCC's software emulation
4088 is unable to accurately represent the result. */
4090 if ((flag_rounding_math
4091 || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
4092 && (inexact || !real_identical (&result, &value)))
4093 return NULL_RTX;
4095 return const_double_from_real_value (result, mode);
4099 /* We can fold some multi-word operations. */
4100 scalar_int_mode int_mode;
4101 if (is_a <scalar_int_mode> (mode, &int_mode)
4102 && CONST_SCALAR_INT_P (op0)
4103 && CONST_SCALAR_INT_P (op1))
4105 wide_int result;
4106 bool overflow;
4107 rtx_mode_t pop0 = rtx_mode_t (op0, int_mode);
4108 rtx_mode_t pop1 = rtx_mode_t (op1, int_mode);
4110 #if TARGET_SUPPORTS_WIDE_INT == 0
4111 /* This assert keeps the simplification from producing a result
4112 that cannot be represented in a CONST_DOUBLE, but a lot of
4113 upstream callers expect that this function never fails to
4114 simplify something, so if this condition were moved into the test
4115 above, the code would die later anyway. If this assert
4116 triggers, you just need to make the port support wide int. */
4117 gcc_assert (GET_MODE_PRECISION (int_mode) <= HOST_BITS_PER_DOUBLE_INT);
4118 #endif
4119 switch (code)
4121 case MINUS:
4122 result = wi::sub (pop0, pop1);
4123 break;
4125 case PLUS:
4126 result = wi::add (pop0, pop1);
4127 break;
4129 case MULT:
4130 result = wi::mul (pop0, pop1);
4131 break;
4133 case DIV:
4134 result = wi::div_trunc (pop0, pop1, SIGNED, &overflow);
4135 if (overflow)
4136 return NULL_RTX;
4137 break;
4139 case MOD:
4140 result = wi::mod_trunc (pop0, pop1, SIGNED, &overflow);
4141 if (overflow)
4142 return NULL_RTX;
4143 break;
4145 case UDIV:
4146 result = wi::div_trunc (pop0, pop1, UNSIGNED, &overflow);
4147 if (overflow)
4148 return NULL_RTX;
4149 break;
4151 case UMOD:
4152 result = wi::mod_trunc (pop0, pop1, UNSIGNED, &overflow);
4153 if (overflow)
4154 return NULL_RTX;
4155 break;
4157 case AND:
4158 result = wi::bit_and (pop0, pop1);
4159 break;
4161 case IOR:
4162 result = wi::bit_or (pop0, pop1);
4163 break;
4165 case XOR:
4166 result = wi::bit_xor (pop0, pop1);
4167 break;
4169 case SMIN:
4170 result = wi::smin (pop0, pop1);
4171 break;
4173 case SMAX:
4174 result = wi::smax (pop0, pop1);
4175 break;
4177 case UMIN:
4178 result = wi::umin (pop0, pop1);
4179 break;
4181 case UMAX:
4182 result = wi::umax (pop0, pop1);
4183 break;
4185 case LSHIFTRT:
4186 case ASHIFTRT:
4187 case ASHIFT:
4189 wide_int wop1 = pop1;
4190 if (SHIFT_COUNT_TRUNCATED)
4191 wop1 = wi::umod_trunc (wop1, GET_MODE_PRECISION (int_mode));
4192 else if (wi::geu_p (wop1, GET_MODE_PRECISION (int_mode)))
4193 return NULL_RTX;
4195 switch (code)
4197 case LSHIFTRT:
4198 result = wi::lrshift (pop0, wop1);
4199 break;
4201 case ASHIFTRT:
4202 result = wi::arshift (pop0, wop1);
4203 break;
4205 case ASHIFT:
4206 result = wi::lshift (pop0, wop1);
4207 break;
4209 default:
4210 gcc_unreachable ();
4212 break;
4214 case ROTATE:
4215 case ROTATERT:
4217 if (wi::neg_p (pop1))
4218 return NULL_RTX;
4220 switch (code)
4222 case ROTATE:
4223 result = wi::lrotate (pop0, pop1);
4224 break;
4226 case ROTATERT:
4227 result = wi::rrotate (pop0, pop1);
4228 break;
4230 default:
4231 gcc_unreachable ();
4233 break;
4235 default:
4236 return NULL_RTX;
4238 return immed_wide_int_const (result, int_mode);
4241 return NULL_RTX;
4246 /* Return a positive integer if X should sort after Y. The value
4247 returned is 1 if and only if X and Y are both regs: precedence
differences are doubled below, so a result of exactly 1 can only come
from the REGNO test, which the caller treats as a pure register swap. */
4249 static int
4250 simplify_plus_minus_op_data_cmp (rtx x, rtx y)
4252 int result;
4254 result = (commutative_operand_precedence (y)
4255 - commutative_operand_precedence (x));
4256 if (result)
4257 return result + result;
4259 /* Group together equal REGs to do more simplification. */
4260 if (REG_P (x) && REG_P (y))
4261 return REGNO (x) > REGNO (y);
4263 return 0;
4266 /* Simplify and canonicalize a PLUS or MINUS, at least one of whose
4267 operands may be another PLUS or MINUS.
4269 Rather than testing for specific cases, we do this by a brute-force method
4270 and do all possible simplifications until no more changes occur. Then
4271 we rebuild the operation.
4273 May return NULL_RTX when no changes were made. */
4275 static rtx
4276 simplify_plus_minus (enum rtx_code code, machine_mode mode, rtx op0,
4277 rtx op1)
4279 struct simplify_plus_minus_op_data
4281 rtx op;
4282 short neg;
4283 } ops[16];
4284 rtx result, tem;
4285 int n_ops = 2;
4286 int changed, n_constants, canonicalized = 0;
4287 int i, j;
4289 memset (ops, 0, sizeof ops);
4291 /* Set up the two operands and then expand them until nothing has been
4292 changed. If we run out of room in our array, give up; this should
4293 almost never happen. */
4295 ops[0].op = op0;
4296 ops[0].neg = 0;
4297 ops[1].op = op1;
4298 ops[1].neg = (code == MINUS);
4302 changed = 0;
4303 n_constants = 0;
4305 for (i = 0; i < n_ops; i++)
4307 rtx this_op = ops[i].op;
4308 int this_neg = ops[i].neg;
4309 enum rtx_code this_code = GET_CODE (this_op);
4311 switch (this_code)
4313 case PLUS:
4314 case MINUS:
4315 if (n_ops == ARRAY_SIZE (ops))
4316 return NULL_RTX;
4318 ops[n_ops].op = XEXP (this_op, 1);
4319 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
4320 n_ops++;
4322 ops[i].op = XEXP (this_op, 0);
4323 changed = 1;
4324 /* If this operand was negated then we will potentially
4325 canonicalize the expression. Similarly if we don't
4326 place the operands adjacent we're re-ordering the
4327 expression and thus might be performing a
4328 canonicalization. Ignore register re-ordering.
4329 ??? It might be better to shuffle the ops array here,
4330 but then (plus (plus (A, B), plus (C, D))) wouldn't
4331 be seen as non-canonical. */
4332 if (this_neg
4333 || (i != n_ops - 2
4334 && !(REG_P (ops[i].op) && REG_P (ops[n_ops - 1].op))))
4335 canonicalized = 1;
4336 break;
4338 case NEG:
4339 ops[i].op = XEXP (this_op, 0);
4340 ops[i].neg = ! this_neg;
4341 changed = 1;
4342 canonicalized = 1;
4343 break;
4345 case CONST:
4346 if (n_ops != ARRAY_SIZE (ops)
4347 && GET_CODE (XEXP (this_op, 0)) == PLUS
4348 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
4349 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
4351 ops[i].op = XEXP (XEXP (this_op, 0), 0);
4352 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
4353 ops[n_ops].neg = this_neg;
4354 n_ops++;
4355 changed = 1;
4356 canonicalized = 1;
4358 break;
4360 case NOT:
4361 /* ~a -> (-a - 1) */
4362 if (n_ops != ARRAY_SIZE (ops))
4364 ops[n_ops].op = CONSTM1_RTX (mode);
4365 ops[n_ops++].neg = this_neg;
4366 ops[i].op = XEXP (this_op, 0);
4367 ops[i].neg = !this_neg;
4368 changed = 1;
4369 canonicalized = 1;
4371 break;
4373 case CONST_INT:
4374 n_constants++;
4375 if (this_neg)
4377 ops[i].op = neg_const_int (mode, this_op);
4378 ops[i].neg = 0;
4379 changed = 1;
4380 canonicalized = 1;
4382 break;
4384 default:
4385 break;
4389 while (changed);
4391 if (n_constants > 1)
4392 canonicalized = 1;
4394 gcc_assert (n_ops >= 2);
4396 /* If we only have two operands, we can avoid the loops. */
4397 if (n_ops == 2)
4399 enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
4400 rtx lhs, rhs;
4402 /* Get the two operands. Be careful with the order, especially for
4403 the cases where code == MINUS. */
4404 if (ops[0].neg && ops[1].neg)
4406 lhs = gen_rtx_NEG (mode, ops[0].op);
4407 rhs = ops[1].op;
4409 else if (ops[0].neg)
4411 lhs = ops[1].op;
4412 rhs = ops[0].op;
4414 else
4416 lhs = ops[0].op;
4417 rhs = ops[1].op;
4420 return simplify_const_binary_operation (code, mode, lhs, rhs);
4423 /* Now simplify each pair of operands until nothing changes. */
4424 while (1)
4426 /* Insertion sort is good enough for a small array. */
4427 for (i = 1; i < n_ops; i++)
4429 struct simplify_plus_minus_op_data save;
4430 int cmp;
4432 j = i - 1;
4433 cmp = simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op);
4434 if (cmp <= 0)
4435 continue;
4436 /* Just swapping registers doesn't count as canonicalization. */
4437 if (cmp != 1)
4438 canonicalized = 1;
4440 save = ops[i];
4442 ops[j + 1] = ops[j];
4443 while (j--
4444 && simplify_plus_minus_op_data_cmp (ops[j].op, save.op) > 0);
4445 ops[j + 1] = save;
4448 changed = 0;
4449 for (i = n_ops - 1; i > 0; i--)
4450 for (j = i - 1; j >= 0; j--)
4452 rtx lhs = ops[j].op, rhs = ops[i].op;
4453 int lneg = ops[j].neg, rneg = ops[i].neg;
4455 if (lhs != 0 && rhs != 0)
4457 enum rtx_code ncode = PLUS;
4459 if (lneg != rneg)
4461 ncode = MINUS;
4462 if (lneg)
4463 std::swap (lhs, rhs);
4465 else if (swap_commutative_operands_p (lhs, rhs))
4466 std::swap (lhs, rhs);
4468 if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
4469 && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
4471 rtx tem_lhs, tem_rhs;
4473 tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
4474 tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
4475 tem = simplify_binary_operation (ncode, mode, tem_lhs,
4476 tem_rhs);
4478 if (tem && !CONSTANT_P (tem))
4479 tem = gen_rtx_CONST (GET_MODE (tem), tem);
4481 else
4482 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
4484 if (tem)
4486 /* Reject "simplifications" that just wrap the two
4487 arguments in a CONST. Failure to do so can result
4488 in infinite recursion with simplify_binary_operation
4489 when it calls us to simplify CONST operations.
4490 Also, if we find such a simplification, don't try
4491 any more combinations with this rhs: We must have
4492 something like symbol+offset, ie. one of the
4493 trivial CONST expressions we handle later. */
4494 if (GET_CODE (tem) == CONST
4495 && GET_CODE (XEXP (tem, 0)) == ncode
4496 && XEXP (XEXP (tem, 0), 0) == lhs
4497 && XEXP (XEXP (tem, 0), 1) == rhs)
4498 break;
4499 lneg &= rneg;
4500 if (GET_CODE (tem) == NEG)
4501 tem = XEXP (tem, 0), lneg = !lneg;
4502 if (CONST_INT_P (tem) && lneg)
4503 tem = neg_const_int (mode, tem), lneg = 0;
4505 ops[i].op = tem;
4506 ops[i].neg = lneg;
4507 ops[j].op = NULL_RTX;
4508 changed = 1;
4509 canonicalized = 1;
4514 if (!changed)
4515 break;
4517 /* Pack all the operands to the lower-numbered entries. */
4518 for (i = 0, j = 0; j < n_ops; j++)
4519 if (ops[j].op)
4521 ops[i] = ops[j];
4522 i++;
4524 n_ops = i;
4527 /* If nothing changed, check that rematerialization of rtl instructions
4528 is still required. */
4529 if (!canonicalized)
4531 /* Perform rematerialization only if all operands are registers and
4532 all operations are PLUS. */
4533 /* ??? Also disallow (non-global, non-frame) fixed registers to work
4534 around rs6000 and how it uses the CA register. See PR67145. */
4535 for (i = 0; i < n_ops; i++)
4536 if (ops[i].neg
4537 || !REG_P (ops[i].op)
4538 || (REGNO (ops[i].op) < FIRST_PSEUDO_REGISTER
4539 && fixed_regs[REGNO (ops[i].op)]
4540 && !global_regs[REGNO (ops[i].op)]
4541 && ops[i].op != frame_pointer_rtx
4542 && ops[i].op != arg_pointer_rtx
4543 && ops[i].op != stack_pointer_rtx))
4544 return NULL_RTX;
4545 goto gen_result;
4548 /* Create (minus -C X) instead of (neg (const (plus X C))). */
4549 if (n_ops == 2
4550 && CONST_INT_P (ops[1].op)
4551 && CONSTANT_P (ops[0].op)
4552 && ops[0].neg)
4553 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
4555 /* We suppressed creation of trivial CONST expressions in the
4556 combination loop to avoid recursion. Create one manually now.
4557 The combination loop should have ensured that there is exactly
4558 one CONST_INT, and the sort will have ensured that it is last
4559 in the array and that any other constant will be next-to-last. */
4561 if (n_ops > 1
4562 && CONST_INT_P (ops[n_ops - 1].op)
4563 && CONSTANT_P (ops[n_ops - 2].op))
4565 rtx value = ops[n_ops - 1].op;
4566 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
4567 value = neg_const_int (mode, value);
4568 if (CONST_INT_P (value))
4570 ops[n_ops - 2].op = plus_constant (mode, ops[n_ops - 2].op,
4571 INTVAL (value));
4572 n_ops--;
4576 /* Put a non-negated operand first, if possible. */
4578 for (i = 0; i < n_ops && ops[i].neg; i++)
4579 continue;
4580 if (i == n_ops)
4581 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
4582 else if (i != 0)
4584 tem = ops[0].op;
4585 ops[0] = ops[i];
4586 ops[i].op = tem;
4587 ops[i].neg = 1;
4590 /* Now make the result by performing the requested operations. */
4591 gen_result:
4592 result = ops[0].op;
4593 for (i = 1; i < n_ops; i++)
4594 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
4595 mode, result, ops[i].op);
4597 return result;
4600 /* Check whether an operand is suitable for calling simplify_plus_minus. */
4601 static bool
4602 plus_minus_operand_p (const_rtx x)
4604 return GET_CODE (x) == PLUS
4605 || GET_CODE (x) == MINUS
4606 || (GET_CODE (x) == CONST
4607 && GET_CODE (XEXP (x, 0)) == PLUS
4608 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
4609 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
4612 /* Like simplify_binary_operation except used for relational operators.
4613 MODE is the mode of the result. If MODE is VOIDmode, the operands must
4614 not both be VOIDmode as well.
4616 CMP_MODE specifies the mode in which the comparison is done, so it is
4617 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
4618 the operands or, if both are VOIDmode, the operands are compared in
4619 "infinite precision". */
4621 simplify_relational_operation (enum rtx_code code, machine_mode mode,
4622 machine_mode cmp_mode, rtx op0, rtx op1)
4624 rtx tem, trueop0, trueop1;
4626 if (cmp_mode == VOIDmode)
4627 cmp_mode = GET_MODE (op0);
4628 if (cmp_mode == VOIDmode)
4629 cmp_mode = GET_MODE (op1);
4631 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
4632 if (tem)
4634 if (SCALAR_FLOAT_MODE_P (mode))
4636 if (tem == const0_rtx)
4637 return CONST0_RTX (mode);
4638 #ifdef FLOAT_STORE_FLAG_VALUE
4640 REAL_VALUE_TYPE val;
4641 val = FLOAT_STORE_FLAG_VALUE (mode);
4642 return const_double_from_real_value (val, mode);
4644 #else
4645 return NULL_RTX;
4646 #endif
4648 if (VECTOR_MODE_P (mode))
4650 if (tem == const0_rtx)
4651 return CONST0_RTX (mode);
4652 #ifdef VECTOR_STORE_FLAG_VALUE
4654 int i, units;
4655 rtvec v;
4657 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
4658 if (val == NULL_RTX)
4659 return NULL_RTX;
4660 if (val == const1_rtx)
4661 return CONST1_RTX (mode);
4663 units = GET_MODE_NUNITS (mode);
4664 v = rtvec_alloc (units);
4665 for (i = 0; i < units; i++)
4666 RTVEC_ELT (v, i) = val;
4667 return gen_rtx_raw_CONST_VECTOR (mode, v);
4669 #else
4670 return NULL_RTX;
4671 #endif
4674 return tem;
4677 /* For the following tests, ensure const0_rtx is op1. */
4678 if (swap_commutative_operands_p (op0, op1)
4679 || (op0 == const0_rtx && op1 != const0_rtx))
4680 std::swap (op0, op1), code = swap_condition (code);
4682 /* If op0 is a compare, extract the comparison arguments from it. */
4683 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4684 return simplify_gen_relational (code, mode, VOIDmode,
4685 XEXP (op0, 0), XEXP (op0, 1));
4687 if (GET_MODE_CLASS (cmp_mode) == MODE_CC
4688 || CC0_P (op0))
4689 return NULL_RTX;
4691 trueop0 = avoid_constant_pool_reference (op0);
4692 trueop1 = avoid_constant_pool_reference (op1);
4693 return simplify_relational_operation_1 (code, mode, cmp_mode,
4694 trueop0, trueop1);
4697 /* This part of simplify_relational_operation is only used when CMP_MODE
4698 is not in class MODE_CC (i.e. it is a real comparison).
4700 MODE is the mode of the result, while CMP_MODE specifies the mode
4701 in which the comparison is done, so it is the mode of the operands. */
4703 static rtx
4704 simplify_relational_operation_1 (enum rtx_code code, machine_mode mode,
4705 machine_mode cmp_mode, rtx op0, rtx op1)
4707 enum rtx_code op0code = GET_CODE (op0);
4709 if (op1 == const0_rtx && COMPARISON_P (op0))
4711 /* If op0 is a comparison, extract the comparison arguments
4712 from it. */
4713 if (code == NE)
4715 if (GET_MODE (op0) == mode)
4716 return simplify_rtx (op0);
4717 else
4718 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
4719 XEXP (op0, 0), XEXP (op0, 1));
4721 else if (code == EQ)
4723 enum rtx_code new_code = reversed_comparison_code (op0, NULL);
4724 if (new_code != UNKNOWN)
4725 return simplify_gen_relational (new_code, mode, VOIDmode,
4726 XEXP (op0, 0), XEXP (op0, 1));
4730 /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
4731 (GEU/LTU a -C). Likewise for (LTU/GEU (PLUS a C) a). */
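/* E.g. the unsigned overflow check (ltu (plus A 4) 4), which tests
   whether A + 4 wrapped around, becomes (geu A (const_int -4)).  */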
4732 if ((code == LTU || code == GEU)
4733 && GET_CODE (op0) == PLUS
4734 && CONST_INT_P (XEXP (op0, 1))
4735 && (rtx_equal_p (op1, XEXP (op0, 0))
4736 || rtx_equal_p (op1, XEXP (op0, 1)))
4737 /* (LTU/GEU (PLUS a 0) 0) is not the same as (GEU/LTU a 0). */
4738 && XEXP (op0, 1) != const0_rtx)
4740 rtx new_cmp
4741 = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
4742 return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
4743 cmp_mode, XEXP (op0, 0), new_cmp);
4746 /* (GTU (PLUS a C) (C - 1)) where C is a non-zero constant can be
4747 transformed into (LTU a -C). */
4748 if (code == GTU && GET_CODE (op0) == PLUS && CONST_INT_P (op1)
4749 && CONST_INT_P (XEXP (op0, 1))
4750 && (UINTVAL (op1) == UINTVAL (XEXP (op0, 1)) - 1)
4751 && XEXP (op0, 1) != const0_rtx)
4753 rtx new_cmp
4754 = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
4755 return simplify_gen_relational (LTU, mode, cmp_mode,
4756 XEXP (op0, 0), new_cmp);
4759 /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a). */
4760 if ((code == LTU || code == GEU)
4761 && GET_CODE (op0) == PLUS
4762 && rtx_equal_p (op1, XEXP (op0, 1))
4763 /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b). */
4764 && !rtx_equal_p (op1, XEXP (op0, 0)))
4765 return simplify_gen_relational (code, mode, cmp_mode, op0,
4766 copy_rtx (XEXP (op0, 0)));
4768 if (op1 == const0_rtx)
4770 /* Canonicalize (GTU x 0) as (NE x 0). */
4771 if (code == GTU)
4772 return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
4773 /* Canonicalize (LEU x 0) as (EQ x 0). */
4774 if (code == LEU)
4775 return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
4777 else if (op1 == const1_rtx)
4779 switch (code)
4781 case GE:
4782 /* Canonicalize (GE x 1) as (GT x 0). */
4783 return simplify_gen_relational (GT, mode, cmp_mode,
4784 op0, const0_rtx);
4785 case GEU:
4786 /* Canonicalize (GEU x 1) as (NE x 0). */
4787 return simplify_gen_relational (NE, mode, cmp_mode,
4788 op0, const0_rtx);
4789 case LT:
4790 /* Canonicalize (LT x 1) as (LE x 0). */
4791 return simplify_gen_relational (LE, mode, cmp_mode,
4792 op0, const0_rtx);
4793 case LTU:
4794 /* Canonicalize (LTU x 1) as (EQ x 0). */
4795 return simplify_gen_relational (EQ, mode, cmp_mode,
4796 op0, const0_rtx);
4797 default:
4798 break;
4801 else if (op1 == constm1_rtx)
4803 /* Canonicalize (LE x -1) as (LT x 0). */
4804 if (code == LE)
4805 return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
4806 /* Canonicalize (GT x -1) as (GE x 0). */
4807 if (code == GT)
4808 return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
4811 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
4812 if ((code == EQ || code == NE)
4813 && (op0code == PLUS || op0code == MINUS)
4814 && CONSTANT_P (op1)
4815 && CONSTANT_P (XEXP (op0, 1))
4816 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
4818 rtx x = XEXP (op0, 0);
4819 rtx c = XEXP (op0, 1);
4820 enum rtx_code invcode = op0code == PLUS ? MINUS : PLUS;
4821 rtx tem = simplify_gen_binary (invcode, cmp_mode, op1, c);
4823 /* Detect an infinite recursive condition, where we oscillate at this
4824 simplification case between:
4825 A + B == C <---> C - B == A,
4826 where A, B, and C are all constants with non-simplifiable expressions,
4827 usually SYMBOL_REFs. */
4828 if (GET_CODE (tem) == invcode
4829 && CONSTANT_P (x)
4830 && rtx_equal_p (c, XEXP (tem, 1)))
4831 return NULL_RTX;
4833 return simplify_gen_relational (code, mode, cmp_mode, x, tem);
4836 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0)) is
4837 the same as (zero_extract:SI FOO (const_int 1) BAR). */
4838 scalar_int_mode int_mode, int_cmp_mode;
4839 if (code == NE
4840 && op1 == const0_rtx
4841 && is_int_mode (mode, &int_mode)
4842 && is_a <scalar_int_mode> (cmp_mode, &int_cmp_mode)
4843 /* ??? Work-around BImode bugs in the ia64 backend. */
4844 && int_mode != BImode
4845 && int_cmp_mode != BImode
4846 && nonzero_bits (op0, int_cmp_mode) == 1
4847 && STORE_FLAG_VALUE == 1)
4848 return GET_MODE_SIZE (int_mode) > GET_MODE_SIZE (int_cmp_mode)
4849 ? simplify_gen_unary (ZERO_EXTEND, int_mode, op0, int_cmp_mode)
4850 : lowpart_subreg (int_mode, op0, int_cmp_mode);
4852 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
4853 if ((code == EQ || code == NE)
4854 && op1 == const0_rtx
4855 && op0code == XOR)
4856 return simplify_gen_relational (code, mode, cmp_mode,
4857 XEXP (op0, 0), XEXP (op0, 1));
4859 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
4860 if ((code == EQ || code == NE)
4861 && op0code == XOR
4862 && rtx_equal_p (XEXP (op0, 0), op1)
4863 && !side_effects_p (XEXP (op0, 0)))
4864 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 1),
4865 CONST0_RTX (mode));
4867 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
4868 if ((code == EQ || code == NE)
4869 && op0code == XOR
4870 && rtx_equal_p (XEXP (op0, 1), op1)
4871 && !side_effects_p (XEXP (op0, 1)))
4872 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4873 CONST0_RTX (mode));
4875 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
4876 if ((code == EQ || code == NE)
4877 && op0code == XOR
4878 && CONST_SCALAR_INT_P (op1)
4879 && CONST_SCALAR_INT_P (XEXP (op0, 1)))
4880 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4881 simplify_gen_binary (XOR, cmp_mode,
4882 XEXP (op0, 1), op1));
4884 /* (eq/ne (and x y) x) simplifies to (eq/ne (and (not y) x) 0), which
4885 can be implemented with a BICS instruction on some targets, or
4886 constant-folded if y is a constant. */
4887 if ((code == EQ || code == NE)
4888 && op0code == AND
4889 && rtx_equal_p (XEXP (op0, 0), op1)
4890 && !side_effects_p (op1)
4891 && op1 != CONST0_RTX (cmp_mode))
4893 rtx not_y = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 1), cmp_mode);
4894 rtx lhs = simplify_gen_binary (AND, cmp_mode, not_y, XEXP (op0, 0));
4896 return simplify_gen_relational (code, mode, cmp_mode, lhs,
4897 CONST0_RTX (cmp_mode));
4900 /* Likewise for (eq/ne (and x y) y). */
4901 if ((code == EQ || code == NE)
4902 && op0code == AND
4903 && rtx_equal_p (XEXP (op0, 1), op1)
4904 && !side_effects_p (op1)
4905 && op1 != CONST0_RTX (cmp_mode))
4907 rtx not_x = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 0), cmp_mode);
4908 rtx lhs = simplify_gen_binary (AND, cmp_mode, not_x, XEXP (op0, 1));
4910 return simplify_gen_relational (code, mode, cmp_mode, lhs,
4911 CONST0_RTX (cmp_mode));
4914 /* (eq/ne (bswap x) C1) simplifies to (eq/ne x C2) with C2 swapped. */
4915 if ((code == EQ || code == NE)
4916 && GET_CODE (op0) == BSWAP
4917 && CONST_SCALAR_INT_P (op1))
4918 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4919 simplify_gen_unary (BSWAP, cmp_mode,
4920 op1, cmp_mode));
4922 /* (eq/ne (bswap x) (bswap y)) simplifies to (eq/ne x y). */
4923 if ((code == EQ || code == NE)
4924 && GET_CODE (op0) == BSWAP
4925 && GET_CODE (op1) == BSWAP)
4926 return simplify_gen_relational (code, mode, cmp_mode,
4927 XEXP (op0, 0), XEXP (op1, 0));
4929 if (op0code == POPCOUNT && op1 == const0_rtx)
4930 switch (code)
4932 case EQ:
4933 case LE:
4934 case LEU:
4935 /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */
4936 return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
4937 XEXP (op0, 0), const0_rtx);
4939 case NE:
4940 case GT:
4941 case GTU:
4942 /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */
4943 return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
4944 XEXP (op0, 0), const0_rtx);
4946 default:
4947 break;
4950 return NULL_RTX;
4953 enum
4955 CMP_EQ = 1,
4956 CMP_LT = 2,
4957 CMP_GT = 4,
4958 CMP_LTU = 8,
4959 CMP_GTU = 16
4963 /* Convert the known results for EQ, LT, GT, LTU, GTU contained in
4964 KNOWN_RESULTS to a CONST_INT, based on the requested comparison CODE.
4965 For KNOWN_RESULTS to make sense it should be either CMP_EQ, or the
4966 logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
4967 For floating-point comparisons, assume that the operands were ordered. */
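/* E.g. comparison_result (LEU, CMP_GTU) is const0_rtx: if the first
   operand is known to be unsigned-greater than the second, then
   "x <= y" (unsigned) must be false.  */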
4969 static rtx
4970 comparison_result (enum rtx_code code, int known_results)
4972 switch (code)
4974 case EQ:
4975 case UNEQ:
4976 return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
4977 case NE:
4978 case LTGT:
4979 return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;
4981 case LT:
4982 case UNLT:
4983 return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
4984 case GE:
4985 case UNGE:
4986 return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;
4988 case GT:
4989 case UNGT:
4990 return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
4991 case LE:
4992 case UNLE:
4993 return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;
4995 case LTU:
4996 return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
4997 case GEU:
4998 return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;
5000 case GTU:
5001 return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
5002 case LEU:
5003 return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;
5005 case ORDERED:
5006 return const_true_rtx;
5007 case UNORDERED:
5008 return const0_rtx;
5009 default:
5010 gcc_unreachable ();
5014 /* Check if the given comparison (done in the given MODE) is actually
5015 a tautology or a contradiction. If the mode is VOIDmode, the
5016 comparison is done in "infinite precision". If no simplification
5017 is possible, this function returns zero. Otherwise, it returns
5018 either const_true_rtx or const0_rtx. */
5021 simplify_const_relational_operation (enum rtx_code code,
5022 machine_mode mode,
5023 rtx op0, rtx op1)
5025 rtx tem;
5026 rtx trueop0;
5027 rtx trueop1;
5029 gcc_assert (mode != VOIDmode
5030 || (GET_MODE (op0) == VOIDmode
5031 && GET_MODE (op1) == VOIDmode));
5033 /* If op0 is a compare, extract the comparison arguments from it. */
5034 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
5036 op1 = XEXP (op0, 1);
5037 op0 = XEXP (op0, 0);
5039 if (GET_MODE (op0) != VOIDmode)
5040 mode = GET_MODE (op0);
5041 else if (GET_MODE (op1) != VOIDmode)
5042 mode = GET_MODE (op1);
5043 else
5044 return 0;
5047 /* We can't simplify MODE_CC values since we don't know what the
5048 actual comparison is. */
5049 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
5050 return 0;
5052 /* Make sure the constant is second. */
5053 if (swap_commutative_operands_p (op0, op1))
5055 std::swap (op0, op1);
5056 code = swap_condition (code);
5059 trueop0 = avoid_constant_pool_reference (op0);
5060 trueop1 = avoid_constant_pool_reference (op1);
5062 /* For integer comparisons of A and B maybe we can simplify A - B and can
5063 then simplify a comparison of that with zero. If A and B are both either
5064 a register or a CONST_INT, this can't help; testing for these cases will
5065 prevent infinite recursion here and speed things up.
5067 We can only do this for EQ and NE comparisons, as otherwise we may
5068 lose or introduce overflow which we cannot disregard as undefined,
5069 since we do not know the signedness of the operation on either the
5070 left or the right hand side of the comparison. */
5072 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
5073 && (code == EQ || code == NE)
5074 && ! ((REG_P (op0) || CONST_INT_P (trueop0))
5075 && (REG_P (op1) || CONST_INT_P (trueop1)))
5076 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
5077 /* We cannot do this if tem is a nonzero address. */
5078 && ! nonzero_address_p (tem))
5079 return simplify_const_relational_operation (signed_condition (code),
5080 mode, tem, const0_rtx);
5082 if (! HONOR_NANS (mode) && code == ORDERED)
5083 return const_true_rtx;
5085 if (! HONOR_NANS (mode) && code == UNORDERED)
5086 return const0_rtx;
5088 /* For modes without NaNs, if the two operands are equal, we know the
5089 result except if they have side-effects. Even with NaNs we know
5090 the result of unordered comparisons and, if signaling NaNs are
5091 irrelevant, also the result of LT/GT/LTGT. */
5092 if ((! HONOR_NANS (trueop0)
5093 || code == UNEQ || code == UNLE || code == UNGE
5094 || ((code == LT || code == GT || code == LTGT)
5095 && ! HONOR_SNANS (trueop0)))
5096 && rtx_equal_p (trueop0, trueop1)
5097 && ! side_effects_p (trueop0))
5098 return comparison_result (code, CMP_EQ);
5100 /* If the operands are floating-point constants, see if we can fold
5101 the result. */
5102 if (CONST_DOUBLE_AS_FLOAT_P (trueop0)
5103 && CONST_DOUBLE_AS_FLOAT_P (trueop1)
5104 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
5106 const REAL_VALUE_TYPE *d0 = CONST_DOUBLE_REAL_VALUE (trueop0);
5107 const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);
5109 /* Comparisons are unordered iff at least one of the values is NaN. */
5110 if (REAL_VALUE_ISNAN (*d0) || REAL_VALUE_ISNAN (*d1))
5111 switch (code)
5113 case UNEQ:
5114 case UNLT:
5115 case UNGT:
5116 case UNLE:
5117 case UNGE:
5118 case NE:
5119 case UNORDERED:
5120 return const_true_rtx;
5121 case EQ:
5122 case LT:
5123 case GT:
5124 case LE:
5125 case GE:
5126 case LTGT:
5127 case ORDERED:
5128 return const0_rtx;
5129 default:
5130 return 0;
5133 return comparison_result (code,
5134 (real_equal (d0, d1) ? CMP_EQ :
5135 real_less (d0, d1) ? CMP_LT : CMP_GT));
5138 /* Otherwise, see if the operands are both integers. */
5139 if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
5140 && CONST_SCALAR_INT_P (trueop0) && CONST_SCALAR_INT_P (trueop1))
5142 /* It would be nice if we really had a mode here. However, the
5143 largest int representable on the target is as good as
5144 infinite. */
5145 machine_mode cmode = (mode == VOIDmode) ? MAX_MODE_INT : mode;
5146 rtx_mode_t ptrueop0 = rtx_mode_t (trueop0, cmode);
5147 rtx_mode_t ptrueop1 = rtx_mode_t (trueop1, cmode);
5149 if (wi::eq_p (ptrueop0, ptrueop1))
5150 return comparison_result (code, CMP_EQ);
5151 else
5153 int cr = wi::lts_p (ptrueop0, ptrueop1) ? CMP_LT : CMP_GT;
5154 cr |= wi::ltu_p (ptrueop0, ptrueop1) ? CMP_LTU : CMP_GTU;
5155 return comparison_result (code, cr);
5159 /* Optimize comparisons with upper and lower bounds. */
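/* E.g. if nonzero_bits shows that TRUEOP0 can only hold values in
   [0, 15], then (gtu trueop0 (const_int 15)) must be false and
   (leu trueop0 (const_int 15)) must be true.  */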
5160 scalar_int_mode int_mode;
5161 if (CONST_INT_P (trueop1)
5162 && is_a <scalar_int_mode> (mode, &int_mode)
5163 && HWI_COMPUTABLE_MODE_P (int_mode)
5164 && !side_effects_p (trueop0))
5166 int sign;
5167 unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, int_mode);
5168 HOST_WIDE_INT val = INTVAL (trueop1);
5169 HOST_WIDE_INT mmin, mmax;
5171 if (code == GEU
5172 || code == LEU
5173 || code == GTU
5174 || code == LTU)
5175 sign = 0;
5176 else
5177 sign = 1;
5179 /* Get a reduced range if the sign bit is zero. */
5180 if (nonzero <= (GET_MODE_MASK (int_mode) >> 1))
5182 mmin = 0;
5183 mmax = nonzero;
5185 else
5187 rtx mmin_rtx, mmax_rtx;
5188 get_mode_bounds (int_mode, sign, int_mode, &mmin_rtx, &mmax_rtx);
5190 mmin = INTVAL (mmin_rtx);
5191 mmax = INTVAL (mmax_rtx);
5192 if (sign)
5194 unsigned int sign_copies
5195 = num_sign_bit_copies (trueop0, int_mode);
5197 mmin >>= (sign_copies - 1);
5198 mmax >>= (sign_copies - 1);
5202 switch (code)
5204 /* x >= y is always true for y <= mmin, always false for y > mmax. */
5205 case GEU:
5206 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
5207 return const_true_rtx;
5208 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
5209 return const0_rtx;
5210 break;
5211 case GE:
5212 if (val <= mmin)
5213 return const_true_rtx;
5214 if (val > mmax)
5215 return const0_rtx;
5216 break;
5218 /* x <= y is always true for y >= mmax, always false for y < mmin. */
5219 case LEU:
5220 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
5221 return const_true_rtx;
5222 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
5223 return const0_rtx;
5224 break;
5225 case LE:
5226 if (val >= mmax)
5227 return const_true_rtx;
5228 if (val < mmin)
5229 return const0_rtx;
5230 break;
5232 case EQ:
5233 /* x == y is always false for y out of range. */
5234 if (val < mmin || val > mmax)
5235 return const0_rtx;
5236 break;
5238 /* x > y is always false for y >= mmax, always true for y < mmin. */
5239 case GTU:
5240 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
5241 return const0_rtx;
5242 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
5243 return const_true_rtx;
5244 break;
5245 case GT:
5246 if (val >= mmax)
5247 return const0_rtx;
5248 if (val < mmin)
5249 return const_true_rtx;
5250 break;
5252 /* x < y is always false for y <= mmin, always true for y > mmax. */
5253 case LTU:
5254 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
5255 return const0_rtx;
5256 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
5257 return const_true_rtx;
5258 break;
5259 case LT:
5260 if (val <= mmin)
5261 return const0_rtx;
5262 if (val > mmax)
5263 return const_true_rtx;
5264 break;
5266 case NE:
5267 /* x != y is always true for y out of range. */
5268 if (val < mmin || val > mmax)
5269 return const_true_rtx;
5270 break;
5272 default:
5273 break;
5277 /* Optimize integer comparisons with zero. */
5278 if (trueop1 == const0_rtx && !side_effects_p (trueop0))
5280 /* Some addresses are known to be nonzero. We don't know
5281 their sign, but equality comparisons are known. */
5282 if (nonzero_address_p (trueop0))
5284 if (code == EQ || code == LEU)
5285 return const0_rtx;
5286 if (code == NE || code == GTU)
5287 return const_true_rtx;
5290 /* See if the first operand is an IOR with a constant. If so, we
5291 may be able to determine the result of this comparison. */
5292 if (GET_CODE (op0) == IOR)
5294 rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
5295 if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
5297 int sign_bitnum = GET_MODE_PRECISION (mode) - 1;
5298 int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
5299 && (UINTVAL (inner_const)
5300 & (HOST_WIDE_INT_1U
5301 << sign_bitnum)));
5303 switch (code)
5305 case EQ:
5306 case LEU:
5307 return const0_rtx;
5308 case NE:
5309 case GTU:
5310 return const_true_rtx;
5311 case LT:
5312 case LE:
5313 if (has_sign)
5314 return const_true_rtx;
5315 break;
5316 case GT:
5317 case GE:
5318 if (has_sign)
5319 return const0_rtx;
5320 break;
5321 default:
5322 break;
5328 /* Optimize comparison of ABS with zero. */
5329 if (trueop1 == CONST0_RTX (mode) && !side_effects_p (trueop0)
5330 && (GET_CODE (trueop0) == ABS
5331 || (GET_CODE (trueop0) == FLOAT_EXTEND
5332 && GET_CODE (XEXP (trueop0, 0)) == ABS)))
5334 switch (code)
5336 case LT:
5337 /* Optimize abs(x) < 0.0. */
5338 if (!INTEGRAL_MODE_P (mode) && !HONOR_SNANS (mode))
5339 return const0_rtx;
5340 break;
5342 case GE:
5343 /* Optimize abs(x) >= 0.0. */
5344 if (!INTEGRAL_MODE_P (mode) && !HONOR_NANS (mode))
5345 return const_true_rtx;
5346 break;
5348 case UNGE:
5349 /* Optimize ! (abs(x) < 0.0). */
5350 return const_true_rtx;
5352 default:
5353 break;
5357 return 0;
5360 /* Recognize expressions of the form (X CMP 0) ? VAL : OP (X)
5361 where OP is CLZ or CTZ and VAL is the value from CLZ_DEFINED_VALUE_AT_ZERO
5362 or CTZ_DEFINED_VALUE_AT_ZERO respectively, and return OP (X) if the expression
5363 can be simplified to that, or NULL_RTX if not.
5364 Assume X is compared against zero with CMP_CODE and the true
5365 arm is TRUE_VAL and the false arm is FALSE_VAL. */
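/* For example, on a target whose CLZ_DEFINED_VALUE_AT_ZERO sets the
   value to the mode precision (say 32 for SImode), the expression
   (eq X 0) ? 32 : (clz:SI X) can be replaced by (clz:SI X) alone.  */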
5367 static rtx
5368 simplify_cond_clz_ctz (rtx x, rtx_code cmp_code, rtx true_val, rtx false_val)
5370 if (cmp_code != EQ && cmp_code != NE)
5371 return NULL_RTX;
5373 /* Result on X == 0 and X != 0 respectively. */
5374 rtx on_zero, on_nonzero;
5375 if (cmp_code == EQ)
5377 on_zero = true_val;
5378 on_nonzero = false_val;
5380 else
5382 on_zero = false_val;
5383 on_nonzero = true_val;
5386 rtx_code op_code = GET_CODE (on_nonzero);
5387 if ((op_code != CLZ && op_code != CTZ)
5388 || !rtx_equal_p (XEXP (on_nonzero, 0), x)
5389 || !CONST_INT_P (on_zero))
5390 return NULL_RTX;
5392 HOST_WIDE_INT op_val;
5393 if (((op_code == CLZ
5394 && CLZ_DEFINED_VALUE_AT_ZERO (GET_MODE (on_nonzero), op_val))
5395 || (op_code == CTZ
5396 && CTZ_DEFINED_VALUE_AT_ZERO (GET_MODE (on_nonzero), op_val)))
5397 && op_val == INTVAL (on_zero))
5398 return on_nonzero;
5400 return NULL_RTX;
5404 /* Simplify CODE, an operation with result mode MODE and three operands,
5405 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
5406 a constant. Return 0 if no simplification is possible. */
5409 simplify_ternary_operation (enum rtx_code code, machine_mode mode,
5410 machine_mode op0_mode, rtx op0, rtx op1,
5411 rtx op2)
5413 unsigned int width = GET_MODE_PRECISION (mode);
5414 bool any_change = false;
5415 rtx tem, trueop2;
5417 /* VOIDmode means "infinite" precision. */
5418 if (width == 0)
5419 width = HOST_BITS_PER_WIDE_INT;
5421 switch (code)
5423 case FMA:
5424 /* Simplify negations around the multiplication. */
5425 /* -a * -b + c => a * b + c. */
5426 if (GET_CODE (op0) == NEG)
5428 tem = simplify_unary_operation (NEG, mode, op1, mode);
5429 if (tem)
5430 op1 = tem, op0 = XEXP (op0, 0), any_change = true;
5432 else if (GET_CODE (op1) == NEG)
5434 tem = simplify_unary_operation (NEG, mode, op0, mode);
5435 if (tem)
5436 op0 = tem, op1 = XEXP (op1, 0), any_change = true;
5439 /* Canonicalize the two multiplication operands. */
5440 /* a * -b + c => -b * a + c. */
5441 if (swap_commutative_operands_p (op0, op1))
5442 std::swap (op0, op1), any_change = true;
5444 if (any_change)
5445 return gen_rtx_FMA (mode, op0, op1, op2);
5446 return NULL_RTX;
5448 case SIGN_EXTRACT:
5449 case ZERO_EXTRACT:
5450 if (CONST_INT_P (op0)
5451 && CONST_INT_P (op1)
5452 && CONST_INT_P (op2)
5453 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
5454 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
5456 /* Extracting a bit-field from a constant */
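/* E.g. (zero_extract (const_int 0x1234) (const_int 4) (const_int 4))
   on a !BITS_BIG_ENDIAN target extracts bits 4..7, giving
   (const_int 3).  */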
5457 unsigned HOST_WIDE_INT val = UINTVAL (op0);
5458 HOST_WIDE_INT op1val = INTVAL (op1);
5459 HOST_WIDE_INT op2val = INTVAL (op2);
5460 if (BITS_BIG_ENDIAN)
5461 val >>= GET_MODE_PRECISION (op0_mode) - op2val - op1val;
5462 else
5463 val >>= op2val;
5465 if (HOST_BITS_PER_WIDE_INT != op1val)
5467 /* First zero-extend. */
5468 val &= (HOST_WIDE_INT_1U << op1val) - 1;
5469 /* If desired, propagate sign bit. */
5470 if (code == SIGN_EXTRACT
5471 && (val & (HOST_WIDE_INT_1U << (op1val - 1)))
5472 != 0)
5473 val |= ~ ((HOST_WIDE_INT_1U << op1val) - 1);
5476 return gen_int_mode (val, mode);
5478 break;
5480 case IF_THEN_ELSE:
5481 if (CONST_INT_P (op0))
5482 return op0 != const0_rtx ? op1 : op2;
5484 /* Convert c ? a : a into "a". */
5485 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
5486 return op1;
5488 /* Convert a != b ? a : b into "a". */
5489 if (GET_CODE (op0) == NE
5490 && ! side_effects_p (op0)
5491 && ! HONOR_NANS (mode)
5492 && ! HONOR_SIGNED_ZEROS (mode)
5493 && ((rtx_equal_p (XEXP (op0, 0), op1)
5494 && rtx_equal_p (XEXP (op0, 1), op2))
5495 || (rtx_equal_p (XEXP (op0, 0), op2)
5496 && rtx_equal_p (XEXP (op0, 1), op1))))
5497 return op1;
5499 /* Convert a == b ? a : b into "b". */
5500 if (GET_CODE (op0) == EQ
5501 && ! side_effects_p (op0)
5502 && ! HONOR_NANS (mode)
5503 && ! HONOR_SIGNED_ZEROS (mode)
5504 && ((rtx_equal_p (XEXP (op0, 0), op1)
5505 && rtx_equal_p (XEXP (op0, 1), op2))
5506 || (rtx_equal_p (XEXP (op0, 0), op2)
5507 && rtx_equal_p (XEXP (op0, 1), op1))))
5508 return op2;
5510 /* Convert (!c) != {0,...,0} ? a : b into
5511 c != {0,...,0} ? b : a for vector modes. */
5512 if (VECTOR_MODE_P (GET_MODE (op1))
5513 && GET_CODE (op0) == NE
5514 && GET_CODE (XEXP (op0, 0)) == NOT
5515 && GET_CODE (XEXP (op0, 1)) == CONST_VECTOR)
5517 rtx cv = XEXP (op0, 1);
5518 int nunits = CONST_VECTOR_NUNITS (cv);
5519 bool ok = true;
5520 for (int i = 0; i < nunits; ++i)
5521 if (CONST_VECTOR_ELT (cv, i) != const0_rtx)
5523 ok = false;
5524 break;
5526 if (ok)
5528 rtx new_op0 = gen_rtx_NE (GET_MODE (op0),
5529 XEXP (XEXP (op0, 0), 0),
5530 XEXP (op0, 1));
5531 rtx retval = gen_rtx_IF_THEN_ELSE (mode, new_op0, op2, op1);
5532 return retval;
5536 /* Convert x == 0 ? N : clz (x) into clz (x) when
5537 CLZ_DEFINED_VALUE_AT_ZERO is defined to N for the mode of x.
5538 Similarly for ctz (x). */
5539 if (COMPARISON_P (op0) && !side_effects_p (op0)
5540 && XEXP (op0, 1) == const0_rtx)
5542 rtx simplified
5543 = simplify_cond_clz_ctz (XEXP (op0, 0), GET_CODE (op0),
5544 op1, op2);
5545 if (simplified)
5546 return simplified;
5549 if (COMPARISON_P (op0) && ! side_effects_p (op0))
5551 machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
5552 ? GET_MODE (XEXP (op0, 1))
5553 : GET_MODE (XEXP (op0, 0)));
5554 rtx temp;
5556 /* Look for happy constants in op1 and op2. */
5557 if (CONST_INT_P (op1) && CONST_INT_P (op2))
5559 HOST_WIDE_INT t = INTVAL (op1);
5560 HOST_WIDE_INT f = INTVAL (op2);
5562 if (t == STORE_FLAG_VALUE && f == 0)
5563 code = GET_CODE (op0);
5564 else if (t == 0 && f == STORE_FLAG_VALUE)
5566 enum rtx_code tmp;
5567 tmp = reversed_comparison_code (op0, NULL);
5568 if (tmp == UNKNOWN)
5569 break;
5570 code = tmp;
5572 else
5573 break;
5575 return simplify_gen_relational (code, mode, cmp_mode,
5576 XEXP (op0, 0), XEXP (op0, 1));
5579 if (cmp_mode == VOIDmode)
5580 cmp_mode = op0_mode;
5581 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
5582 cmp_mode, XEXP (op0, 0),
5583 XEXP (op0, 1));
5585 /* See if any simplifications were possible. */
5586 if (temp)
5588 if (CONST_INT_P (temp))
5589 return temp == const0_rtx ? op2 : op1;
5590 else if (temp)
5591 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
5594 break;
5596 case VEC_MERGE:
5597 gcc_assert (GET_MODE (op0) == mode);
5598 gcc_assert (GET_MODE (op1) == mode);
5599 gcc_assert (VECTOR_MODE_P (mode));
5600 trueop2 = avoid_constant_pool_reference (op2);
5601 if (CONST_INT_P (trueop2))
5603 int elt_size = GET_MODE_UNIT_SIZE (mode);
5604 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
5605 unsigned HOST_WIDE_INT sel = UINTVAL (trueop2);
5606 unsigned HOST_WIDE_INT mask;
5607 if (n_elts == HOST_BITS_PER_WIDE_INT)
5608 mask = -1;
5609 else
5610 mask = (HOST_WIDE_INT_1U << n_elts) - 1;
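/* Bit I of SEL selects element I of the result from OP0 when set and
   from OP1 when clear, so an all-zero selector yields OP1 and an
   all-ones selector (within MASK) yields OP0.  */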
5612 if (!(sel & mask) && !side_effects_p (op0))
5613 return op1;
5614 if ((sel & mask) == mask && !side_effects_p (op1))
5615 return op0;
5617 rtx trueop0 = avoid_constant_pool_reference (op0);
5618 rtx trueop1 = avoid_constant_pool_reference (op1);
5619 if (GET_CODE (trueop0) == CONST_VECTOR
5620 && GET_CODE (trueop1) == CONST_VECTOR)
5622 rtvec v = rtvec_alloc (n_elts);
5623 unsigned int i;
5625 for (i = 0; i < n_elts; i++)
5626 RTVEC_ELT (v, i) = ((sel & (HOST_WIDE_INT_1U << i))
5627 ? CONST_VECTOR_ELT (trueop0, i)
5628 : CONST_VECTOR_ELT (trueop1, i));
5629 return gen_rtx_CONST_VECTOR (mode, v);
5632 /* Replace (vec_merge (vec_merge a b m) c n) with (vec_merge b c n)
5633 if no element from a appears in the result. */
5634 if (GET_CODE (op0) == VEC_MERGE)
5636 tem = avoid_constant_pool_reference (XEXP (op0, 2));
5637 if (CONST_INT_P (tem))
5639 unsigned HOST_WIDE_INT sel0 = UINTVAL (tem);
5640 if (!(sel & sel0 & mask) && !side_effects_p (XEXP (op0, 0)))
5641 return simplify_gen_ternary (code, mode, mode,
5642 XEXP (op0, 1), op1, op2);
5643 if (!(sel & ~sel0 & mask) && !side_effects_p (XEXP (op0, 1)))
5644 return simplify_gen_ternary (code, mode, mode,
5645 XEXP (op0, 0), op1, op2);
5648 if (GET_CODE (op1) == VEC_MERGE)
5650 tem = avoid_constant_pool_reference (XEXP (op1, 2));
5651 if (CONST_INT_P (tem))
5653 unsigned HOST_WIDE_INT sel1 = UINTVAL (tem);
5654 if (!(~sel & sel1 & mask) && !side_effects_p (XEXP (op1, 0)))
5655 return simplify_gen_ternary (code, mode, mode,
5656 op0, XEXP (op1, 1), op2);
5657 if (!(~sel & ~sel1 & mask) && !side_effects_p (XEXP (op1, 1)))
5658 return simplify_gen_ternary (code, mode, mode,
5659 op0, XEXP (op1, 0), op2);
5663 /* Replace (vec_merge (vec_duplicate (vec_select a parallel (i))) a 1 << i)
5664 with a. */
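/* E.g. for i == 2 in a four-element vector: the vec_duplicate broadcasts
   element 2 of a across all lanes, and merging that back into a only at
   lane 2 (selector 1 << 2) reproduces a unchanged.  */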
5665 if (GET_CODE (op0) == VEC_DUPLICATE
5666 && GET_CODE (XEXP (op0, 0)) == VEC_SELECT
5667 && GET_CODE (XEXP (XEXP (op0, 0), 1)) == PARALLEL
5668 && mode_nunits[GET_MODE (XEXP (op0, 0))] == 1)
5670 tem = XVECEXP ((XEXP (XEXP (op0, 0), 1)), 0, 0);
5671 if (CONST_INT_P (tem) && CONST_INT_P (op2))
5673 if (XEXP (XEXP (op0, 0), 0) == op1
5674 && UINTVAL (op2) == HOST_WIDE_INT_1U << UINTVAL (tem))
5675 return op1;
5680 if (rtx_equal_p (op0, op1)
5681 && !side_effects_p (op2) && !side_effects_p (op1))
5682 return op0;
5684 break;
5686 default:
5687 gcc_unreachable ();
5690 return 0;
5693 /* Evaluate a SUBREG of a CONST_INT or CONST_WIDE_INT or CONST_DOUBLE
5694 or CONST_FIXED or CONST_VECTOR, returning another CONST_INT or
5695 CONST_WIDE_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.
5697 Works by unpacking OP into a collection of 8-bit values
5698 represented as a little-endian array of 'unsigned char', selecting by BYTE,
5699 and then repacking them again for OUTERMODE. */
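/* For example, on a little-endian target, evaluating
   (subreg:HI (const_int 0x12345678) 0) with SImode as INNERMODE unpacks
   the constant into the byte array {0x78, 0x56, 0x34, 0x12}; BYTE 0
   selects {0x78, 0x56} and repacks it as (const_int 0x5678), while
   BYTE 2 would select {0x34, 0x12} and yield (const_int 0x1234).  */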
5701 static rtx
5702 simplify_immed_subreg (machine_mode outermode, rtx op,
5703 machine_mode innermode, unsigned int byte)
5705 enum {
5706 value_bit = 8,
5707 value_mask = (1 << value_bit) - 1
5709 unsigned char value[MAX_BITSIZE_MODE_ANY_MODE / value_bit];
5710 int value_start;
5711 int i;
5712 int elem;
5714 int num_elem;
5715 rtx * elems;
5716 int elem_bitsize;
5717 rtx result_s = NULL;
5718 rtvec result_v = NULL;
5719 enum mode_class outer_class;
5720 machine_mode outer_submode;
5721 int max_bitsize;
5723 /* Some ports misuse CCmode. */
5724 if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
5725 return op;
5727 /* We have no way to represent a complex constant at the rtl level. */
5728 if (COMPLEX_MODE_P (outermode))
5729 return NULL_RTX;
5731 /* We support any size mode. */
5732 max_bitsize = MAX (GET_MODE_BITSIZE (outermode),
5733 GET_MODE_BITSIZE (innermode));
5735 /* Unpack the value. */
5737 if (GET_CODE (op) == CONST_VECTOR)
5739 num_elem = CONST_VECTOR_NUNITS (op);
5740 elems = &CONST_VECTOR_ELT (op, 0);
5741 elem_bitsize = GET_MODE_UNIT_BITSIZE (innermode);
5743 else
5745 num_elem = 1;
5746 elems = &op;
5747 elem_bitsize = max_bitsize;
5749 /* If this asserts, it is too complicated; reducing value_bit may help. */
5750 gcc_assert (BITS_PER_UNIT % value_bit == 0);
5751 /* I don't know how to handle endianness of sub-units. */
5752 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
5754 for (elem = 0; elem < num_elem; elem++)
5756 unsigned char * vp;
5757 rtx el = elems[elem];
5759 /* Vectors are kept in target memory order. (This is probably
5760 a mistake.) */
5762 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5763 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5764 / BITS_PER_UNIT);
5765 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5766 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5767 unsigned bytele = (subword_byte % UNITS_PER_WORD
5768 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5769 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
5772 switch (GET_CODE (el))
5774 case CONST_INT:
5775 for (i = 0;
5776 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5777 i += value_bit)
5778 *vp++ = INTVAL (el) >> i;
5779 /* CONST_INTs are always logically sign-extended. */
5780 for (; i < elem_bitsize; i += value_bit)
5781 *vp++ = INTVAL (el) < 0 ? -1 : 0;
5782 break;
5784 case CONST_WIDE_INT:
5786 rtx_mode_t val = rtx_mode_t (el, innermode);
5787 unsigned char extend = wi::sign_mask (val);
5788 int prec = wi::get_precision (val);
5790 for (i = 0; i < prec && i < elem_bitsize; i += value_bit)
5791 *vp++ = wi::extract_uhwi (val, i, value_bit);
5792 for (; i < elem_bitsize; i += value_bit)
5793 *vp++ = extend;
5795 break;
5797 case CONST_DOUBLE:
5798 if (TARGET_SUPPORTS_WIDE_INT == 0 && GET_MODE (el) == VOIDmode)
5800 unsigned char extend = 0;
5801 /* If this triggers, someone should have generated a
5802 CONST_INT instead. */
5803 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
5805 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5806 *vp++ = CONST_DOUBLE_LOW (el) >> i;
5807 while (i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize)
5809 *vp++
5810 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
5811 i += value_bit;
5814 if (CONST_DOUBLE_HIGH (el) >> (HOST_BITS_PER_WIDE_INT - 1))
5815 extend = -1;
5816 for (; i < elem_bitsize; i += value_bit)
5817 *vp++ = extend;
5819 else
5821 /* This is big enough for anything on the platform. */
5822 long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32];
5823 scalar_float_mode el_mode;
5825 el_mode = as_a <scalar_float_mode> (GET_MODE (el));
5826 int bitsize = GET_MODE_BITSIZE (el_mode);
5828 gcc_assert (bitsize <= elem_bitsize);
5829 gcc_assert (bitsize % value_bit == 0);
5831 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
5832 GET_MODE (el));
5834 /* real_to_target produces its result in words affected by
5835 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5836 and use WORDS_BIG_ENDIAN instead; see the documentation
5837 of SUBREG in rtl.texi. */
5838 for (i = 0; i < bitsize; i += value_bit)
5840 int ibase;
5841 if (WORDS_BIG_ENDIAN)
5842 ibase = bitsize - 1 - i;
5843 else
5844 ibase = i;
5845 *vp++ = tmp[ibase / 32] >> i % 32;
5848 /* It shouldn't matter what's done here, so fill it with
5849 zero. */
5850 for (; i < elem_bitsize; i += value_bit)
5851 *vp++ = 0;
5853 break;
5855 case CONST_FIXED:
5856 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
5858 for (i = 0; i < elem_bitsize; i += value_bit)
5859 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5861 else
5863 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5864 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5865 for (; i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize;
5866 i += value_bit)
5867 *vp++ = CONST_FIXED_VALUE_HIGH (el)
5868 >> (i - HOST_BITS_PER_WIDE_INT);
5869 for (; i < elem_bitsize; i += value_bit)
5870 *vp++ = 0;
5872 break;
5874 default:
5875 gcc_unreachable ();
5879 /* Now, pick the right byte to start with. */
5880 /* Renumber BYTE so that the least-significant byte is byte 0. A special
5881 case is paradoxical SUBREGs, which shouldn't be adjusted since they
5882 will already have offset 0. */
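/* For instance, on a big-endian target the least-significant SImode word
   of a DImode value lives at memory offset 4, so a BYTE of 4 is
   renumbered to 0 here; on a little-endian target BYTE is already
   relative to the least-significant byte and is left unchanged.  */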
5883 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
5885 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
5886 - byte);
5887 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5888 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5889 byte = (subword_byte % UNITS_PER_WORD
5890 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5893 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
5894 so if it's become negative it will instead be very large.) */
5895 gcc_assert (byte < GET_MODE_SIZE (innermode));
5897 /* Convert from bytes to chunks of size value_bit. */
5898 value_start = byte * (BITS_PER_UNIT / value_bit);
5900 /* Re-pack the value. */
5901 num_elem = GET_MODE_NUNITS (outermode);
5903 if (VECTOR_MODE_P (outermode))
5905 result_v = rtvec_alloc (num_elem);
5906 elems = &RTVEC_ELT (result_v, 0);
5908 else
5909 elems = &result_s;
5911 outer_submode = GET_MODE_INNER (outermode);
5912 outer_class = GET_MODE_CLASS (outer_submode);
5913 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
5915 gcc_assert (elem_bitsize % value_bit == 0);
5916 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
5918 for (elem = 0; elem < num_elem; elem++)
5920 unsigned char *vp;
5922 /* Vectors are stored in target memory order. (This is probably
5923 a mistake.) */
5925 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5926 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5927 / BITS_PER_UNIT);
5928 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5929 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5930 unsigned bytele = (subword_byte % UNITS_PER_WORD
5931 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5932 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
5935 switch (outer_class)
5937 case MODE_INT:
5938 case MODE_PARTIAL_INT:
5940 int u;
5941 int base = 0;
5942 int units
5943 = (GET_MODE_BITSIZE (outer_submode) + HOST_BITS_PER_WIDE_INT - 1)
5944 / HOST_BITS_PER_WIDE_INT;
5945 HOST_WIDE_INT tmp[MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_WIDE_INT];
5946 wide_int r;
5948 if (GET_MODE_PRECISION (outer_submode) > MAX_BITSIZE_MODE_ANY_INT)
5949 return NULL_RTX;
5950 for (u = 0; u < units; u++)
5952 unsigned HOST_WIDE_INT buf = 0;
5953 for (i = 0;
5954 i < HOST_BITS_PER_WIDE_INT && base + i < elem_bitsize;
5955 i += value_bit)
5956 buf |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5958 tmp[u] = buf;
5959 base += HOST_BITS_PER_WIDE_INT;
5961 r = wide_int::from_array (tmp, units,
5962 GET_MODE_PRECISION (outer_submode));
5963 #if TARGET_SUPPORTS_WIDE_INT == 0
5964 /* Make sure r will fit into CONST_INT or CONST_DOUBLE. */
5965 if (wi::min_precision (r, SIGNED) > HOST_BITS_PER_DOUBLE_INT)
5966 return NULL_RTX;
5967 #endif
5968 elems[elem] = immed_wide_int_const (r, outer_submode);
5970 break;
5972 case MODE_FLOAT:
5973 case MODE_DECIMAL_FLOAT:
5975 REAL_VALUE_TYPE r;
5976 long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32] = { 0 };
5978 /* real_from_target wants its input in words affected by
5979 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5980 and use WORDS_BIG_ENDIAN instead; see the documentation
5981 of SUBREG in rtl.texi. */
5982 for (i = 0; i < elem_bitsize; i += value_bit)
5984 int ibase;
5985 if (WORDS_BIG_ENDIAN)
5986 ibase = elem_bitsize - 1 - i;
5987 else
5988 ibase = i;
5989 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
5992 real_from_target (&r, tmp, outer_submode);
5993 elems[elem] = const_double_from_real_value (r, outer_submode);
5995 break;
5997 case MODE_FRACT:
5998 case MODE_UFRACT:
5999 case MODE_ACCUM:
6000 case MODE_UACCUM:
6002 FIXED_VALUE_TYPE f;
6003 f.data.low = 0;
6004 f.data.high = 0;
6005 f.mode = outer_submode;
6007 for (i = 0;
6008 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
6009 i += value_bit)
6010 f.data.low |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
6011 for (; i < elem_bitsize; i += value_bit)
6012 f.data.high |= ((unsigned HOST_WIDE_INT)(*vp++ & value_mask)
6013 << (i - HOST_BITS_PER_WIDE_INT));
6015 elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
6017 break;
6019 default:
6020 gcc_unreachable ();
6023 if (VECTOR_MODE_P (outermode))
6024 return gen_rtx_CONST_VECTOR (outermode, result_v);
6025 else
6026 return result_s;
6029 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
6030 Return 0 if no simplifications are possible. */
6032 simplify_subreg (machine_mode outermode, rtx op,
6033 machine_mode innermode, unsigned int byte)
6035 /* Little bit of sanity checking. */
6036 gcc_assert (innermode != VOIDmode);
6037 gcc_assert (outermode != VOIDmode);
6038 gcc_assert (innermode != BLKmode);
6039 gcc_assert (outermode != BLKmode);
6041 gcc_assert (GET_MODE (op) == innermode
6042 || GET_MODE (op) == VOIDmode);
6044 if ((byte % GET_MODE_SIZE (outermode)) != 0)
6045 return NULL_RTX;
6047 if (byte >= GET_MODE_SIZE (innermode))
6048 return NULL_RTX;
6050 if (outermode == innermode && !byte)
6051 return op;
6053 if (CONST_SCALAR_INT_P (op)
6054 || CONST_DOUBLE_AS_FLOAT_P (op)
6055 || GET_CODE (op) == CONST_FIXED
6056 || GET_CODE (op) == CONST_VECTOR)
6057 return simplify_immed_subreg (outermode, op, innermode, byte);
6059 /* Changing mode twice with SUBREG => just change it once,
6060 or not at all if changing back to OP's starting mode. */
6061 if (GET_CODE (op) == SUBREG)
6063 machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
6064 int final_offset = byte + SUBREG_BYTE (op);
6065 rtx newx;
6067 if (outermode == innermostmode
6068 && byte == 0 && SUBREG_BYTE (op) == 0)
6069 return SUBREG_REG (op);
6071 /* The SUBREG_BYTE represents the offset, as if the value were stored
6072 in memory. The irritating exception is the paradoxical subreg, where
6073 we define SUBREG_BYTE to be 0; on big-endian machines this value
6074 would otherwise be negative. For a moment, undo this exception. */
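/* E.g. on a big-endian target with 4-byte words, a paradoxical
   (subreg:DI (reg:SI) 0) notionally starts 4 bytes before the SImode
   value, so the adjustment below effectively adds -4 to the offset.  */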
6075 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
6077 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
6078 if (WORDS_BIG_ENDIAN)
6079 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
6080 if (BYTES_BIG_ENDIAN)
6081 final_offset += difference % UNITS_PER_WORD;
6083 if (SUBREG_BYTE (op) == 0
6084 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
6086 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
6087 if (WORDS_BIG_ENDIAN)
6088 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
6089 if (BYTES_BIG_ENDIAN)
6090 final_offset += difference % UNITS_PER_WORD;
6093 /* See whether resulting subreg will be paradoxical. */
6094 if (!paradoxical_subreg_p (outermode, innermostmode))
6096 /* In nonparadoxical subregs we can't handle negative offsets. */
6097 if (final_offset < 0)
6098 return NULL_RTX;
6099 /* Bail out in case resulting subreg would be incorrect. */
6100 if (final_offset % GET_MODE_SIZE (outermode)
6101 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
6102 return NULL_RTX;
6104 else
6106 int offset = 0;
6107 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
6109 /* For a paradoxical subreg, see if we are still looking at the lower
6110 part. If so, our SUBREG_BYTE will be 0. */
6111 if (WORDS_BIG_ENDIAN)
6112 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
6113 if (BYTES_BIG_ENDIAN)
6114 offset += difference % UNITS_PER_WORD;
6115 if (offset == final_offset)
6116 final_offset = 0;
6117 else
6118 return NULL_RTX;
6121 /* Recurse for further possible simplifications. */
6122 newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
6123 final_offset);
6124 if (newx)
6125 return newx;
6126 if (validate_subreg (outermode, innermostmode,
6127 SUBREG_REG (op), final_offset))
6129 newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
6130 if (SUBREG_PROMOTED_VAR_P (op)
6131 && SUBREG_PROMOTED_SIGN (op) >= 0
6132 && GET_MODE_CLASS (outermode) == MODE_INT
6133 && IN_RANGE (GET_MODE_SIZE (outermode),
6134 GET_MODE_SIZE (innermode),
6135 GET_MODE_SIZE (innermostmode))
6136 && subreg_lowpart_p (newx))
6138 SUBREG_PROMOTED_VAR_P (newx) = 1;
6139 SUBREG_PROMOTED_SET (newx, SUBREG_PROMOTED_GET (op));
6141 return newx;
6143 return NULL_RTX;
6146 /* SUBREG of a hard register => just change the register number
6147 and/or mode. If the hard register is not valid in that mode,
6148 suppress this simplification. If the hard register is the stack,
6149 frame, or argument pointer, leave this as a SUBREG. */
6151 if (REG_P (op) && HARD_REGISTER_P (op))
6153 unsigned int regno, final_regno;
6155 regno = REGNO (op);
6156 final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
6157 if (HARD_REGISTER_NUM_P (final_regno))
6159 rtx x;
6160 int final_offset = byte;
6162 /* Adjust offset for paradoxical subregs. */
6163 if (byte == 0
6164 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
6166 int difference = (GET_MODE_SIZE (innermode)
6167 - GET_MODE_SIZE (outermode));
6168 if (WORDS_BIG_ENDIAN)
6169 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
6170 if (BYTES_BIG_ENDIAN)
6171 final_offset += difference % UNITS_PER_WORD;
6174 x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);
6176 /* Propagate the original regno. We don't have any way to specify
6177 an offset inside the original regno, so do so only for the lowpart.
6178 The information is used only by alias analysis, which cannot
6179 grok partial registers anyway. */
6181 if (subreg_lowpart_offset (outermode, innermode) == byte)
6182 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
6183 return x;
6187 /* If we have a SUBREG of a register that we are replacing and we are
6188 replacing it with a MEM, make a new MEM and try replacing the
6189 SUBREG with it. Don't do this if the MEM has a mode-dependent address
6190 or if we would be widening it. */
6192 if (MEM_P (op)
6193 && ! mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op))
6194 /* Allow splitting of volatile memory references in case we don't
6195 have an instruction to move the whole thing. */
6196 && (! MEM_VOLATILE_P (op)
6197 || ! have_insn_for (SET, innermode))
6198 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
6199 return adjust_address_nv (op, outermode, byte);
6201 /* Handle complex or vector values represented as CONCAT or VEC_CONCAT
6202 of two parts. */
6203 if (GET_CODE (op) == CONCAT
6204 || GET_CODE (op) == VEC_CONCAT)
6206 unsigned int part_size, final_offset;
6207 rtx part, res;
6209 machine_mode part_mode = GET_MODE (XEXP (op, 0));
6210 if (part_mode == VOIDmode)
6211 part_mode = GET_MODE_INNER (GET_MODE (op));
6212 part_size = GET_MODE_SIZE (part_mode);
6213 if (byte < part_size)
6215 part = XEXP (op, 0);
6216 final_offset = byte;
6218 else
6220 part = XEXP (op, 1);
6221 final_offset = byte - part_size;
6224 if (final_offset + GET_MODE_SIZE (outermode) > part_size)
6225 return NULL_RTX;
6227 part_mode = GET_MODE (part);
6228 if (part_mode == VOIDmode)
6229 part_mode = GET_MODE_INNER (GET_MODE (op));
6230 res = simplify_subreg (outermode, part, part_mode, final_offset);
6231 if (res)
6232 return res;
6233 if (validate_subreg (outermode, part_mode, part, final_offset))
6234 return gen_rtx_SUBREG (outermode, part, final_offset);
6235 return NULL_RTX;
6238 /* A SUBREG resulting from a zero extension may fold to zero if
6239 it extracts higher bits than the ZERO_EXTEND's source provides. */
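/* For example, on a little-endian target,
   (subreg:SI (zero_extend:DI (reg:SI x)) 4) reads bits [32, 63] of the
   extended value: bitpos is 32, which is not below the 32-bit precision
   of the SImode source, so every bit read comes from the extension and
   the whole subreg folds to (const_int 0).  */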
6240 if (GET_CODE (op) == ZERO_EXTEND && SCALAR_INT_MODE_P (innermode))
6242 unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
6243 if (bitpos >= GET_MODE_PRECISION (GET_MODE (XEXP (op, 0))))
6244 return CONST0_RTX (outermode);
6247 scalar_int_mode int_outermode, int_innermode;
6248 if (is_a <scalar_int_mode> (outermode, &int_outermode)
6249 && is_a <scalar_int_mode> (innermode, &int_innermode)
6250 && (GET_MODE_PRECISION (int_outermode)
6251 < GET_MODE_PRECISION (int_innermode))
6252 && byte == subreg_lowpart_offset (int_outermode, int_innermode))
6254 rtx tem = simplify_truncation (int_outermode, op, int_innermode);
6255 if (tem)
6256 return tem;
6259 return NULL_RTX;
6262 /* Make a SUBREG operation or equivalent if it folds. */
6265 simplify_gen_subreg (machine_mode outermode, rtx op,
6266 machine_mode innermode, unsigned int byte)
6268 rtx newx;
6270 newx = simplify_subreg (outermode, op, innermode, byte);
6271 if (newx)
6272 return newx;
6274 if (GET_CODE (op) == SUBREG
6275 || GET_CODE (op) == CONCAT
6276 || GET_MODE (op) == VOIDmode)
6277 return NULL_RTX;
6279 if (validate_subreg (outermode, innermode, op, byte))
6280 return gen_rtx_SUBREG (outermode, op, byte);
6282 return NULL_RTX;
6285 /* Generate a subreg to get the least significant part of EXPR (in mode
6286 INNER_MODE) as a value of mode OUTER_MODE. */
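/* For example, lowpart_subreg (SImode, x, DImode) asks for the low SImode
   half of the DImode value x; subreg_lowpart_offset supplies byte 0 on a
   little-endian target and byte 4 on a big-endian one, and the result
   either simplifies further, comes back as a plain SUBREG at that offset,
   or is NULL_RTX when no valid subreg can be formed.  */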
6289 lowpart_subreg (machine_mode outer_mode, rtx expr,
6290 machine_mode inner_mode)
6292 return simplify_gen_subreg (outer_mode, expr, inner_mode,
6293 subreg_lowpart_offset (outer_mode, inner_mode));
6296 /* Simplify X, an rtx expression.
6298 Return the simplified expression or NULL if no simplifications
6299 were possible.
6301 This is the preferred entry point into the simplification routines;
6302 however, we still allow passes to call the more specific routines.
6304 Right now GCC has three (yes, three) major bodies of RTL simplification
6305 code that need to be unified.
6307 1. fold_rtx in cse.c. This code uses various CSE specific
6308 information to aid in RTL simplification.
6310 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
6311 it uses combine specific information to aid in RTL
6312 simplification.
6314 3. The routines in this file.
6317 Long term we want to have only one body of simplification code; to
6318 get to that state I recommend the following steps:
6320 1. Pore over fold_rtx & simplify_rtx and move any simplifications
6321 that do not depend on pass-specific state into these routines.
6323 2. As code is moved by #1, change fold_rtx & simplify_rtx to
6324 use this routine whenever possible.
6326 3. Allow for pass dependent state to be provided to these
6327 routines and add simplifications based on the pass dependent
6328 state. Remove code from cse.c & combine.c that becomes
6329 redundant/dead.
6331 It will take time, but ultimately the compiler will be easier to
6332 maintain and improve. It's totally silly that when we add a
6333 simplification it needs to be added to 4 places (3 for RTL
6334 simplification and 1 for tree simplification). */
6337 simplify_rtx (const_rtx x)
6339 const enum rtx_code code = GET_CODE (x);
6340 const machine_mode mode = GET_MODE (x);
6342 switch (GET_RTX_CLASS (code))
6344 case RTX_UNARY:
6345 return simplify_unary_operation (code, mode,
6346 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
6347 case RTX_COMM_ARITH:
6348 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
6349 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
6351 /* Fall through. */
6353 case RTX_BIN_ARITH:
6354 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
6356 case RTX_TERNARY:
6357 case RTX_BITFIELD_OPS:
6358 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
6359 XEXP (x, 0), XEXP (x, 1),
6360 XEXP (x, 2));
6362 case RTX_COMPARE:
6363 case RTX_COMM_COMPARE:
6364 return simplify_relational_operation (code, mode,
6365 ((GET_MODE (XEXP (x, 0))
6366 != VOIDmode)
6367 ? GET_MODE (XEXP (x, 0))
6368 : GET_MODE (XEXP (x, 1))),
6369 XEXP (x, 0),
6370 XEXP (x, 1));
6372 case RTX_EXTRA:
6373 if (code == SUBREG)
6374 return simplify_subreg (mode, SUBREG_REG (x),
6375 GET_MODE (SUBREG_REG (x)),
6376 SUBREG_BYTE (x));
6377 break;
6379 case RTX_OBJ:
6380 if (code == LO_SUM)
6382 /* Convert (lo_sum (high FOO) FOO) to FOO. */
6383 if (GET_CODE (XEXP (x, 0)) == HIGH
6384 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
6385 return XEXP (x, 1);
6387 break;
6389 default:
6390 break;
6392 return NULL;