[64/77] Add a scalar_mode class
[official-gcc.git] / gcc / simplify-rtx.c
blob 109c01917ed27d0d8ae977fedf38caf2ff524b19
1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987-2017 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "backend.h"
25 #include "target.h"
26 #include "rtl.h"
27 #include "tree.h"
28 #include "predict.h"
29 #include "memmodel.h"
30 #include "optabs.h"
31 #include "emit-rtl.h"
32 #include "recog.h"
33 #include "diagnostic-core.h"
34 #include "varasm.h"
35 #include "flags.h"
37 /* Simplification and canonicalization of RTL. */
39 /* Much code operates on (low, high) pairs; the low value is an
40 unsigned wide int, the high value a signed wide int. We
41 occasionally need to sign extend from low to high as if low were a
42 signed wide int. */
43 #define HWI_SIGN_EXTEND(low) \
44 ((((HOST_WIDE_INT) low) < 0) ? HOST_WIDE_INT_M1 : HOST_WIDE_INT_0)
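/* Illustrative sketch, not part of the original source: how
   HWI_SIGN_EXTEND is typically used to build the high half of a
   (low, high) pair from its low half.  The function name is
   hypothetical.  */
static void ATTRIBUTE_UNUSED
example_hwi_sign_extend (unsigned HOST_WIDE_INT low, HOST_WIDE_INT *high)
{
  /* If the sign bit of LOW is set, the high word becomes all ones;
     otherwise it becomes zero.  */
  *high = HWI_SIGN_EXTEND (low);
}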
46 static rtx neg_const_int (machine_mode, const_rtx);
47 static bool plus_minus_operand_p (const_rtx);
48 static rtx simplify_plus_minus (enum rtx_code, machine_mode, rtx, rtx);
49 static rtx simplify_immed_subreg (machine_mode, rtx, machine_mode,
50 unsigned int);
51 static rtx simplify_associative_operation (enum rtx_code, machine_mode,
52 rtx, rtx);
53 static rtx simplify_relational_operation_1 (enum rtx_code, machine_mode,
54 machine_mode, rtx, rtx);
55 static rtx simplify_unary_operation_1 (enum rtx_code, machine_mode, rtx);
56 static rtx simplify_binary_operation_1 (enum rtx_code, machine_mode,
57 rtx, rtx, rtx, rtx);
59 /* Negate a CONST_INT rtx. */
60 static rtx
61 neg_const_int (machine_mode mode, const_rtx i)
63 unsigned HOST_WIDE_INT val = -UINTVAL (i);
65 if (GET_MODE_PRECISION (mode) > HOST_BITS_PER_WIDE_INT
66 && val == UINTVAL (i))
67 return simplify_const_unary_operation (NEG, mode, CONST_CAST_RTX (i),
68 mode);
69 return gen_int_mode (val, mode);
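/* Illustrative sketch, not part of the original source: negating the
   CONST_INT 5 in SImode is expected to yield a CONST_INT of -5.  The
   function name is hypothetical.  */
static rtx ATTRIBUTE_UNUSED
example_neg_const_int (void)
{
  return neg_const_int (SImode, GEN_INT (5));
}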
72 /* Test whether expression, X, is an immediate constant that represents
73 the most significant bit of machine mode MODE. */
75 bool
76 mode_signbit_p (machine_mode mode, const_rtx x)
78 unsigned HOST_WIDE_INT val;
79 unsigned int width;
80 scalar_int_mode int_mode;
82 if (!is_int_mode (mode, &int_mode))
83 return false;
85 width = GET_MODE_PRECISION (int_mode);
86 if (width == 0)
87 return false;
89 if (width <= HOST_BITS_PER_WIDE_INT
90 && CONST_INT_P (x))
91 val = INTVAL (x);
92 #if TARGET_SUPPORTS_WIDE_INT
93 else if (CONST_WIDE_INT_P (x))
95 unsigned int i;
96 unsigned int elts = CONST_WIDE_INT_NUNITS (x);
97 if (elts != (width + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT)
98 return false;
99 for (i = 0; i < elts - 1; i++)
100 if (CONST_WIDE_INT_ELT (x, i) != 0)
101 return false;
102 val = CONST_WIDE_INT_ELT (x, elts - 1);
103 width %= HOST_BITS_PER_WIDE_INT;
104 if (width == 0)
105 width = HOST_BITS_PER_WIDE_INT;
107 #else
108 else if (width <= HOST_BITS_PER_DOUBLE_INT
109 && CONST_DOUBLE_AS_INT_P (x)
110 && CONST_DOUBLE_LOW (x) == 0)
112 val = CONST_DOUBLE_HIGH (x);
113 width -= HOST_BITS_PER_WIDE_INT;
115 #endif
116 else
117 /* X is not an integer constant. */
118 return false;
120 if (width < HOST_BITS_PER_WIDE_INT)
121 val &= (HOST_WIDE_INT_1U << width) - 1;
122 return val == (HOST_WIDE_INT_1U << (width - 1));
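/* Illustrative sketch, not part of the original source: assuming a
   32-bit SImode, only a constant with just bit 31 set counts as the
   SImode sign bit.  The function name is hypothetical.  */
static void ATTRIBUTE_UNUSED
example_mode_signbit_p (void)
{
  rtx signbit = gen_int_mode (HOST_WIDE_INT_1U << 31, SImode);
  gcc_checking_assert (mode_signbit_p (SImode, signbit));
  gcc_checking_assert (!mode_signbit_p (SImode, const1_rtx));
}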
125 /* Test whether VAL is equal to the most significant bit of mode MODE
126 (after masking with the mode mask of MODE). Returns false if the
127 precision of MODE is too large to handle. */
129 bool
130 val_signbit_p (machine_mode mode, unsigned HOST_WIDE_INT val)
132 unsigned int width;
133 scalar_int_mode int_mode;
135 if (!is_int_mode (mode, &int_mode))
136 return false;
138 width = GET_MODE_PRECISION (int_mode);
139 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
140 return false;
142 val &= GET_MODE_MASK (int_mode);
143 return val == (HOST_WIDE_INT_1U << (width - 1));
146 /* Test whether the most significant bit of mode MODE is set in VAL.
147 Returns false if the precision of MODE is too large to handle. */
148 bool
149 val_signbit_known_set_p (machine_mode mode, unsigned HOST_WIDE_INT val)
151 unsigned int width;
153 scalar_int_mode int_mode;
154 if (!is_int_mode (mode, &int_mode))
155 return false;
157 width = GET_MODE_PRECISION (int_mode);
158 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
159 return false;
161 val &= HOST_WIDE_INT_1U << (width - 1);
162 return val != 0;
165 /* Test whether the most significant bit of mode MODE is clear in VAL.
166 Returns false if the precision of MODE is too large to handle. */
167 bool
168 val_signbit_known_clear_p (machine_mode mode, unsigned HOST_WIDE_INT val)
170 unsigned int width;
172 scalar_int_mode int_mode;
173 if (!is_int_mode (mode, &int_mode))
174 return false;
176 width = GET_MODE_PRECISION (int_mode);
177 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
178 return false;
180 val &= HOST_WIDE_INT_1U << (width - 1);
181 return val == 0;
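/* Illustrative sketch, not part of the original source: the
   val_signbit_* helpers test raw HOST_WIDE_INT values rather than
   rtxes.  Assumes a 32-bit SImode; the function name is
   hypothetical.  */
static void ATTRIBUTE_UNUSED
example_val_signbit (void)
{
  gcc_checking_assert (val_signbit_p (SImode, HOST_WIDE_INT_1U << 31));
  gcc_checking_assert (val_signbit_known_set_p (SImode, HOST_WIDE_INT_M1U));
  gcc_checking_assert (val_signbit_known_clear_p (SImode, 0x7fffffff));
}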
184 /* Make a binary operation by properly ordering the operands and
185 seeing if the expression folds. */
188 simplify_gen_binary (enum rtx_code code, machine_mode mode, rtx op0,
189 rtx op1)
191 rtx tem;
193 /* If this simplifies, do it. */
194 tem = simplify_binary_operation (code, mode, op0, op1);
195 if (tem)
196 return tem;
198 /* Put complex operands first and constants second if commutative. */
199 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
200 && swap_commutative_operands_p (op0, op1))
201 std::swap (op0, op1);
203 return gen_rtx_fmt_ee (code, mode, op0, op1);
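/* Illustrative sketch, not part of the original source: constant
   operands fold immediately, and for commutative codes a constant is
   ordered after the more complex operand.  The function name is
   hypothetical.  */
static rtx ATTRIBUTE_UNUSED
example_simplify_gen_binary (rtx reg)
{
  /* (plus:SI 2 3) folds straight to (const_int 5).  */
  rtx sum = simplify_gen_binary (PLUS, SImode, GEN_INT (2), GEN_INT (3));
  gcc_checking_assert (CONST_INT_P (sum) && INTVAL (sum) == 5);
  /* With a register operand, the constant ends up second:
     (plus:SI REG (const_int 1)).  */
  return simplify_gen_binary (PLUS, SImode, GEN_INT (1), reg);
}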
206 /* If X is a MEM referencing the constant pool, return the real value.
207 Otherwise return X. */
209 avoid_constant_pool_reference (rtx x)
211 rtx c, tmp, addr;
212 machine_mode cmode;
213 HOST_WIDE_INT offset = 0;
215 switch (GET_CODE (x))
217 case MEM:
218 break;
220 case FLOAT_EXTEND:
221 /* Handle float extensions of constant pool references. */
222 tmp = XEXP (x, 0);
223 c = avoid_constant_pool_reference (tmp);
224 if (c != tmp && CONST_DOUBLE_AS_FLOAT_P (c))
225 return const_double_from_real_value (*CONST_DOUBLE_REAL_VALUE (c),
226 GET_MODE (x));
227 return x;
229 default:
230 return x;
233 if (GET_MODE (x) == BLKmode)
234 return x;
236 addr = XEXP (x, 0);
238 /* Call target hook to avoid the effects of -fpic etc.... */
239 addr = targetm.delegitimize_address (addr);
241 /* Split the address into a base and integer offset. */
242 if (GET_CODE (addr) == CONST
243 && GET_CODE (XEXP (addr, 0)) == PLUS
244 && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
246 offset = INTVAL (XEXP (XEXP (addr, 0), 1));
247 addr = XEXP (XEXP (addr, 0), 0);
250 if (GET_CODE (addr) == LO_SUM)
251 addr = XEXP (addr, 1);
253 /* If this is a constant pool reference, we can turn it into its
254 constant and hope that simplifications happen. */
255 if (GET_CODE (addr) == SYMBOL_REF
256 && CONSTANT_POOL_ADDRESS_P (addr))
258 c = get_pool_constant (addr);
259 cmode = get_pool_mode (addr);
261 /* If we're accessing the constant in a different mode than it was
262 originally stored, attempt to fix that up via subreg simplifications.
263 If that fails we have no choice but to return the original memory. */
264 if (offset == 0 && cmode == GET_MODE (x))
265 return c;
266 else if (offset >= 0 && offset < GET_MODE_SIZE (cmode))
268 rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
269 if (tem && CONSTANT_P (tem))
270 return tem;
274 return x;
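/* Illustrative sketch, not part of the original source: anything that
   is not a MEM referencing the constant pool (here a plain register)
   is returned unchanged.  The function name is hypothetical.  */
static void ATTRIBUTE_UNUSED
example_avoid_constant_pool_reference (rtx reg)
{
  gcc_checking_assert (avoid_constant_pool_reference (reg) == reg);
}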
277 /* Simplify a MEM based on its attributes. This is the default
278 delegitimize_address target hook, and it's recommended that every
279 overrider call it. */
282 delegitimize_mem_from_attrs (rtx x)
284 /* MEMs without MEM_OFFSETs may have been offset, so we can't just
285 use their base addresses as equivalent. */
286 if (MEM_P (x)
287 && MEM_EXPR (x)
288 && MEM_OFFSET_KNOWN_P (x))
290 tree decl = MEM_EXPR (x);
291 machine_mode mode = GET_MODE (x);
292 HOST_WIDE_INT offset = 0;
294 switch (TREE_CODE (decl))
296 default:
297 decl = NULL;
298 break;
300 case VAR_DECL:
301 break;
303 case ARRAY_REF:
304 case ARRAY_RANGE_REF:
305 case COMPONENT_REF:
306 case BIT_FIELD_REF:
307 case REALPART_EXPR:
308 case IMAGPART_EXPR:
309 case VIEW_CONVERT_EXPR:
311 HOST_WIDE_INT bitsize, bitpos;
312 tree toffset;
313 int unsignedp, reversep, volatilep = 0;
315 decl
316 = get_inner_reference (decl, &bitsize, &bitpos, &toffset, &mode,
317 &unsignedp, &reversep, &volatilep);
318 if (bitsize != GET_MODE_BITSIZE (mode)
319 || (bitpos % BITS_PER_UNIT)
320 || (toffset && !tree_fits_shwi_p (toffset)))
321 decl = NULL;
322 else
324 offset += bitpos / BITS_PER_UNIT;
325 if (toffset)
326 offset += tree_to_shwi (toffset);
328 break;
332 if (decl
333 && mode == GET_MODE (x)
334 && VAR_P (decl)
335 && (TREE_STATIC (decl)
336 || DECL_THREAD_LOCAL_P (decl))
337 && DECL_RTL_SET_P (decl)
338 && MEM_P (DECL_RTL (decl)))
340 rtx newx;
342 offset += MEM_OFFSET (x);
344 newx = DECL_RTL (decl);
346 if (MEM_P (newx))
348 rtx n = XEXP (newx, 0), o = XEXP (x, 0);
350 /* Avoid creating a new MEM needlessly if we already had
351 the same address. We do if there's no OFFSET and the
352 old address X is identical to NEWX, or if X is of the
353 form (plus NEWX OFFSET), or the NEWX is of the form
354 (plus Y (const_int Z)) and X is that with the offset
355 added: (plus Y (const_int Z+OFFSET)). */
356 if (!((offset == 0
357 || (GET_CODE (o) == PLUS
358 && GET_CODE (XEXP (o, 1)) == CONST_INT
359 && (offset == INTVAL (XEXP (o, 1))
360 || (GET_CODE (n) == PLUS
361 && GET_CODE (XEXP (n, 1)) == CONST_INT
362 && (INTVAL (XEXP (n, 1)) + offset
363 == INTVAL (XEXP (o, 1)))
364 && (n = XEXP (n, 0))))
365 && (o = XEXP (o, 0))))
366 && rtx_equal_p (o, n)))
367 x = adjust_address_nv (newx, mode, offset);
369 else if (GET_MODE (x) == GET_MODE (newx)
370 && offset == 0)
371 x = newx;
375 return x;
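/* Illustrative sketch, not part of the original source: a target's
   TARGET_DELEGITIMIZE_ADDRESS hook would normally do its own
   unwrapping of PIC/TLS constructs and then fall back to this default
   implementation, as recommended above.  The function name is
   hypothetical.  */
static rtx ATTRIBUTE_UNUSED
example_target_delegitimize_address (rtx x)
{
  /* ... target-specific unwrapping would go here ...  */
  return delegitimize_mem_from_attrs (x);
}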
378 /* Make a unary operation by first seeing if it folds and otherwise making
379 the specified operation. */
382 simplify_gen_unary (enum rtx_code code, machine_mode mode, rtx op,
383 machine_mode op_mode)
385 rtx tem;
387 /* If this simplifies, use it. */
388 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
389 return tem;
391 return gen_rtx_fmt_e (code, mode, op);
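/* Illustrative sketch, not part of the original source: a constant
   operand folds immediately, e.g. (not:SI 0) becomes (const_int -1).
   The function name is hypothetical.  */
static rtx ATTRIBUTE_UNUSED
example_simplify_gen_unary (void)
{
  return simplify_gen_unary (NOT, SImode, const0_rtx, SImode);
}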
394 /* Likewise for ternary operations. */
397 simplify_gen_ternary (enum rtx_code code, machine_mode mode,
398 machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
400 rtx tem;
402 /* If this simplifies, use it. */
403 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
404 op0, op1, op2)))
405 return tem;
407 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
410 /* Likewise, for relational operations.
411 CMP_MODE specifies mode comparison is done in. */
414 simplify_gen_relational (enum rtx_code code, machine_mode mode,
415 machine_mode cmp_mode, rtx op0, rtx op1)
417 rtx tem;
419 if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
420 op0, op1)))
421 return tem;
423 return gen_rtx_fmt_ee (code, mode, op0, op1);
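/* Illustrative sketch, not part of the original source: a comparison
   of two constants folds to the mode's "false" or "true" value, so
   (eq:SI 1 0) is expected to become (const_int 0).  The function name
   is hypothetical.  */
static rtx ATTRIBUTE_UNUSED
example_simplify_gen_relational (void)
{
  return simplify_gen_relational (EQ, SImode, SImode, const1_rtx,
				  const0_rtx);
}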
426 /* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
427 and simplify the result. If FN is non-NULL, call this callback on each
428 X, if it returns non-NULL, replace X with its return value and simplify the
429 result. */
432 simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
433 rtx (*fn) (rtx, const_rtx, void *), void *data)
435 enum rtx_code code = GET_CODE (x);
436 machine_mode mode = GET_MODE (x);
437 machine_mode op_mode;
438 const char *fmt;
439 rtx op0, op1, op2, newx, op;
440 rtvec vec, newvec;
441 int i, j;
443 if (__builtin_expect (fn != NULL, 0))
445 newx = fn (x, old_rtx, data);
446 if (newx)
447 return newx;
449 else if (rtx_equal_p (x, old_rtx))
450 return copy_rtx ((rtx) data);
452 switch (GET_RTX_CLASS (code))
454 case RTX_UNARY:
455 op0 = XEXP (x, 0);
456 op_mode = GET_MODE (op0);
457 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
458 if (op0 == XEXP (x, 0))
459 return x;
460 return simplify_gen_unary (code, mode, op0, op_mode);
462 case RTX_BIN_ARITH:
463 case RTX_COMM_ARITH:
464 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
465 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
466 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
467 return x;
468 return simplify_gen_binary (code, mode, op0, op1);
470 case RTX_COMPARE:
471 case RTX_COMM_COMPARE:
472 op0 = XEXP (x, 0);
473 op1 = XEXP (x, 1);
474 op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
475 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
476 op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
477 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
478 return x;
479 return simplify_gen_relational (code, mode, op_mode, op0, op1);
481 case RTX_TERNARY:
482 case RTX_BITFIELD_OPS:
483 op0 = XEXP (x, 0);
484 op_mode = GET_MODE (op0);
485 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
486 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
487 op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
488 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
489 return x;
490 if (op_mode == VOIDmode)
491 op_mode = GET_MODE (op0);
492 return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
494 case RTX_EXTRA:
495 if (code == SUBREG)
497 op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
498 if (op0 == SUBREG_REG (x))
499 return x;
500 op0 = simplify_gen_subreg (GET_MODE (x), op0,
501 GET_MODE (SUBREG_REG (x)),
502 SUBREG_BYTE (x));
503 return op0 ? op0 : x;
505 break;
507 case RTX_OBJ:
508 if (code == MEM)
510 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
511 if (op0 == XEXP (x, 0))
512 return x;
513 return replace_equiv_address_nv (x, op0);
515 else if (code == LO_SUM)
517 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
518 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
520 /* (lo_sum (high x) y) -> y where x and y have the same base. */
521 if (GET_CODE (op0) == HIGH)
523 rtx base0, base1, offset0, offset1;
524 split_const (XEXP (op0, 0), &base0, &offset0);
525 split_const (op1, &base1, &offset1);
526 if (rtx_equal_p (base0, base1))
527 return op1;
530 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
531 return x;
532 return gen_rtx_LO_SUM (mode, op0, op1);
534 break;
536 default:
537 break;
540 newx = x;
541 fmt = GET_RTX_FORMAT (code);
542 for (i = 0; fmt[i]; i++)
543 switch (fmt[i])
545 case 'E':
546 vec = XVEC (x, i);
547 newvec = XVEC (newx, i);
548 for (j = 0; j < GET_NUM_ELEM (vec); j++)
550 op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
551 old_rtx, fn, data);
552 if (op != RTVEC_ELT (vec, j))
554 if (newvec == vec)
556 newvec = shallow_copy_rtvec (vec);
557 if (x == newx)
558 newx = shallow_copy_rtx (x);
559 XVEC (newx, i) = newvec;
561 RTVEC_ELT (newvec, j) = op;
564 break;
566 case 'e':
567 if (XEXP (x, i))
569 op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
570 if (op != XEXP (x, i))
572 if (x == newx)
573 newx = shallow_copy_rtx (x);
574 XEXP (newx, i) = op;
577 break;
579 return newx;
582 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
583 resulting RTX. Return a new RTX which is as simplified as possible. */
586 simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
588 return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
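/* Illustrative sketch, not part of the original source: replacing a
   register with a constant re-runs the simplifiers, so substituting
   (const_int 6) for the register in (plus:SI (reg:SI 100) (const_int 4))
   is expected to fold to (const_int 10).  The function name and
   register number are hypothetical.  */
static rtx ATTRIBUTE_UNUSED
example_simplify_replace_rtx (void)
{
  rtx reg = gen_rtx_REG (SImode, 100);
  rtx x = gen_rtx_PLUS (SImode, reg, GEN_INT (4));
  return simplify_replace_rtx (x, reg, GEN_INT (6));
}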
591 /* Try to simplify a MODE truncation of OP, which has OP_MODE.
592 Only handle cases where the truncated value is inherently an rvalue.
594 RTL provides two ways of truncating a value:
596 1. a lowpart subreg. This form is only a truncation when both
597 the outer and inner modes (here MODE and OP_MODE respectively)
598 are scalar integers, and only then when the subreg is used as
599 an rvalue.
601 It is only valid to form such truncating subregs if the
602 truncation requires no action by the target. The onus for
603 proving this is on the creator of the subreg -- e.g. the
604 caller to simplify_subreg or simplify_gen_subreg -- and typically
605 involves either TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode.
607 2. a TRUNCATE. This form handles both scalar and compound integers.
609 The first form is preferred where valid. However, the TRUNCATE
610 handling in simplify_unary_operation turns the second form into the
611 first form when TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode allow,
612 so it is generally safe to form rvalue truncations using:
614 simplify_gen_unary (TRUNCATE, ...)
616 and leave simplify_unary_operation to work out which representation
617 should be used.
619 Because of the proof requirements on (1), simplify_truncation must
620 also use simplify_gen_unary (TRUNCATE, ...) to truncate parts of OP,
621 regardless of whether the outer truncation came from a SUBREG or a
622 TRUNCATE. For example, if the caller has proven that an SImode
623 truncation of:
625 (and:DI X Y)
627 is a no-op and can be represented as a subreg, it does not follow
628 that SImode truncations of X and Y are also no-ops. On a target
629 like 64-bit MIPS that requires SImode values to be stored in
630 sign-extended form, an SImode truncation of:
632 (and:DI (reg:DI X) (const_int 63))
634 is trivially a no-op because only the lower 6 bits can be set.
635 However, X is still an arbitrary 64-bit number and so we cannot
636 assume that truncating it too is a no-op. */
638 static rtx
639 simplify_truncation (machine_mode mode, rtx op,
640 machine_mode op_mode)
642 unsigned int precision = GET_MODE_UNIT_PRECISION (mode);
643 unsigned int op_precision = GET_MODE_UNIT_PRECISION (op_mode);
644 scalar_int_mode int_mode, int_op_mode, subreg_mode;
646 gcc_assert (precision <= op_precision);
648 /* Optimize truncations of zero and sign extended values. */
649 if (GET_CODE (op) == ZERO_EXTEND
650 || GET_CODE (op) == SIGN_EXTEND)
652 /* There are three possibilities. If MODE is the same as the
653 origmode, we can omit both the extension and the subreg.
654 If MODE is not larger than the origmode, we can apply the
655 truncation without the extension. Finally, if the outermode
656 is larger than the origmode, we can just extend to the appropriate
657 mode. */
658 machine_mode origmode = GET_MODE (XEXP (op, 0));
659 if (mode == origmode)
660 return XEXP (op, 0);
661 else if (precision <= GET_MODE_UNIT_PRECISION (origmode))
662 return simplify_gen_unary (TRUNCATE, mode,
663 XEXP (op, 0), origmode);
664 else
665 return simplify_gen_unary (GET_CODE (op), mode,
666 XEXP (op, 0), origmode);
669 /* If the machine can perform operations in the truncated mode, distribute
670 the truncation, i.e. simplify (truncate:QI (op:SI (x:SI) (y:SI))) into
671 (op:QI (truncate:QI (x:SI)) (truncate:QI (y:SI))). */
672 if (1
673 && (!WORD_REGISTER_OPERATIONS || precision >= BITS_PER_WORD)
674 && (GET_CODE (op) == PLUS
675 || GET_CODE (op) == MINUS
676 || GET_CODE (op) == MULT))
678 rtx op0 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0), op_mode);
679 if (op0)
681 rtx op1 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 1), op_mode);
682 if (op1)
683 return simplify_gen_binary (GET_CODE (op), mode, op0, op1);
687 /* Simplify (truncate:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C)) into
688 (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
689 the outer subreg is effectively a truncation to the original mode. */
690 if ((GET_CODE (op) == LSHIFTRT
691 || GET_CODE (op) == ASHIFTRT)
692 /* Ensure that OP_MODE is at least twice as wide as MODE
693 to avoid the possibility that an outer LSHIFTRT shifts by more
694 than the sign extension's sign_bit_copies and introduces zeros
695 into the high bits of the result. */
696 && 2 * precision <= op_precision
697 && CONST_INT_P (XEXP (op, 1))
698 && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
699 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
700 && UINTVAL (XEXP (op, 1)) < precision)
701 return simplify_gen_binary (ASHIFTRT, mode,
702 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
704 /* Likewise (truncate:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C)) into
705 (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
706 the outer subreg is effectively a truncation to the original mode. */
707 if ((GET_CODE (op) == LSHIFTRT
708 || GET_CODE (op) == ASHIFTRT)
709 && CONST_INT_P (XEXP (op, 1))
710 && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
711 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
712 && UINTVAL (XEXP (op, 1)) < precision)
713 return simplify_gen_binary (LSHIFTRT, mode,
714 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
716 /* Likewise (truncate:QI (ashift:SI (zero_extend:SI (x:QI)) C)) into
717 (ashift:QI (x:QI) C), where C is a suitable small constant and
718 the outer subreg is effectively a truncation to the original mode. */
719 if (GET_CODE (op) == ASHIFT
720 && CONST_INT_P (XEXP (op, 1))
721 && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
722 || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
723 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
724 && UINTVAL (XEXP (op, 1)) < precision)
725 return simplify_gen_binary (ASHIFT, mode,
726 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
728 /* Likewise (truncate:QI (and:SI (lshiftrt:SI (x:SI) C) C2)) into
729 (and:QI (lshiftrt:QI (truncate:QI (x:SI)) C) C2) for suitable C
730 and C2. */
731 if (GET_CODE (op) == AND
732 && (GET_CODE (XEXP (op, 0)) == LSHIFTRT
733 || GET_CODE (XEXP (op, 0)) == ASHIFTRT)
734 && CONST_INT_P (XEXP (XEXP (op, 0), 1))
735 && CONST_INT_P (XEXP (op, 1)))
737 rtx op0 = (XEXP (XEXP (op, 0), 0));
738 rtx shift_op = XEXP (XEXP (op, 0), 1);
739 rtx mask_op = XEXP (op, 1);
740 unsigned HOST_WIDE_INT shift = UINTVAL (shift_op);
741 unsigned HOST_WIDE_INT mask = UINTVAL (mask_op);
743 if (shift < precision
744 /* If doing this transform works for an X with all bits set,
745 it works for any X. */
746 && ((GET_MODE_MASK (mode) >> shift) & mask)
747 == ((GET_MODE_MASK (op_mode) >> shift) & mask)
748 && (op0 = simplify_gen_unary (TRUNCATE, mode, op0, op_mode))
749 && (op0 = simplify_gen_binary (LSHIFTRT, mode, op0, shift_op)))
751 mask_op = GEN_INT (trunc_int_for_mode (mask, mode));
752 return simplify_gen_binary (AND, mode, op0, mask_op);
756 /* Turn (truncate:M1 (*_extract:M2 (reg:M2) (len) (pos))) into
757 (*_extract:M1 (truncate:M1 (reg:M2)) (len) (pos')) if possible without
758 changing len. */
759 if ((GET_CODE (op) == ZERO_EXTRACT || GET_CODE (op) == SIGN_EXTRACT)
760 && REG_P (XEXP (op, 0))
761 && GET_MODE (XEXP (op, 0)) == GET_MODE (op)
762 && CONST_INT_P (XEXP (op, 1))
763 && CONST_INT_P (XEXP (op, 2)))
765 rtx op0 = XEXP (op, 0);
766 unsigned HOST_WIDE_INT len = UINTVAL (XEXP (op, 1));
767 unsigned HOST_WIDE_INT pos = UINTVAL (XEXP (op, 2));
768 if (BITS_BIG_ENDIAN && pos >= op_precision - precision)
770 op0 = simplify_gen_unary (TRUNCATE, mode, op0, GET_MODE (op0));
771 if (op0)
773 pos -= op_precision - precision;
774 return simplify_gen_ternary (GET_CODE (op), mode, mode, op0,
775 XEXP (op, 1), GEN_INT (pos));
778 else if (!BITS_BIG_ENDIAN && precision >= len + pos)
780 op0 = simplify_gen_unary (TRUNCATE, mode, op0, GET_MODE (op0));
781 if (op0)
782 return simplify_gen_ternary (GET_CODE (op), mode, mode, op0,
783 XEXP (op, 1), XEXP (op, 2));
787 /* Recognize a word extraction from a multi-word subreg. */
788 if ((GET_CODE (op) == LSHIFTRT
789 || GET_CODE (op) == ASHIFTRT)
790 && SCALAR_INT_MODE_P (mode)
791 && SCALAR_INT_MODE_P (op_mode)
792 && precision >= BITS_PER_WORD
793 && 2 * precision <= op_precision
794 && CONST_INT_P (XEXP (op, 1))
795 && (INTVAL (XEXP (op, 1)) & (precision - 1)) == 0
796 && UINTVAL (XEXP (op, 1)) < op_precision)
798 int byte = subreg_lowpart_offset (mode, op_mode);
799 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
800 return simplify_gen_subreg (mode, XEXP (op, 0), op_mode,
801 (WORDS_BIG_ENDIAN
802 ? byte - shifted_bytes
803 : byte + shifted_bytes));
806 /* If we have a TRUNCATE of a right shift of MEM, make a new MEM
807 and try replacing the TRUNCATE and shift with it. Don't do this
808 if the MEM has a mode-dependent address. */
809 if ((GET_CODE (op) == LSHIFTRT
810 || GET_CODE (op) == ASHIFTRT)
811 && is_a <scalar_int_mode> (mode, &int_mode)
812 && is_a <scalar_int_mode> (op_mode, &int_op_mode)
813 && MEM_P (XEXP (op, 0))
814 && CONST_INT_P (XEXP (op, 1))
815 && INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (int_mode) == 0
816 && INTVAL (XEXP (op, 1)) > 0
817 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (int_op_mode)
818 && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0),
819 MEM_ADDR_SPACE (XEXP (op, 0)))
820 && ! MEM_VOLATILE_P (XEXP (op, 0))
821 && (GET_MODE_SIZE (int_mode) >= UNITS_PER_WORD
822 || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
824 int byte = subreg_lowpart_offset (int_mode, int_op_mode);
825 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
826 return adjust_address_nv (XEXP (op, 0), int_mode,
827 (WORDS_BIG_ENDIAN
828 ? byte - shifted_bytes
829 : byte + shifted_bytes));
832 /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
833 (OP:SI foo:SI) if OP is NEG or ABS. */
834 if ((GET_CODE (op) == ABS
835 || GET_CODE (op) == NEG)
836 && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
837 || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
838 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
839 return simplify_gen_unary (GET_CODE (op), mode,
840 XEXP (XEXP (op, 0), 0), mode);
842 /* (truncate:A (subreg:B (truncate:C X) 0)) is
843 (truncate:A X). */
844 if (GET_CODE (op) == SUBREG
845 && is_a <scalar_int_mode> (mode, &int_mode)
846 && SCALAR_INT_MODE_P (op_mode)
847 && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (op)), &subreg_mode)
848 && GET_CODE (SUBREG_REG (op)) == TRUNCATE
849 && subreg_lowpart_p (op))
851 rtx inner = XEXP (SUBREG_REG (op), 0);
852 if (GET_MODE_PRECISION (int_mode) <= GET_MODE_PRECISION (subreg_mode))
853 return simplify_gen_unary (TRUNCATE, int_mode, inner,
854 GET_MODE (inner));
855 else
856 /* If subreg above is paradoxical and C is narrower
857 than A, return (subreg:A (truncate:C X) 0). */
858 return simplify_gen_subreg (int_mode, SUBREG_REG (op), subreg_mode, 0);
861 /* (truncate:A (truncate:B X)) is (truncate:A X). */
862 if (GET_CODE (op) == TRUNCATE)
863 return simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0),
864 GET_MODE (XEXP (op, 0)));
866 /* (truncate:A (ior X C)) is (const_int -1) if C is equal to that already,
867 in mode A. */
868 if (GET_CODE (op) == IOR
869 && SCALAR_INT_MODE_P (mode)
870 && SCALAR_INT_MODE_P (op_mode)
871 && CONST_INT_P (XEXP (op, 1))
872 && trunc_int_for_mode (INTVAL (XEXP (op, 1)), mode) == -1)
873 return constm1_rtx;
875 return NULL_RTX;
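/* Illustrative sketch, not part of the original source: as the comment
   above simplify_truncation explains, rvalue truncations are normally
   formed with simplify_gen_unary (TRUNCATE, ...); a constant operand
   folds directly, e.g. truncating 0x1234 from SImode to QImode gives
   (const_int 0x34).  The function name is hypothetical.  */
static rtx ATTRIBUTE_UNUSED
example_truncate_rvalue (void)
{
  return simplify_gen_unary (TRUNCATE, QImode, GEN_INT (0x1234), SImode);
}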
878 /* Try to simplify a unary operation CODE whose output mode is to be
879 MODE with input operand OP whose mode was originally OP_MODE.
880 Return zero if no simplification can be made. */
882 simplify_unary_operation (enum rtx_code code, machine_mode mode,
883 rtx op, machine_mode op_mode)
885 rtx trueop, tem;
887 trueop = avoid_constant_pool_reference (op);
889 tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
890 if (tem)
891 return tem;
893 return simplify_unary_operation_1 (code, mode, op);
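/* Illustrative sketch, not part of the original source: when neither
   the constant folder nor the non-constant rules apply, the function
   returns NULL_RTX instead of building a new rtx; negating a plain
   register is one such case.  The function name is hypothetical.  */
static void ATTRIBUTE_UNUSED
example_simplify_unary_operation (rtx reg)
{
  gcc_checking_assert (simplify_unary_operation (NEG, SImode, reg, SImode)
		       == NULL_RTX);
}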
896 /* Return true if FLOAT or UNSIGNED_FLOAT operation OP is known
897 to be exact. */
899 static bool
900 exact_int_to_float_conversion_p (const_rtx op)
902 int out_bits = significand_size (GET_MODE_INNER (GET_MODE (op)));
903 machine_mode op0_mode = GET_MODE (XEXP (op, 0));
904 /* Constants shouldn't reach here. */
905 gcc_assert (op0_mode != VOIDmode);
906 int in_prec = GET_MODE_UNIT_PRECISION (op0_mode);
907 int in_bits = in_prec;
908 if (HWI_COMPUTABLE_MODE_P (op0_mode))
910 unsigned HOST_WIDE_INT nonzero = nonzero_bits (XEXP (op, 0), op0_mode);
911 if (GET_CODE (op) == FLOAT)
912 in_bits -= num_sign_bit_copies (XEXP (op, 0), op0_mode);
913 else if (GET_CODE (op) == UNSIGNED_FLOAT)
914 in_bits = wi::min_precision (wi::uhwi (nonzero, in_prec), UNSIGNED);
915 else
916 gcc_unreachable ();
917 in_bits -= wi::ctz (wi::uhwi (nonzero, in_prec));
919 return in_bits <= out_bits;
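/* Illustrative sketch, not part of the original source: converting a
   full SImode value to DFmode is exact because DFmode's significand
   has more bits than SImode's precision.  The function name and
   register number are hypothetical.  */
static void ATTRIBUTE_UNUSED
example_exact_int_to_float_conversion_p (void)
{
  rtx f = gen_rtx_FLOAT (DFmode, gen_rtx_REG (SImode, 101));
  gcc_checking_assert (exact_int_to_float_conversion_p (f));
}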
922 /* Perform some simplifications we can do even if the operands
923 aren't constant. */
924 static rtx
925 simplify_unary_operation_1 (enum rtx_code code, machine_mode mode, rtx op)
927 enum rtx_code reversed;
928 rtx temp;
929 scalar_int_mode inner, int_mode, op_mode, op0_mode;
931 switch (code)
933 case NOT:
934 /* (not (not X)) == X. */
935 if (GET_CODE (op) == NOT)
936 return XEXP (op, 0);
938 /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
939 comparison is all ones. */
940 if (COMPARISON_P (op)
941 && (mode == BImode || STORE_FLAG_VALUE == -1)
942 && ((reversed = reversed_comparison_code (op, NULL)) != UNKNOWN))
943 return simplify_gen_relational (reversed, mode, VOIDmode,
944 XEXP (op, 0), XEXP (op, 1));
946 /* (not (plus X -1)) can become (neg X). */
947 if (GET_CODE (op) == PLUS
948 && XEXP (op, 1) == constm1_rtx)
949 return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
951 /* Similarly, (not (neg X)) is (plus X -1). Only do this for
952 modes that have CONSTM1_RTX, i.e. MODE_INT, MODE_PARTIAL_INT
953 and MODE_VECTOR_INT. */
954 if (GET_CODE (op) == NEG && CONSTM1_RTX (mode))
955 return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
956 CONSTM1_RTX (mode));
958 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
959 if (GET_CODE (op) == XOR
960 && CONST_INT_P (XEXP (op, 1))
961 && (temp = simplify_unary_operation (NOT, mode,
962 XEXP (op, 1), mode)) != 0)
963 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
965 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
966 if (GET_CODE (op) == PLUS
967 && CONST_INT_P (XEXP (op, 1))
968 && mode_signbit_p (mode, XEXP (op, 1))
969 && (temp = simplify_unary_operation (NOT, mode,
970 XEXP (op, 1), mode)) != 0)
971 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
974 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
975 operands other than 1, but that is not valid. We could do a
976 similar simplification for (not (lshiftrt C X)) where C is
977 just the sign bit, but this doesn't seem common enough to
978 bother with. */
979 if (GET_CODE (op) == ASHIFT
980 && XEXP (op, 0) == const1_rtx)
982 temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
983 return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
986 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
987 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
988 so we can perform the above simplification. */
989 if (STORE_FLAG_VALUE == -1
990 && is_a <scalar_int_mode> (mode, &int_mode)
991 && GET_CODE (op) == ASHIFTRT
992 && CONST_INT_P (XEXP (op, 1))
993 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (int_mode) - 1)
994 return simplify_gen_relational (GE, int_mode, VOIDmode,
995 XEXP (op, 0), const0_rtx);
998 if (GET_CODE (op) == SUBREG
999 && subreg_lowpart_p (op)
1000 && (GET_MODE_SIZE (GET_MODE (op))
1001 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
1002 && GET_CODE (SUBREG_REG (op)) == ASHIFT
1003 && XEXP (SUBREG_REG (op), 0) == const1_rtx)
1005 machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
1006 rtx x;
1008 x = gen_rtx_ROTATE (inner_mode,
1009 simplify_gen_unary (NOT, inner_mode, const1_rtx,
1010 inner_mode),
1011 XEXP (SUBREG_REG (op), 1));
1012 temp = rtl_hooks.gen_lowpart_no_emit (mode, x);
1013 if (temp)
1014 return temp;
1017 /* Apply De Morgan's laws to reduce number of patterns for machines
1018 with negating logical insns (and-not, nand, etc.). If result has
1019 only one NOT, put it first, since that is how the patterns are
1020 coded. */
1021 if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
1023 rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
1024 machine_mode op_mode;
1026 op_mode = GET_MODE (in1);
1027 in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);
1029 op_mode = GET_MODE (in2);
1030 if (op_mode == VOIDmode)
1031 op_mode = mode;
1032 in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);
1034 if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
1035 std::swap (in1, in2);
1037 return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
1038 mode, in1, in2);
1041 /* (not (bswap x)) -> (bswap (not x)). */
1042 if (GET_CODE (op) == BSWAP)
1044 rtx x = simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
1045 return simplify_gen_unary (BSWAP, mode, x, mode);
1047 break;
1049 case NEG:
1050 /* (neg (neg X)) == X. */
1051 if (GET_CODE (op) == NEG)
1052 return XEXP (op, 0);
1054 /* (neg (x ? (neg y) : y)) == !x ? (neg y) : y.
1055 If comparison is not reversible use
1056 x ? y : (neg y). */
1057 if (GET_CODE (op) == IF_THEN_ELSE)
1059 rtx cond = XEXP (op, 0);
1060 rtx true_rtx = XEXP (op, 1);
1061 rtx false_rtx = XEXP (op, 2);
1063 if ((GET_CODE (true_rtx) == NEG
1064 && rtx_equal_p (XEXP (true_rtx, 0), false_rtx))
1065 || (GET_CODE (false_rtx) == NEG
1066 && rtx_equal_p (XEXP (false_rtx, 0), true_rtx)))
1068 if (reversed_comparison_code (cond, NULL) != UNKNOWN)
1069 temp = reversed_comparison (cond, mode);
1070 else
1072 temp = cond;
1073 std::swap (true_rtx, false_rtx);
1075 return simplify_gen_ternary (IF_THEN_ELSE, mode,
1076 mode, temp, true_rtx, false_rtx);
1080 /* (neg (plus X 1)) can become (not X). */
1081 if (GET_CODE (op) == PLUS
1082 && XEXP (op, 1) == const1_rtx)
1083 return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
1085 /* Similarly, (neg (not X)) is (plus X 1). */
1086 if (GET_CODE (op) == NOT)
1087 return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
1088 CONST1_RTX (mode));
1090 /* (neg (minus X Y)) can become (minus Y X). This transformation
1091 isn't safe for modes with signed zeros, since if X and Y are
1092 both +0, (minus Y X) is the same as (minus X Y). If the
1093 rounding mode is towards +infinity (or -infinity) then the two
1094 expressions will be rounded differently. */
1095 if (GET_CODE (op) == MINUS
1096 && !HONOR_SIGNED_ZEROS (mode)
1097 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1098 return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
1100 if (GET_CODE (op) == PLUS
1101 && !HONOR_SIGNED_ZEROS (mode)
1102 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1104 /* (neg (plus A C)) is simplified to (minus -C A). */
1105 if (CONST_SCALAR_INT_P (XEXP (op, 1))
1106 || CONST_DOUBLE_AS_FLOAT_P (XEXP (op, 1)))
1108 temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
1109 if (temp)
1110 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
1113 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
1114 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
1115 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
1118 /* (neg (mult A B)) becomes (mult A (neg B)).
1119 This works even for floating-point values. */
1120 if (GET_CODE (op) == MULT
1121 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1123 temp = simplify_gen_unary (NEG, mode, XEXP (op, 1), mode);
1124 return simplify_gen_binary (MULT, mode, XEXP (op, 0), temp);
1127 /* NEG commutes with ASHIFT since it is multiplication. Only do
1128 this if we can then eliminate the NEG (e.g., if the operand
1129 is a constant). */
1130 if (GET_CODE (op) == ASHIFT)
1132 temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
1133 if (temp)
1134 return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
1137 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
1138 C is equal to the width of MODE minus 1. */
1139 if (GET_CODE (op) == ASHIFTRT
1140 && CONST_INT_P (XEXP (op, 1))
1141 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
1142 return simplify_gen_binary (LSHIFTRT, mode,
1143 XEXP (op, 0), XEXP (op, 1));
1145 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
1146 C is equal to the width of MODE minus 1. */
1147 if (GET_CODE (op) == LSHIFTRT
1148 && CONST_INT_P (XEXP (op, 1))
1149 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
1150 return simplify_gen_binary (ASHIFTRT, mode,
1151 XEXP (op, 0), XEXP (op, 1));
1153 /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
1154 if (GET_CODE (op) == XOR
1155 && XEXP (op, 1) == const1_rtx
1156 && nonzero_bits (XEXP (op, 0), mode) == 1)
1157 return plus_constant (mode, XEXP (op, 0), -1);
1159 /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1. */
1160 /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1. */
1161 if (GET_CODE (op) == LT
1162 && XEXP (op, 1) == const0_rtx
1163 && is_a <scalar_int_mode> (GET_MODE (XEXP (op, 0)), &inner))
1165 int_mode = as_a <scalar_int_mode> (mode);
1166 int isize = GET_MODE_PRECISION (inner);
1167 if (STORE_FLAG_VALUE == 1)
1169 temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
1170 GEN_INT (isize - 1));
1171 if (int_mode == inner)
1172 return temp;
1173 if (GET_MODE_PRECISION (int_mode) > isize)
1174 return simplify_gen_unary (SIGN_EXTEND, int_mode, temp, inner);
1175 return simplify_gen_unary (TRUNCATE, int_mode, temp, inner);
1177 else if (STORE_FLAG_VALUE == -1)
1179 temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
1180 GEN_INT (isize - 1));
1181 if (int_mode == inner)
1182 return temp;
1183 if (GET_MODE_PRECISION (int_mode) > isize)
1184 return simplify_gen_unary (ZERO_EXTEND, int_mode, temp, inner);
1185 return simplify_gen_unary (TRUNCATE, int_mode, temp, inner);
1188 break;
1190 case TRUNCATE:
1191 /* Don't optimize (lshiftrt (mult ...)) as it would interfere
1192 with the umulXi3_highpart patterns. */
1193 if (GET_CODE (op) == LSHIFTRT
1194 && GET_CODE (XEXP (op, 0)) == MULT)
1195 break;
1197 if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
1199 if (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op)))
1201 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1202 if (temp)
1203 return temp;
1205 /* We can't handle truncation to a partial integer mode here
1206 because we don't know the real bitsize of the partial
1207 integer mode. */
1208 break;
1211 if (GET_MODE (op) != VOIDmode)
1213 temp = simplify_truncation (mode, op, GET_MODE (op));
1214 if (temp)
1215 return temp;
1218 /* If we know that the value is already truncated, we can
1219 replace the TRUNCATE with a SUBREG. */
1220 if (GET_MODE_NUNITS (mode) == 1
1221 && (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op))
1222 || truncated_to_mode (mode, op)))
1224 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1225 if (temp)
1226 return temp;
1229 /* A truncate of a comparison can be replaced with a subreg if
1230 STORE_FLAG_VALUE permits. This is like the previous test,
1231 but it works even if the comparison is done in a mode larger
1232 than HOST_BITS_PER_WIDE_INT. */
1233 if (HWI_COMPUTABLE_MODE_P (mode)
1234 && COMPARISON_P (op)
1235 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
1237 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1238 if (temp)
1239 return temp;
1242 /* A truncate of a memory is just loading the low part of the memory
1243 if we are not changing the meaning of the address. */
1244 if (GET_CODE (op) == MEM
1245 && !VECTOR_MODE_P (mode)
1246 && !MEM_VOLATILE_P (op)
1247 && !mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op)))
1249 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1250 if (temp)
1251 return temp;
1254 break;
1256 case FLOAT_TRUNCATE:
1257 if (DECIMAL_FLOAT_MODE_P (mode))
1258 break;
1260 /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */
1261 if (GET_CODE (op) == FLOAT_EXTEND
1262 && GET_MODE (XEXP (op, 0)) == mode)
1263 return XEXP (op, 0);
1265 /* (float_truncate:SF (float_truncate:DF foo:XF))
1266 = (float_truncate:SF foo:XF).
1267 This may eliminate double rounding, so it is unsafe.
1269 (float_truncate:SF (float_extend:XF foo:DF))
1270 = (float_truncate:SF foo:DF).
1272 (float_truncate:DF (float_extend:XF foo:SF))
1273 = (float_extend:DF foo:SF). */
1274 if ((GET_CODE (op) == FLOAT_TRUNCATE
1275 && flag_unsafe_math_optimizations)
1276 || GET_CODE (op) == FLOAT_EXTEND)
1277 return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op,
1278 0)))
1279 > GET_MODE_SIZE (mode)
1280 ? FLOAT_TRUNCATE : FLOAT_EXTEND,
1281 mode,
1282 XEXP (op, 0), mode);
1284 /* (float_truncate (float x)) is (float x) */
1285 if ((GET_CODE (op) == FLOAT || GET_CODE (op) == UNSIGNED_FLOAT)
1286 && (flag_unsafe_math_optimizations
1287 || exact_int_to_float_conversion_p (op)))
1288 return simplify_gen_unary (GET_CODE (op), mode,
1289 XEXP (op, 0),
1290 GET_MODE (XEXP (op, 0)));
1292 /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
1293 (OP:SF foo:SF) if OP is NEG or ABS. */
1294 if ((GET_CODE (op) == ABS
1295 || GET_CODE (op) == NEG)
1296 && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
1297 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
1298 return simplify_gen_unary (GET_CODE (op), mode,
1299 XEXP (XEXP (op, 0), 0), mode);
1301 /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
1302 is (float_truncate:SF x). */
1303 if (GET_CODE (op) == SUBREG
1304 && subreg_lowpart_p (op)
1305 && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
1306 return SUBREG_REG (op);
1307 break;
1309 case FLOAT_EXTEND:
1310 if (DECIMAL_FLOAT_MODE_P (mode))
1311 break;
1313 /* (float_extend (float_extend x)) is (float_extend x)
1315 (float_extend (float x)) is (float x) assuming that double
1316 rounding can't happen.  */
1318 if (GET_CODE (op) == FLOAT_EXTEND
1319 || ((GET_CODE (op) == FLOAT || GET_CODE (op) == UNSIGNED_FLOAT)
1320 && exact_int_to_float_conversion_p (op)))
1321 return simplify_gen_unary (GET_CODE (op), mode,
1322 XEXP (op, 0),
1323 GET_MODE (XEXP (op, 0)));
1325 break;
1327 case ABS:
1328 /* (abs (neg <foo>)) -> (abs <foo>) */
1329 if (GET_CODE (op) == NEG)
1330 return simplify_gen_unary (ABS, mode, XEXP (op, 0),
1331 GET_MODE (XEXP (op, 0)));
1333 /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
1334 do nothing. */
1335 if (GET_MODE (op) == VOIDmode)
1336 break;
1338 /* If operand is something known to be positive, ignore the ABS. */
1339 if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
1340 || val_signbit_known_clear_p (GET_MODE (op),
1341 nonzero_bits (op, GET_MODE (op))))
1342 return op;
1344 /* If operand is known to be only -1 or 0, convert ABS to NEG. */
1345 if (is_a <scalar_int_mode> (mode, &int_mode)
1346 && (num_sign_bit_copies (op, int_mode)
1347 == GET_MODE_PRECISION (int_mode)))
1348 return gen_rtx_NEG (int_mode, op);
1350 break;
1352 case FFS:
1353 /* (ffs (*_extend <X>)) = (ffs <X>) */
1354 if (GET_CODE (op) == SIGN_EXTEND
1355 || GET_CODE (op) == ZERO_EXTEND)
1356 return simplify_gen_unary (FFS, mode, XEXP (op, 0),
1357 GET_MODE (XEXP (op, 0)));
1358 break;
1360 case POPCOUNT:
1361 switch (GET_CODE (op))
1363 case BSWAP:
1364 case ZERO_EXTEND:
1365 /* (popcount (zero_extend <X>)) = (popcount <X>) */
1366 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1367 GET_MODE (XEXP (op, 0)));
1369 case ROTATE:
1370 case ROTATERT:
1371 /* Rotations don't affect popcount. */
1372 if (!side_effects_p (XEXP (op, 1)))
1373 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1374 GET_MODE (XEXP (op, 0)));
1375 break;
1377 default:
1378 break;
1380 break;
1382 case PARITY:
1383 switch (GET_CODE (op))
1385 case NOT:
1386 case BSWAP:
1387 case ZERO_EXTEND:
1388 case SIGN_EXTEND:
1389 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1390 GET_MODE (XEXP (op, 0)));
1392 case ROTATE:
1393 case ROTATERT:
1394 /* Rotations don't affect parity. */
1395 if (!side_effects_p (XEXP (op, 1)))
1396 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1397 GET_MODE (XEXP (op, 0)));
1398 break;
1400 default:
1401 break;
1403 break;
1405 case BSWAP:
1406 /* (bswap (bswap x)) -> x. */
1407 if (GET_CODE (op) == BSWAP)
1408 return XEXP (op, 0);
1409 break;
1411 case FLOAT:
1412 /* (float (sign_extend <X>)) = (float <X>). */
1413 if (GET_CODE (op) == SIGN_EXTEND)
1414 return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
1415 GET_MODE (XEXP (op, 0)));
1416 break;
1418 case SIGN_EXTEND:
1419 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
1420 becomes just the MINUS if its mode is MODE. This allows
1421 folding switch statements on machines using casesi (such as
1422 the VAX). */
1423 if (GET_CODE (op) == TRUNCATE
1424 && GET_MODE (XEXP (op, 0)) == mode
1425 && GET_CODE (XEXP (op, 0)) == MINUS
1426 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
1427 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
1428 return XEXP (op, 0);
1430 /* Extending a widening multiplication should be canonicalized to
1431 a wider widening multiplication. */
1432 if (GET_CODE (op) == MULT)
1434 rtx lhs = XEXP (op, 0);
1435 rtx rhs = XEXP (op, 1);
1436 enum rtx_code lcode = GET_CODE (lhs);
1437 enum rtx_code rcode = GET_CODE (rhs);
1439 /* Widening multiplies usually extend both operands, but sometimes
1440 they use a shift to extract a portion of a register. */
1441 if ((lcode == SIGN_EXTEND
1442 || (lcode == ASHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1443 && (rcode == SIGN_EXTEND
1444 || (rcode == ASHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1446 machine_mode lmode = GET_MODE (lhs);
1447 machine_mode rmode = GET_MODE (rhs);
1448 int bits;
1450 if (lcode == ASHIFTRT)
1451 /* Number of bits not shifted off the end. */
1452 bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
1453 else /* lcode == SIGN_EXTEND */
1454 /* Size of inner mode. */
1455 bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
1457 if (rcode == ASHIFTRT)
1458 bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
1459 else /* rcode == SIGN_EXTEND */
1460 bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
1462 /* We can only widen multiplies if the result is mathematically
1463 equivalent. I.e. if overflow was impossible. */
1464 if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
1465 return simplify_gen_binary
1466 (MULT, mode,
1467 simplify_gen_unary (SIGN_EXTEND, mode, lhs, lmode),
1468 simplify_gen_unary (SIGN_EXTEND, mode, rhs, rmode));
1472 /* Check for a sign extension of a subreg of a promoted
1473 variable, where the promotion is sign-extended, and the
1474 target mode is the same as the variable's promotion. */
1475 if (GET_CODE (op) == SUBREG
1476 && SUBREG_PROMOTED_VAR_P (op)
1477 && SUBREG_PROMOTED_SIGNED_P (op)
1478 && !paradoxical_subreg_p (mode, GET_MODE (SUBREG_REG (op))))
1480 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1481 if (temp)
1482 return temp;
1485 /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
1486 (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1487 if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
1489 gcc_assert (GET_MODE_PRECISION (mode)
1490 > GET_MODE_PRECISION (GET_MODE (op)));
1491 return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
1492 GET_MODE (XEXP (op, 0)));
1495 /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
1496 is (sign_extend:M (subreg:O <X>)) if there is mode with
1497 GET_MODE_BITSIZE (N) - I bits.
1498 (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1499 is similarly (zero_extend:M (subreg:O <X>)). */
1500 if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT)
1501 && GET_CODE (XEXP (op, 0)) == ASHIFT
1502 && is_a <scalar_int_mode> (mode, &int_mode)
1503 && CONST_INT_P (XEXP (op, 1))
1504 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1505 && (op_mode = as_a <scalar_int_mode> (GET_MODE (op)),
1506 GET_MODE_BITSIZE (op_mode) > INTVAL (XEXP (op, 1))))
1508 scalar_int_mode tmode;
1509 gcc_assert (GET_MODE_BITSIZE (int_mode)
1510 > GET_MODE_BITSIZE (op_mode));
1511 if (int_mode_for_size (GET_MODE_BITSIZE (op_mode)
1512 - INTVAL (XEXP (op, 1)), 1).exists (&tmode))
1514 rtx inner =
1515 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1516 if (inner)
1517 return simplify_gen_unary (GET_CODE (op) == ASHIFTRT
1518 ? SIGN_EXTEND : ZERO_EXTEND,
1519 int_mode, inner, tmode);
1523 /* (sign_extend:M (lshiftrt:N <X> (const_int I))) is better as
1524 (zero_extend:M (lshiftrt:N <X> (const_int I))) if I is not 0. */
1525 if (GET_CODE (op) == LSHIFTRT
1526 && CONST_INT_P (XEXP (op, 1))
1527 && XEXP (op, 1) != const0_rtx)
1528 return simplify_gen_unary (ZERO_EXTEND, mode, op, GET_MODE (op));
1530 #if defined(POINTERS_EXTEND_UNSIGNED)
1531 /* As we do not know which address space the pointer is referring to,
1532 we can do this only if the target does not support different pointer
1533 or address modes depending on the address space. */
1534 if (target_default_pointer_address_modes_p ()
1535 && ! POINTERS_EXTEND_UNSIGNED
1536 && mode == Pmode && GET_MODE (op) == ptr_mode
1537 && (CONSTANT_P (op)
1538 || (GET_CODE (op) == SUBREG
1539 && REG_P (SUBREG_REG (op))
1540 && REG_POINTER (SUBREG_REG (op))
1541 && GET_MODE (SUBREG_REG (op)) == Pmode))
1542 && !targetm.have_ptr_extend ())
1544 temp
1545 = convert_memory_address_addr_space_1 (Pmode, op,
1546 ADDR_SPACE_GENERIC, false,
1547 true);
1548 if (temp)
1549 return temp;
1551 #endif
1552 break;
1554 case ZERO_EXTEND:
1555 /* Check for a zero extension of a subreg of a promoted
1556 variable, where the promotion is zero-extended, and the
1557 target mode is the same as the variable's promotion. */
1558 if (GET_CODE (op) == SUBREG
1559 && SUBREG_PROMOTED_VAR_P (op)
1560 && SUBREG_PROMOTED_UNSIGNED_P (op)
1561 && !paradoxical_subreg_p (mode, GET_MODE (SUBREG_REG (op))))
1563 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1564 if (temp)
1565 return temp;
1568 /* Extending a widening multiplication should be canonicalized to
1569 a wider widening multiplication. */
1570 if (GET_CODE (op) == MULT)
1572 rtx lhs = XEXP (op, 0);
1573 rtx rhs = XEXP (op, 1);
1574 enum rtx_code lcode = GET_CODE (lhs);
1575 enum rtx_code rcode = GET_CODE (rhs);
1577 /* Widening multiplies usually extend both operands, but sometimes
1578 they use a shift to extract a portion of a register. */
1579 if ((lcode == ZERO_EXTEND
1580 || (lcode == LSHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1581 && (rcode == ZERO_EXTEND
1582 || (rcode == LSHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1584 machine_mode lmode = GET_MODE (lhs);
1585 machine_mode rmode = GET_MODE (rhs);
1586 int bits;
1588 if (lcode == LSHIFTRT)
1589 /* Number of bits not shifted off the end. */
1590 bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
1591 else /* lcode == ZERO_EXTEND */
1592 /* Size of inner mode. */
1593 bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
1595 if (rcode == LSHIFTRT)
1596 bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
1597 else /* rcode == ZERO_EXTEND */
1598 bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
1600 /* We can only widen multiplies if the result is mathematically
1601 equivalent. I.e. if overflow was impossible. */
1602 if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
1603 return simplify_gen_binary
1604 (MULT, mode,
1605 simplify_gen_unary (ZERO_EXTEND, mode, lhs, lmode),
1606 simplify_gen_unary (ZERO_EXTEND, mode, rhs, rmode));
1610 /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1611 if (GET_CODE (op) == ZERO_EXTEND)
1612 return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0),
1613 GET_MODE (XEXP (op, 0)));
1615 /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1616 is (zero_extend:M (subreg:O <X>)) if there is mode with
1617 GET_MODE_PRECISION (N) - I bits. */
1618 if (GET_CODE (op) == LSHIFTRT
1619 && GET_CODE (XEXP (op, 0)) == ASHIFT
1620 && is_a <scalar_int_mode> (mode, &int_mode)
1621 && CONST_INT_P (XEXP (op, 1))
1622 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1623 && (op_mode = as_a <scalar_int_mode> (GET_MODE (op)),
1624 GET_MODE_PRECISION (op_mode) > INTVAL (XEXP (op, 1))))
1626 scalar_int_mode tmode;
1627 if (int_mode_for_size (GET_MODE_PRECISION (op_mode)
1628 - INTVAL (XEXP (op, 1)), 1).exists (&tmode))
1630 rtx inner =
1631 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1632 if (inner)
1633 return simplify_gen_unary (ZERO_EXTEND, int_mode,
1634 inner, tmode);
1638 /* (zero_extend:M (subreg:N <X:O>)) is <X:O> (for M == O) or
1639 (zero_extend:M <X:O>), if X doesn't have any non-zero bits outside
1640 of mode N. E.g.
1641 (zero_extend:SI (subreg:QI (and:SI (reg:SI) (const_int 63)) 0)) is
1642 (and:SI (reg:SI) (const_int 63)). */
1643 if (GET_CODE (op) == SUBREG
1644 && is_a <scalar_int_mode> (mode, &int_mode)
1645 && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (op)), &op0_mode)
1646 && GET_MODE_PRECISION (GET_MODE (op)) < GET_MODE_PRECISION (op0_mode)
1647 && GET_MODE_PRECISION (op0_mode) <= HOST_BITS_PER_WIDE_INT
1648 && GET_MODE_PRECISION (int_mode) >= GET_MODE_PRECISION (op0_mode)
1649 && subreg_lowpart_p (op)
1650 && (nonzero_bits (SUBREG_REG (op), op0_mode)
1651 & ~GET_MODE_MASK (GET_MODE (op))) == 0)
1653 if (GET_MODE_PRECISION (int_mode) == GET_MODE_PRECISION (op0_mode))
1654 return SUBREG_REG (op);
1655 return simplify_gen_unary (ZERO_EXTEND, int_mode, SUBREG_REG (op),
1656 op0_mode);
1659 #if defined(POINTERS_EXTEND_UNSIGNED)
1660 /* As we do not know which address space the pointer is referring to,
1661 we can do this only if the target does not support different pointer
1662 or address modes depending on the address space. */
1663 if (target_default_pointer_address_modes_p ()
1664 && POINTERS_EXTEND_UNSIGNED > 0
1665 && mode == Pmode && GET_MODE (op) == ptr_mode
1666 && (CONSTANT_P (op)
1667 || (GET_CODE (op) == SUBREG
1668 && REG_P (SUBREG_REG (op))
1669 && REG_POINTER (SUBREG_REG (op))
1670 && GET_MODE (SUBREG_REG (op)) == Pmode))
1671 && !targetm.have_ptr_extend ())
1673 temp
1674 = convert_memory_address_addr_space_1 (Pmode, op,
1675 ADDR_SPACE_GENERIC, false,
1676 true);
1677 if (temp)
1678 return temp;
1680 #endif
1681 break;
1683 default:
1684 break;
1687 return 0;
1690 /* Try to compute the value of a unary operation CODE whose output mode is to
1691 be MODE with input operand OP whose mode was originally OP_MODE.
1692 Return zero if the value cannot be computed. */
1694 simplify_const_unary_operation (enum rtx_code code, machine_mode mode,
1695 rtx op, machine_mode op_mode)
1697 scalar_int_mode result_mode;
1699 if (code == VEC_DUPLICATE)
1701 gcc_assert (VECTOR_MODE_P (mode));
1702 if (GET_MODE (op) != VOIDmode)
1704 if (!VECTOR_MODE_P (GET_MODE (op)))
1705 gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
1706 else
1707 gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
1708 (GET_MODE (op)));
1710 if (CONST_SCALAR_INT_P (op) || CONST_DOUBLE_AS_FLOAT_P (op)
1711 || GET_CODE (op) == CONST_VECTOR)
1713 int elt_size = GET_MODE_UNIT_SIZE (mode);
1714 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1715 rtvec v = rtvec_alloc (n_elts);
1716 unsigned int i;
1718 if (GET_CODE (op) != CONST_VECTOR)
1719 for (i = 0; i < n_elts; i++)
1720 RTVEC_ELT (v, i) = op;
1721 else
1723 machine_mode inmode = GET_MODE (op);
1724 int in_elt_size = GET_MODE_UNIT_SIZE (inmode);
1725 unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
1727 gcc_assert (in_n_elts < n_elts);
1728 gcc_assert ((n_elts % in_n_elts) == 0);
1729 for (i = 0; i < n_elts; i++)
1730 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
1732 return gen_rtx_CONST_VECTOR (mode, v);
1736 if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
1738 int elt_size = GET_MODE_UNIT_SIZE (mode);
1739 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1740 machine_mode opmode = GET_MODE (op);
1741 int op_elt_size = GET_MODE_UNIT_SIZE (opmode);
1742 unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
1743 rtvec v = rtvec_alloc (n_elts);
1744 unsigned int i;
1746 gcc_assert (op_n_elts == n_elts);
1747 for (i = 0; i < n_elts; i++)
1749 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
1750 CONST_VECTOR_ELT (op, i),
1751 GET_MODE_INNER (opmode));
1752 if (!x)
1753 return 0;
1754 RTVEC_ELT (v, i) = x;
1756 return gen_rtx_CONST_VECTOR (mode, v);
1759 /* The order of these tests is critical so that, for example, we don't
1760 check the wrong mode (input vs. output) for a conversion operation,
1761 such as FIX. At some point, this should be simplified. */
1763 if (code == FLOAT && CONST_SCALAR_INT_P (op))
1765 REAL_VALUE_TYPE d;
1767 if (op_mode == VOIDmode)
1769 /* CONST_INTs have VOIDmode as the mode. We assume that all
1770 the bits of the constant are significant, though this is
1771 a dangerous assumption as many times CONST_INTs are
1772 created and used with garbage in the bits outside of the
1773 precision of the implied mode of the const_int. */
1774 op_mode = MAX_MODE_INT;
1777 real_from_integer (&d, mode, rtx_mode_t (op, op_mode), SIGNED);
1779 /* Avoid the folding if flag_signaling_nans is on and
1780 operand is a signaling NaN. */
1781 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1782 return 0;
1784 d = real_value_truncate (mode, d);
1785 return const_double_from_real_value (d, mode);
1787 else if (code == UNSIGNED_FLOAT && CONST_SCALAR_INT_P (op))
1789 REAL_VALUE_TYPE d;
1791 if (op_mode == VOIDmode)
1793 /* CONST_INTs have VOIDmode as their mode. We assume that all
1794 the bits of the constant are significant, though this is
1795 a dangerous assumption, as CONST_INTs are often
1796 created and used with garbage in the bits outside of the
1797 precision of the implied mode of the const_int. */
1798 op_mode = MAX_MODE_INT;
1801 real_from_integer (&d, mode, rtx_mode_t (op, op_mode), UNSIGNED);
1803 /* Avoid the folding if flag_signaling_nans is on and
1804 operand is a signaling NaN. */
1805 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1806 return 0;
1808 d = real_value_truncate (mode, d);
1809 return const_double_from_real_value (d, mode);
1812 if (CONST_SCALAR_INT_P (op) && is_a <scalar_int_mode> (mode, &result_mode))
1814 unsigned int width = GET_MODE_PRECISION (result_mode);
1815 wide_int result;
1816 scalar_int_mode imode = (op_mode == VOIDmode
1817 ? result_mode
1818 : as_a <scalar_int_mode> (op_mode));
1819 rtx_mode_t op0 = rtx_mode_t (op, imode);
1820 int int_value;
1822 #if TARGET_SUPPORTS_WIDE_INT == 0
1823 /* This assert keeps the simplification from producing a result
1824 that cannot be represented in a CONST_DOUBLE, but a lot of
1825 upstream callers expect that this function never fails to
1826 simplify something, so if you added this to the test
1827 above, the code would die later anyway. If this assert
1828 happens, you just need to make the port support wide int. */
1829 gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT);
1830 #endif
1832 switch (code)
1834 case NOT:
1835 result = wi::bit_not (op0);
1836 break;
1838 case NEG:
1839 result = wi::neg (op0);
1840 break;
1842 case ABS:
1843 result = wi::abs (op0);
1844 break;
1846 case FFS:
1847 result = wi::shwi (wi::ffs (op0), result_mode);
1848 break;
1850 case CLZ:
1851 if (wi::ne_p (op0, 0))
1852 int_value = wi::clz (op0);
1853 else if (! CLZ_DEFINED_VALUE_AT_ZERO (imode, int_value))
1854 int_value = GET_MODE_PRECISION (imode);
1855 result = wi::shwi (int_value, result_mode);
1856 break;
1858 case CLRSB:
1859 result = wi::shwi (wi::clrsb (op0), result_mode);
1860 break;
1862 case CTZ:
1863 if (wi::ne_p (op0, 0))
1864 int_value = wi::ctz (op0);
1865 else if (! CTZ_DEFINED_VALUE_AT_ZERO (imode, int_value))
1866 int_value = GET_MODE_PRECISION (imode);
1867 result = wi::shwi (int_value, result_mode);
1868 break;
1870 case POPCOUNT:
1871 result = wi::shwi (wi::popcount (op0), result_mode);
1872 break;
1874 case PARITY:
1875 result = wi::shwi (wi::parity (op0), result_mode);
1876 break;
1878 case BSWAP:
1879 result = wide_int (op0).bswap ();
1880 break;
1882 case TRUNCATE:
1883 case ZERO_EXTEND:
1884 result = wide_int::from (op0, width, UNSIGNED);
1885 break;
1887 case SIGN_EXTEND:
1888 result = wide_int::from (op0, width, SIGNED);
1889 break;
1891 case SQRT:
1892 default:
1893 return 0;
1896 return immed_wide_int_const (result, result_mode);
1899 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1900 && SCALAR_FLOAT_MODE_P (mode)
1901 && SCALAR_FLOAT_MODE_P (GET_MODE (op)))
1903 REAL_VALUE_TYPE d = *CONST_DOUBLE_REAL_VALUE (op);
1904 switch (code)
1906 case SQRT:
1907 return 0;
1908 case ABS:
1909 d = real_value_abs (&d);
1910 break;
1911 case NEG:
1912 d = real_value_negate (&d);
1913 break;
1914 case FLOAT_TRUNCATE:
1915 /* Don't perform the operation if flag_signaling_nans is on
1916 and the operand is a signaling NaN. */
1917 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1918 return NULL_RTX;
1919 d = real_value_truncate (mode, d);
1920 break;
1921 case FLOAT_EXTEND:
1922 /* Don't perform the operation if flag_signaling_nans is on
1923 and the operand is a signaling NaN. */
1924 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1925 return NULL_RTX;
1926 /* All this does is change the mode, unless changing
1927 mode class. */
1928 if (GET_MODE_CLASS (mode) != GET_MODE_CLASS (GET_MODE (op)))
1929 real_convert (&d, mode, &d);
1930 break;
1931 case FIX:
1932 /* Don't perform the operation if flag_signaling_nans is on
1933 and the operand is a signaling NaN. */
1934 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1935 return NULL_RTX;
1936 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
1937 break;
1938 case NOT:
1940 long tmp[4];
1941 int i;
1943 real_to_target (tmp, &d, GET_MODE (op));
1944 for (i = 0; i < 4; i++)
1945 tmp[i] = ~tmp[i];
1946 real_from_target (&d, tmp, mode);
1947 break;
1949 default:
1950 gcc_unreachable ();
1952 return const_double_from_real_value (d, mode);
1954 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1955 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1956 && is_int_mode (mode, &result_mode))
1958 unsigned int width = GET_MODE_PRECISION (result_mode);
1959 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1960 operators are intentionally left unspecified (to ease implementation
1961 by target backends), for consistency, this routine implements the
1962 same semantics for constant folding as used by the middle-end. */
1964 /* This was formerly used only for non-IEEE float.
1965 eggert@twinsun.com says it is safe for IEEE also. */
1966 REAL_VALUE_TYPE t;
1967 const REAL_VALUE_TYPE *x = CONST_DOUBLE_REAL_VALUE (op);
1968 wide_int wmax, wmin;
1969 /* This is part of the ABI of real_to_integer, but we check
1970 things before making this call. */
1971 bool fail;
1973 switch (code)
1975 case FIX:
1976 if (REAL_VALUE_ISNAN (*x))
1977 return const0_rtx;
1979 /* Test against the signed upper bound. */
1980 wmax = wi::max_value (width, SIGNED);
1981 real_from_integer (&t, VOIDmode, wmax, SIGNED);
1982 if (real_less (&t, x))
1983 return immed_wide_int_const (wmax, mode);
1985 /* Test against the signed lower bound. */
1986 wmin = wi::min_value (width, SIGNED);
1987 real_from_integer (&t, VOIDmode, wmin, SIGNED);
1988 if (real_less (x, &t))
1989 return immed_wide_int_const (wmin, mode);
1991 return immed_wide_int_const (real_to_integer (x, &fail, width),
1992 mode);
1994 case UNSIGNED_FIX:
1995 if (REAL_VALUE_ISNAN (*x) || REAL_VALUE_NEGATIVE (*x))
1996 return const0_rtx;
1998 /* Test against the unsigned upper bound. */
1999 wmax = wi::max_value (width, UNSIGNED);
2000 real_from_integer (&t, VOIDmode, wmax, UNSIGNED);
2001 if (real_less (&t, x))
2002 return immed_wide_int_const (wmax, mode);
2004 return immed_wide_int_const (real_to_integer (x, &fail, width),
2005 mode);
2007 default:
2008 gcc_unreachable ();
2012 return NULL_RTX;
2015 /* Subroutine of simplify_binary_operation to simplify a binary operation
2016 CODE that can commute with byte swapping, with result mode MODE and
2017 operating on OP0 and OP1. CODE is currently one of AND, IOR or XOR.
2018 Return zero if no simplification or canonicalization is possible. */
2020 static rtx
2021 simplify_byte_swapping_operation (enum rtx_code code, machine_mode mode,
2022 rtx op0, rtx op1)
2024 rtx tem;
2026 /* (op (bswap x) C1) -> (bswap (op x C2)) with C2 swapped. */
2027 if (GET_CODE (op0) == BSWAP && CONST_SCALAR_INT_P (op1))
2029 tem = simplify_gen_binary (code, mode, XEXP (op0, 0),
2030 simplify_gen_unary (BSWAP, mode, op1, mode));
2031 return simplify_gen_unary (BSWAP, mode, tem, mode);
2034 /* (op (bswap x) (bswap y)) -> (bswap (op x y)). */
2035 if (GET_CODE (op0) == BSWAP && GET_CODE (op1) == BSWAP)
2037 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), XEXP (op1, 0));
2038 return simplify_gen_unary (BSWAP, mode, tem, mode);
2041 return NULL_RTX;
2044 /* Subroutine of simplify_binary_operation to simplify a commutative,
2045 associative binary operation CODE with result mode MODE, operating
2046 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
2047 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
2048 canonicalization is possible. */
2050 static rtx
2051 simplify_associative_operation (enum rtx_code code, machine_mode mode,
2052 rtx op0, rtx op1)
2054 rtx tem;
2056 /* Linearize the operator to the left. */
2057 if (GET_CODE (op1) == code)
2059 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)". */
2060 if (GET_CODE (op0) == code)
2062 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
2063 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
2066 /* "a op (b op c)" becomes "(b op c) op a". */
2067 if (! swap_commutative_operands_p (op1, op0))
2068 return simplify_gen_binary (code, mode, op1, op0);
2070 std::swap (op0, op1);
2073 if (GET_CODE (op0) == code)
2075 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
2076 if (swap_commutative_operands_p (XEXP (op0, 1), op1))
2078 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
2079 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
2082 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
2083 tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
2084 if (tem != 0)
2085 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
2087 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
2088 tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
2089 if (tem != 0)
2090 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
2093 return 0;
2097 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
2098 and OP1. Return 0 if no simplification is possible.
2100 Don't use this for relational operations such as EQ or LT.
2101 Use simplify_relational_operation instead. */
2103 simplify_binary_operation (enum rtx_code code, machine_mode mode,
2104 rtx op0, rtx op1)
2106 rtx trueop0, trueop1;
2107 rtx tem;
2109 /* Relational operations don't work here. We must know the mode
2110 of the operands in order to do the comparison correctly.
2111 Assuming a full word can give incorrect results.
2112 Consider comparing 128 with -128 in QImode. */
2113 gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
2114 gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
2116 /* Make sure the constant is second. */
2117 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
2118 && swap_commutative_operands_p (op0, op1))
2119 std::swap (op0, op1);
2121 trueop0 = avoid_constant_pool_reference (op0);
2122 trueop1 = avoid_constant_pool_reference (op1);
2124 tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
2125 if (tem)
2126 return tem;
2127 tem = simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
2129 if (tem)
2130 return tem;
2132 /* If the above steps did not result in a simplification and op0 or op1
2133 were constant pool references, use the referenced constants directly. */
2134 if (trueop0 != op0 || trueop1 != op1)
2135 return simplify_gen_binary (code, mode, trueop0, trueop1);
2137 return NULL_RTX;
2140 /* Subroutine of simplify_binary_operation. Simplify a binary operation
2141 CODE with result mode MODE, operating on OP0 and OP1. If OP0 and/or
2142 OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
2143 actual constants. */
2145 static rtx
2146 simplify_binary_operation_1 (enum rtx_code code, machine_mode mode,
2147 rtx op0, rtx op1, rtx trueop0, rtx trueop1)
2149 rtx tem, reversed, opleft, opright;
2150 HOST_WIDE_INT val;
2151 unsigned int width = GET_MODE_PRECISION (mode);
2152 scalar_int_mode int_mode, inner_mode;
2154 /* Even if we can't compute a constant result,
2155 there are some cases worth simplifying. */
2157 switch (code)
2159 case PLUS:
2160 /* Maybe simplify x + 0 to x. The two expressions are equivalent
2161 when x is NaN, infinite, or finite and nonzero. They aren't
2162 when x is -0 and the rounding mode is not towards -infinity,
2163 since (-0) + 0 is then 0. */
2164 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
2165 return op0;
2167 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
2168 transformations are safe even for IEEE. */
2169 if (GET_CODE (op0) == NEG)
2170 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
2171 else if (GET_CODE (op1) == NEG)
2172 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
2174 /* (~a) + 1 -> -a */
2175 if (INTEGRAL_MODE_P (mode)
2176 && GET_CODE (op0) == NOT
2177 && trueop1 == const1_rtx)
2178 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
2180 /* Handle both-operands-constant cases. We can only add
2181 CONST_INTs to constants since the sum of relocatable symbols
2182 can't be handled by most assemblers. Don't add CONST_INT
2183 to CONST_INT since overflow won't be computed properly if wider
2184 than HOST_BITS_PER_WIDE_INT. */
2186 if ((GET_CODE (op0) == CONST
2187 || GET_CODE (op0) == SYMBOL_REF
2188 || GET_CODE (op0) == LABEL_REF)
2189 && CONST_INT_P (op1))
2190 return plus_constant (mode, op0, INTVAL (op1));
2191 else if ((GET_CODE (op1) == CONST
2192 || GET_CODE (op1) == SYMBOL_REF
2193 || GET_CODE (op1) == LABEL_REF)
2194 && CONST_INT_P (op0))
2195 return plus_constant (mode, op1, INTVAL (op0));
2197 /* See if this is something like X * C - X or vice versa or
2198 if the multiplication is written as a shift. If so, we can
2199 distribute and make a new multiply, shift, or maybe just
2200 have X (if C is 2 in the example above). But don't make
2201 something more expensive than we had before. */
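/* For instance, (plus (mult x (const_int 3)) x) can become
   (mult x (const_int 4)), and (plus (ashift x (const_int 2)) x)
   can become (mult x (const_int 5)), as long as the new form is
   no more expensive than the original. */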
2203 if (is_a <scalar_int_mode> (mode, &int_mode))
2205 rtx lhs = op0, rhs = op1;
2207 wide_int coeff0 = wi::one (GET_MODE_PRECISION (int_mode));
2208 wide_int coeff1 = wi::one (GET_MODE_PRECISION (int_mode));
2210 if (GET_CODE (lhs) == NEG)
2212 coeff0 = wi::minus_one (GET_MODE_PRECISION (int_mode));
2213 lhs = XEXP (lhs, 0);
2215 else if (GET_CODE (lhs) == MULT
2216 && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
2218 coeff0 = rtx_mode_t (XEXP (lhs, 1), int_mode);
2219 lhs = XEXP (lhs, 0);
2221 else if (GET_CODE (lhs) == ASHIFT
2222 && CONST_INT_P (XEXP (lhs, 1))
2223 && INTVAL (XEXP (lhs, 1)) >= 0
2224 && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (int_mode))
2226 coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
2227 GET_MODE_PRECISION (int_mode));
2228 lhs = XEXP (lhs, 0);
2231 if (GET_CODE (rhs) == NEG)
2233 coeff1 = wi::minus_one (GET_MODE_PRECISION (int_mode));
2234 rhs = XEXP (rhs, 0);
2236 else if (GET_CODE (rhs) == MULT
2237 && CONST_INT_P (XEXP (rhs, 1)))
2239 coeff1 = rtx_mode_t (XEXP (rhs, 1), int_mode);
2240 rhs = XEXP (rhs, 0);
2242 else if (GET_CODE (rhs) == ASHIFT
2243 && CONST_INT_P (XEXP (rhs, 1))
2244 && INTVAL (XEXP (rhs, 1)) >= 0
2245 && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (int_mode))
2247 coeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
2248 GET_MODE_PRECISION (int_mode));
2249 rhs = XEXP (rhs, 0);
2252 if (rtx_equal_p (lhs, rhs))
2254 rtx orig = gen_rtx_PLUS (int_mode, op0, op1);
2255 rtx coeff;
2256 bool speed = optimize_function_for_speed_p (cfun);
2258 coeff = immed_wide_int_const (coeff0 + coeff1, int_mode);
2260 tem = simplify_gen_binary (MULT, int_mode, lhs, coeff);
2261 return (set_src_cost (tem, int_mode, speed)
2262 <= set_src_cost (orig, int_mode, speed) ? tem : 0);
2266 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
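/* For instance, in QImode the sign bit is (const_int -128), so
   (plus (xor x (const_int 3)) (const_int -128)) becomes
   (xor x (const_int -125)): adding the sign bit is the same as
   xoring it in, since any carry falls outside the mode. */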
2267 if (CONST_SCALAR_INT_P (op1)
2268 && GET_CODE (op0) == XOR
2269 && CONST_SCALAR_INT_P (XEXP (op0, 1))
2270 && mode_signbit_p (mode, op1))
2271 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2272 simplify_gen_binary (XOR, mode, op1,
2273 XEXP (op0, 1)));
2275 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
2276 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2277 && GET_CODE (op0) == MULT
2278 && GET_CODE (XEXP (op0, 0)) == NEG)
2280 rtx in1, in2;
2282 in1 = XEXP (XEXP (op0, 0), 0);
2283 in2 = XEXP (op0, 1);
2284 return simplify_gen_binary (MINUS, mode, op1,
2285 simplify_gen_binary (MULT, mode,
2286 in1, in2));
2289 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
2290 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
2291 is 1. */
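/* For instance, with STORE_FLAG_VALUE == 1,
   (plus (eq a b) (const_int -1)) becomes (neg (ne a b)):
   both are 0 when a equals b and -1 otherwise. */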
2292 if (COMPARISON_P (op0)
2293 && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
2294 || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
2295 && (reversed = reversed_comparison (op0, mode)))
2296 return
2297 simplify_gen_unary (NEG, mode, reversed, mode);
2299 /* If one of the operands is a PLUS or a MINUS, see if we can
2300 simplify this by the associative law.
2301 Don't use the associative law for floating point.
2302 The inaccuracy makes it nonassociative,
2303 and subtle programs can break if operations are associated. */
2305 if (INTEGRAL_MODE_P (mode)
2306 && (plus_minus_operand_p (op0)
2307 || plus_minus_operand_p (op1))
2308 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2309 return tem;
2311 /* Reassociate floating point addition only when the user
2312 specifies associative math operations. */
2313 if (FLOAT_MODE_P (mode)
2314 && flag_associative_math)
2316 tem = simplify_associative_operation (code, mode, op0, op1);
2317 if (tem)
2318 return tem;
2320 break;
2322 case COMPARE:
2323 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
2324 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
2325 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
2326 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
2328 rtx xop00 = XEXP (op0, 0);
2329 rtx xop10 = XEXP (op1, 0);
2331 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
2332 return xop00;
2334 if (REG_P (xop00) && REG_P (xop10)
2335 && REGNO (xop00) == REGNO (xop10)
2336 && GET_MODE (xop00) == mode
2337 && GET_MODE (xop10) == mode
2338 && GET_MODE_CLASS (mode) == MODE_CC)
2339 return xop00;
2341 break;
2343 case MINUS:
2344 /* We can't assume x-x is 0 even with non-IEEE floating point,
2345 but since it is zero except in very strange circumstances, we
2346 will treat it as zero with -ffinite-math-only. */
2347 if (rtx_equal_p (trueop0, trueop1)
2348 && ! side_effects_p (op0)
2349 && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
2350 return CONST0_RTX (mode);
2352 /* Change subtraction from zero into negation. (0 - x) is the
2353 same as -x when x is NaN, infinite, or finite and nonzero.
2354 But if the mode has signed zeros, and does not round towards
2355 -infinity, then 0 - 0 is 0, not -0. */
2356 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
2357 return simplify_gen_unary (NEG, mode, op1, mode);
2359 /* (-1 - a) is ~a, unless the expression contains symbolic
2360 constants, in which case not retaining additions and
2361 subtractions could cause invalid assembly to be produced. */
2362 if (trueop0 == constm1_rtx
2363 && !contains_symbolic_reference_p (op1))
2364 return simplify_gen_unary (NOT, mode, op1, mode);
2366 /* Subtracting 0 has no effect unless the mode has signed zeros
2367 and supports rounding towards -infinity. In such a case,
2368 0 - 0 is -0. */
2369 if (!(HONOR_SIGNED_ZEROS (mode)
2370 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
2371 && trueop1 == CONST0_RTX (mode))
2372 return op0;
2374 /* See if this is something like X * C - X or vice versa or
2375 if the multiplication is written as a shift. If so, we can
2376 distribute and make a new multiply, shift, or maybe just
2377 have X (if C is 2 in the example above). But don't make
2378 something more expensive than we had before. */
2380 if (is_a <scalar_int_mode> (mode, &int_mode))
2382 rtx lhs = op0, rhs = op1;
2384 wide_int coeff0 = wi::one (GET_MODE_PRECISION (int_mode));
2385 wide_int negcoeff1 = wi::minus_one (GET_MODE_PRECISION (int_mode));
2387 if (GET_CODE (lhs) == NEG)
2389 coeff0 = wi::minus_one (GET_MODE_PRECISION (int_mode));
2390 lhs = XEXP (lhs, 0);
2392 else if (GET_CODE (lhs) == MULT
2393 && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
2395 coeff0 = rtx_mode_t (XEXP (lhs, 1), int_mode);
2396 lhs = XEXP (lhs, 0);
2398 else if (GET_CODE (lhs) == ASHIFT
2399 && CONST_INT_P (XEXP (lhs, 1))
2400 && INTVAL (XEXP (lhs, 1)) >= 0
2401 && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (int_mode))
2403 coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
2404 GET_MODE_PRECISION (int_mode));
2405 lhs = XEXP (lhs, 0);
2408 if (GET_CODE (rhs) == NEG)
2410 negcoeff1 = wi::one (GET_MODE_PRECISION (int_mode));
2411 rhs = XEXP (rhs, 0);
2413 else if (GET_CODE (rhs) == MULT
2414 && CONST_INT_P (XEXP (rhs, 1)))
2416 negcoeff1 = wi::neg (rtx_mode_t (XEXP (rhs, 1), int_mode));
2417 rhs = XEXP (rhs, 0);
2419 else if (GET_CODE (rhs) == ASHIFT
2420 && CONST_INT_P (XEXP (rhs, 1))
2421 && INTVAL (XEXP (rhs, 1)) >= 0
2422 && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (int_mode))
2424 negcoeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
2425 GET_MODE_PRECISION (int_mode));
2426 negcoeff1 = -negcoeff1;
2427 rhs = XEXP (rhs, 0);
2430 if (rtx_equal_p (lhs, rhs))
2432 rtx orig = gen_rtx_MINUS (int_mode, op0, op1);
2433 rtx coeff;
2434 bool speed = optimize_function_for_speed_p (cfun);
2436 coeff = immed_wide_int_const (coeff0 + negcoeff1, int_mode);
2438 tem = simplify_gen_binary (MULT, int_mode, lhs, coeff);
2439 return (set_src_cost (tem, int_mode, speed)
2440 <= set_src_cost (orig, int_mode, speed) ? tem : 0);
2444 /* (a - (-b)) -> (a + b). True even for IEEE. */
2445 if (GET_CODE (op1) == NEG)
2446 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
2448 /* (-x - c) may be simplified as (-c - x). */
2449 if (GET_CODE (op0) == NEG
2450 && (CONST_SCALAR_INT_P (op1) || CONST_DOUBLE_AS_FLOAT_P (op1)))
2452 tem = simplify_unary_operation (NEG, mode, op1, mode);
2453 if (tem)
2454 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
2457 /* Don't let a relocatable value get a negative coeff. */
2458 if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode)
2459 return simplify_gen_binary (PLUS, mode,
2460 op0,
2461 neg_const_int (mode, op1));
2463 /* (x - (x & y)) -> (x & ~y) */
2464 if (INTEGRAL_MODE_P (mode) && GET_CODE (op1) == AND)
2466 if (rtx_equal_p (op0, XEXP (op1, 0)))
2468 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
2469 GET_MODE (XEXP (op1, 1)));
2470 return simplify_gen_binary (AND, mode, op0, tem);
2472 if (rtx_equal_p (op0, XEXP (op1, 1)))
2474 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
2475 GET_MODE (XEXP (op1, 0)));
2476 return simplify_gen_binary (AND, mode, op0, tem);
2480 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
2481 by reversing the comparison code if valid. */
2482 if (STORE_FLAG_VALUE == 1
2483 && trueop0 == const1_rtx
2484 && COMPARISON_P (op1)
2485 && (reversed = reversed_comparison (op1, mode)))
2486 return reversed;
2488 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
2489 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2490 && GET_CODE (op1) == MULT
2491 && GET_CODE (XEXP (op1, 0)) == NEG)
2493 rtx in1, in2;
2495 in1 = XEXP (XEXP (op1, 0), 0);
2496 in2 = XEXP (op1, 1);
2497 return simplify_gen_binary (PLUS, mode,
2498 simplify_gen_binary (MULT, mode,
2499 in1, in2),
2500 op0);
2503 /* Canonicalize (minus (neg A) (mult B C)) to
2504 (minus (mult (neg B) C) A). */
2505 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2506 && GET_CODE (op1) == MULT
2507 && GET_CODE (op0) == NEG)
2509 rtx in1, in2;
2511 in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
2512 in2 = XEXP (op1, 1);
2513 return simplify_gen_binary (MINUS, mode,
2514 simplify_gen_binary (MULT, mode,
2515 in1, in2),
2516 XEXP (op0, 0));
2519 /* If one of the operands is a PLUS or a MINUS, see if we can
2520 simplify this by the associative law. This will, for example,
2521 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
2522 Don't use the associative law for floating point.
2523 The inaccuracy makes it nonassociative,
2524 and subtle programs can break if operations are associated. */
2526 if (INTEGRAL_MODE_P (mode)
2527 && (plus_minus_operand_p (op0)
2528 || plus_minus_operand_p (op1))
2529 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2530 return tem;
2531 break;
2533 case MULT:
2534 if (trueop1 == constm1_rtx)
2535 return simplify_gen_unary (NEG, mode, op0, mode);
2537 if (GET_CODE (op0) == NEG)
2539 rtx temp = simplify_unary_operation (NEG, mode, op1, mode);
2540 /* If op1 is a MULT as well and simplify_unary_operation
2541 just moved the NEG to the second operand, simplify_gen_binary
2542 below could, through simplify_associative_operation, move
2543 the NEG around again and recurse endlessly. */
2544 if (temp
2545 && GET_CODE (op1) == MULT
2546 && GET_CODE (temp) == MULT
2547 && XEXP (op1, 0) == XEXP (temp, 0)
2548 && GET_CODE (XEXP (temp, 1)) == NEG
2549 && XEXP (op1, 1) == XEXP (XEXP (temp, 1), 0))
2550 temp = NULL_RTX;
2551 if (temp)
2552 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), temp);
2554 if (GET_CODE (op1) == NEG)
2556 rtx temp = simplify_unary_operation (NEG, mode, op0, mode);
2557 /* If op0 is a MULT as well and simplify_unary_operation
2558 just moved the NEG to the second operand, simplify_gen_binary
2559 below could, through simplify_associative_operation, move
2560 the NEG around again and recurse endlessly. */
2561 if (temp
2562 && GET_CODE (op0) == MULT
2563 && GET_CODE (temp) == MULT
2564 && XEXP (op0, 0) == XEXP (temp, 0)
2565 && GET_CODE (XEXP (temp, 1)) == NEG
2566 && XEXP (op0, 1) == XEXP (XEXP (temp, 1), 0))
2567 temp = NULL_RTX;
2568 if (temp)
2569 return simplify_gen_binary (MULT, mode, temp, XEXP (op1, 0));
2572 /* Maybe simplify x * 0 to 0. The reduction is not valid if
2573 x is NaN, since x * 0 is then also NaN. Nor is it valid
2574 when the mode has signed zeros, since multiplying a negative
2575 number by 0 will give -0, not 0. */
2576 if (!HONOR_NANS (mode)
2577 && !HONOR_SIGNED_ZEROS (mode)
2578 && trueop1 == CONST0_RTX (mode)
2579 && ! side_effects_p (op0))
2580 return op1;
2582 /* In IEEE floating point, x*1 is not equivalent to x for
2583 signalling NaNs. */
2584 if (!HONOR_SNANS (mode)
2585 && trueop1 == CONST1_RTX (mode))
2586 return op0;
2588 /* Convert multiply by constant power of two into shift. */
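/* E.g. (mult:SI x (const_int 8)) becomes (ashift:SI x (const_int 3)). */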
2589 if (CONST_SCALAR_INT_P (trueop1))
2591 val = wi::exact_log2 (rtx_mode_t (trueop1, mode));
2592 if (val >= 0)
2593 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
2596 /* x*2 is x+x and x*(-1) is -x */
2597 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
2598 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
2599 && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
2600 && GET_MODE (op0) == mode)
2602 const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);
2604 if (real_equal (d1, &dconst2))
2605 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
2607 if (!HONOR_SNANS (mode)
2608 && real_equal (d1, &dconstm1))
2609 return simplify_gen_unary (NEG, mode, op0, mode);
2612 /* Optimize -x * -x as x * x. */
2613 if (FLOAT_MODE_P (mode)
2614 && GET_CODE (op0) == NEG
2615 && GET_CODE (op1) == NEG
2616 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2617 && !side_effects_p (XEXP (op0, 0)))
2618 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2620 /* Likewise, optimize abs(x) * abs(x) as x * x. */
2621 if (SCALAR_FLOAT_MODE_P (mode)
2622 && GET_CODE (op0) == ABS
2623 && GET_CODE (op1) == ABS
2624 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2625 && !side_effects_p (XEXP (op0, 0)))
2626 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2628 /* Reassociate multiplication, but for floating point MULTs
2629 only when the user specifies unsafe math optimizations. */
2630 if (! FLOAT_MODE_P (mode)
2631 || flag_unsafe_math_optimizations)
2633 tem = simplify_associative_operation (code, mode, op0, op1);
2634 if (tem)
2635 return tem;
2637 break;
2639 case IOR:
2640 if (trueop1 == CONST0_RTX (mode))
2641 return op0;
2642 if (INTEGRAL_MODE_P (mode)
2643 && trueop1 == CONSTM1_RTX (mode)
2644 && !side_effects_p (op0))
2645 return op1;
2646 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2647 return op0;
2648 /* A | (~A) -> -1 */
2649 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2650 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2651 && ! side_effects_p (op0)
2652 && SCALAR_INT_MODE_P (mode))
2653 return constm1_rtx;
2655 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
2656 if (CONST_INT_P (op1)
2657 && HWI_COMPUTABLE_MODE_P (mode)
2658 && (nonzero_bits (op0, mode) & ~UINTVAL (op1)) == 0
2659 && !side_effects_p (op0))
2660 return op1;
2662 /* Canonicalize (X & C1) | C2. */
2663 if (GET_CODE (op0) == AND
2664 && CONST_INT_P (trueop1)
2665 && CONST_INT_P (XEXP (op0, 1)))
2667 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
2668 HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
2669 HOST_WIDE_INT c2 = INTVAL (trueop1);
2671 /* If (C1&C2) == C1, then (X&C1)|C2 becomes X. */
2672 if ((c1 & c2) == c1
2673 && !side_effects_p (XEXP (op0, 0)))
2674 return trueop1;
2676 /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
2677 if (((c1|c2) & mask) == mask)
2678 return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);
2680 /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2. */
2681 if (((c1 & ~c2) & mask) != (c1 & mask))
2683 tem = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2684 gen_int_mode (c1 & ~c2, mode));
2685 return simplify_gen_binary (IOR, mode, tem, op1);
2689 /* Convert (A & B) | A to A. */
2690 if (GET_CODE (op0) == AND
2691 && (rtx_equal_p (XEXP (op0, 0), op1)
2692 || rtx_equal_p (XEXP (op0, 1), op1))
2693 && ! side_effects_p (XEXP (op0, 0))
2694 && ! side_effects_p (XEXP (op0, 1)))
2695 return op1;
2697 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2698 mode size to (rotate A CX). */
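/* E.g. in SImode, (ior (ashift a (const_int 24)) (lshiftrt a (const_int 8)))
   becomes (rotate a (const_int 24)), since 24 + 8 equals the precision. */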
2700 if (GET_CODE (op1) == ASHIFT
2701 || GET_CODE (op1) == SUBREG)
2703 opleft = op1;
2704 opright = op0;
2706 else
2708 opright = op1;
2709 opleft = op0;
2712 if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
2713 && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
2714 && CONST_INT_P (XEXP (opleft, 1))
2715 && CONST_INT_P (XEXP (opright, 1))
2716 && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
2717 == GET_MODE_PRECISION (mode)))
2718 return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
2720 /* Same, but for ashift that has been "simplified" to a wider mode
2721 by simplify_shift_const. */
2723 if (GET_CODE (opleft) == SUBREG
2724 && is_a <scalar_int_mode> (mode, &int_mode)
2725 && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (opleft)),
2726 &inner_mode)
2727 && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
2728 && GET_CODE (opright) == LSHIFTRT
2729 && GET_CODE (XEXP (opright, 0)) == SUBREG
2730 && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
2731 && GET_MODE_SIZE (int_mode) < GET_MODE_SIZE (inner_mode)
2732 && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
2733 SUBREG_REG (XEXP (opright, 0)))
2734 && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
2735 && CONST_INT_P (XEXP (opright, 1))
2736 && (INTVAL (XEXP (SUBREG_REG (opleft), 1))
2737 + INTVAL (XEXP (opright, 1))
2738 == GET_MODE_PRECISION (int_mode)))
2739 return gen_rtx_ROTATE (int_mode, XEXP (opright, 0),
2740 XEXP (SUBREG_REG (opleft), 1));
2742 /* If we have (ior (and X C1) C2), simplify this by making
2743 C1 as small as possible if C1 actually changes. */
2744 if (CONST_INT_P (op1)
2745 && (HWI_COMPUTABLE_MODE_P (mode)
2746 || INTVAL (op1) > 0)
2747 && GET_CODE (op0) == AND
2748 && CONST_INT_P (XEXP (op0, 1))
2749 && CONST_INT_P (op1)
2750 && (UINTVAL (XEXP (op0, 1)) & UINTVAL (op1)) != 0)
2752 rtx tmp = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2753 gen_int_mode (UINTVAL (XEXP (op0, 1))
2754 & ~UINTVAL (op1),
2755 mode));
2756 return simplify_gen_binary (IOR, mode, tmp, op1);
2759 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2760 a (sign_extend (plus ...)). Then check if OP1 is a CONST_INT and
2761 the PLUS does not affect any of the bits in OP1: then we can do
2762 the IOR as a PLUS and we can associate. This is valid if OP1
2763 can be safely shifted left C bits. */
2764 if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
2765 && GET_CODE (XEXP (op0, 0)) == PLUS
2766 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
2767 && CONST_INT_P (XEXP (op0, 1))
2768 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
2770 int count = INTVAL (XEXP (op0, 1));
2771 HOST_WIDE_INT mask = UINTVAL (trueop1) << count;
2773 if (mask >> count == INTVAL (trueop1)
2774 && trunc_int_for_mode (mask, mode) == mask
2775 && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
2776 return simplify_gen_binary (ASHIFTRT, mode,
2777 plus_constant (mode, XEXP (op0, 0),
2778 mask),
2779 XEXP (op0, 1));
2782 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
2783 if (tem)
2784 return tem;
2786 tem = simplify_associative_operation (code, mode, op0, op1);
2787 if (tem)
2788 return tem;
2789 break;
2791 case XOR:
2792 if (trueop1 == CONST0_RTX (mode))
2793 return op0;
2794 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2795 return simplify_gen_unary (NOT, mode, op0, mode);
2796 if (rtx_equal_p (trueop0, trueop1)
2797 && ! side_effects_p (op0)
2798 && GET_MODE_CLASS (mode) != MODE_CC)
2799 return CONST0_RTX (mode);
2801 /* Canonicalize XOR of the most significant bit to PLUS. */
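/* E.g. in QImode, (xor x (const_int -128)) is canonicalized as
   (plus x (const_int -128)); the two are equal because any carry
   out of the sign bit is lost. */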
2802 if (CONST_SCALAR_INT_P (op1)
2803 && mode_signbit_p (mode, op1))
2804 return simplify_gen_binary (PLUS, mode, op0, op1);
2805 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2806 if (CONST_SCALAR_INT_P (op1)
2807 && GET_CODE (op0) == PLUS
2808 && CONST_SCALAR_INT_P (XEXP (op0, 1))
2809 && mode_signbit_p (mode, XEXP (op0, 1)))
2810 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2811 simplify_gen_binary (XOR, mode, op1,
2812 XEXP (op0, 1)));
2814 /* If we are XORing two things that have no bits in common,
2815 convert them into an IOR. This helps to detect rotation encoded
2816 using those methods and possibly other simplifications. */
2818 if (HWI_COMPUTABLE_MODE_P (mode)
2819 && (nonzero_bits (op0, mode)
2820 & nonzero_bits (op1, mode)) == 0)
2821 return (simplify_gen_binary (IOR, mode, op0, op1));
2823 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2824 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2825 (NOT y). */
2827 int num_negated = 0;
2829 if (GET_CODE (op0) == NOT)
2830 num_negated++, op0 = XEXP (op0, 0);
2831 if (GET_CODE (op1) == NOT)
2832 num_negated++, op1 = XEXP (op1, 0);
2834 if (num_negated == 2)
2835 return simplify_gen_binary (XOR, mode, op0, op1);
2836 else if (num_negated == 1)
2837 return simplify_gen_unary (NOT, mode,
2838 simplify_gen_binary (XOR, mode, op0, op1),
2839 mode);
2842 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2843 correspond to a machine insn or result in further simplifications
2844 if B is a constant. */
2846 if (GET_CODE (op0) == AND
2847 && rtx_equal_p (XEXP (op0, 1), op1)
2848 && ! side_effects_p (op1))
2849 return simplify_gen_binary (AND, mode,
2850 simplify_gen_unary (NOT, mode,
2851 XEXP (op0, 0), mode),
2852 op1);
2854 else if (GET_CODE (op0) == AND
2855 && rtx_equal_p (XEXP (op0, 0), op1)
2856 && ! side_effects_p (op1))
2857 return simplify_gen_binary (AND, mode,
2858 simplify_gen_unary (NOT, mode,
2859 XEXP (op0, 1), mode),
2860 op1);
2862 /* Given (xor (ior (xor A B) C) D), where B, C and D are
2863 constants, simplify to (xor (ior A C) (B&~C)^D), canceling
2864 out bits inverted twice and not set by C. Similarly, given
2865 (xor (and (xor A B) C) D), simplify without inverting C in
2866 the xor operand: (xor (and A C) (B&C)^D). */
2868 else if ((GET_CODE (op0) == IOR || GET_CODE (op0) == AND)
2869 && GET_CODE (XEXP (op0, 0)) == XOR
2870 && CONST_INT_P (op1)
2871 && CONST_INT_P (XEXP (op0, 1))
2872 && CONST_INT_P (XEXP (XEXP (op0, 0), 1)))
2874 enum rtx_code op = GET_CODE (op0);
2875 rtx a = XEXP (XEXP (op0, 0), 0);
2876 rtx b = XEXP (XEXP (op0, 0), 1);
2877 rtx c = XEXP (op0, 1);
2878 rtx d = op1;
2879 HOST_WIDE_INT bval = INTVAL (b);
2880 HOST_WIDE_INT cval = INTVAL (c);
2881 HOST_WIDE_INT dval = INTVAL (d);
2882 HOST_WIDE_INT xcval;
2884 if (op == IOR)
2885 xcval = ~cval;
2886 else
2887 xcval = cval;
2889 return simplify_gen_binary (XOR, mode,
2890 simplify_gen_binary (op, mode, a, c),
2891 gen_int_mode ((bval & xcval) ^ dval,
2892 mode));
2895 /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
2896 we can transform like this:
2897 (A&B)^C == ~(A&B)&C | ~C&(A&B)
2898 == (~A|~B)&C | ~C&(A&B) * DeMorgan's Law
2899 == ~A&C | ~B&C | A&(~C&B) * Distribute and re-order
2900 Attempt a few simplifications when B and C are both constants. */
2901 if (GET_CODE (op0) == AND
2902 && CONST_INT_P (op1)
2903 && CONST_INT_P (XEXP (op0, 1)))
2905 rtx a = XEXP (op0, 0);
2906 rtx b = XEXP (op0, 1);
2907 rtx c = op1;
2908 HOST_WIDE_INT bval = INTVAL (b);
2909 HOST_WIDE_INT cval = INTVAL (c);
2911 /* Instead of computing ~A&C, we compute its negated value,
2912 ~(A|~C). If it yields -1, ~A&C is zero, so we can
2913 optimize for sure. If it does not simplify, we still try
2914 to compute ~A&C below, but since that always allocates
2915 RTL, we don't try that before committing to returning a
2916 simplified expression. */
2917 rtx n_na_c = simplify_binary_operation (IOR, mode, a,
2918 GEN_INT (~cval));
2920 if ((~cval & bval) == 0)
2922 rtx na_c = NULL_RTX;
2923 if (n_na_c)
2924 na_c = simplify_gen_unary (NOT, mode, n_na_c, mode);
2925 else
2927 /* If ~A does not simplify, don't bother: we don't
2928 want to simplify 2 operations into 3, and if na_c
2929 were to simplify with na, n_na_c would have
2930 simplified as well. */
2931 rtx na = simplify_unary_operation (NOT, mode, a, mode);
2932 if (na)
2933 na_c = simplify_gen_binary (AND, mode, na, c);
2936 /* Try to simplify ~A&C | ~B&C. */
2937 if (na_c != NULL_RTX)
2938 return simplify_gen_binary (IOR, mode, na_c,
2939 gen_int_mode (~bval & cval, mode));
2941 else
2943 /* If ~A&C is zero, simplify A&(~C&B) | ~B&C. */
2944 if (n_na_c == CONSTM1_RTX (mode))
2946 rtx a_nc_b = simplify_gen_binary (AND, mode, a,
2947 gen_int_mode (~cval & bval,
2948 mode));
2949 return simplify_gen_binary (IOR, mode, a_nc_b,
2950 gen_int_mode (~bval & cval,
2951 mode));
2956 /* If we have (xor (and (xor A B) C) A) with C a constant we can instead
2957 do (ior (and A ~C) (and B C)) which is a machine instruction on some
2958 machines, and also has shorter instruction path length. */
2959 if (GET_CODE (op0) == AND
2960 && GET_CODE (XEXP (op0, 0)) == XOR
2961 && CONST_INT_P (XEXP (op0, 1))
2962 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), trueop1))
2964 rtx a = trueop1;
2965 rtx b = XEXP (XEXP (op0, 0), 1);
2966 rtx c = XEXP (op0, 1);
2967 rtx nc = simplify_gen_unary (NOT, mode, c, mode);
2968 rtx a_nc = simplify_gen_binary (AND, mode, a, nc);
2969 rtx bc = simplify_gen_binary (AND, mode, b, c);
2970 return simplify_gen_binary (IOR, mode, a_nc, bc);
2972 /* Similarly, (xor (and (xor A B) C) B) as (ior (and A C) (and B ~C)) */
2973 else if (GET_CODE (op0) == AND
2974 && GET_CODE (XEXP (op0, 0)) == XOR
2975 && CONST_INT_P (XEXP (op0, 1))
2976 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), trueop1))
2978 rtx a = XEXP (XEXP (op0, 0), 0);
2979 rtx b = trueop1;
2980 rtx c = XEXP (op0, 1);
2981 rtx nc = simplify_gen_unary (NOT, mode, c, mode);
2982 rtx b_nc = simplify_gen_binary (AND, mode, b, nc);
2983 rtx ac = simplify_gen_binary (AND, mode, a, c);
2984 return simplify_gen_binary (IOR, mode, ac, b_nc);
2987 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2988 comparison if STORE_FLAG_VALUE is 1. */
2989 if (STORE_FLAG_VALUE == 1
2990 && trueop1 == const1_rtx
2991 && COMPARISON_P (op0)
2992 && (reversed = reversed_comparison (op0, mode)))
2993 return reversed;
2995 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2996 is (lt foo (const_int 0)), so we can perform the above
2997 simplification if STORE_FLAG_VALUE is 1. */
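/* E.g. in SImode with STORE_FLAG_VALUE == 1,
   (xor (lshiftrt y (const_int 31)) (const_int 1)) becomes
   (ge y (const_int 0)), since the shift isolates the sign bit. */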
2999 if (is_a <scalar_int_mode> (mode, &int_mode)
3000 && STORE_FLAG_VALUE == 1
3001 && trueop1 == const1_rtx
3002 && GET_CODE (op0) == LSHIFTRT
3003 && CONST_INT_P (XEXP (op0, 1))
3004 && INTVAL (XEXP (op0, 1)) == GET_MODE_PRECISION (int_mode) - 1)
3005 return gen_rtx_GE (int_mode, XEXP (op0, 0), const0_rtx);
3007 /* (xor (comparison foo bar) (const_int sign-bit))
3008 when STORE_FLAG_VALUE is the sign bit. */
3009 if (is_a <scalar_int_mode> (mode, &int_mode)
3010 && val_signbit_p (int_mode, STORE_FLAG_VALUE)
3011 && trueop1 == const_true_rtx
3012 && COMPARISON_P (op0)
3013 && (reversed = reversed_comparison (op0, int_mode)))
3014 return reversed;
3016 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
3017 if (tem)
3018 return tem;
3020 tem = simplify_associative_operation (code, mode, op0, op1);
3021 if (tem)
3022 return tem;
3023 break;
3025 case AND:
3026 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
3027 return trueop1;
3028 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
3029 return op0;
3030 if (HWI_COMPUTABLE_MODE_P (mode))
3032 HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
3033 HOST_WIDE_INT nzop1;
3034 if (CONST_INT_P (trueop1))
3036 HOST_WIDE_INT val1 = INTVAL (trueop1);
3037 /* If we are turning off bits already known off in OP0, we need
3038 not do an AND. */
3039 if ((nzop0 & ~val1) == 0)
3040 return op0;
3042 nzop1 = nonzero_bits (trueop1, mode);
3043 /* If we are clearing all the nonzero bits, the result is zero. */
3044 if ((nzop1 & nzop0) == 0
3045 && !side_effects_p (op0) && !side_effects_p (op1))
3046 return CONST0_RTX (mode);
3048 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
3049 && GET_MODE_CLASS (mode) != MODE_CC)
3050 return op0;
3051 /* A & (~A) -> 0 */
3052 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
3053 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
3054 && ! side_effects_p (op0)
3055 && GET_MODE_CLASS (mode) != MODE_CC)
3056 return CONST0_RTX (mode);
3058 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
3059 there are no nonzero bits of C outside of X's mode. */
3060 if ((GET_CODE (op0) == SIGN_EXTEND
3061 || GET_CODE (op0) == ZERO_EXTEND)
3062 && CONST_INT_P (trueop1)
3063 && HWI_COMPUTABLE_MODE_P (mode)
3064 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
3065 & UINTVAL (trueop1)) == 0)
3067 machine_mode imode = GET_MODE (XEXP (op0, 0));
3068 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
3069 gen_int_mode (INTVAL (trueop1),
3070 imode));
3071 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
3074 /* Transform (and (truncate X) C) into (truncate (and X C)). This way
3075 we might be able to further simplify the AND with X and potentially
3076 remove the truncation altogether. */
3077 if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
3079 rtx x = XEXP (op0, 0);
3080 machine_mode xmode = GET_MODE (x);
3081 tem = simplify_gen_binary (AND, xmode, x,
3082 gen_int_mode (INTVAL (trueop1), xmode));
3083 return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
3086 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
3087 if (GET_CODE (op0) == IOR
3088 && CONST_INT_P (trueop1)
3089 && CONST_INT_P (XEXP (op0, 1)))
3091 HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
3092 return simplify_gen_binary (IOR, mode,
3093 simplify_gen_binary (AND, mode,
3094 XEXP (op0, 0), op1),
3095 gen_int_mode (tmp, mode));
3098 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
3099 insn (and may simplify more). */
3100 if (GET_CODE (op0) == XOR
3101 && rtx_equal_p (XEXP (op0, 0), op1)
3102 && ! side_effects_p (op1))
3103 return simplify_gen_binary (AND, mode,
3104 simplify_gen_unary (NOT, mode,
3105 XEXP (op0, 1), mode),
3106 op1);
3108 if (GET_CODE (op0) == XOR
3109 && rtx_equal_p (XEXP (op0, 1), op1)
3110 && ! side_effects_p (op1))
3111 return simplify_gen_binary (AND, mode,
3112 simplify_gen_unary (NOT, mode,
3113 XEXP (op0, 0), mode),
3114 op1);
3116 /* Similarly for (~(A ^ B)) & A. */
3117 if (GET_CODE (op0) == NOT
3118 && GET_CODE (XEXP (op0, 0)) == XOR
3119 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
3120 && ! side_effects_p (op1))
3121 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
3123 if (GET_CODE (op0) == NOT
3124 && GET_CODE (XEXP (op0, 0)) == XOR
3125 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
3126 && ! side_effects_p (op1))
3127 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
3129 /* Convert (A | B) & A to A. */
3130 if (GET_CODE (op0) == IOR
3131 && (rtx_equal_p (XEXP (op0, 0), op1)
3132 || rtx_equal_p (XEXP (op0, 1), op1))
3133 && ! side_effects_p (XEXP (op0, 0))
3134 && ! side_effects_p (XEXP (op0, 1)))
3135 return op1;
3137 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
3138 ((A & N) + B) & M -> (A + B) & M
3139 Similarly if (N & M) == 0,
3140 ((A | N) + B) & M -> (A + B) & M
3141 and for - instead of + and/or ^ instead of |.
3142 Also, if (N & M) == 0, then
3143 (A +- N) & M -> A & M. */
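/* E.g. with M == 255 and N == 0xff00 we have (N & M) == 0, so
   (and (plus (ior a (const_int 0xff00)) b) (const_int 255)) becomes
   (and (plus a b) (const_int 255)); the bits of N never reach the
   low eight bits that survive the mask. */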
3144 if (CONST_INT_P (trueop1)
3145 && HWI_COMPUTABLE_MODE_P (mode)
3146 && ~UINTVAL (trueop1)
3147 && (UINTVAL (trueop1) & (UINTVAL (trueop1) + 1)) == 0
3148 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
3150 rtx pmop[2];
3151 int which;
3153 pmop[0] = XEXP (op0, 0);
3154 pmop[1] = XEXP (op0, 1);
3156 if (CONST_INT_P (pmop[1])
3157 && (UINTVAL (pmop[1]) & UINTVAL (trueop1)) == 0)
3158 return simplify_gen_binary (AND, mode, pmop[0], op1);
3160 for (which = 0; which < 2; which++)
3162 tem = pmop[which];
3163 switch (GET_CODE (tem))
3165 case AND:
3166 if (CONST_INT_P (XEXP (tem, 1))
3167 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1))
3168 == UINTVAL (trueop1))
3169 pmop[which] = XEXP (tem, 0);
3170 break;
3171 case IOR:
3172 case XOR:
3173 if (CONST_INT_P (XEXP (tem, 1))
3174 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1)) == 0)
3175 pmop[which] = XEXP (tem, 0);
3176 break;
3177 default:
3178 break;
3182 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
3184 tem = simplify_gen_binary (GET_CODE (op0), mode,
3185 pmop[0], pmop[1]);
3186 return simplify_gen_binary (code, mode, tem, op1);
3190 /* (and X (ior (not X) Y)) -> (and X Y) */
3191 if (GET_CODE (op1) == IOR
3192 && GET_CODE (XEXP (op1, 0)) == NOT
3193 && rtx_equal_p (op0, XEXP (XEXP (op1, 0), 0)))
3194 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));
3196 /* (and (ior (not X) Y) X) -> (and X Y) */
3197 if (GET_CODE (op0) == IOR
3198 && GET_CODE (XEXP (op0, 0)) == NOT
3199 && rtx_equal_p (op1, XEXP (XEXP (op0, 0), 0)))
3200 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));
3202 /* (and X (ior Y (not X))) -> (and X Y) */
3203 if (GET_CODE (op1) == IOR
3204 && GET_CODE (XEXP (op1, 1)) == NOT
3205 && rtx_equal_p (op0, XEXP (XEXP (op1, 1), 0)))
3206 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 0));
3208 /* (and (ior Y (not X)) X) -> (and X Y) */
3209 if (GET_CODE (op0) == IOR
3210 && GET_CODE (XEXP (op0, 1)) == NOT
3211 && rtx_equal_p (op1, XEXP (XEXP (op0, 1), 0)))
3212 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 0));
3214 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
3215 if (tem)
3216 return tem;
3218 tem = simplify_associative_operation (code, mode, op0, op1);
3219 if (tem)
3220 return tem;
3221 break;
3223 case UDIV:
3224 /* 0/x is 0 (or x&0 if x has side-effects). */
3225 if (trueop0 == CONST0_RTX (mode)
3226 && !cfun->can_throw_non_call_exceptions)
3228 if (side_effects_p (op1))
3229 return simplify_gen_binary (AND, mode, op1, trueop0);
3230 return trueop0;
3232 /* x/1 is x. */
3233 if (trueop1 == CONST1_RTX (mode))
3235 tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3236 if (tem)
3237 return tem;
3239 /* Convert divide by power of two into shift. */
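/* E.g. (udiv x (const_int 4)) becomes (lshiftrt x (const_int 2)). */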
3240 if (CONST_INT_P (trueop1)
3241 && (val = exact_log2 (UINTVAL (trueop1))) > 0)
3242 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
3243 break;
3245 case DIV:
3246 /* Handle floating point and integers separately. */
3247 if (SCALAR_FLOAT_MODE_P (mode))
3249 /* Maybe change 0.0 / x to 0.0. This transformation isn't
3250 safe for modes with NaNs, since 0.0 / 0.0 will then be
3251 NaN rather than 0.0. Nor is it safe for modes with signed
3252 zeros, since dividing 0 by a negative number gives -0.0 */
3253 if (trueop0 == CONST0_RTX (mode)
3254 && !HONOR_NANS (mode)
3255 && !HONOR_SIGNED_ZEROS (mode)
3256 && ! side_effects_p (op1))
3257 return op0;
3258 /* x/1.0 is x. */
3259 if (trueop1 == CONST1_RTX (mode)
3260 && !HONOR_SNANS (mode))
3261 return op0;
3263 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
3264 && trueop1 != CONST0_RTX (mode))
3266 const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);
3268 /* x/-1.0 is -x. */
3269 if (real_equal (d1, &dconstm1)
3270 && !HONOR_SNANS (mode))
3271 return simplify_gen_unary (NEG, mode, op0, mode);
3273 /* Change FP division by a constant into multiplication.
3274 Only do this with -freciprocal-math. */
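/* E.g. with -freciprocal-math, x / 4.0 becomes x * 0.25. */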
3275 if (flag_reciprocal_math
3276 && !real_equal (d1, &dconst0))
3278 REAL_VALUE_TYPE d;
3279 real_arithmetic (&d, RDIV_EXPR, &dconst1, d1);
3280 tem = const_double_from_real_value (d, mode);
3281 return simplify_gen_binary (MULT, mode, op0, tem);
3285 else if (SCALAR_INT_MODE_P (mode))
3287 /* 0/x is 0 (or x&0 if x has side-effects). */
3288 if (trueop0 == CONST0_RTX (mode)
3289 && !cfun->can_throw_non_call_exceptions)
3291 if (side_effects_p (op1))
3292 return simplify_gen_binary (AND, mode, op1, trueop0);
3293 return trueop0;
3295 /* x/1 is x. */
3296 if (trueop1 == CONST1_RTX (mode))
3298 tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3299 if (tem)
3300 return tem;
3302 /* x/-1 is -x. */
3303 if (trueop1 == constm1_rtx)
3305 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3306 if (x)
3307 return simplify_gen_unary (NEG, mode, x, mode);
3310 break;
3312 case UMOD:
3313 /* 0%x is 0 (or x&0 if x has side-effects). */
3314 if (trueop0 == CONST0_RTX (mode))
3316 if (side_effects_p (op1))
3317 return simplify_gen_binary (AND, mode, op1, trueop0);
3318 return trueop0;
3320 /* x%1 is 0 (or x&0 if x has side-effects). */
3321 if (trueop1 == CONST1_RTX (mode))
3323 if (side_effects_p (op0))
3324 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3325 return CONST0_RTX (mode);
3327 /* Implement modulus by power of two as AND. */
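/* E.g. (umod x (const_int 16)) becomes (and x (const_int 15)). */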
3328 if (CONST_INT_P (trueop1)
3329 && exact_log2 (UINTVAL (trueop1)) > 0)
3330 return simplify_gen_binary (AND, mode, op0,
3331 gen_int_mode (INTVAL (op1) - 1, mode));
3332 break;
3334 case MOD:
3335 /* 0%x is 0 (or x&0 if x has side-effects). */
3336 if (trueop0 == CONST0_RTX (mode))
3338 if (side_effects_p (op1))
3339 return simplify_gen_binary (AND, mode, op1, trueop0);
3340 return trueop0;
3342 /* x%1 and x%-1 are 0 (or x&0 if x has side-effects). */
3343 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
3345 if (side_effects_p (op0))
3346 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3347 return CONST0_RTX (mode);
3349 break;
3351 case ROTATERT:
3352 case ROTATE:
3353 /* Canonicalize rotates by constant amount. If op1 is bitsize / 2,
3354 prefer left rotation; if op1 is from bitsize / 2 + 1 to
3355 bitsize - 1, use the other direction of rotate with a 1 .. bitsize / 2 - 1
3356 amount instead. */
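/* E.g. in SImode, (rotate x (const_int 25)) is rewritten as
   (rotatert x (const_int 7)) when the target has both rotate
   and rotatert patterns. */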
3357 #if defined(HAVE_rotate) && defined(HAVE_rotatert)
3358 if (CONST_INT_P (trueop1)
3359 && IN_RANGE (INTVAL (trueop1),
3360 GET_MODE_PRECISION (mode) / 2 + (code == ROTATE),
3361 GET_MODE_PRECISION (mode) - 1))
3362 return simplify_gen_binary (code == ROTATE ? ROTATERT : ROTATE,
3363 mode, op0, GEN_INT (GET_MODE_PRECISION (mode)
3364 - INTVAL (trueop1)));
3365 #endif
3366 /* FALLTHRU */
3367 case ASHIFTRT:
3368 if (trueop1 == CONST0_RTX (mode))
3369 return op0;
3370 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3371 return op0;
3372 /* Rotating ~0 always results in ~0. */
3373 if (CONST_INT_P (trueop0) && width <= HOST_BITS_PER_WIDE_INT
3374 && UINTVAL (trueop0) == GET_MODE_MASK (mode)
3375 && ! side_effects_p (op1))
3376 return op0;
3378 canonicalize_shift:
3379 /* Given:
3380 scalar modes M1, M2
3381 scalar constants c1, c2
3382 size (M2) > size (M1)
3383 c1 == size (M2) - size (M1)
3384 optimize:
3385 ([a|l]shiftrt:M1 (subreg:M1 (lshiftrt:M2 (reg:M2) (const_int <c1>))
3386 <low_part>)
3387 (const_int <c2>))
3389 (subreg:M1 ([a|l]shiftrt:M2 (reg:M2) (const_int <c1 + c2>))
3390 <low_part>). */
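/* For instance, with M2 == DImode, M1 == SImode, c1 == 32 and c2 == 5,
   (ashiftrt:SI (subreg:SI (lshiftrt:DI r (const_int 32)) <low_part>)
   (const_int 5))
   becomes (subreg:SI (ashiftrt:DI r (const_int 37)) <low_part>). */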
3391 if ((code == ASHIFTRT || code == LSHIFTRT)
3392 && is_a <scalar_int_mode> (mode, &int_mode)
3393 && SUBREG_P (op0)
3394 && CONST_INT_P (op1)
3395 && GET_CODE (SUBREG_REG (op0)) == LSHIFTRT
3396 && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (op0)),
3397 &inner_mode)
3398 && CONST_INT_P (XEXP (SUBREG_REG (op0), 1))
3399 && GET_MODE_BITSIZE (inner_mode) > GET_MODE_BITSIZE (int_mode)
3400 && (INTVAL (XEXP (SUBREG_REG (op0), 1))
3401 == GET_MODE_BITSIZE (inner_mode) - GET_MODE_BITSIZE (int_mode))
3402 && subreg_lowpart_p (op0))
3404 rtx tmp = GEN_INT (INTVAL (XEXP (SUBREG_REG (op0), 1))
3405 + INTVAL (op1));
3406 tmp = simplify_gen_binary (code, inner_mode,
3407 XEXP (SUBREG_REG (op0), 0),
3408 tmp);
3409 return lowpart_subreg (int_mode, tmp, inner_mode);
3412 if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
3414 val = INTVAL (op1) & (GET_MODE_PRECISION (mode) - 1);
3415 if (val != INTVAL (op1))
3416 return simplify_gen_binary (code, mode, op0, GEN_INT (val));
3418 break;
3420 case ASHIFT:
3421 case SS_ASHIFT:
3422 case US_ASHIFT:
3423 if (trueop1 == CONST0_RTX (mode))
3424 return op0;
3425 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3426 return op0;
3427 goto canonicalize_shift;
3429 case LSHIFTRT:
3430 if (trueop1 == CONST0_RTX (mode))
3431 return op0;
3432 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3433 return op0;
3434 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
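/* E.g. in SImode, if CLZ_DEFINED_VALUE_AT_ZERO yields 32, then
   (lshiftrt (clz x) (const_int 5)) is 1 exactly when x is zero, so
   with STORE_FLAG_VALUE == 1 it becomes (eq x (const_int 0)). */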
3435 if (GET_CODE (op0) == CLZ
3436 && is_a <scalar_int_mode> (GET_MODE (XEXP (op0, 0)), &inner_mode)
3437 && CONST_INT_P (trueop1)
3438 && STORE_FLAG_VALUE == 1
3439 && INTVAL (trueop1) < (HOST_WIDE_INT)width)
3441 unsigned HOST_WIDE_INT zero_val = 0;
3443 if (CLZ_DEFINED_VALUE_AT_ZERO (inner_mode, zero_val)
3444 && zero_val == GET_MODE_PRECISION (inner_mode)
3445 && INTVAL (trueop1) == exact_log2 (zero_val))
3446 return simplify_gen_relational (EQ, mode, inner_mode,
3447 XEXP (op0, 0), const0_rtx);
3449 goto canonicalize_shift;
3451 case SMIN:
3452 if (width <= HOST_BITS_PER_WIDE_INT
3453 && mode_signbit_p (mode, trueop1)
3454 && ! side_effects_p (op0))
3455 return op1;
3456 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3457 return op0;
3458 tem = simplify_associative_operation (code, mode, op0, op1);
3459 if (tem)
3460 return tem;
3461 break;
3463 case SMAX:
3464 if (width <= HOST_BITS_PER_WIDE_INT
3465 && CONST_INT_P (trueop1)
3466 && (UINTVAL (trueop1) == GET_MODE_MASK (mode) >> 1)
3467 && ! side_effects_p (op0))
3468 return op1;
3469 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3470 return op0;
3471 tem = simplify_associative_operation (code, mode, op0, op1);
3472 if (tem)
3473 return tem;
3474 break;
3476 case UMIN:
3477 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
3478 return op1;
3479 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3480 return op0;
3481 tem = simplify_associative_operation (code, mode, op0, op1);
3482 if (tem)
3483 return tem;
3484 break;
3486 case UMAX:
3487 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
3488 return op1;
3489 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3490 return op0;
3491 tem = simplify_associative_operation (code, mode, op0, op1);
3492 if (tem)
3493 return tem;
3494 break;
3496 case SS_PLUS:
3497 case US_PLUS:
3498 case SS_MINUS:
3499 case US_MINUS:
3500 case SS_MULT:
3501 case US_MULT:
3502 case SS_DIV:
3503 case US_DIV:
3504 /* ??? There are simplifications that can be done. */
3505 return 0;
3507 case VEC_SELECT:
3508 if (!VECTOR_MODE_P (mode))
3510 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3511 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
3512 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3513 gcc_assert (XVECLEN (trueop1, 0) == 1);
3514 gcc_assert (CONST_INT_P (XVECEXP (trueop1, 0, 0)));
3516 if (GET_CODE (trueop0) == CONST_VECTOR)
3517 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
3518 (trueop1, 0, 0)));
3520 /* Extract a scalar element from a nested VEC_SELECT expression
3521 (with an optional nested VEC_CONCAT expression). Some targets
3522 (i386) extract a scalar element from a vector using a chain of
3523 nested VEC_SELECT expressions. When the input operand is a memory
3524 operand, this operation can be simplified to a simple scalar
3525 load from an offset memory address. */
3526 if (GET_CODE (trueop0) == VEC_SELECT)
3528 rtx op0 = XEXP (trueop0, 0);
3529 rtx op1 = XEXP (trueop0, 1);
3531 machine_mode opmode = GET_MODE (op0);
3532 int elt_size = GET_MODE_UNIT_SIZE (opmode);
3533 int n_elts = GET_MODE_SIZE (opmode) / elt_size;
3535 int i = INTVAL (XVECEXP (trueop1, 0, 0));
3536 int elem;
3538 rtvec vec;
3539 rtx tmp_op, tmp;
3541 gcc_assert (GET_CODE (op1) == PARALLEL);
3542 gcc_assert (i < n_elts);
3544 /* Select the element pointed to by the nested selector. */
3545 elem = INTVAL (XVECEXP (op1, 0, i));
3547 /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */
3548 if (GET_CODE (op0) == VEC_CONCAT)
3550 rtx op00 = XEXP (op0, 0);
3551 rtx op01 = XEXP (op0, 1);
3553 machine_mode mode00, mode01;
3554 int n_elts00, n_elts01;
3556 mode00 = GET_MODE (op00);
3557 mode01 = GET_MODE (op01);
3559 /* Find out the number of elements of each operand. */
3560 if (VECTOR_MODE_P (mode00))
3562 elt_size = GET_MODE_UNIT_SIZE (mode00);
3563 n_elts00 = GET_MODE_SIZE (mode00) / elt_size;
3565 else
3566 n_elts00 = 1;
3568 if (VECTOR_MODE_P (mode01))
3570 elt_size = GET_MODE_UNIT_SIZE (mode01);
3571 n_elts01 = GET_MODE_SIZE (mode01) / elt_size;
3573 else
3574 n_elts01 = 1;
3576 gcc_assert (n_elts == n_elts00 + n_elts01);
3578 /* Select the correct operand of VEC_CONCAT
3579 and adjust the selector. */
3580 if (elem < n_elts01)
3581 tmp_op = op00;
3582 else
3584 tmp_op = op01;
3585 elem -= n_elts00;
3588 else
3589 tmp_op = op0;
3591 vec = rtvec_alloc (1);
3592 RTVEC_ELT (vec, 0) = GEN_INT (elem);
3594 tmp = gen_rtx_fmt_ee (code, mode,
3595 tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
3596 return tmp;
3598 if (GET_CODE (trueop0) == VEC_DUPLICATE
3599 && GET_MODE (XEXP (trueop0, 0)) == mode)
3600 return XEXP (trueop0, 0);
3602 else
3604 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3605 gcc_assert (GET_MODE_INNER (mode)
3606 == GET_MODE_INNER (GET_MODE (trueop0)));
3607 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3609 if (GET_CODE (trueop0) == CONST_VECTOR)
3611 int elt_size = GET_MODE_UNIT_SIZE (mode);
3612 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3613 rtvec v = rtvec_alloc (n_elts);
3614 unsigned int i;
3616 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
3617 for (i = 0; i < n_elts; i++)
3619 rtx x = XVECEXP (trueop1, 0, i);
3621 gcc_assert (CONST_INT_P (x));
3622 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
3623 INTVAL (x));
3626 return gen_rtx_CONST_VECTOR (mode, v);
3629 /* Recognize the identity. */
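/* E.g. (vec_select:V4SI X (parallel [0 1 2 3])) with X of mode V4SI
   is just X (X here is an arbitrary operand). */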
3630 if (GET_MODE (trueop0) == mode)
3632 bool maybe_ident = true;
3633 for (int i = 0; i < XVECLEN (trueop1, 0); i++)
3635 rtx j = XVECEXP (trueop1, 0, i);
3636 if (!CONST_INT_P (j) || INTVAL (j) != i)
3638 maybe_ident = false;
3639 break;
3642 if (maybe_ident)
3643 return trueop0;
3646 /* If we build {a,b} then permute it, build the result directly. */
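/* For example (a sketch, with A, B, C and D scalars of the element mode):
     (vec_select:V2DF
       (vec_concat:V4DF (vec_concat:V2DF A B) (vec_concat:V2DF C D))
       (parallel [3 0]))
   picks elements D and A, so it is rebuilt as (vec_concat:V2DF D A). */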
3647 if (XVECLEN (trueop1, 0) == 2
3648 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3649 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3650 && GET_CODE (trueop0) == VEC_CONCAT
3651 && GET_CODE (XEXP (trueop0, 0)) == VEC_CONCAT
3652 && GET_MODE (XEXP (trueop0, 0)) == mode
3653 && GET_CODE (XEXP (trueop0, 1)) == VEC_CONCAT
3654 && GET_MODE (XEXP (trueop0, 1)) == mode)
3656 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3657 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3658 rtx subop0, subop1;
3660 gcc_assert (i0 < 4 && i1 < 4);
3661 subop0 = XEXP (XEXP (trueop0, i0 / 2), i0 % 2);
3662 subop1 = XEXP (XEXP (trueop0, i1 / 2), i1 % 2);
3664 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3667 if (XVECLEN (trueop1, 0) == 2
3668 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3669 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3670 && GET_CODE (trueop0) == VEC_CONCAT
3671 && GET_MODE (trueop0) == mode)
3673 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3674 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3675 rtx subop0, subop1;
3677 gcc_assert (i0 < 2 && i1 < 2);
3678 subop0 = XEXP (trueop0, i0);
3679 subop1 = XEXP (trueop0, i1);
3681 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3684 /* If we select one half of a vec_concat, return that. */
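/* E.g. with X and Y arbitrary operands of mode V2SI,
   (vec_select:V2SI (vec_concat:V4SI X Y) (parallel [0 1])) is X and
   (vec_select:V2SI (vec_concat:V4SI X Y) (parallel [2 3])) is Y. */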
3685 if (GET_CODE (trueop0) == VEC_CONCAT
3686 && CONST_INT_P (XVECEXP (trueop1, 0, 0)))
3688 rtx subop0 = XEXP (trueop0, 0);
3689 rtx subop1 = XEXP (trueop0, 1);
3690 machine_mode mode0 = GET_MODE (subop0);
3691 machine_mode mode1 = GET_MODE (subop1);
3692 int li = GET_MODE_UNIT_SIZE (mode0);
3693 int l0 = GET_MODE_SIZE (mode0) / li;
3694 int l1 = GET_MODE_SIZE (mode1) / li;
3695 int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3696 if (i0 == 0 && !side_effects_p (op1) && mode == mode0)
3698 bool success = true;
3699 for (int i = 1; i < l0; ++i)
3701 rtx j = XVECEXP (trueop1, 0, i);
3702 if (!CONST_INT_P (j) || INTVAL (j) != i)
3704 success = false;
3705 break;
3708 if (success)
3709 return subop0;
3711 if (i0 == l0 && !side_effects_p (op0) && mode == mode1)
3713 bool success = true;
3714 for (int i = 1; i < l1; ++i)
3716 rtx j = XVECEXP (trueop1, 0, i);
3717 if (!CONST_INT_P (j) || INTVAL (j) != i0 + i)
3719 success = false;
3720 break;
3723 if (success)
3724 return subop1;
3729 if (XVECLEN (trueop1, 0) == 1
3730 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3731 && GET_CODE (trueop0) == VEC_CONCAT)
3733 rtx vec = trueop0;
3734 int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
3736 /* Try to find the element in the VEC_CONCAT. */
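/* E.g. (vec_select:DI (vec_concat:V2DI X Y) (parallel [1])) has byte
   offset 8, which lands in the second half, so the loop below returns Y
   (a sketch; X and Y are arbitrary DImode values). */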
3737 while (GET_MODE (vec) != mode
3738 && GET_CODE (vec) == VEC_CONCAT)
3740 HOST_WIDE_INT vec_size;
3742 if (CONST_INT_P (XEXP (vec, 0)))
3744 /* vec_concat of two const_ints doesn't make sense with
3745 respect to modes. */
3746 if (CONST_INT_P (XEXP (vec, 1)))
3747 return 0;
3749 vec_size = GET_MODE_SIZE (GET_MODE (trueop0))
3750 - GET_MODE_SIZE (GET_MODE (XEXP (vec, 1)));
3752 else
3753 vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
3755 if (offset < vec_size)
3756 vec = XEXP (vec, 0);
3757 else
3759 offset -= vec_size;
3760 vec = XEXP (vec, 1);
3762 vec = avoid_constant_pool_reference (vec);
3765 if (GET_MODE (vec) == mode)
3766 return vec;
3769 /* If we select elements in a vec_merge that all come from the same
3770 operand, select from that operand directly. */
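/* E.g. (vec_select:V2SI (vec_merge:V4SI X Y (const_int 5)) (parallel [0 2]))
   only reads elements 0 and 2, both of which come from X (mask bits 0 and 2
   are set), so it becomes (vec_select:V2SI X (parallel [0 2])). */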
3771 if (GET_CODE (op0) == VEC_MERGE)
3773 rtx trueop02 = avoid_constant_pool_reference (XEXP (op0, 2));
3774 if (CONST_INT_P (trueop02))
3776 unsigned HOST_WIDE_INT sel = UINTVAL (trueop02);
3777 bool all_operand0 = true;
3778 bool all_operand1 = true;
3779 for (int i = 0; i < XVECLEN (trueop1, 0); i++)
3781 rtx j = XVECEXP (trueop1, 0, i);
3782 if (sel & (HOST_WIDE_INT_1U << UINTVAL (j)))
3783 all_operand1 = false;
3784 else
3785 all_operand0 = false;
3787 if (all_operand0 && !side_effects_p (XEXP (op0, 1)))
3788 return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 0), op1);
3789 if (all_operand1 && !side_effects_p (XEXP (op0, 0)))
3790 return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 1), op1);
3794 /* If we have two nested selects that are inverses of each
3795 other, replace them with the source operand. */
3796 if (GET_CODE (trueop0) == VEC_SELECT
3797 && GET_MODE (XEXP (trueop0, 0)) == mode)
3799 rtx op0_subop1 = XEXP (trueop0, 1);
3800 gcc_assert (GET_CODE (op0_subop1) == PARALLEL);
3801 gcc_assert (XVECLEN (trueop1, 0) == GET_MODE_NUNITS (mode));
3803 /* Apply the outer ordering vector to the inner one. (The inner
3804 ordering vector is expressly permitted to be of a different
3805 length than the outer one.) If the result is { 0, 1, ..., n-1 }
3806 then the two VEC_SELECTs cancel. */
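/* E.g. applying (parallel [1 0]) on top of (parallel [1 0]) gives
   { 0, 1 }, so (vec_select:V2DF (vec_select:V2DF X (parallel [1 0]))
   (parallel [1 0])) collapses to X. */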
3807 for (int i = 0; i < XVECLEN (trueop1, 0); ++i)
3809 rtx x = XVECEXP (trueop1, 0, i);
3810 if (!CONST_INT_P (x))
3811 return 0;
3812 rtx y = XVECEXP (op0_subop1, 0, INTVAL (x));
3813 if (!CONST_INT_P (y) || i != INTVAL (y))
3814 return 0;
3816 return XEXP (trueop0, 0);
3819 return 0;
3820 case VEC_CONCAT:
3822 machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
3823 ? GET_MODE (trueop0)
3824 : GET_MODE_INNER (mode));
3825 machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
3826 ? GET_MODE (trueop1)
3827 : GET_MODE_INNER (mode));
3829 gcc_assert (VECTOR_MODE_P (mode));
3830 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
3831 == GET_MODE_SIZE (mode));
3833 if (VECTOR_MODE_P (op0_mode))
3834 gcc_assert (GET_MODE_INNER (mode)
3835 == GET_MODE_INNER (op0_mode));
3836 else
3837 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
3839 if (VECTOR_MODE_P (op1_mode))
3840 gcc_assert (GET_MODE_INNER (mode)
3841 == GET_MODE_INNER (op1_mode));
3842 else
3843 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
3845 if ((GET_CODE (trueop0) == CONST_VECTOR
3846 || CONST_SCALAR_INT_P (trueop0)
3847 || CONST_DOUBLE_AS_FLOAT_P (trueop0))
3848 && (GET_CODE (trueop1) == CONST_VECTOR
3849 || CONST_SCALAR_INT_P (trueop1)
3850 || CONST_DOUBLE_AS_FLOAT_P (trueop1)))
3852 int elt_size = GET_MODE_UNIT_SIZE (mode);
3853 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3854 rtvec v = rtvec_alloc (n_elts);
3855 unsigned int i;
3856 unsigned in_n_elts = 1;
3858 if (VECTOR_MODE_P (op0_mode))
3859 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
3860 for (i = 0; i < n_elts; i++)
3862 if (i < in_n_elts)
3864 if (!VECTOR_MODE_P (op0_mode))
3865 RTVEC_ELT (v, i) = trueop0;
3866 else
3867 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
3869 else
3871 if (!VECTOR_MODE_P (op1_mode))
3872 RTVEC_ELT (v, i) = trueop1;
3873 else
3874 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
3875 i - in_n_elts);
3879 return gen_rtx_CONST_VECTOR (mode, v);
3882 /* Try to merge two VEC_SELECTs from the same vector into a single one.
3883 Restrict the transformation to avoid generating a VEC_SELECT with a
3884 mode unrelated to its operand. */
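/* E.g. with X an arbitrary operand of mode V4SI,
     (vec_concat:V4SI (vec_select:V2SI X (parallel [0 1]))
                      (vec_select:V2SI X (parallel [3 2])))
   becomes (vec_select:V4SI X (parallel [0 1 3 2])). */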
3885 if (GET_CODE (trueop0) == VEC_SELECT
3886 && GET_CODE (trueop1) == VEC_SELECT
3887 && rtx_equal_p (XEXP (trueop0, 0), XEXP (trueop1, 0))
3888 && GET_MODE (XEXP (trueop0, 0)) == mode)
3890 rtx par0 = XEXP (trueop0, 1);
3891 rtx par1 = XEXP (trueop1, 1);
3892 int len0 = XVECLEN (par0, 0);
3893 int len1 = XVECLEN (par1, 0);
3894 rtvec vec = rtvec_alloc (len0 + len1);
3895 for (int i = 0; i < len0; i++)
3896 RTVEC_ELT (vec, i) = XVECEXP (par0, 0, i);
3897 for (int i = 0; i < len1; i++)
3898 RTVEC_ELT (vec, len0 + i) = XVECEXP (par1, 0, i);
3899 return simplify_gen_binary (VEC_SELECT, mode, XEXP (trueop0, 0),
3900 gen_rtx_PARALLEL (VOIDmode, vec));
3903 return 0;
3905 default:
3906 gcc_unreachable ();
3909 return 0;
3913 simplify_const_binary_operation (enum rtx_code code, machine_mode mode,
3914 rtx op0, rtx op1)
3916 if (VECTOR_MODE_P (mode)
3917 && code != VEC_CONCAT
3918 && GET_CODE (op0) == CONST_VECTOR
3919 && GET_CODE (op1) == CONST_VECTOR)
3921 unsigned n_elts = GET_MODE_NUNITS (mode);
3922 machine_mode op0mode = GET_MODE (op0);
3923 unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
3924 machine_mode op1mode = GET_MODE (op1);
3925 unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
3926 rtvec v = rtvec_alloc (n_elts);
3927 unsigned int i;
3929 gcc_assert (op0_n_elts == n_elts);
3930 gcc_assert (op1_n_elts == n_elts);
3931 for (i = 0; i < n_elts; i++)
3933 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
3934 CONST_VECTOR_ELT (op0, i),
3935 CONST_VECTOR_ELT (op1, i));
3936 if (!x)
3937 return 0;
3938 RTVEC_ELT (v, i) = x;
3941 return gen_rtx_CONST_VECTOR (mode, v);
3944 if (VECTOR_MODE_P (mode)
3945 && code == VEC_CONCAT
3946 && (CONST_SCALAR_INT_P (op0)
3947 || GET_CODE (op0) == CONST_FIXED
3948 || CONST_DOUBLE_AS_FLOAT_P (op0))
3949 && (CONST_SCALAR_INT_P (op1)
3950 || CONST_DOUBLE_AS_FLOAT_P (op1)
3951 || GET_CODE (op1) == CONST_FIXED))
3953 unsigned n_elts = GET_MODE_NUNITS (mode);
3954 rtvec v = rtvec_alloc (n_elts);
3956 gcc_assert (n_elts >= 2);
3957 if (n_elts == 2)
3959 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
3960 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
3962 RTVEC_ELT (v, 0) = op0;
3963 RTVEC_ELT (v, 1) = op1;
3965 else
3967 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
3968 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
3969 unsigned i;
3971 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
3972 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
3973 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
3975 for (i = 0; i < op0_n_elts; ++i)
3976 RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
3977 for (i = 0; i < op1_n_elts; ++i)
3978 RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
3981 return gen_rtx_CONST_VECTOR (mode, v);
3984 if (SCALAR_FLOAT_MODE_P (mode)
3985 && CONST_DOUBLE_AS_FLOAT_P (op0)
3986 && CONST_DOUBLE_AS_FLOAT_P (op1)
3987 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
3989 if (code == AND
3990 || code == IOR
3991 || code == XOR)
3993 long tmp0[4];
3994 long tmp1[4];
3995 REAL_VALUE_TYPE r;
3996 int i;
3998 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
3999 GET_MODE (op0));
4000 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
4001 GET_MODE (op1));
4002 for (i = 0; i < 4; i++)
4004 switch (code)
4006 case AND:
4007 tmp0[i] &= tmp1[i];
4008 break;
4009 case IOR:
4010 tmp0[i] |= tmp1[i];
4011 break;
4012 case XOR:
4013 tmp0[i] ^= tmp1[i];
4014 break;
4015 default:
4016 gcc_unreachable ();
4019 real_from_target (&r, tmp0, mode);
4020 return const_double_from_real_value (r, mode);
4022 else
4024 REAL_VALUE_TYPE f0, f1, value, result;
4025 const REAL_VALUE_TYPE *opr0, *opr1;
4026 bool inexact;
4028 opr0 = CONST_DOUBLE_REAL_VALUE (op0);
4029 opr1 = CONST_DOUBLE_REAL_VALUE (op1);
4031 if (HONOR_SNANS (mode)
4032 && (REAL_VALUE_ISSIGNALING_NAN (*opr0)
4033 || REAL_VALUE_ISSIGNALING_NAN (*opr1)))
4034 return 0;
4036 real_convert (&f0, mode, opr0);
4037 real_convert (&f1, mode, opr1);
4039 if (code == DIV
4040 && real_equal (&f1, &dconst0)
4041 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
4042 return 0;
4044 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
4045 && flag_trapping_math
4046 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
4048 int s0 = REAL_VALUE_NEGATIVE (f0);
4049 int s1 = REAL_VALUE_NEGATIVE (f1);
4051 switch (code)
4053 case PLUS:
4054 /* Inf + -Inf = NaN plus exception. */
4055 if (s0 != s1)
4056 return 0;
4057 break;
4058 case MINUS:
4059 /* Inf - Inf = NaN plus exception. */
4060 if (s0 == s1)
4061 return 0;
4062 break;
4063 case DIV:
4064 /* Inf / Inf = NaN plus exception. */
4065 return 0;
4066 default:
4067 break;
4071 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
4072 && flag_trapping_math
4073 && ((REAL_VALUE_ISINF (f0) && real_equal (&f1, &dconst0))
4074 || (REAL_VALUE_ISINF (f1)
4075 && real_equal (&f0, &dconst0))))
4076 /* Inf * 0 = NaN plus exception. */
4077 return 0;
4079 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
4080 &f0, &f1);
4081 real_convert (&result, mode, &value);
4083 /* Don't constant fold this floating point operation if
4084 the result has overflowed and flag_trapping_math is set. */
4086 if (flag_trapping_math
4087 && MODE_HAS_INFINITIES (mode)
4088 && REAL_VALUE_ISINF (result)
4089 && !REAL_VALUE_ISINF (f0)
4090 && !REAL_VALUE_ISINF (f1))
4091 /* Overflow plus exception. */
4092 return 0;
4094 /* Don't constant fold this floating point operation if the
4095 result may depend upon the run-time rounding mode and
4096 flag_rounding_math is set, or if GCC's software emulation
4097 is unable to accurately represent the result. */
4099 if ((flag_rounding_math
4100 || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
4101 && (inexact || !real_identical (&result, &value)))
4102 return NULL_RTX;
4104 return const_double_from_real_value (result, mode);
4108 /* We can fold some multi-word operations. */
4109 scalar_int_mode int_mode;
4110 if (is_a <scalar_int_mode> (mode, &int_mode)
4111 && CONST_SCALAR_INT_P (op0)
4112 && CONST_SCALAR_INT_P (op1))
4114 wide_int result;
4115 bool overflow;
4116 rtx_mode_t pop0 = rtx_mode_t (op0, int_mode);
4117 rtx_mode_t pop1 = rtx_mode_t (op1, int_mode);
4119 #if TARGET_SUPPORTS_WIDE_INT == 0
4120 /* This assert keeps the simplification from producing a result
4121 that cannot be represented in a CONST_DOUBLE, but a lot of
4122 upstream callers expect that this function never fails to
4123 simplify something, so if you added this to the test
4124 above the code would die later anyway. If this assert
4125 triggers, you just need to make the port support wide ints. */
4126 gcc_assert (GET_MODE_PRECISION (int_mode) <= HOST_BITS_PER_DOUBLE_INT);
4127 #endif
4128 switch (code)
4130 case MINUS:
4131 result = wi::sub (pop0, pop1);
4132 break;
4134 case PLUS:
4135 result = wi::add (pop0, pop1);
4136 break;
4138 case MULT:
4139 result = wi::mul (pop0, pop1);
4140 break;
4142 case DIV:
4143 result = wi::div_trunc (pop0, pop1, SIGNED, &overflow);
4144 if (overflow)
4145 return NULL_RTX;
4146 break;
4148 case MOD:
4149 result = wi::mod_trunc (pop0, pop1, SIGNED, &overflow);
4150 if (overflow)
4151 return NULL_RTX;
4152 break;
4154 case UDIV:
4155 result = wi::div_trunc (pop0, pop1, UNSIGNED, &overflow);
4156 if (overflow)
4157 return NULL_RTX;
4158 break;
4160 case UMOD:
4161 result = wi::mod_trunc (pop0, pop1, UNSIGNED, &overflow);
4162 if (overflow)
4163 return NULL_RTX;
4164 break;
4166 case AND:
4167 result = wi::bit_and (pop0, pop1);
4168 break;
4170 case IOR:
4171 result = wi::bit_or (pop0, pop1);
4172 break;
4174 case XOR:
4175 result = wi::bit_xor (pop0, pop1);
4176 break;
4178 case SMIN:
4179 result = wi::smin (pop0, pop1);
4180 break;
4182 case SMAX:
4183 result = wi::smax (pop0, pop1);
4184 break;
4186 case UMIN:
4187 result = wi::umin (pop0, pop1);
4188 break;
4190 case UMAX:
4191 result = wi::umax (pop0, pop1);
4192 break;
4194 case LSHIFTRT:
4195 case ASHIFTRT:
4196 case ASHIFT:
4198 wide_int wop1 = pop1;
4199 if (SHIFT_COUNT_TRUNCATED)
4200 wop1 = wi::umod_trunc (wop1, GET_MODE_PRECISION (int_mode));
4201 else if (wi::geu_p (wop1, GET_MODE_PRECISION (int_mode)))
4202 return NULL_RTX;
4204 switch (code)
4206 case LSHIFTRT:
4207 result = wi::lrshift (pop0, wop1);
4208 break;
4210 case ASHIFTRT:
4211 result = wi::arshift (pop0, wop1);
4212 break;
4214 case ASHIFT:
4215 result = wi::lshift (pop0, wop1);
4216 break;
4218 default:
4219 gcc_unreachable ();
4221 break;
4223 case ROTATE:
4224 case ROTATERT:
4226 if (wi::neg_p (pop1))
4227 return NULL_RTX;
4229 switch (code)
4231 case ROTATE:
4232 result = wi::lrotate (pop0, pop1);
4233 break;
4235 case ROTATERT:
4236 result = wi::rrotate (pop0, pop1);
4237 break;
4239 default:
4240 gcc_unreachable ();
4242 break;
4244 default:
4245 return NULL_RTX;
4247 return immed_wide_int_const (result, int_mode);
4250 return NULL_RTX;
4255 /* Return a positive integer if X should sort after Y. The value
4256 returned is 1 if and only if X and Y are both regs. */
4258 static int
4259 simplify_plus_minus_op_data_cmp (rtx x, rtx y)
4261 int result;
4263 result = (commutative_operand_precedence (y)
4264 - commutative_operand_precedence (x));
4265 if (result)
4266 return result + result;
4268 /* Group together equal REGs to do more simplification. */
4269 if (REG_P (x) && REG_P (y))
4270 return REGNO (x) > REGNO (y);
4272 return 0;
4275 /* Simplify and canonicalize a PLUS or MINUS, at least one of whose
4276 operands may be another PLUS or MINUS.
4278 Rather than test for specific cases, we do this by a brute-force method
4279 and do all possible simplifications until no more changes occur. Then
4280 we rebuild the operation.
4282 May return NULL_RTX when no changes were made. */
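/* Roughly, for placeholder registers A, B and C,
   (minus (plus A B) (plus B C)) is first flattened into the ops array as
   { A, B, -B, -C }; the pairwise loop then cancels B against -B, and the
   remaining terms are rebuilt as (minus A C). */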
4284 static rtx
4285 simplify_plus_minus (enum rtx_code code, machine_mode mode, rtx op0,
4286 rtx op1)
4288 struct simplify_plus_minus_op_data
4290 rtx op;
4291 short neg;
4292 } ops[16];
4293 rtx result, tem;
4294 int n_ops = 2;
4295 int changed, n_constants, canonicalized = 0;
4296 int i, j;
4298 memset (ops, 0, sizeof ops);
4300 /* Set up the two operands and then expand them until nothing has been
4301 changed. If we run out of room in our array, give up; this should
4302 almost never happen. */
4304 ops[0].op = op0;
4305 ops[0].neg = 0;
4306 ops[1].op = op1;
4307 ops[1].neg = (code == MINUS);
4311 changed = 0;
4312 n_constants = 0;
4314 for (i = 0; i < n_ops; i++)
4316 rtx this_op = ops[i].op;
4317 int this_neg = ops[i].neg;
4318 enum rtx_code this_code = GET_CODE (this_op);
4320 switch (this_code)
4322 case PLUS:
4323 case MINUS:
4324 if (n_ops == ARRAY_SIZE (ops))
4325 return NULL_RTX;
4327 ops[n_ops].op = XEXP (this_op, 1);
4328 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
4329 n_ops++;
4331 ops[i].op = XEXP (this_op, 0);
4332 changed = 1;
4333 /* If this operand was negated then we will potentially
4334 canonicalize the expression. Similarly, if we don't
4335 place the operands adjacent, we're re-ordering the
4336 expression and thus might be performing a
4337 canonicalization. Ignore register re-ordering.
4338 ??? It might be better to shuffle the ops array here,
4339 but then (plus (plus (A, B), plus (C, D))) wouldn't
4340 be seen as non-canonical. */
4341 if (this_neg
4342 || (i != n_ops - 2
4343 && !(REG_P (ops[i].op) && REG_P (ops[n_ops - 1].op))))
4344 canonicalized = 1;
4345 break;
4347 case NEG:
4348 ops[i].op = XEXP (this_op, 0);
4349 ops[i].neg = ! this_neg;
4350 changed = 1;
4351 canonicalized = 1;
4352 break;
4354 case CONST:
4355 if (n_ops != ARRAY_SIZE (ops)
4356 && GET_CODE (XEXP (this_op, 0)) == PLUS
4357 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
4358 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
4360 ops[i].op = XEXP (XEXP (this_op, 0), 0);
4361 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
4362 ops[n_ops].neg = this_neg;
4363 n_ops++;
4364 changed = 1;
4365 canonicalized = 1;
4367 break;
4369 case NOT:
4370 /* ~a -> (-a - 1) */
4371 if (n_ops != ARRAY_SIZE (ops))
4373 ops[n_ops].op = CONSTM1_RTX (mode);
4374 ops[n_ops++].neg = this_neg;
4375 ops[i].op = XEXP (this_op, 0);
4376 ops[i].neg = !this_neg;
4377 changed = 1;
4378 canonicalized = 1;
4380 break;
4382 case CONST_INT:
4383 n_constants++;
4384 if (this_neg)
4386 ops[i].op = neg_const_int (mode, this_op);
4387 ops[i].neg = 0;
4388 changed = 1;
4389 canonicalized = 1;
4391 break;
4393 default:
4394 break;
4398 while (changed);
4400 if (n_constants > 1)
4401 canonicalized = 1;
4403 gcc_assert (n_ops >= 2);
4405 /* If we only have two operands, we can avoid the loops. */
4406 if (n_ops == 2)
4408 enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
4409 rtx lhs, rhs;
4411 /* Get the two operands. Be careful with the order, especially for
4412 the cases where code == MINUS. */
4413 if (ops[0].neg && ops[1].neg)
4415 lhs = gen_rtx_NEG (mode, ops[0].op);
4416 rhs = ops[1].op;
4418 else if (ops[0].neg)
4420 lhs = ops[1].op;
4421 rhs = ops[0].op;
4423 else
4425 lhs = ops[0].op;
4426 rhs = ops[1].op;
4429 return simplify_const_binary_operation (code, mode, lhs, rhs);
4432 /* Now simplify each pair of operands until nothing changes. */
4433 while (1)
4435 /* Insertion sort is good enough for a small array. */
4436 for (i = 1; i < n_ops; i++)
4438 struct simplify_plus_minus_op_data save;
4439 int cmp;
4441 j = i - 1;
4442 cmp = simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op);
4443 if (cmp <= 0)
4444 continue;
4445 /* Just swapping registers doesn't count as canonicalization. */
4446 if (cmp != 1)
4447 canonicalized = 1;
4449 save = ops[i];
4451 ops[j + 1] = ops[j];
4452 while (j--
4453 && simplify_plus_minus_op_data_cmp (ops[j].op, save.op) > 0);
4454 ops[j + 1] = save;
4457 changed = 0;
4458 for (i = n_ops - 1; i > 0; i--)
4459 for (j = i - 1; j >= 0; j--)
4461 rtx lhs = ops[j].op, rhs = ops[i].op;
4462 int lneg = ops[j].neg, rneg = ops[i].neg;
4464 if (lhs != 0 && rhs != 0)
4466 enum rtx_code ncode = PLUS;
4468 if (lneg != rneg)
4470 ncode = MINUS;
4471 if (lneg)
4472 std::swap (lhs, rhs);
4474 else if (swap_commutative_operands_p (lhs, rhs))
4475 std::swap (lhs, rhs);
4477 if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
4478 && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
4480 rtx tem_lhs, tem_rhs;
4482 tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
4483 tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
4484 tem = simplify_binary_operation (ncode, mode, tem_lhs,
4485 tem_rhs);
4487 if (tem && !CONSTANT_P (tem))
4488 tem = gen_rtx_CONST (GET_MODE (tem), tem);
4490 else
4491 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
4493 if (tem)
4495 /* Reject "simplifications" that just wrap the two
4496 arguments in a CONST. Failure to do so can result
4497 in infinite recursion with simplify_binary_operation
4498 when it calls us to simplify CONST operations.
4499 Also, if we find such a simplification, don't try
4500 any more combinations with this rhs: We must have
4501 something like symbol+offset, i.e. one of the
4502 trivial CONST expressions we handle later. */
4503 if (GET_CODE (tem) == CONST
4504 && GET_CODE (XEXP (tem, 0)) == ncode
4505 && XEXP (XEXP (tem, 0), 0) == lhs
4506 && XEXP (XEXP (tem, 0), 1) == rhs)
4507 break;
4508 lneg &= rneg;
4509 if (GET_CODE (tem) == NEG)
4510 tem = XEXP (tem, 0), lneg = !lneg;
4511 if (CONST_INT_P (tem) && lneg)
4512 tem = neg_const_int (mode, tem), lneg = 0;
4514 ops[i].op = tem;
4515 ops[i].neg = lneg;
4516 ops[j].op = NULL_RTX;
4517 changed = 1;
4518 canonicalized = 1;
4523 if (!changed)
4524 break;
4526 /* Pack all the operands to the lower-numbered entries. */
4527 for (i = 0, j = 0; j < n_ops; j++)
4528 if (ops[j].op)
4530 ops[i] = ops[j];
4531 i++;
4533 n_ops = i;
4536 /* If nothing changed, check that rematerialization of rtl instructions
4537 is still required. */
4538 if (!canonicalized)
4540 /* Perform rematerialization only if all operands are registers and
4541 all operations are PLUS. */
4542 /* ??? Also disallow (non-global, non-frame) fixed registers to work
4543 around rs6000 and how it uses the CA register. See PR67145. */
4544 for (i = 0; i < n_ops; i++)
4545 if (ops[i].neg
4546 || !REG_P (ops[i].op)
4547 || (REGNO (ops[i].op) < FIRST_PSEUDO_REGISTER
4548 && fixed_regs[REGNO (ops[i].op)]
4549 && !global_regs[REGNO (ops[i].op)]
4550 && ops[i].op != frame_pointer_rtx
4551 && ops[i].op != arg_pointer_rtx
4552 && ops[i].op != stack_pointer_rtx))
4553 return NULL_RTX;
4554 goto gen_result;
4557 /* Create (minus -C X) instead of (neg (const (plus X C))). */
4558 if (n_ops == 2
4559 && CONST_INT_P (ops[1].op)
4560 && CONSTANT_P (ops[0].op)
4561 && ops[0].neg)
4562 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
4564 /* We suppressed creation of trivial CONST expressions in the
4565 combination loop to avoid recursion. Create one manually now.
4566 The combination loop should have ensured that there is exactly
4567 one CONST_INT, and the sort will have ensured that it is last
4568 in the array and that any other constant will be next-to-last. */
4570 if (n_ops > 1
4571 && CONST_INT_P (ops[n_ops - 1].op)
4572 && CONSTANT_P (ops[n_ops - 2].op))
4574 rtx value = ops[n_ops - 1].op;
4575 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
4576 value = neg_const_int (mode, value);
4577 if (CONST_INT_P (value))
4579 ops[n_ops - 2].op = plus_constant (mode, ops[n_ops - 2].op,
4580 INTVAL (value));
4581 n_ops--;
4585 /* Put a non-negated operand first, if possible. */
4587 for (i = 0; i < n_ops && ops[i].neg; i++)
4588 continue;
4589 if (i == n_ops)
4590 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
4591 else if (i != 0)
4593 tem = ops[0].op;
4594 ops[0] = ops[i];
4595 ops[i].op = tem;
4596 ops[i].neg = 1;
4599 /* Now make the result by performing the requested operations. */
4600 gen_result:
4601 result = ops[0].op;
4602 for (i = 1; i < n_ops; i++)
4603 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
4604 mode, result, ops[i].op);
4606 return result;
4609 /* Check whether an operand is suitable for calling simplify_plus_minus. */
4610 static bool
4611 plus_minus_operand_p (const_rtx x)
4613 return GET_CODE (x) == PLUS
4614 || GET_CODE (x) == MINUS
4615 || (GET_CODE (x) == CONST
4616 && GET_CODE (XEXP (x, 0)) == PLUS
4617 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
4618 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
4621 /* Like simplify_binary_operation except used for relational operators.
4622 MODE is the mode of the result. If MODE is VOIDmode, the operands must
4623 not both be VOIDmode as well.
4625 CMP_MODE specifies the mode in which the comparison is done, so it is
4626 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
4627 the operands or, if both are VOIDmode, the operands are compared in
4628 "infinite precision". */
4630 simplify_relational_operation (enum rtx_code code, machine_mode mode,
4631 machine_mode cmp_mode, rtx op0, rtx op1)
4633 rtx tem, trueop0, trueop1;
4635 if (cmp_mode == VOIDmode)
4636 cmp_mode = GET_MODE (op0);
4637 if (cmp_mode == VOIDmode)
4638 cmp_mode = GET_MODE (op1);
4640 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
4641 if (tem)
4643 if (SCALAR_FLOAT_MODE_P (mode))
4645 if (tem == const0_rtx)
4646 return CONST0_RTX (mode);
4647 #ifdef FLOAT_STORE_FLAG_VALUE
4649 REAL_VALUE_TYPE val;
4650 val = FLOAT_STORE_FLAG_VALUE (mode);
4651 return const_double_from_real_value (val, mode);
4653 #else
4654 return NULL_RTX;
4655 #endif
4657 if (VECTOR_MODE_P (mode))
4659 if (tem == const0_rtx)
4660 return CONST0_RTX (mode);
4661 #ifdef VECTOR_STORE_FLAG_VALUE
4663 int i, units;
4664 rtvec v;
4666 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
4667 if (val == NULL_RTX)
4668 return NULL_RTX;
4669 if (val == const1_rtx)
4670 return CONST1_RTX (mode);
4672 units = GET_MODE_NUNITS (mode);
4673 v = rtvec_alloc (units);
4674 for (i = 0; i < units; i++)
4675 RTVEC_ELT (v, i) = val;
4676 return gen_rtx_raw_CONST_VECTOR (mode, v);
4678 #else
4679 return NULL_RTX;
4680 #endif
4683 return tem;
4686 /* For the following tests, ensure const0_rtx is op1. */
4687 if (swap_commutative_operands_p (op0, op1)
4688 || (op0 == const0_rtx && op1 != const0_rtx))
4689 std::swap (op0, op1), code = swap_condition (code);
4691 /* If op0 is a compare, extract the comparison arguments from it. */
4692 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4693 return simplify_gen_relational (code, mode, VOIDmode,
4694 XEXP (op0, 0), XEXP (op0, 1));
4696 if (GET_MODE_CLASS (cmp_mode) == MODE_CC
4697 || CC0_P (op0))
4698 return NULL_RTX;
4700 trueop0 = avoid_constant_pool_reference (op0);
4701 trueop1 = avoid_constant_pool_reference (op1);
4702 return simplify_relational_operation_1 (code, mode, cmp_mode,
4703 trueop0, trueop1);
4706 /* This part of simplify_relational_operation is only used when CMP_MODE
4707 is not in class MODE_CC (i.e. it is a real comparison).
4709 MODE is the mode of the result, while CMP_MODE specifies the mode
4710 in which the comparison is done, so it is the mode of the operands. */
4712 static rtx
4713 simplify_relational_operation_1 (enum rtx_code code, machine_mode mode,
4714 machine_mode cmp_mode, rtx op0, rtx op1)
4716 enum rtx_code op0code = GET_CODE (op0);
4718 if (op1 == const0_rtx && COMPARISON_P (op0))
4720 /* If op0 is a comparison, extract the comparison arguments
4721 from it. */
4722 if (code == NE)
4724 if (GET_MODE (op0) == mode)
4725 return simplify_rtx (op0);
4726 else
4727 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
4728 XEXP (op0, 0), XEXP (op0, 1));
4730 else if (code == EQ)
4732 enum rtx_code new_code = reversed_comparison_code (op0, NULL);
4733 if (new_code != UNKNOWN)
4734 return simplify_gen_relational (new_code, mode, VOIDmode,
4735 XEXP (op0, 0), XEXP (op0, 1));
4739 /* (LTU/GEU (PLUS a C) C), where C is a constant, can be simplified to
4740 (GEU/LTU a -C). Likewise for (LTU/GEU (PLUS a C) a). */
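/* E.g. (ltu (plus x (const_int 4)) (const_int 4)), an unsigned overflow
   test, becomes (geu x (const_int -4)). */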
4741 if ((code == LTU || code == GEU)
4742 && GET_CODE (op0) == PLUS
4743 && CONST_INT_P (XEXP (op0, 1))
4744 && (rtx_equal_p (op1, XEXP (op0, 0))
4745 || rtx_equal_p (op1, XEXP (op0, 1)))
4746 /* (LTU/GEU (PLUS a 0) 0) is not the same as (GEU/LTU a 0). */
4747 && XEXP (op0, 1) != const0_rtx)
4749 rtx new_cmp
4750 = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
4751 return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
4752 cmp_mode, XEXP (op0, 0), new_cmp);
4755 /* (GTU (PLUS a C) (C - 1)) where C is a non-zero constant can be
4756 transformed into (LTU a -C). */
4757 if (code == GTU && GET_CODE (op0) == PLUS && CONST_INT_P (op1)
4758 && CONST_INT_P (XEXP (op0, 1))
4759 && (UINTVAL (op1) == UINTVAL (XEXP (op0, 1)) - 1)
4760 && XEXP (op0, 1) != const0_rtx)
4762 rtx new_cmp
4763 = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
4764 return simplify_gen_relational (LTU, mode, cmp_mode,
4765 XEXP (op0, 0), new_cmp);
4768 /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a). */
4769 if ((code == LTU || code == GEU)
4770 && GET_CODE (op0) == PLUS
4771 && rtx_equal_p (op1, XEXP (op0, 1))
4772 /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b). */
4773 && !rtx_equal_p (op1, XEXP (op0, 0)))
4774 return simplify_gen_relational (code, mode, cmp_mode, op0,
4775 copy_rtx (XEXP (op0, 0)));
4777 if (op1 == const0_rtx)
4779 /* Canonicalize (GTU x 0) as (NE x 0). */
4780 if (code == GTU)
4781 return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
4782 /* Canonicalize (LEU x 0) as (EQ x 0). */
4783 if (code == LEU)
4784 return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
4786 else if (op1 == const1_rtx)
4788 switch (code)
4790 case GE:
4791 /* Canonicalize (GE x 1) as (GT x 0). */
4792 return simplify_gen_relational (GT, mode, cmp_mode,
4793 op0, const0_rtx);
4794 case GEU:
4795 /* Canonicalize (GEU x 1) as (NE x 0). */
4796 return simplify_gen_relational (NE, mode, cmp_mode,
4797 op0, const0_rtx);
4798 case LT:
4799 /* Canonicalize (LT x 1) as (LE x 0). */
4800 return simplify_gen_relational (LE, mode, cmp_mode,
4801 op0, const0_rtx);
4802 case LTU:
4803 /* Canonicalize (LTU x 1) as (EQ x 0). */
4804 return simplify_gen_relational (EQ, mode, cmp_mode,
4805 op0, const0_rtx);
4806 default:
4807 break;
4810 else if (op1 == constm1_rtx)
4812 /* Canonicalize (LE x -1) as (LT x 0). */
4813 if (code == LE)
4814 return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
4815 /* Canonicalize (GT x -1) as (GE x 0). */
4816 if (code == GT)
4817 return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
4820 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
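/* E.g. (eq (plus x (const_int 3)) (const_int 7)) becomes
   (eq x (const_int 4)). */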
4821 if ((code == EQ || code == NE)
4822 && (op0code == PLUS || op0code == MINUS)
4823 && CONSTANT_P (op1)
4824 && CONSTANT_P (XEXP (op0, 1))
4825 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
4827 rtx x = XEXP (op0, 0);
4828 rtx c = XEXP (op0, 1);
4829 enum rtx_code invcode = op0code == PLUS ? MINUS : PLUS;
4830 rtx tem = simplify_gen_binary (invcode, cmp_mode, op1, c);
4832 /* Detect an infinitely recursive condition, where we oscillate at this
4833 simplification case between:
4834 A + B == C <---> C - B == A,
4835 where A, B, and C are all constants with non-simplifiable expressions,
4836 usually SYMBOL_REFs. */
4837 if (GET_CODE (tem) == invcode
4838 && CONSTANT_P (x)
4839 && rtx_equal_p (c, XEXP (tem, 1)))
4840 return NULL_RTX;
4842 return simplify_gen_relational (code, mode, cmp_mode, x, tem);
4845 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0)) is
4846 the same as (zero_extract:SI FOO (const_int 1) BAR). */
4847 scalar_int_mode int_mode, int_cmp_mode;
4848 if (code == NE
4849 && op1 == const0_rtx
4850 && is_int_mode (mode, &int_mode)
4851 && is_a <scalar_int_mode> (cmp_mode, &int_cmp_mode)
4852 /* ??? Work-around BImode bugs in the ia64 backend. */
4853 && int_mode != BImode
4854 && int_cmp_mode != BImode
4855 && nonzero_bits (op0, int_cmp_mode) == 1
4856 && STORE_FLAG_VALUE == 1)
4857 return GET_MODE_SIZE (int_mode) > GET_MODE_SIZE (int_cmp_mode)
4858 ? simplify_gen_unary (ZERO_EXTEND, int_mode, op0, int_cmp_mode)
4859 : lowpart_subreg (int_mode, op0, int_cmp_mode);
4861 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
4862 if ((code == EQ || code == NE)
4863 && op1 == const0_rtx
4864 && op0code == XOR)
4865 return simplify_gen_relational (code, mode, cmp_mode,
4866 XEXP (op0, 0), XEXP (op0, 1));
4868 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
4869 if ((code == EQ || code == NE)
4870 && op0code == XOR
4871 && rtx_equal_p (XEXP (op0, 0), op1)
4872 && !side_effects_p (XEXP (op0, 0)))
4873 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 1),
4874 CONST0_RTX (mode));
4876 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
4877 if ((code == EQ || code == NE)
4878 && op0code == XOR
4879 && rtx_equal_p (XEXP (op0, 1), op1)
4880 && !side_effects_p (XEXP (op0, 1)))
4881 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4882 CONST0_RTX (mode));
4884 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
4885 if ((code == EQ || code == NE)
4886 && op0code == XOR
4887 && CONST_SCALAR_INT_P (op1)
4888 && CONST_SCALAR_INT_P (XEXP (op0, 1)))
4889 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4890 simplify_gen_binary (XOR, cmp_mode,
4891 XEXP (op0, 1), op1));
4893 /* (eq/ne (and x y) x) simplifies to (eq/ne (and (not y) x) 0), which
4894 can be implemented with a BICS instruction on some targets, or
4895 constant-folded if y is a constant. */
4896 if ((code == EQ || code == NE)
4897 && op0code == AND
4898 && rtx_equal_p (XEXP (op0, 0), op1)
4899 && !side_effects_p (op1)
4900 && op1 != CONST0_RTX (cmp_mode))
4902 rtx not_y = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 1), cmp_mode);
4903 rtx lhs = simplify_gen_binary (AND, cmp_mode, not_y, XEXP (op0, 0));
4905 return simplify_gen_relational (code, mode, cmp_mode, lhs,
4906 CONST0_RTX (cmp_mode));
4909 /* Likewise for (eq/ne (and x y) y). */
4910 if ((code == EQ || code == NE)
4911 && op0code == AND
4912 && rtx_equal_p (XEXP (op0, 1), op1)
4913 && !side_effects_p (op1)
4914 && op1 != CONST0_RTX (cmp_mode))
4916 rtx not_x = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 0), cmp_mode);
4917 rtx lhs = simplify_gen_binary (AND, cmp_mode, not_x, XEXP (op0, 1));
4919 return simplify_gen_relational (code, mode, cmp_mode, lhs,
4920 CONST0_RTX (cmp_mode));
4923 /* (eq/ne (bswap x) C1) simplifies to (eq/ne x C2) with C2 swapped. */
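/* E.g. in SImode, (eq (bswap:SI x) (const_int 0x12345678)) becomes
   (eq x (const_int 0x78563412)). */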
4924 if ((code == EQ || code == NE)
4925 && GET_CODE (op0) == BSWAP
4926 && CONST_SCALAR_INT_P (op1))
4927 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4928 simplify_gen_unary (BSWAP, cmp_mode,
4929 op1, cmp_mode));
4931 /* (eq/ne (bswap x) (bswap y)) simplifies to (eq/ne x y). */
4932 if ((code == EQ || code == NE)
4933 && GET_CODE (op0) == BSWAP
4934 && GET_CODE (op1) == BSWAP)
4935 return simplify_gen_relational (code, mode, cmp_mode,
4936 XEXP (op0, 0), XEXP (op1, 0));
4938 if (op0code == POPCOUNT && op1 == const0_rtx)
4939 switch (code)
4941 case EQ:
4942 case LE:
4943 case LEU:
4944 /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */
4945 return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
4946 XEXP (op0, 0), const0_rtx);
4948 case NE:
4949 case GT:
4950 case GTU:
4951 /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */
4952 return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
4953 XEXP (op0, 0), const0_rtx);
4955 default:
4956 break;
4959 return NULL_RTX;
4962 enum
4964 CMP_EQ = 1,
4965 CMP_LT = 2,
4966 CMP_GT = 4,
4967 CMP_LTU = 8,
4968 CMP_GTU = 16
4972 /* Convert the known results for EQ, LT, GT, LTU, GTU contained in
4973 KNOWN_RESULTS to a CONST_INT, based on the requested comparison CODE.
4974 For KNOWN_RESULTS to make sense it should be either CMP_EQ, or the
4975 logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
4976 For floating-point comparisons, assume that the operands were ordered. */
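/* E.g. if KNOWN_RESULTS is just CMP_EQ, then EQ and GE map to
   const_true_rtx while NE and LT map to const0_rtx. */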
4978 static rtx
4979 comparison_result (enum rtx_code code, int known_results)
4981 switch (code)
4983 case EQ:
4984 case UNEQ:
4985 return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
4986 case NE:
4987 case LTGT:
4988 return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;
4990 case LT:
4991 case UNLT:
4992 return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
4993 case GE:
4994 case UNGE:
4995 return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;
4997 case GT:
4998 case UNGT:
4999 return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
5000 case LE:
5001 case UNLE:
5002 return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;
5004 case LTU:
5005 return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
5006 case GEU:
5007 return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;
5009 case GTU:
5010 return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
5011 case LEU:
5012 return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;
5014 case ORDERED:
5015 return const_true_rtx;
5016 case UNORDERED:
5017 return const0_rtx;
5018 default:
5019 gcc_unreachable ();
5023 /* Check if the given comparison (done in the given MODE) is actually
5024 a tautology or a contradiction. If the mode is VOIDmode, the
5025 comparison is done in "infinite precision". If no simplification
5026 is possible, this function returns zero. Otherwise, it returns
5027 either const_true_rtx or const0_rtx. */
5030 simplify_const_relational_operation (enum rtx_code code,
5031 machine_mode mode,
5032 rtx op0, rtx op1)
5034 rtx tem;
5035 rtx trueop0;
5036 rtx trueop1;
5038 gcc_assert (mode != VOIDmode
5039 || (GET_MODE (op0) == VOIDmode
5040 && GET_MODE (op1) == VOIDmode));
5042 /* If op0 is a compare, extract the comparison arguments from it. */
5043 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
5045 op1 = XEXP (op0, 1);
5046 op0 = XEXP (op0, 0);
5048 if (GET_MODE (op0) != VOIDmode)
5049 mode = GET_MODE (op0);
5050 else if (GET_MODE (op1) != VOIDmode)
5051 mode = GET_MODE (op1);
5052 else
5053 return 0;
5056 /* We can't simplify MODE_CC values since we don't know what the
5057 actual comparison is. */
5058 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
5059 return 0;
5061 /* Make sure the constant is second. */
5062 if (swap_commutative_operands_p (op0, op1))
5064 std::swap (op0, op1);
5065 code = swap_condition (code);
5068 trueop0 = avoid_constant_pool_reference (op0);
5069 trueop1 = avoid_constant_pool_reference (op1);
5071 /* For integer comparisons of A and B maybe we can simplify A - B and can
5072 then simplify a comparison of that with zero. If A and B are both either
5073 a register or a CONST_INT, this can't help; testing for these cases will
5074 prevent infinite recursion here and speed things up.
5076 We can only do this for EQ and NE comparisons, as otherwise we may
5077 lose or introduce overflow that we cannot disregard as undefined,
5078 since we do not know the signedness of the operation on either the
5079 left or the right hand side of the comparison. */
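/* E.g. (eq (plus x (const_int 1)) x): the difference simplifies to
   (const_int 1), which is nonzero, so the whole comparison can fold
   to const0_rtx. */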
5081 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
5082 && (code == EQ || code == NE)
5083 && ! ((REG_P (op0) || CONST_INT_P (trueop0))
5084 && (REG_P (op1) || CONST_INT_P (trueop1)))
5085 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
5086 /* We cannot do this if tem is a nonzero address. */
5087 && ! nonzero_address_p (tem))
5088 return simplify_const_relational_operation (signed_condition (code),
5089 mode, tem, const0_rtx);
5091 if (! HONOR_NANS (mode) && code == ORDERED)
5092 return const_true_rtx;
5094 if (! HONOR_NANS (mode) && code == UNORDERED)
5095 return const0_rtx;
5097 /* For modes without NaNs, if the two operands are equal, we know the
5098 result except if they have side-effects. Even with NaNs we know
5099 the result of unordered comparisons and, if signaling NaNs are
5100 irrelevant, also the result of LT/GT/LTGT. */
5101 if ((! HONOR_NANS (trueop0)
5102 || code == UNEQ || code == UNLE || code == UNGE
5103 || ((code == LT || code == GT || code == LTGT)
5104 && ! HONOR_SNANS (trueop0)))
5105 && rtx_equal_p (trueop0, trueop1)
5106 && ! side_effects_p (trueop0))
5107 return comparison_result (code, CMP_EQ);
5109 /* If the operands are floating-point constants, see if we can fold
5110 the result. */
5111 if (CONST_DOUBLE_AS_FLOAT_P (trueop0)
5112 && CONST_DOUBLE_AS_FLOAT_P (trueop1)
5113 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
5115 const REAL_VALUE_TYPE *d0 = CONST_DOUBLE_REAL_VALUE (trueop0);
5116 const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);
5118 /* Comparisons are unordered iff at least one of the values is NaN. */
5119 if (REAL_VALUE_ISNAN (*d0) || REAL_VALUE_ISNAN (*d1))
5120 switch (code)
5122 case UNEQ:
5123 case UNLT:
5124 case UNGT:
5125 case UNLE:
5126 case UNGE:
5127 case NE:
5128 case UNORDERED:
5129 return const_true_rtx;
5130 case EQ:
5131 case LT:
5132 case GT:
5133 case LE:
5134 case GE:
5135 case LTGT:
5136 case ORDERED:
5137 return const0_rtx;
5138 default:
5139 return 0;
5142 return comparison_result (code,
5143 (real_equal (d0, d1) ? CMP_EQ :
5144 real_less (d0, d1) ? CMP_LT : CMP_GT));
5147 /* Otherwise, see if the operands are both integers. */
5148 if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
5149 && CONST_SCALAR_INT_P (trueop0) && CONST_SCALAR_INT_P (trueop1))
5151 /* It would be nice if we really had a mode here. However, the
5152 largest int representable on the target is as good as
5153 infinite. */
5154 machine_mode cmode = (mode == VOIDmode) ? MAX_MODE_INT : mode;
5155 rtx_mode_t ptrueop0 = rtx_mode_t (trueop0, cmode);
5156 rtx_mode_t ptrueop1 = rtx_mode_t (trueop1, cmode);
5158 if (wi::eq_p (ptrueop0, ptrueop1))
5159 return comparison_result (code, CMP_EQ);
5160 else
5162 int cr = wi::lts_p (ptrueop0, ptrueop1) ? CMP_LT : CMP_GT;
5163 cr |= wi::ltu_p (ptrueop0, ptrueop1) ? CMP_LTU : CMP_GTU;
5164 return comparison_result (code, cr);
5168 /* Optimize comparisons with upper and lower bounds. */
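/* E.g. if nonzero_bits shows that x is known to lie in [0, 15], then
   (gtu x (const_int 15)) folds to const0_rtx and (leu x (const_int 15))
   folds to const_true_rtx. */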
5169 scalar_int_mode int_mode;
5170 if (CONST_INT_P (trueop1)
5171 && is_a <scalar_int_mode> (mode, &int_mode)
5172 && HWI_COMPUTABLE_MODE_P (int_mode)
5173 && !side_effects_p (trueop0))
5175 int sign;
5176 unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, int_mode);
5177 HOST_WIDE_INT val = INTVAL (trueop1);
5178 HOST_WIDE_INT mmin, mmax;
5180 if (code == GEU
5181 || code == LEU
5182 || code == GTU
5183 || code == LTU)
5184 sign = 0;
5185 else
5186 sign = 1;
5188 /* Get a reduced range if the sign bit is zero. */
5189 if (nonzero <= (GET_MODE_MASK (int_mode) >> 1))
5191 mmin = 0;
5192 mmax = nonzero;
5194 else
5196 rtx mmin_rtx, mmax_rtx;
5197 get_mode_bounds (int_mode, sign, int_mode, &mmin_rtx, &mmax_rtx);
5199 mmin = INTVAL (mmin_rtx);
5200 mmax = INTVAL (mmax_rtx);
5201 if (sign)
5203 unsigned int sign_copies
5204 = num_sign_bit_copies (trueop0, int_mode);
5206 mmin >>= (sign_copies - 1);
5207 mmax >>= (sign_copies - 1);
5211 switch (code)
5213 /* x >= y is always true for y <= mmin, always false for y > mmax. */
5214 case GEU:
5215 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
5216 return const_true_rtx;
5217 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
5218 return const0_rtx;
5219 break;
5220 case GE:
5221 if (val <= mmin)
5222 return const_true_rtx;
5223 if (val > mmax)
5224 return const0_rtx;
5225 break;
5227 /* x <= y is always true for y >= mmax, always false for y < mmin. */
5228 case LEU:
5229 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
5230 return const_true_rtx;
5231 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
5232 return const0_rtx;
5233 break;
5234 case LE:
5235 if (val >= mmax)
5236 return const_true_rtx;
5237 if (val < mmin)
5238 return const0_rtx;
5239 break;
5241 case EQ:
5242 /* x == y is always false for y out of range. */
5243 if (val < mmin || val > mmax)
5244 return const0_rtx;
5245 break;
5247 /* x > y is always false for y >= mmax, always true for y < mmin. */
5248 case GTU:
5249 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
5250 return const0_rtx;
5251 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
5252 return const_true_rtx;
5253 break;
5254 case GT:
5255 if (val >= mmax)
5256 return const0_rtx;
5257 if (val < mmin)
5258 return const_true_rtx;
5259 break;
5261 /* x < y is always false for y <= mmin, always true for y > mmax. */
5262 case LTU:
5263 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
5264 return const0_rtx;
5265 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
5266 return const_true_rtx;
5267 break;
5268 case LT:
5269 if (val <= mmin)
5270 return const0_rtx;
5271 if (val > mmax)
5272 return const_true_rtx;
5273 break;
5275 case NE:
5276 /* x != y is always true for y out of range. */
5277 if (val < mmin || val > mmax)
5278 return const_true_rtx;
5279 break;
5281 default:
5282 break;
5286 /* Optimize integer comparisons with zero. */
5287 if (is_a <scalar_int_mode> (mode, &int_mode)
5288 && trueop1 == const0_rtx
5289 && !side_effects_p (trueop0))
5291 /* Some addresses are known to be nonzero. We don't know
5292 their sign, but equality comparisons are known. */
5293 if (nonzero_address_p (trueop0))
5295 if (code == EQ || code == LEU)
5296 return const0_rtx;
5297 if (code == NE || code == GTU)
5298 return const_true_rtx;
5301 /* See if the first operand is an IOR with a constant. If so, we
5302 may be able to determine the result of this comparison. */
5303 if (GET_CODE (op0) == IOR)
5305 rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
5306 if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
5308 int sign_bitnum = GET_MODE_PRECISION (int_mode) - 1;
5309 int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
5310 && (UINTVAL (inner_const)
5311 & (HOST_WIDE_INT_1U
5312 << sign_bitnum)));
5314 switch (code)
5316 case EQ:
5317 case LEU:
5318 return const0_rtx;
5319 case NE:
5320 case GTU:
5321 return const_true_rtx;
5322 case LT:
5323 case LE:
5324 if (has_sign)
5325 return const_true_rtx;
5326 break;
5327 case GT:
5328 case GE:
5329 if (has_sign)
5330 return const0_rtx;
5331 break;
5332 default:
5333 break;
5339 /* Optimize comparison of ABS with zero. */
5340 if (trueop1 == CONST0_RTX (mode) && !side_effects_p (trueop0)
5341 && (GET_CODE (trueop0) == ABS
5342 || (GET_CODE (trueop0) == FLOAT_EXTEND
5343 && GET_CODE (XEXP (trueop0, 0)) == ABS)))
5345 switch (code)
5347 case LT:
5348 /* Optimize abs(x) < 0.0. */
5349 if (!INTEGRAL_MODE_P (mode) && !HONOR_SNANS (mode))
5350 return const0_rtx;
5351 break;
5353 case GE:
5354 /* Optimize abs(x) >= 0.0. */
5355 if (!INTEGRAL_MODE_P (mode) && !HONOR_NANS (mode))
5356 return const_true_rtx;
5357 break;
5359 case UNGE:
5360 /* Optimize ! (abs(x) < 0.0). */
5361 return const_true_rtx;
5363 default:
5364 break;
5368 return 0;
5371 /* Recognize expressions of the form (X CMP 0) ? VAL : OP (X)
5372 where OP is CLZ or CTZ and VAL is the value from CLZ_DEFINED_VALUE_AT_ZERO
5373 or CTZ_DEFINED_VALUE_AT_ZERO respectively, and return OP (X) if the
5374 expression can be simplified to that, or NULL_RTX if not.
5375 Assume X is compared against zero with CMP_CODE and the true
5376 arm is TRUE_VAL and the false arm is FALSE_VAL. */
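/* E.g. on a hypothetical target where CLZ_DEFINED_VALUE_AT_ZERO yields 32
   for SImode, (if_then_else (eq x (const_int 0)) (const_int 32) (clz:SI x))
   simplifies to just (clz:SI x). */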
5378 static rtx
5379 simplify_cond_clz_ctz (rtx x, rtx_code cmp_code, rtx true_val, rtx false_val)
5381 if (cmp_code != EQ && cmp_code != NE)
5382 return NULL_RTX;
5384 /* Result on X == 0 and X != 0 respectively. */
5385 rtx on_zero, on_nonzero;
5386 if (cmp_code == EQ)
5388 on_zero = true_val;
5389 on_nonzero = false_val;
5391 else
5393 on_zero = false_val;
5394 on_nonzero = true_val;
5397 rtx_code op_code = GET_CODE (on_nonzero);
5398 if ((op_code != CLZ && op_code != CTZ)
5399 || !rtx_equal_p (XEXP (on_nonzero, 0), x)
5400 || !CONST_INT_P (on_zero))
5401 return NULL_RTX;
5403 HOST_WIDE_INT op_val;
5404 scalar_int_mode mode ATTRIBUTE_UNUSED
5405 = as_a <scalar_int_mode> (GET_MODE (XEXP (on_nonzero, 0)));
5406 if (((op_code == CLZ && CLZ_DEFINED_VALUE_AT_ZERO (mode, op_val))
5407 || (op_code == CTZ && CTZ_DEFINED_VALUE_AT_ZERO (mode, op_val)))
5408 && op_val == INTVAL (on_zero))
5409 return on_nonzero;
5411 return NULL_RTX;
5415 /* Simplify CODE, an operation with result mode MODE and three operands,
5416 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
5417 a constant. Return 0 if no simplification is possible. */
5420 simplify_ternary_operation (enum rtx_code code, machine_mode mode,
5421 machine_mode op0_mode, rtx op0, rtx op1,
5422 rtx op2)
5424 bool any_change = false;
5425 rtx tem, trueop2;
5426 scalar_int_mode int_mode, int_op0_mode;
5428 switch (code)
5430 case FMA:
5431 /* Simplify negations around the multiplication. */
5432 /* -a * -b + c => a * b + c. */
5433 if (GET_CODE (op0) == NEG)
5435 tem = simplify_unary_operation (NEG, mode, op1, mode);
5436 if (tem)
5437 op1 = tem, op0 = XEXP (op0, 0), any_change = true;
5439 else if (GET_CODE (op1) == NEG)
5441 tem = simplify_unary_operation (NEG, mode, op0, mode);
5442 if (tem)
5443 op0 = tem, op1 = XEXP (op1, 0), any_change = true;
5446 /* Canonicalize the two multiplication operands. */
5447 /* a * -b + c => -b * a + c. */
5448 if (swap_commutative_operands_p (op0, op1))
5449 std::swap (op0, op1), any_change = true;
5451 if (any_change)
5452 return gen_rtx_FMA (mode, op0, op1, op2);
5453 return NULL_RTX;
5455 case SIGN_EXTRACT:
5456 case ZERO_EXTRACT:
5457 if (CONST_INT_P (op0)
5458 && CONST_INT_P (op1)
5459 && CONST_INT_P (op2)
5460 && is_a <scalar_int_mode> (mode, &int_mode)
5461 && INTVAL (op1) + INTVAL (op2) <= GET_MODE_PRECISION (int_mode)
5462 && HWI_COMPUTABLE_MODE_P (int_mode))
5464 /* Extracting a bit-field from a constant. */
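/* E.g. assuming !BITS_BIG_ENDIAN, extracting 4 bits at position 2 from
   (const_int 0xb4) gives 13 for ZERO_EXTRACT and -3 for SIGN_EXTRACT
   (0xb4 >> 2 is 0b101101; the low four bits are 0b1101). */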
5465 unsigned HOST_WIDE_INT val = UINTVAL (op0);
5466 HOST_WIDE_INT op1val = INTVAL (op1);
5467 HOST_WIDE_INT op2val = INTVAL (op2);
5468 if (!BITS_BIG_ENDIAN)
5469 val >>= op2val;
5470 else if (is_a <scalar_int_mode> (op0_mode, &int_op0_mode))
5471 val >>= GET_MODE_PRECISION (int_op0_mode) - op2val - op1val;
5472 else
5473 /* Not enough information to calculate the bit position. */
5474 break;
5476 if (HOST_BITS_PER_WIDE_INT != op1val)
5478 /* First zero-extend. */
5479 val &= (HOST_WIDE_INT_1U << op1val) - 1;
5480 /* If desired, propagate sign bit. */
5481 if (code == SIGN_EXTRACT
5482 && (val & (HOST_WIDE_INT_1U << (op1val - 1)))
5483 != 0)
5484 val |= ~ ((HOST_WIDE_INT_1U << op1val) - 1);
5487 return gen_int_mode (val, int_mode);
5489 break;
5491 case IF_THEN_ELSE:
5492 if (CONST_INT_P (op0))
5493 return op0 != const0_rtx ? op1 : op2;
5495 /* Convert c ? a : a into "a". */
5496 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
5497 return op1;
5499 /* Convert a != b ? a : b into "a". */
5500 if (GET_CODE (op0) == NE
5501 && ! side_effects_p (op0)
5502 && ! HONOR_NANS (mode)
5503 && ! HONOR_SIGNED_ZEROS (mode)
5504 && ((rtx_equal_p (XEXP (op0, 0), op1)
5505 && rtx_equal_p (XEXP (op0, 1), op2))
5506 || (rtx_equal_p (XEXP (op0, 0), op2)
5507 && rtx_equal_p (XEXP (op0, 1), op1))))
5508 return op1;
5510 /* Convert a == b ? a : b into "b". */
5511 if (GET_CODE (op0) == EQ
5512 && ! side_effects_p (op0)
5513 && ! HONOR_NANS (mode)
5514 && ! HONOR_SIGNED_ZEROS (mode)
5515 && ((rtx_equal_p (XEXP (op0, 0), op1)
5516 && rtx_equal_p (XEXP (op0, 1), op2))
5517 || (rtx_equal_p (XEXP (op0, 0), op2)
5518 && rtx_equal_p (XEXP (op0, 1), op1))))
5519 return op2;
5521 /* Convert (!c) != {0,...,0} ? a : b into
5522 c != {0,...,0} ? b : a for vector modes. */
5523 if (VECTOR_MODE_P (GET_MODE (op1))
5524 && GET_CODE (op0) == NE
5525 && GET_CODE (XEXP (op0, 0)) == NOT
5526 && GET_CODE (XEXP (op0, 1)) == CONST_VECTOR)
5528 rtx cv = XEXP (op0, 1);
5529 int nunits = CONST_VECTOR_NUNITS (cv);
5530 bool ok = true;
5531 for (int i = 0; i < nunits; ++i)
5532 if (CONST_VECTOR_ELT (cv, i) != const0_rtx)
5534 ok = false;
5535 break;
5537 if (ok)
5539 rtx new_op0 = gen_rtx_NE (GET_MODE (op0),
5540 XEXP (XEXP (op0, 0), 0),
5541 XEXP (op0, 1));
5542 rtx retval = gen_rtx_IF_THEN_ELSE (mode, new_op0, op2, op1);
5543 return retval;
5547 /* Convert x == 0 ? N : clz (x) into clz (x) when
5548 CLZ_DEFINED_VALUE_AT_ZERO is defined to N for the mode of x.
5549 Similarly for ctz (x). */
5550 if (COMPARISON_P (op0) && !side_effects_p (op0)
5551 && XEXP (op0, 1) == const0_rtx)
5553 rtx simplified
5554 = simplify_cond_clz_ctz (XEXP (op0, 0), GET_CODE (op0),
5555 op1, op2);
5556 if (simplified)
5557 return simplified;
5560 if (COMPARISON_P (op0) && ! side_effects_p (op0))
5562 machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
5563 ? GET_MODE (XEXP (op0, 1))
5564 : GET_MODE (XEXP (op0, 0)));
5565 rtx temp;
5567 /* Look for happy constants in op1 and op2. */
5568 if (CONST_INT_P (op1) && CONST_INT_P (op2))
5570 HOST_WIDE_INT t = INTVAL (op1);
5571 HOST_WIDE_INT f = INTVAL (op2);
5573 if (t == STORE_FLAG_VALUE && f == 0)
5574 code = GET_CODE (op0);
5575 else if (t == 0 && f == STORE_FLAG_VALUE)
5577 enum rtx_code tmp;
5578 tmp = reversed_comparison_code (op0, NULL);
5579 if (tmp == UNKNOWN)
5580 break;
5581 code = tmp;
5583 else
5584 break;
5586 return simplify_gen_relational (code, mode, cmp_mode,
5587 XEXP (op0, 0), XEXP (op0, 1));
5590 if (cmp_mode == VOIDmode)
5591 cmp_mode = op0_mode;
5592 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
5593 cmp_mode, XEXP (op0, 0),
5594 XEXP (op0, 1));
5596 /* See if any simplifications were possible. */
5597 if (temp)
5599 if (CONST_INT_P (temp))
5600 return temp == const0_rtx ? op2 : op1;
5601 else if (temp)
5602 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
5605 break;
5607 case VEC_MERGE:
5608 gcc_assert (GET_MODE (op0) == mode);
5609 gcc_assert (GET_MODE (op1) == mode);
5610 gcc_assert (VECTOR_MODE_P (mode));
5611 trueop2 = avoid_constant_pool_reference (op2);
5612 if (CONST_INT_P (trueop2))
5614 int elt_size = GET_MODE_UNIT_SIZE (mode);
5615 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
5616 unsigned HOST_WIDE_INT sel = UINTVAL (trueop2);
5617 unsigned HOST_WIDE_INT mask;
5618 if (n_elts == HOST_BITS_PER_WIDE_INT)
5619 mask = -1;
5620 else
5621 mask = (HOST_WIDE_INT_1U << n_elts) - 1;
5623 if (!(sel & mask) && !side_effects_p (op0))
5624 return op1;
5625 if ((sel & mask) == mask && !side_effects_p (op1))
5626 return op0;
5628 rtx trueop0 = avoid_constant_pool_reference (op0);
5629 rtx trueop1 = avoid_constant_pool_reference (op1);
5630 if (GET_CODE (trueop0) == CONST_VECTOR
5631 && GET_CODE (trueop1) == CONST_VECTOR)
5633 rtvec v = rtvec_alloc (n_elts);
5634 unsigned int i;
5636 for (i = 0; i < n_elts; i++)
5637 RTVEC_ELT (v, i) = ((sel & (HOST_WIDE_INT_1U << i))
5638 ? CONST_VECTOR_ELT (trueop0, i)
5639 : CONST_VECTOR_ELT (trueop1, i));
5640 return gen_rtx_CONST_VECTOR (mode, v);
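/* Illustrative V4SI example (so n_elts == 4 and mask == 0xf): with
   sel == 5 (binary 0101), elements 0 and 2 come from op0 and elements
   1 and 3 from op1, so
     (vec_merge:V4SI (const_vector [a0 a1 a2 a3])
                     (const_vector [b0 b1 b2 b3])
                     (const_int 5))
   folds to (const_vector [a0 b1 a2 b3]).  Selectors of 0 and 0xf reduce
   to op1 and op0 respectively, as checked just above.  */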
5643 /* Replace (vec_merge (vec_merge a b m) c n) with (vec_merge b c n)
5644 if no element from a appears in the result. */
5645 if (GET_CODE (op0) == VEC_MERGE)
5647 tem = avoid_constant_pool_reference (XEXP (op0, 2));
5648 if (CONST_INT_P (tem))
5650 unsigned HOST_WIDE_INT sel0 = UINTVAL (tem);
5651 if (!(sel & sel0 & mask) && !side_effects_p (XEXP (op0, 0)))
5652 return simplify_gen_ternary (code, mode, mode,
5653 XEXP (op0, 1), op1, op2);
5654 if (!(sel & ~sel0 & mask) && !side_effects_p (XEXP (op0, 1)))
5655 return simplify_gen_ternary (code, mode, mode,
5656 XEXP (op0, 0), op1, op2);
5659 if (GET_CODE (op1) == VEC_MERGE)
5661 tem = avoid_constant_pool_reference (XEXP (op1, 2));
5662 if (CONST_INT_P (tem))
5664 unsigned HOST_WIDE_INT sel1 = UINTVAL (tem);
5665 if (!(~sel & sel1 & mask) && !side_effects_p (XEXP (op1, 0)))
5666 return simplify_gen_ternary (code, mode, mode,
5667 op0, XEXP (op1, 1), op2);
5668 if (!(~sel & ~sel1 & mask) && !side_effects_p (XEXP (op1, 1)))
5669 return simplify_gen_ternary (code, mode, mode,
5670 op0, XEXP (op1, 0), op2);
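/* Reasoning sketch for the nested cases above: in
   (vec_merge (vec_merge a b m) c n), element i comes from the inner merge
   only when bit i of n is set, and from A only when bit i of m is also
   set; so if (n & m & mask) == 0 no element of A survives and A can be
   dropped.  The remaining tests apply the same reasoning to B and to an
   inner merge appearing as the second operand.  */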
5674 /* Replace (vec_merge (vec_duplicate (vec_select a parallel (i))) a 1 << i)
5675 with a. */
5676 if (GET_CODE (op0) == VEC_DUPLICATE
5677 && GET_CODE (XEXP (op0, 0)) == VEC_SELECT
5678 && GET_CODE (XEXP (XEXP (op0, 0), 1)) == PARALLEL
5679 && mode_nunits[GET_MODE (XEXP (op0, 0))] == 1)
5681 tem = XVECEXP ((XEXP (XEXP (op0, 0), 1)), 0, 0);
5682 if (CONST_INT_P (tem) && CONST_INT_P (op2))
5684 if (XEXP (XEXP (op0, 0), 0) == op1
5685 && UINTVAL (op2) == HOST_WIDE_INT_1U << UINTVAL (tem))
5686 return op1;
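/* Concretely (an assumed V4SI example): re-inserting element 2 of a vector
   into its own slot 2 is a no-op, so
     (vec_merge:V4SI (vec_duplicate:V4SI
                       (vec_select:SI (reg:V4SI 100)
                                      (parallel [(const_int 2)])))
                     (reg:V4SI 100)
                     (const_int 4))
   simplifies to (reg:V4SI 100), since 4 == 1 << 2.  */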
5691 if (rtx_equal_p (op0, op1)
5692 && !side_effects_p (op2) && !side_effects_p (op1))
5693 return op0;
5695 break;
5697 default:
5698 gcc_unreachable ();
5701 return 0;
5704 /* Evaluate a SUBREG of a CONST_INT or CONST_WIDE_INT or CONST_DOUBLE
5705 or CONST_FIXED or CONST_VECTOR, returning another CONST_INT or
5706 CONST_WIDE_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.
5708 Works by unpacking OP into a collection of 8-bit values
5709 represented as a little-endian array of 'unsigned char', selecting by BYTE,
5710 and then repacking them again for OUTERMODE. */
5712 static rtx
5713 simplify_immed_subreg (machine_mode outermode, rtx op,
5714 machine_mode innermode, unsigned int byte)
5716 enum {
5717 value_bit = 8,
5718 value_mask = (1 << value_bit) - 1
5720 unsigned char value[MAX_BITSIZE_MODE_ANY_MODE / value_bit];
5721 int value_start;
5722 int i;
5723 int elem;
5725 int num_elem;
5726 rtx * elems;
5727 int elem_bitsize;
5728 rtx result_s = NULL;
5729 rtvec result_v = NULL;
5730 enum mode_class outer_class;
5731 scalar_mode outer_submode;
5732 int max_bitsize;
5734 /* Some ports misuse CCmode. */
5735 if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
5736 return op;
5738 /* We have no way to represent a complex constant at the rtl level. */
5739 if (COMPLEX_MODE_P (outermode))
5740 return NULL_RTX;
5742 /* We support any size mode. */
5743 max_bitsize = MAX (GET_MODE_BITSIZE (outermode),
5744 GET_MODE_BITSIZE (innermode));
5746 /* Unpack the value. */
5748 if (GET_CODE (op) == CONST_VECTOR)
5750 num_elem = CONST_VECTOR_NUNITS (op);
5751 elems = &CONST_VECTOR_ELT (op, 0);
5752 elem_bitsize = GET_MODE_UNIT_BITSIZE (innermode);
5754 else
5756 num_elem = 1;
5757 elems = &op;
5758 elem_bitsize = max_bitsize;
5760 /* If this asserts, it is too complicated; reducing value_bit may help. */
5761 gcc_assert (BITS_PER_UNIT % value_bit == 0);
5762 /* I don't know how to handle endianness of sub-units. */
5763 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
5765 for (elem = 0; elem < num_elem; elem++)
5767 unsigned char * vp;
5768 rtx el = elems[elem];
5770 /* Vectors are kept in target memory order. (This is probably
5771 a mistake.) */
5773 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5774 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5775 / BITS_PER_UNIT);
5776 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5777 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5778 unsigned bytele = (subword_byte % UNITS_PER_WORD
5779 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5780 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
5783 switch (GET_CODE (el))
5785 case CONST_INT:
5786 for (i = 0;
5787 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5788 i += value_bit)
5789 *vp++ = INTVAL (el) >> i;
5790 /* CONST_INTs are always logically sign-extended. */
5791 for (; i < elem_bitsize; i += value_bit)
5792 *vp++ = INTVAL (el) < 0 ? -1 : 0;
5793 break;
5795 case CONST_WIDE_INT:
5797 rtx_mode_t val = rtx_mode_t (el, innermode);
5798 unsigned char extend = wi::sign_mask (val);
5799 int prec = wi::get_precision (val);
5801 for (i = 0; i < prec && i < elem_bitsize; i += value_bit)
5802 *vp++ = wi::extract_uhwi (val, i, value_bit);
5803 for (; i < elem_bitsize; i += value_bit)
5804 *vp++ = extend;
5806 break;
5808 case CONST_DOUBLE:
5809 if (TARGET_SUPPORTS_WIDE_INT == 0 && GET_MODE (el) == VOIDmode)
5811 unsigned char extend = 0;
5812 /* If this triggers, someone should have generated a
5813 CONST_INT instead. */
5814 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
5816 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5817 *vp++ = CONST_DOUBLE_LOW (el) >> i;
5818 while (i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize)
5820 *vp++
5821 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
5822 i += value_bit;
5825 if (CONST_DOUBLE_HIGH (el) >> (HOST_BITS_PER_WIDE_INT - 1))
5826 extend = -1;
5827 for (; i < elem_bitsize; i += value_bit)
5828 *vp++ = extend;
5830 else
5832 /* This is big enough for anything on the platform. */
5833 long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32];
5834 scalar_float_mode el_mode;
5836 el_mode = as_a <scalar_float_mode> (GET_MODE (el));
5837 int bitsize = GET_MODE_BITSIZE (el_mode);
5839 gcc_assert (bitsize <= elem_bitsize);
5840 gcc_assert (bitsize % value_bit == 0);
5842 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
5843 GET_MODE (el));
5845 /* real_to_target produces its result in words affected by
5846 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5847 and use WORDS_BIG_ENDIAN instead; see the documentation
5848 of SUBREG in rtl.texi. */
5849 for (i = 0; i < bitsize; i += value_bit)
5851 int ibase;
5852 if (WORDS_BIG_ENDIAN)
5853 ibase = bitsize - 1 - i;
5854 else
5855 ibase = i;
5856 *vp++ = tmp[ibase / 32] >> i % 32;
5859 /* It shouldn't matter what's done here, so fill it with
5860 zero. */
5861 for (; i < elem_bitsize; i += value_bit)
5862 *vp++ = 0;
5864 break;
5866 case CONST_FIXED:
5867 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
5869 for (i = 0; i < elem_bitsize; i += value_bit)
5870 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5872 else
5874 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5875 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5876 for (; i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize;
5877 i += value_bit)
5878 *vp++ = CONST_FIXED_VALUE_HIGH (el)
5879 >> (i - HOST_BITS_PER_WIDE_INT);
5880 for (; i < elem_bitsize; i += value_bit)
5881 *vp++ = 0;
5883 break;
5885 default:
5886 gcc_unreachable ();
5890 /* Now, pick the right byte to start with. */
5891 /* Renumber BYTE so that the least-significant byte is byte 0. A special
5892 case is paradoxical SUBREGs, which shouldn't be adjusted since they
5893 will already have offset 0. */
5894 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
5896 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
5897 - byte);
5898 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5899 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5900 byte = (subword_byte % UNITS_PER_WORD
5901 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5904 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
5905 so if it's become negative it will instead be very large.) */
5906 gcc_assert (byte < GET_MODE_SIZE (innermode));
5908 /* Convert from bytes to chunks of size value_bit. */
5909 value_start = byte * (BITS_PER_UNIT / value_bit);
5911 /* Re-pack the value. */
5912 num_elem = GET_MODE_NUNITS (outermode);
5914 if (VECTOR_MODE_P (outermode))
5916 result_v = rtvec_alloc (num_elem);
5917 elems = &RTVEC_ELT (result_v, 0);
5919 else
5920 elems = &result_s;
5922 outer_submode = GET_MODE_INNER (outermode);
5923 outer_class = GET_MODE_CLASS (outer_submode);
5924 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
5926 gcc_assert (elem_bitsize % value_bit == 0);
5927 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
5929 for (elem = 0; elem < num_elem; elem++)
5931 unsigned char *vp;
5933 /* Vectors are stored in target memory order. (This is probably
5934 a mistake.) */
5936 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5937 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5938 / BITS_PER_UNIT);
5939 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5940 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5941 unsigned bytele = (subword_byte % UNITS_PER_WORD
5942 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5943 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
5946 switch (outer_class)
5948 case MODE_INT:
5949 case MODE_PARTIAL_INT:
5951 int u;
5952 int base = 0;
5953 int units
5954 = (GET_MODE_BITSIZE (outer_submode) + HOST_BITS_PER_WIDE_INT - 1)
5955 / HOST_BITS_PER_WIDE_INT;
5956 HOST_WIDE_INT tmp[MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_WIDE_INT];
5957 wide_int r;
5959 if (GET_MODE_PRECISION (outer_submode) > MAX_BITSIZE_MODE_ANY_INT)
5960 return NULL_RTX;
5961 for (u = 0; u < units; u++)
5963 unsigned HOST_WIDE_INT buf = 0;
5964 for (i = 0;
5965 i < HOST_BITS_PER_WIDE_INT && base + i < elem_bitsize;
5966 i += value_bit)
5967 buf |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5969 tmp[u] = buf;
5970 base += HOST_BITS_PER_WIDE_INT;
5972 r = wide_int::from_array (tmp, units,
5973 GET_MODE_PRECISION (outer_submode));
5974 #if TARGET_SUPPORTS_WIDE_INT == 0
5975 /* Make sure r will fit into CONST_INT or CONST_DOUBLE. */
5976 if (wi::min_precision (r, SIGNED) > HOST_BITS_PER_DOUBLE_INT)
5977 return NULL_RTX;
5978 #endif
5979 elems[elem] = immed_wide_int_const (r, outer_submode);
5981 break;
5983 case MODE_FLOAT:
5984 case MODE_DECIMAL_FLOAT:
5986 REAL_VALUE_TYPE r;
5987 long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32] = { 0 };
5989 /* real_from_target wants its input in words affected by
5990 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5991 and use WORDS_BIG_ENDIAN instead; see the documentation
5992 of SUBREG in rtl.texi. */
5993 for (i = 0; i < elem_bitsize; i += value_bit)
5995 int ibase;
5996 if (WORDS_BIG_ENDIAN)
5997 ibase = elem_bitsize - 1 - i;
5998 else
5999 ibase = i;
6000 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
6003 real_from_target (&r, tmp, outer_submode);
6004 elems[elem] = const_double_from_real_value (r, outer_submode);
6006 break;
6008 case MODE_FRACT:
6009 case MODE_UFRACT:
6010 case MODE_ACCUM:
6011 case MODE_UACCUM:
6013 FIXED_VALUE_TYPE f;
6014 f.data.low = 0;
6015 f.data.high = 0;
6016 f.mode = outer_submode;
6018 for (i = 0;
6019 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
6020 i += value_bit)
6021 f.data.low |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
6022 for (; i < elem_bitsize; i += value_bit)
6023 f.data.high |= ((unsigned HOST_WIDE_INT)(*vp++ & value_mask)
6024 << (i - HOST_BITS_PER_WIDE_INT));
6026 elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
6028 break;
6030 default:
6031 gcc_unreachable ();
6034 if (VECTOR_MODE_P (outermode))
6035 return gen_rtx_CONST_VECTOR (outermode, result_v);
6036 else
6037 return result_s;
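/* A worked example of the byte machinery above, assuming a little-endian
   host and target: SUBREG:HI of (const_int 0x12345678) in SImode at BYTE 0
   unpacks the value into the bytes {0x78, 0x56, 0x34, 0x12}, computes
   value_start == 0 and repacks the first two bytes, yielding
   (const_int 0x5678); with BYTE 2 the result is (const_int 0x1234).  */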
6040 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
6041 Return 0 if no simplifications are possible. */
6042 rtx
6043 simplify_subreg (machine_mode outermode, rtx op,
6044 machine_mode innermode, unsigned int byte)
6046 /* Little bit of sanity checking. */
6047 gcc_assert (innermode != VOIDmode);
6048 gcc_assert (outermode != VOIDmode);
6049 gcc_assert (innermode != BLKmode);
6050 gcc_assert (outermode != BLKmode);
6052 gcc_assert (GET_MODE (op) == innermode
6053 || GET_MODE (op) == VOIDmode);
6055 if ((byte % GET_MODE_SIZE (outermode)) != 0)
6056 return NULL_RTX;
6058 if (byte >= GET_MODE_SIZE (innermode))
6059 return NULL_RTX;
6061 if (outermode == innermode && !byte)
6062 return op;
6064 if (CONST_SCALAR_INT_P (op)
6065 || CONST_DOUBLE_AS_FLOAT_P (op)
6066 || GET_CODE (op) == CONST_FIXED
6067 || GET_CODE (op) == CONST_VECTOR)
6068 return simplify_immed_subreg (outermode, op, innermode, byte);
6070 /* Changing mode twice with SUBREG => just change it once,
6071 or not at all if changing back to op's starting mode. */
6072 if (GET_CODE (op) == SUBREG)
6074 machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
6075 int final_offset = byte + SUBREG_BYTE (op);
6076 rtx newx;
6078 if (outermode == innermostmode
6079 && byte == 0 && SUBREG_BYTE (op) == 0)
6080 return SUBREG_REG (op);
6082 /* The SUBREG_BYTE represents the offset, as if the value were stored
6083 in memory. An irritating exception is the paradoxical subreg, where
6084 we define SUBREG_BYTE to be 0; on big-endian machines this value
6085 should really be negative. For a moment, undo this exception. */
6086 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
6088 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
6089 if (WORDS_BIG_ENDIAN)
6090 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
6091 if (BYTES_BIG_ENDIAN)
6092 final_offset += difference % UNITS_PER_WORD;
6094 if (SUBREG_BYTE (op) == 0
6095 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
6097 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
6098 if (WORDS_BIG_ENDIAN)
6099 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
6100 if (BYTES_BIG_ENDIAN)
6101 final_offset += difference % UNITS_PER_WORD;
6104 /* See whether resulting subreg will be paradoxical. */
6105 if (!paradoxical_subreg_p (outermode, innermostmode))
6107 /* In nonparadoxical subregs we can't handle negative offsets. */
6108 if (final_offset < 0)
6109 return NULL_RTX;
6110 /* Bail out in case resulting subreg would be incorrect. */
6111 if (final_offset % GET_MODE_SIZE (outermode)
6112 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
6113 return NULL_RTX;
6115 else
6117 int offset = 0;
6118 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
6120 /* In a paradoxical subreg, see if we are still looking at the lower
6121 part. If so, our SUBREG_BYTE will be 0. */
6122 if (WORDS_BIG_ENDIAN)
6123 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
6124 if (BYTES_BIG_ENDIAN)
6125 offset += difference % UNITS_PER_WORD;
6126 if (offset == final_offset)
6127 final_offset = 0;
6128 else
6129 return NULL_RTX;
6132 /* Recurse for further possible simplifications. */
6133 newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
6134 final_offset);
6135 if (newx)
6136 return newx;
6137 if (validate_subreg (outermode, innermostmode,
6138 SUBREG_REG (op), final_offset))
6140 newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
6141 if (SUBREG_PROMOTED_VAR_P (op)
6142 && SUBREG_PROMOTED_SIGN (op) >= 0
6143 && GET_MODE_CLASS (outermode) == MODE_INT
6144 && IN_RANGE (GET_MODE_SIZE (outermode),
6145 GET_MODE_SIZE (innermode),
6146 GET_MODE_SIZE (innermostmode))
6147 && subreg_lowpart_p (newx))
6149 SUBREG_PROMOTED_VAR_P (newx) = 1;
6150 SUBREG_PROMOTED_SET (newx, SUBREG_PROMOTED_GET (op));
6152 return newx;
6154 return NULL_RTX;
6157 /* SUBREG of a hard register => just change the register number
6158 and/or mode. If the hard register is not valid in that mode,
6159 suppress this simplification. If the hard register is the stack,
6160 frame, or argument pointer, leave this as a SUBREG. */
6162 if (REG_P (op) && HARD_REGISTER_P (op))
6164 unsigned int regno, final_regno;
6166 regno = REGNO (op);
6167 final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
6168 if (HARD_REGISTER_NUM_P (final_regno))
6170 rtx x;
6171 int final_offset = byte;
6173 /* Adjust offset for paradoxical subregs. */
6174 if (byte == 0
6175 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
6177 int difference = (GET_MODE_SIZE (innermode)
6178 - GET_MODE_SIZE (outermode));
6179 if (WORDS_BIG_ENDIAN)
6180 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
6181 if (BYTES_BIG_ENDIAN)
6182 final_offset += difference % UNITS_PER_WORD;
6185 x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);
6187 /* Propagate the original regno. We don't have any way to specify
6188 the offset inside the original regno, so do so only for the lowpart.
6189 The information is used only by alias analysis, which cannot
6190 grok partial registers anyway. */
6192 if (subreg_lowpart_offset (outermode, innermode) == byte)
6193 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
6194 return x;
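/* E.g. on a hypothetical target whose hard register 0 can hold both DImode
   and SImode values, (subreg:SI (reg:DI 0) 0) maps through
   simplify_subreg_regno to (reg:SI 0), and ORIGINAL_REGNO is propagated
   because offset 0 is the lowpart on a little-endian target.  */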
6198 /* If we have a SUBREG of a register that we are replacing and we are
6199 replacing it with a MEM, make a new MEM and try replacing the
6200 SUBREG with it. Don't do this if the MEM has a mode-dependent address
6201 or if we would be widening it. */
6203 if (MEM_P (op)
6204 && ! mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op))
6205 /* Allow splitting of volatile memory references in case we don't
6206 have an instruction to move the whole thing. */
6207 && (! MEM_VOLATILE_P (op)
6208 || ! have_insn_for (SET, innermode))
6209 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
6210 return adjust_address_nv (op, outermode, byte);
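/* For example, assuming ADDR is not mode-dependent and the reference is
   not being widened, a narrowing (subreg:SI (mem:DI ADDR) 4) is rewritten
   by adjust_address_nv into (mem:SI (plus ADDR (const_int 4))), i.e. the
   high word of the original memory reference on a little-endian target.  */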
6212 /* Handle complex or vector values represented as CONCAT or VEC_CONCAT
6213 of two parts. */
6214 if (GET_CODE (op) == CONCAT
6215 || GET_CODE (op) == VEC_CONCAT)
6217 unsigned int part_size, final_offset;
6218 rtx part, res;
6220 machine_mode part_mode = GET_MODE (XEXP (op, 0));
6221 if (part_mode == VOIDmode)
6222 part_mode = GET_MODE_INNER (GET_MODE (op));
6223 part_size = GET_MODE_SIZE (part_mode);
6224 if (byte < part_size)
6226 part = XEXP (op, 0);
6227 final_offset = byte;
6229 else
6231 part = XEXP (op, 1);
6232 final_offset = byte - part_size;
6235 if (final_offset + GET_MODE_SIZE (outermode) > part_size)
6236 return NULL_RTX;
6238 part_mode = GET_MODE (part);
6239 if (part_mode == VOIDmode)
6240 part_mode = GET_MODE_INNER (GET_MODE (op));
6241 res = simplify_subreg (outermode, part, part_mode, final_offset);
6242 if (res)
6243 return res;
6244 if (validate_subreg (outermode, part_mode, part, final_offset))
6245 return gen_rtx_SUBREG (outermode, part, final_offset);
6246 return NULL_RTX;
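/* Sketch of the CONCAT handling, assuming 8-byte DFmode parts:
   (subreg:DF (concat:DC (reg:DF 100) (reg:DF 101)) 0) selects the first
   part and simplifies to (reg:DF 100), byte 8 selects (reg:DF 101), and an
   offset that straddles the two parts returns NULL_RTX.  */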
6249 /* A SUBREG resulting from a zero extension may fold to zero if
6250 it extracts higher bits than the ZERO_EXTEND's source provides. */
6251 if (GET_CODE (op) == ZERO_EXTEND && SCALAR_INT_MODE_P (innermode))
6253 unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
6254 if (bitpos >= GET_MODE_PRECISION (GET_MODE (XEXP (op, 0))))
6255 return CONST0_RTX (outermode);
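/* For instance, on a little-endian target
   (subreg:HI (zero_extend:DI (reg:HI 100)) 2) reads bits [16, 32) of a
   value whose bits above 15 are known to be zero, so it folds to
   (const_int 0).  */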
6258 scalar_int_mode int_outermode, int_innermode;
6259 if (is_a <scalar_int_mode> (outermode, &int_outermode)
6260 && is_a <scalar_int_mode> (innermode, &int_innermode)
6261 && (GET_MODE_PRECISION (int_outermode)
6262 < GET_MODE_PRECISION (int_innermode))
6263 && byte == subreg_lowpart_offset (int_outermode, int_innermode))
6265 rtx tem = simplify_truncation (int_outermode, op, int_innermode);
6266 if (tem)
6267 return tem;
6270 return NULL_RTX;
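/* An illustrative subreg-of-subreg case (little-endian assumed):
   (subreg:QI (subreg:HI (reg:SI 100) 0) 0) combines the two offsets into 0
   and reduces to a single (subreg:QI (reg:SI 100) 0) when reg 100 is a
   pseudo register.  */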
6273 /* Make a SUBREG operation or equivalent if it folds. */
6275 rtx
6276 simplify_gen_subreg (machine_mode outermode, rtx op,
6277 machine_mode innermode, unsigned int byte)
6279 rtx newx;
6281 newx = simplify_subreg (outermode, op, innermode, byte);
6282 if (newx)
6283 return newx;
6285 if (GET_CODE (op) == SUBREG
6286 || GET_CODE (op) == CONCAT
6287 || GET_MODE (op) == VOIDmode)
6288 return NULL_RTX;
6290 if (validate_subreg (outermode, innermode, op, byte))
6291 return gen_rtx_SUBREG (outermode, op, byte);
6293 return NULL_RTX;
6296 /* Generate a SUBREG to extract the least significant part of EXPR (in mode
6297 INNER_MODE) as a value of mode OUTER_MODE. */
6299 rtx
6300 lowpart_subreg (machine_mode outer_mode, rtx expr,
6301 machine_mode inner_mode)
6303 return simplify_gen_subreg (outer_mode, expr, inner_mode,
6304 subreg_lowpart_offset (outer_mode, inner_mode));
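/* Usage sketch: lowpart_subreg (QImode, gen_rtx_REG (SImode, 100), SImode)
   asks for the least significant byte, which is offset 0 on a little-endian
   target and offset 3 on a big-endian one, and typically yields
   (subreg:QI (reg:SI 100) 0) or (subreg:QI (reg:SI 100) 3) respectively.  */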
6307 /* Simplify X, an rtx expression.
6309 Return the simplified expression or NULL if no simplifications
6310 were possible.
6312 This is the preferred entry point into the simplification routines;
6313 however, we still allow passes to call the more specific routines.
6315 Right now GCC has three (yes, three) major bodies of RTL simplification
6316 code that need to be unified.
6318 1. fold_rtx in cse.c. This code uses various CSE specific
6319 information to aid in RTL simplification.
6321 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
6322 it uses combine specific information to aid in RTL
6323 simplification.
6325 3. The routines in this file.
6328 Long term we want to have only one body of simplification code; to
6329 get to that state I recommend the following steps:
6331 1. Pore over fold_rtx & simplify_rtx and move any simplifications
6332 that do not depend on pass-specific state into these routines.
6334 2. As code is moved by #1, change fold_rtx & simplify_rtx to
6335 use this routine whenever possible.
6337 3. Allow for pass dependent state to be provided to these
6338 routines and add simplifications based on the pass dependent
6339 state. Remove code from cse.c & combine.c that becomes
6340 redundant/dead.
6342 It will take time, but ultimately the compiler will be easier to
6343 maintain and improve. It's totally silly that when we add a
6344 simplification it needs to be added in 4 places (3 for RTL
6345 simplification and 1 for tree simplification). */
6347 rtx
6348 simplify_rtx (const_rtx x)
6350 const enum rtx_code code = GET_CODE (x);
6351 const machine_mode mode = GET_MODE (x);
6353 switch (GET_RTX_CLASS (code))
6355 case RTX_UNARY:
6356 return simplify_unary_operation (code, mode,
6357 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
6358 case RTX_COMM_ARITH:
6359 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
6360 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
6362 /* Fall through. */
6364 case RTX_BIN_ARITH:
6365 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
6367 case RTX_TERNARY:
6368 case RTX_BITFIELD_OPS:
6369 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
6370 XEXP (x, 0), XEXP (x, 1),
6371 XEXP (x, 2));
6373 case RTX_COMPARE:
6374 case RTX_COMM_COMPARE:
6375 return simplify_relational_operation (code, mode,
6376 ((GET_MODE (XEXP (x, 0))
6377 != VOIDmode)
6378 ? GET_MODE (XEXP (x, 0))
6379 : GET_MODE (XEXP (x, 1))),
6380 XEXP (x, 0),
6381 XEXP (x, 1));
6383 case RTX_EXTRA:
6384 if (code == SUBREG)
6385 return simplify_subreg (mode, SUBREG_REG (x),
6386 GET_MODE (SUBREG_REG (x)),
6387 SUBREG_BYTE (x));
6388 break;
6390 case RTX_OBJ:
6391 if (code == LO_SUM)
6393 /* Convert (lo_sum (high FOO) FOO) to FOO. */
6394 if (GET_CODE (XEXP (x, 0)) == HIGH
6395 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
6396 return XEXP (x, 1);
6398 break;
6400 default:
6401 break;
6403 return NULL;
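/* Usage sketch (operands assumed): since simplify_rtx is a pure dispatcher,
   simplify_rtx on (plus:SI (const_int 1) (reg:SI 100)) re-canonicalizes the
   commutative operation so the constant comes last, and simplify_rtx on
   (lo_sum (high (symbol_ref "x")) (symbol_ref "x")) collapses to the
   symbol_ref itself, per the RTX_OBJ case above.  */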