[20/77] Replace MODE_INT checks with is_int_mode
[official-gcc.git] / gcc / simplify-rtx.c
blob c4fd0e9f8f7e649a8001a256f69a9c8c6c3287df
1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987-2017 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "backend.h"
25 #include "target.h"
26 #include "rtl.h"
27 #include "tree.h"
28 #include "predict.h"
29 #include "memmodel.h"
30 #include "optabs.h"
31 #include "emit-rtl.h"
32 #include "recog.h"
33 #include "diagnostic-core.h"
34 #include "varasm.h"
35 #include "flags.h"
37 /* Simplification and canonicalization of RTL. */
39 /* Much code operates on (low, high) pairs; the low value is an
40 unsigned wide int, the high value a signed wide int. We
41 occasionally need to sign extend from low to high as if low were a
42 signed wide int. */
43 #define HWI_SIGN_EXTEND(low) \
44 ((((HOST_WIDE_INT) low) < 0) ? HOST_WIDE_INT_M1 : HOST_WIDE_INT_0)
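/* Editorial example (not in the original source): for
   low = HOST_WIDE_INT_1U << (HOST_BITS_PER_WIDE_INT - 1) the cast to
   HOST_WIDE_INT is negative, so HWI_SIGN_EXTEND (low) is HOST_WIDE_INT_M1
   (all ones); for low = 1 it is HOST_WIDE_INT_0, i.e. the high word that a
   sign extension of LOW would produce.  */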
46 static rtx neg_const_int (machine_mode, const_rtx);
47 static bool plus_minus_operand_p (const_rtx);
48 static rtx simplify_plus_minus (enum rtx_code, machine_mode, rtx, rtx);
49 static rtx simplify_immed_subreg (machine_mode, rtx, machine_mode,
50 unsigned int);
51 static rtx simplify_associative_operation (enum rtx_code, machine_mode,
52 rtx, rtx);
53 static rtx simplify_relational_operation_1 (enum rtx_code, machine_mode,
54 machine_mode, rtx, rtx);
55 static rtx simplify_unary_operation_1 (enum rtx_code, machine_mode, rtx);
56 static rtx simplify_binary_operation_1 (enum rtx_code, machine_mode,
57 rtx, rtx, rtx, rtx);
59 /* Negate a CONST_INT rtx. */
60 static rtx
61 neg_const_int (machine_mode mode, const_rtx i)
63 unsigned HOST_WIDE_INT val = -UINTVAL (i);
65 if (GET_MODE_PRECISION (mode) > HOST_BITS_PER_WIDE_INT
66 && val == UINTVAL (i))
67 return simplify_const_unary_operation (NEG, mode, CONST_CAST_RTX (i),
68 mode);
69 return gen_int_mode (val, mode);
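/* Editorial note (not in the original source): the VAL == UINTVAL (I) test
   fires when -I equals I, i.e. I is zero or the minimum HOST_WIDE_INT.
   For modes wider than HOST_WIDE_INT the negated minimum does not fit in a
   CONST_INT, so that case is handed to simplify_const_unary_operation,
   which can build a wide-int constant instead.  */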
72 /* Test whether expression, X, is an immediate constant that represents
73 the most significant bit of machine mode MODE. */
75 bool
76 mode_signbit_p (machine_mode mode, const_rtx x)
78 unsigned HOST_WIDE_INT val;
79 unsigned int width;
80 scalar_int_mode int_mode;
82 if (!is_int_mode (mode, &int_mode))
83 return false;
85 width = GET_MODE_PRECISION (int_mode);
86 if (width == 0)
87 return false;
89 if (width <= HOST_BITS_PER_WIDE_INT
90 && CONST_INT_P (x))
91 val = INTVAL (x);
92 #if TARGET_SUPPORTS_WIDE_INT
93 else if (CONST_WIDE_INT_P (x))
95 unsigned int i;
96 unsigned int elts = CONST_WIDE_INT_NUNITS (x);
97 if (elts != (width + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT)
98 return false;
99 for (i = 0; i < elts - 1; i++)
100 if (CONST_WIDE_INT_ELT (x, i) != 0)
101 return false;
102 val = CONST_WIDE_INT_ELT (x, elts - 1);
103 width %= HOST_BITS_PER_WIDE_INT;
104 if (width == 0)
105 width = HOST_BITS_PER_WIDE_INT;
107 #else
108 else if (width <= HOST_BITS_PER_DOUBLE_INT
109 && CONST_DOUBLE_AS_INT_P (x)
110 && CONST_DOUBLE_LOW (x) == 0)
112 val = CONST_DOUBLE_HIGH (x);
113 width -= HOST_BITS_PER_WIDE_INT;
115 #endif
116 else
117 /* X is not an integer constant. */
118 return false;
120 if (width < HOST_BITS_PER_WIDE_INT)
121 val &= (HOST_WIDE_INT_1U << width) - 1;
122 return val == (HOST_WIDE_INT_1U << (width - 1));
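/* Editorial example (not in the original source): for QImode, width is 8,
   so mode_signbit_p returns true for gen_int_mode (0x80, QImode) (only
   bit 7 set after masking) and false for gen_int_mode (0x40, QImode) or
   gen_int_mode (0xc0, QImode).  */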
125 /* Test whether VAL is equal to the most significant bit of mode MODE
126 (after masking with the mode mask of MODE). Returns false if the
127 precision of MODE is too large to handle. */
129 bool
130 val_signbit_p (machine_mode mode, unsigned HOST_WIDE_INT val)
132 unsigned int width;
133 scalar_int_mode int_mode;
135 if (!is_int_mode (mode, &int_mode))
136 return false;
138 width = GET_MODE_PRECISION (int_mode);
139 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
140 return false;
142 val &= GET_MODE_MASK (int_mode);
143 return val == (HOST_WIDE_INT_1U << (width - 1));
146 /* Test whether the most significant bit of mode MODE is set in VAL.
147 Returns false if the precision of MODE is too large to handle. */
148 bool
149 val_signbit_known_set_p (machine_mode mode, unsigned HOST_WIDE_INT val)
151 unsigned int width;
153 scalar_int_mode int_mode;
154 if (!is_int_mode (mode, &int_mode))
155 return false;
157 width = GET_MODE_PRECISION (int_mode);
158 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
159 return false;
161 val &= HOST_WIDE_INT_1U << (width - 1);
162 return val != 0;
165 /* Test whether the most significant bit of mode MODE is clear in VAL.
166 Returns false if the precision of MODE is too large to handle. */
167 bool
168 val_signbit_known_clear_p (machine_mode mode, unsigned HOST_WIDE_INT val)
170 unsigned int width;
172 scalar_int_mode int_mode;
173 if (!is_int_mode (mode, &int_mode))
174 return false;
176 width = GET_MODE_PRECISION (int_mode);
177 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
178 return false;
180 val &= HOST_WIDE_INT_1U << (width - 1);
181 return val == 0;
184 /* Make a binary operation by properly ordering the operands and
185 seeing if the expression folds. */
188 simplify_gen_binary (enum rtx_code code, machine_mode mode, rtx op0,
189 rtx op1)
191 rtx tem;
193 /* If this simplifies, do it. */
194 tem = simplify_binary_operation (code, mode, op0, op1);
195 if (tem)
196 return tem;
198 /* Put complex operands first and constants second if commutative. */
199 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
200 && swap_commutative_operands_p (op0, op1))
201 std::swap (op0, op1);
203 return gen_rtx_fmt_ee (code, mode, op0, op1);
206 /* If X is a MEM referencing the constant pool, return the real value.
207 Otherwise return X. */
209 avoid_constant_pool_reference (rtx x)
211 rtx c, tmp, addr;
212 machine_mode cmode;
213 HOST_WIDE_INT offset = 0;
215 switch (GET_CODE (x))
217 case MEM:
218 break;
220 case FLOAT_EXTEND:
221 /* Handle float extensions of constant pool references. */
222 tmp = XEXP (x, 0);
223 c = avoid_constant_pool_reference (tmp);
224 if (c != tmp && CONST_DOUBLE_AS_FLOAT_P (c))
225 return const_double_from_real_value (*CONST_DOUBLE_REAL_VALUE (c),
226 GET_MODE (x));
227 return x;
229 default:
230 return x;
233 if (GET_MODE (x) == BLKmode)
234 return x;
236 addr = XEXP (x, 0);
238 /* Call target hook to avoid the effects of -fpic etc.... */
239 addr = targetm.delegitimize_address (addr);
241 /* Split the address into a base and integer offset. */
242 if (GET_CODE (addr) == CONST
243 && GET_CODE (XEXP (addr, 0)) == PLUS
244 && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
246 offset = INTVAL (XEXP (XEXP (addr, 0), 1));
247 addr = XEXP (XEXP (addr, 0), 0);
250 if (GET_CODE (addr) == LO_SUM)
251 addr = XEXP (addr, 1);
253 /* If this is a constant pool reference, we can turn it into its
254 constant and hope that simplifications happen. */
255 if (GET_CODE (addr) == SYMBOL_REF
256 && CONSTANT_POOL_ADDRESS_P (addr))
258 c = get_pool_constant (addr);
259 cmode = get_pool_mode (addr);
261 /* If we're accessing the constant in a different mode than it was
262 originally stored, attempt to fix that up via subreg simplifications.
263 If that fails we have no choice but to return the original memory. */
264 if (offset == 0 && cmode == GET_MODE (x))
265 return c;
266 else if (offset >= 0 && offset < GET_MODE_SIZE (cmode))
268 rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
269 if (tem && CONSTANT_P (tem))
270 return tem;
274 return x;
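/* Editorial example (not in the original source), using a hypothetical pool
   label: if X is (mem:SI (symbol_ref ".LC0")) and .LC0 is a constant-pool
   entry holding (const_int 42) in SImode, the function returns
   (const_int 42); any RTX that is not a pool reference comes back
   unchanged.  */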
277 /* Simplify a MEM based on its attributes. This is the default
278 delegitimize_address target hook, and it's recommended that every
279 overrider call it. */
282 delegitimize_mem_from_attrs (rtx x)
284 /* MEMs without MEM_OFFSETs may have been offset, so we can't just
285 use their base addresses as equivalent. */
286 if (MEM_P (x)
287 && MEM_EXPR (x)
288 && MEM_OFFSET_KNOWN_P (x))
290 tree decl = MEM_EXPR (x);
291 machine_mode mode = GET_MODE (x);
292 HOST_WIDE_INT offset = 0;
294 switch (TREE_CODE (decl))
296 default:
297 decl = NULL;
298 break;
300 case VAR_DECL:
301 break;
303 case ARRAY_REF:
304 case ARRAY_RANGE_REF:
305 case COMPONENT_REF:
306 case BIT_FIELD_REF:
307 case REALPART_EXPR:
308 case IMAGPART_EXPR:
309 case VIEW_CONVERT_EXPR:
311 HOST_WIDE_INT bitsize, bitpos;
312 tree toffset;
313 int unsignedp, reversep, volatilep = 0;
315 decl
316 = get_inner_reference (decl, &bitsize, &bitpos, &toffset, &mode,
317 &unsignedp, &reversep, &volatilep);
318 if (bitsize != GET_MODE_BITSIZE (mode)
319 || (bitpos % BITS_PER_UNIT)
320 || (toffset && !tree_fits_shwi_p (toffset)))
321 decl = NULL;
322 else
324 offset += bitpos / BITS_PER_UNIT;
325 if (toffset)
326 offset += tree_to_shwi (toffset);
328 break;
332 if (decl
333 && mode == GET_MODE (x)
334 && VAR_P (decl)
335 && (TREE_STATIC (decl)
336 || DECL_THREAD_LOCAL_P (decl))
337 && DECL_RTL_SET_P (decl)
338 && MEM_P (DECL_RTL (decl)))
340 rtx newx;
342 offset += MEM_OFFSET (x);
344 newx = DECL_RTL (decl);
346 if (MEM_P (newx))
348 rtx n = XEXP (newx, 0), o = XEXP (x, 0);
350 /* Avoid creating a new MEM needlessly if we already had
351 the same address. We do if there's no OFFSET and the
352 old address X is identical to NEWX, or if X is of the
353 form (plus NEWX OFFSET), or the NEWX is of the form
354 (plus Y (const_int Z)) and X is that with the offset
355 added: (plus Y (const_int Z+OFFSET)). */
356 if (!((offset == 0
357 || (GET_CODE (o) == PLUS
358 && GET_CODE (XEXP (o, 1)) == CONST_INT
359 && (offset == INTVAL (XEXP (o, 1))
360 || (GET_CODE (n) == PLUS
361 && GET_CODE (XEXP (n, 1)) == CONST_INT
362 && (INTVAL (XEXP (n, 1)) + offset
363 == INTVAL (XEXP (o, 1)))
364 && (n = XEXP (n, 0))))
365 && (o = XEXP (o, 0))))
366 && rtx_equal_p (o, n)))
367 x = adjust_address_nv (newx, mode, offset);
369 else if (GET_MODE (x) == GET_MODE (newx)
370 && offset == 0)
371 x = newx;
375 return x;
378 /* Make a unary operation by first seeing if it folds and otherwise making
379 the specified operation. */
382 simplify_gen_unary (enum rtx_code code, machine_mode mode, rtx op,
383 machine_mode op_mode)
385 rtx tem;
387 /* If this simplifies, use it. */
388 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
389 return tem;
391 return gen_rtx_fmt_e (code, mode, op);
394 /* Likewise for ternary operations. */
397 simplify_gen_ternary (enum rtx_code code, machine_mode mode,
398 machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
400 rtx tem;
402 /* If this simplifies, use it. */
403 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
404 op0, op1, op2)))
405 return tem;
407 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
410 /* Likewise, for relational operations.
411 CMP_MODE specifies mode comparison is done in. */
414 simplify_gen_relational (enum rtx_code code, machine_mode mode,
415 machine_mode cmp_mode, rtx op0, rtx op1)
417 rtx tem;
419 if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
420 op0, op1)))
421 return tem;
423 return gen_rtx_fmt_ee (code, mode, op0, op1);
426 /* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
427 and simplify the result. If FN is non-NULL, call this callback on each
428 X, if it returns non-NULL, replace X with its return value and simplify the
429 result. */
432 simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
433 rtx (*fn) (rtx, const_rtx, void *), void *data)
435 enum rtx_code code = GET_CODE (x);
436 machine_mode mode = GET_MODE (x);
437 machine_mode op_mode;
438 const char *fmt;
439 rtx op0, op1, op2, newx, op;
440 rtvec vec, newvec;
441 int i, j;
443 if (__builtin_expect (fn != NULL, 0))
445 newx = fn (x, old_rtx, data);
446 if (newx)
447 return newx;
449 else if (rtx_equal_p (x, old_rtx))
450 return copy_rtx ((rtx) data);
452 switch (GET_RTX_CLASS (code))
454 case RTX_UNARY:
455 op0 = XEXP (x, 0);
456 op_mode = GET_MODE (op0);
457 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
458 if (op0 == XEXP (x, 0))
459 return x;
460 return simplify_gen_unary (code, mode, op0, op_mode);
462 case RTX_BIN_ARITH:
463 case RTX_COMM_ARITH:
464 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
465 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
466 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
467 return x;
468 return simplify_gen_binary (code, mode, op0, op1);
470 case RTX_COMPARE:
471 case RTX_COMM_COMPARE:
472 op0 = XEXP (x, 0);
473 op1 = XEXP (x, 1);
474 op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
475 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
476 op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
477 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
478 return x;
479 return simplify_gen_relational (code, mode, op_mode, op0, op1);
481 case RTX_TERNARY:
482 case RTX_BITFIELD_OPS:
483 op0 = XEXP (x, 0);
484 op_mode = GET_MODE (op0);
485 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
486 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
487 op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
488 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
489 return x;
490 if (op_mode == VOIDmode)
491 op_mode = GET_MODE (op0);
492 return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
494 case RTX_EXTRA:
495 if (code == SUBREG)
497 op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
498 if (op0 == SUBREG_REG (x))
499 return x;
500 op0 = simplify_gen_subreg (GET_MODE (x), op0,
501 GET_MODE (SUBREG_REG (x)),
502 SUBREG_BYTE (x));
503 return op0 ? op0 : x;
505 break;
507 case RTX_OBJ:
508 if (code == MEM)
510 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
511 if (op0 == XEXP (x, 0))
512 return x;
513 return replace_equiv_address_nv (x, op0);
515 else if (code == LO_SUM)
517 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
518 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
520 /* (lo_sum (high x) y) -> y where x and y have the same base. */
521 if (GET_CODE (op0) == HIGH)
523 rtx base0, base1, offset0, offset1;
524 split_const (XEXP (op0, 0), &base0, &offset0);
525 split_const (op1, &base1, &offset1);
526 if (rtx_equal_p (base0, base1))
527 return op1;
530 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
531 return x;
532 return gen_rtx_LO_SUM (mode, op0, op1);
534 break;
536 default:
537 break;
540 newx = x;
541 fmt = GET_RTX_FORMAT (code);
542 for (i = 0; fmt[i]; i++)
543 switch (fmt[i])
545 case 'E':
546 vec = XVEC (x, i);
547 newvec = XVEC (newx, i);
548 for (j = 0; j < GET_NUM_ELEM (vec); j++)
550 op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
551 old_rtx, fn, data);
552 if (op != RTVEC_ELT (vec, j))
554 if (newvec == vec)
556 newvec = shallow_copy_rtvec (vec);
557 if (x == newx)
558 newx = shallow_copy_rtx (x);
559 XVEC (newx, i) = newvec;
561 RTVEC_ELT (newvec, j) = op;
564 break;
566 case 'e':
567 if (XEXP (x, i))
569 op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
570 if (op != XEXP (x, i))
572 if (x == newx)
573 newx = shallow_copy_rtx (x);
574 XEXP (newx, i) = op;
577 break;
579 return newx;
582 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
583 resulting RTX. Return a new RTX which is as simplified as possible. */
586 simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
588 return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
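/* Editorial example (not in the original source), with REG standing for a
   hypothetical pseudo register: given x = (plus:SI REG (const_int 1)),
   simplify_replace_rtx (x, REG, const0_rtx) substitutes the zero and folds
   the PLUS, returning (const_int 1).  */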
591 /* Try to simplify a MODE truncation of OP, which has OP_MODE.
592 Only handle cases where the truncated value is inherently an rvalue.
594 RTL provides two ways of truncating a value:
596 1. a lowpart subreg. This form is only a truncation when both
597 the outer and inner modes (here MODE and OP_MODE respectively)
598 are scalar integers, and only then when the subreg is used as
599 an rvalue.
601 It is only valid to form such truncating subregs if the
602 truncation requires no action by the target. The onus for
603 proving this is on the creator of the subreg -- e.g. the
604 caller to simplify_subreg or simplify_gen_subreg -- and typically
605 involves either TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode.
607 2. a TRUNCATE. This form handles both scalar and compound integers.
609 The first form is preferred where valid. However, the TRUNCATE
610 handling in simplify_unary_operation turns the second form into the
611 first form when TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode allow,
612 so it is generally safe to form rvalue truncations using:
614 simplify_gen_unary (TRUNCATE, ...)
616 and leave simplify_unary_operation to work out which representation
617 should be used.
619 Because of the proof requirements on (1), simplify_truncation must
620 also use simplify_gen_unary (TRUNCATE, ...) to truncate parts of OP,
621 regardless of whether the outer truncation came from a SUBREG or a
622 TRUNCATE. For example, if the caller has proven that an SImode
623 truncation of:
625 (and:DI X Y)
627 is a no-op and can be represented as a subreg, it does not follow
628 that SImode truncations of X and Y are also no-ops. On a target
629 like 64-bit MIPS that requires SImode values to be stored in
630 sign-extended form, an SImode truncation of:
632 (and:DI (reg:DI X) (const_int 63))
634 is trivially a no-op because only the lower 6 bits can be set.
635 However, X is still an arbitrary 64-bit number and so we cannot
636 assume that truncating it too is a no-op. */
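/* Editorial example (not in the original source): a caller that has a
   DImode value OP but no proof that the truncation is a no-op should ask
   for an SImode rvalue truncation as
     simplify_gen_unary (TRUNCATE, SImode, op, DImode);
   and let simplify_unary_operation decide whether the result can become a
   lowpart subreg or must remain a TRUNCATE.  */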
638 static rtx
639 simplify_truncation (machine_mode mode, rtx op,
640 machine_mode op_mode)
642 unsigned int precision = GET_MODE_UNIT_PRECISION (mode);
643 unsigned int op_precision = GET_MODE_UNIT_PRECISION (op_mode);
644 gcc_assert (precision <= op_precision);
646 /* Optimize truncations of zero and sign extended values. */
647 if (GET_CODE (op) == ZERO_EXTEND
648 || GET_CODE (op) == SIGN_EXTEND)
650 /* There are three possibilities. If MODE is the same as the
651 origmode, we can omit both the extension and the subreg.
652 If MODE is not larger than the origmode, we can apply the
653 truncation without the extension. Finally, if the outermode
654 is larger than the origmode, we can just extend to the appropriate
655 mode. */
656 machine_mode origmode = GET_MODE (XEXP (op, 0));
657 if (mode == origmode)
658 return XEXP (op, 0);
659 else if (precision <= GET_MODE_UNIT_PRECISION (origmode))
660 return simplify_gen_unary (TRUNCATE, mode,
661 XEXP (op, 0), origmode);
662 else
663 return simplify_gen_unary (GET_CODE (op), mode,
664 XEXP (op, 0), origmode);
667 /* If the machine can perform operations in the truncated mode, distribute
668 the truncation, i.e. simplify (truncate:QI (op:SI (x:SI) (y:SI))) into
669 (op:QI (truncate:QI (x:SI)) (truncate:QI (y:SI))). */
670 if (1
671 && (!WORD_REGISTER_OPERATIONS || precision >= BITS_PER_WORD)
672 && (GET_CODE (op) == PLUS
673 || GET_CODE (op) == MINUS
674 || GET_CODE (op) == MULT))
676 rtx op0 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0), op_mode);
677 if (op0)
679 rtx op1 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 1), op_mode);
680 if (op1)
681 return simplify_gen_binary (GET_CODE (op), mode, op0, op1);
685 /* Simplify (truncate:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C)) into
 686    (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
687 the outer subreg is effectively a truncation to the original mode. */
688 if ((GET_CODE (op) == LSHIFTRT
689 || GET_CODE (op) == ASHIFTRT)
690 /* Ensure that OP_MODE is at least twice as wide as MODE
691 to avoid the possibility that an outer LSHIFTRT shifts by more
692 than the sign extension's sign_bit_copies and introduces zeros
693 into the high bits of the result. */
694 && 2 * precision <= op_precision
695 && CONST_INT_P (XEXP (op, 1))
696 && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
697 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
698 && UINTVAL (XEXP (op, 1)) < precision)
699 return simplify_gen_binary (ASHIFTRT, mode,
700 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
702 /* Likewise (truncate:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C)) into
 703    (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
704 the outer subreg is effectively a truncation to the original mode. */
705 if ((GET_CODE (op) == LSHIFTRT
706 || GET_CODE (op) == ASHIFTRT)
707 && CONST_INT_P (XEXP (op, 1))
708 && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
709 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
710 && UINTVAL (XEXP (op, 1)) < precision)
711 return simplify_gen_binary (LSHIFTRT, mode,
712 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
714 /* Likewise (truncate:QI (ashift:SI (zero_extend:SI (x:QI)) C)) into
 715    (ashift:QI (x:QI) C), where C is a suitable small constant and
716 the outer subreg is effectively a truncation to the original mode. */
717 if (GET_CODE (op) == ASHIFT
718 && CONST_INT_P (XEXP (op, 1))
719 && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
720 || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
721 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
722 && UINTVAL (XEXP (op, 1)) < precision)
723 return simplify_gen_binary (ASHIFT, mode,
724 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
726 /* Likewise (truncate:QI (and:SI (lshiftrt:SI (x:SI) C) C2)) into
727 (and:QI (lshiftrt:QI (truncate:QI (x:SI)) C) C2) for suitable C
728 and C2. */
729 if (GET_CODE (op) == AND
730 && (GET_CODE (XEXP (op, 0)) == LSHIFTRT
731 || GET_CODE (XEXP (op, 0)) == ASHIFTRT)
732 && CONST_INT_P (XEXP (XEXP (op, 0), 1))
733 && CONST_INT_P (XEXP (op, 1)))
735 rtx op0 = (XEXP (XEXP (op, 0), 0));
736 rtx shift_op = XEXP (XEXP (op, 0), 1);
737 rtx mask_op = XEXP (op, 1);
738 unsigned HOST_WIDE_INT shift = UINTVAL (shift_op);
739 unsigned HOST_WIDE_INT mask = UINTVAL (mask_op);
741 if (shift < precision
742 /* If doing this transform works for an X with all bits set,
743 it works for any X. */
744 && ((GET_MODE_MASK (mode) >> shift) & mask)
745 == ((GET_MODE_MASK (op_mode) >> shift) & mask)
746 && (op0 = simplify_gen_unary (TRUNCATE, mode, op0, op_mode))
747 && (op0 = simplify_gen_binary (LSHIFTRT, mode, op0, shift_op)))
749 mask_op = GEN_INT (trunc_int_for_mode (mask, mode));
750 return simplify_gen_binary (AND, mode, op0, mask_op);
754 /* Turn (truncate:M1 (*_extract:M2 (reg:M2) (len) (pos))) into
755 (*_extract:M1 (truncate:M1 (reg:M2)) (len) (pos')) if possible without
756 changing len. */
757 if ((GET_CODE (op) == ZERO_EXTRACT || GET_CODE (op) == SIGN_EXTRACT)
758 && REG_P (XEXP (op, 0))
759 && GET_MODE (XEXP (op, 0)) == GET_MODE (op)
760 && CONST_INT_P (XEXP (op, 1))
761 && CONST_INT_P (XEXP (op, 2)))
763 rtx op0 = XEXP (op, 0);
764 unsigned HOST_WIDE_INT len = UINTVAL (XEXP (op, 1));
765 unsigned HOST_WIDE_INT pos = UINTVAL (XEXP (op, 2));
766 if (BITS_BIG_ENDIAN && pos >= op_precision - precision)
768 op0 = simplify_gen_unary (TRUNCATE, mode, op0, GET_MODE (op0));
769 if (op0)
771 pos -= op_precision - precision;
772 return simplify_gen_ternary (GET_CODE (op), mode, mode, op0,
773 XEXP (op, 1), GEN_INT (pos));
776 else if (!BITS_BIG_ENDIAN && precision >= len + pos)
778 op0 = simplify_gen_unary (TRUNCATE, mode, op0, GET_MODE (op0));
779 if (op0)
780 return simplify_gen_ternary (GET_CODE (op), mode, mode, op0,
781 XEXP (op, 1), XEXP (op, 2));
785 /* Recognize a word extraction from a multi-word subreg. */
786 if ((GET_CODE (op) == LSHIFTRT
787 || GET_CODE (op) == ASHIFTRT)
788 && SCALAR_INT_MODE_P (mode)
789 && SCALAR_INT_MODE_P (op_mode)
790 && precision >= BITS_PER_WORD
791 && 2 * precision <= op_precision
792 && CONST_INT_P (XEXP (op, 1))
793 && (INTVAL (XEXP (op, 1)) & (precision - 1)) == 0
794 && UINTVAL (XEXP (op, 1)) < op_precision)
796 int byte = subreg_lowpart_offset (mode, op_mode);
797 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
798 return simplify_gen_subreg (mode, XEXP (op, 0), op_mode,
799 (WORDS_BIG_ENDIAN
800 ? byte - shifted_bytes
801 : byte + shifted_bytes));
804 /* If we have a TRUNCATE of a right shift of MEM, make a new MEM
805 and try replacing the TRUNCATE and shift with it. Don't do this
806 if the MEM has a mode-dependent address. */
807 if ((GET_CODE (op) == LSHIFTRT
808 || GET_CODE (op) == ASHIFTRT)
809 && SCALAR_INT_MODE_P (op_mode)
810 && MEM_P (XEXP (op, 0))
811 && CONST_INT_P (XEXP (op, 1))
812 && (INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (mode)) == 0
813 && INTVAL (XEXP (op, 1)) > 0
814 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (op_mode)
815 && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0),
816 MEM_ADDR_SPACE (XEXP (op, 0)))
817 && ! MEM_VOLATILE_P (XEXP (op, 0))
818 && (GET_MODE_SIZE (mode) >= UNITS_PER_WORD
819 || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
821 int byte = subreg_lowpart_offset (mode, op_mode);
822 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
823 return adjust_address_nv (XEXP (op, 0), mode,
824 (WORDS_BIG_ENDIAN
825 ? byte - shifted_bytes
826 : byte + shifted_bytes));
829 /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
830 (OP:SI foo:SI) if OP is NEG or ABS. */
831 if ((GET_CODE (op) == ABS
832 || GET_CODE (op) == NEG)
833 && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
834 || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
835 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
836 return simplify_gen_unary (GET_CODE (op), mode,
837 XEXP (XEXP (op, 0), 0), mode);
839 /* (truncate:A (subreg:B (truncate:C X) 0)) is
840 (truncate:A X). */
841 if (GET_CODE (op) == SUBREG
842 && SCALAR_INT_MODE_P (mode)
843 && SCALAR_INT_MODE_P (op_mode)
844 && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (op)))
845 && GET_CODE (SUBREG_REG (op)) == TRUNCATE
846 && subreg_lowpart_p (op))
848 rtx inner = XEXP (SUBREG_REG (op), 0);
849 if (GET_MODE_PRECISION (mode)
850 <= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op))))
851 return simplify_gen_unary (TRUNCATE, mode, inner, GET_MODE (inner));
852 else
853 /* If subreg above is paradoxical and C is narrower
854 than A, return (subreg:A (truncate:C X) 0). */
855 return simplify_gen_subreg (mode, SUBREG_REG (op),
856 GET_MODE (SUBREG_REG (op)), 0);
859 /* (truncate:A (truncate:B X)) is (truncate:A X). */
860 if (GET_CODE (op) == TRUNCATE)
861 return simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0),
862 GET_MODE (XEXP (op, 0)));
864 /* (truncate:A (ior X C)) is (const_int -1) if C is equal to that already,
865 in mode A. */
866 if (GET_CODE (op) == IOR
867 && SCALAR_INT_MODE_P (mode)
868 && SCALAR_INT_MODE_P (op_mode)
869 && CONST_INT_P (XEXP (op, 1))
870 && trunc_int_for_mode (INTVAL (XEXP (op, 1)), mode) == -1)
871 return constm1_rtx;
873 return NULL_RTX;
876 /* Try to simplify a unary operation CODE whose output mode is to be
877 MODE with input operand OP whose mode was originally OP_MODE.
878 Return zero if no simplification can be made. */
880 simplify_unary_operation (enum rtx_code code, machine_mode mode,
881 rtx op, machine_mode op_mode)
883 rtx trueop, tem;
885 trueop = avoid_constant_pool_reference (op);
887 tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
888 if (tem)
889 return tem;
891 return simplify_unary_operation_1 (code, mode, op);
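/* Editorial example (not in the original source):
   simplify_unary_operation (NEG, SImode, const1_rtx, SImode) folds the
   constant and returns constm1_rtx, whereas a NEG of an ordinary pseudo
   matches none of the patterns below and yields NULL_RTX, leaving the
   caller to emit the NEG itself.  */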
894 /* Return true if FLOAT or UNSIGNED_FLOAT operation OP is known
895 to be exact. */
897 static bool
898 exact_int_to_float_conversion_p (const_rtx op)
900 int out_bits = significand_size (GET_MODE_INNER (GET_MODE (op)));
901 machine_mode op0_mode = GET_MODE (XEXP (op, 0));
902 /* Constants shouldn't reach here. */
903 gcc_assert (op0_mode != VOIDmode);
904 int in_prec = GET_MODE_UNIT_PRECISION (op0_mode);
905 int in_bits = in_prec;
906 if (HWI_COMPUTABLE_MODE_P (op0_mode))
908 unsigned HOST_WIDE_INT nonzero = nonzero_bits (XEXP (op, 0), op0_mode);
909 if (GET_CODE (op) == FLOAT)
910 in_bits -= num_sign_bit_copies (XEXP (op, 0), op0_mode);
911 else if (GET_CODE (op) == UNSIGNED_FLOAT)
912 in_bits = wi::min_precision (wi::uhwi (nonzero, in_prec), UNSIGNED);
913 else
914 gcc_unreachable ();
915 in_bits -= wi::ctz (wi::uhwi (nonzero, in_prec));
917 return in_bits <= out_bits;
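/* Editorial example (not in the original source): (float:SF (reg:HI X)) is
   always exact because any 16-bit integer fits in SFmode's 24-bit
   significand, while (float:SF (reg:DI X)) is exact only when nonzero_bits
   or num_sign_bit_copies show that at most 24 significant bits can be
   set.  */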
920 /* Perform some simplifications we can do even if the operands
921 aren't constant. */
922 static rtx
923 simplify_unary_operation_1 (enum rtx_code code, machine_mode mode, rtx op)
925 enum rtx_code reversed;
926 rtx temp;
928 switch (code)
930 case NOT:
931 /* (not (not X)) == X. */
932 if (GET_CODE (op) == NOT)
933 return XEXP (op, 0);
935 /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
936 comparison is all ones. */
937 if (COMPARISON_P (op)
938 && (mode == BImode || STORE_FLAG_VALUE == -1)
939 && ((reversed = reversed_comparison_code (op, NULL)) != UNKNOWN))
940 return simplify_gen_relational (reversed, mode, VOIDmode,
941 XEXP (op, 0), XEXP (op, 1));
943 /* (not (plus X -1)) can become (neg X). */
944 if (GET_CODE (op) == PLUS
945 && XEXP (op, 1) == constm1_rtx)
946 return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
948 /* Similarly, (not (neg X)) is (plus X -1). Only do this for
949 modes that have CONSTM1_RTX, i.e. MODE_INT, MODE_PARTIAL_INT
950 and MODE_VECTOR_INT. */
951 if (GET_CODE (op) == NEG && CONSTM1_RTX (mode))
952 return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
953 CONSTM1_RTX (mode));
955 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
956 if (GET_CODE (op) == XOR
957 && CONST_INT_P (XEXP (op, 1))
958 && (temp = simplify_unary_operation (NOT, mode,
959 XEXP (op, 1), mode)) != 0)
960 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
962 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
963 if (GET_CODE (op) == PLUS
964 && CONST_INT_P (XEXP (op, 1))
965 && mode_signbit_p (mode, XEXP (op, 1))
966 && (temp = simplify_unary_operation (NOT, mode,
967 XEXP (op, 1), mode)) != 0)
968 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
971 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
972 operands other than 1, but that is not valid. We could do a
973 similar simplification for (not (lshiftrt C X)) where C is
974 just the sign bit, but this doesn't seem common enough to
975 bother with. */
976 if (GET_CODE (op) == ASHIFT
977 && XEXP (op, 0) == const1_rtx)
979 temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
980 return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
983 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
984 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
985 so we can perform the above simplification. */
986 if (STORE_FLAG_VALUE == -1
987 && GET_CODE (op) == ASHIFTRT
988 && CONST_INT_P (XEXP (op, 1))
989 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
990 return simplify_gen_relational (GE, mode, VOIDmode,
991 XEXP (op, 0), const0_rtx);
994 if (GET_CODE (op) == SUBREG
995 && subreg_lowpart_p (op)
996 && (GET_MODE_SIZE (GET_MODE (op))
997 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
998 && GET_CODE (SUBREG_REG (op)) == ASHIFT
999 && XEXP (SUBREG_REG (op), 0) == const1_rtx)
1001 machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
1002 rtx x;
1004 x = gen_rtx_ROTATE (inner_mode,
1005 simplify_gen_unary (NOT, inner_mode, const1_rtx,
1006 inner_mode),
1007 XEXP (SUBREG_REG (op), 1));
1008 temp = rtl_hooks.gen_lowpart_no_emit (mode, x);
1009 if (temp)
1010 return temp;
1013 /* Apply De Morgan's laws to reduce number of patterns for machines
1014 with negating logical insns (and-not, nand, etc.). If result has
1015 only one NOT, put it first, since that is how the patterns are
1016 coded. */
1017 if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
1019 rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
1020 machine_mode op_mode;
1022 op_mode = GET_MODE (in1);
1023 in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);
1025 op_mode = GET_MODE (in2);
1026 if (op_mode == VOIDmode)
1027 op_mode = mode;
1028 in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);
1030 if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
1031 std::swap (in1, in2);
1033 return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
1034 mode, in1, in2);
1037 /* (not (bswap x)) -> (bswap (not x)). */
1038 if (GET_CODE (op) == BSWAP)
1040 rtx x = simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
1041 return simplify_gen_unary (BSWAP, mode, x, mode);
1043 break;
1045 case NEG:
1046 /* (neg (neg X)) == X. */
1047 if (GET_CODE (op) == NEG)
1048 return XEXP (op, 0);
1050 /* (neg (x ? (neg y) : y)) == !x ? (neg y) : y.
1051 If comparison is not reversible use
1052 x ? y : (neg y). */
1053 if (GET_CODE (op) == IF_THEN_ELSE)
1055 rtx cond = XEXP (op, 0);
1056 rtx true_rtx = XEXP (op, 1);
1057 rtx false_rtx = XEXP (op, 2);
1059 if ((GET_CODE (true_rtx) == NEG
1060 && rtx_equal_p (XEXP (true_rtx, 0), false_rtx))
1061 || (GET_CODE (false_rtx) == NEG
1062 && rtx_equal_p (XEXP (false_rtx, 0), true_rtx)))
1064 if (reversed_comparison_code (cond, NULL) != UNKNOWN)
1065 temp = reversed_comparison (cond, mode);
1066 else
1068 temp = cond;
1069 std::swap (true_rtx, false_rtx);
1071 return simplify_gen_ternary (IF_THEN_ELSE, mode,
1072 mode, temp, true_rtx, false_rtx);
1076 /* (neg (plus X 1)) can become (not X). */
1077 if (GET_CODE (op) == PLUS
1078 && XEXP (op, 1) == const1_rtx)
1079 return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
1081 /* Similarly, (neg (not X)) is (plus X 1). */
1082 if (GET_CODE (op) == NOT)
1083 return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
1084 CONST1_RTX (mode));
1086 /* (neg (minus X Y)) can become (minus Y X). This transformation
1087 isn't safe for modes with signed zeros, since if X and Y are
1088 both +0, (minus Y X) is the same as (minus X Y). If the
1089 rounding mode is towards +infinity (or -infinity) then the two
1090 expressions will be rounded differently. */
1091 if (GET_CODE (op) == MINUS
1092 && !HONOR_SIGNED_ZEROS (mode)
1093 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1094 return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
1096 if (GET_CODE (op) == PLUS
1097 && !HONOR_SIGNED_ZEROS (mode)
1098 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1100 /* (neg (plus A C)) is simplified to (minus -C A). */
1101 if (CONST_SCALAR_INT_P (XEXP (op, 1))
1102 || CONST_DOUBLE_AS_FLOAT_P (XEXP (op, 1)))
1104 temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
1105 if (temp)
1106 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
1109 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
1110 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
1111 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
1114 /* (neg (mult A B)) becomes (mult A (neg B)).
1115 This works even for floating-point values. */
1116 if (GET_CODE (op) == MULT
1117 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1119 temp = simplify_gen_unary (NEG, mode, XEXP (op, 1), mode);
1120 return simplify_gen_binary (MULT, mode, XEXP (op, 0), temp);
1123 /* NEG commutes with ASHIFT since it is multiplication. Only do
1124 this if we can then eliminate the NEG (e.g., if the operand
1125 is a constant). */
1126 if (GET_CODE (op) == ASHIFT)
1128 temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
1129 if (temp)
1130 return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
1133 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
1134 C is equal to the width of MODE minus 1. */
1135 if (GET_CODE (op) == ASHIFTRT
1136 && CONST_INT_P (XEXP (op, 1))
1137 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
1138 return simplify_gen_binary (LSHIFTRT, mode,
1139 XEXP (op, 0), XEXP (op, 1));
1141 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
1142 C is equal to the width of MODE minus 1. */
1143 if (GET_CODE (op) == LSHIFTRT
1144 && CONST_INT_P (XEXP (op, 1))
1145 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
1146 return simplify_gen_binary (ASHIFTRT, mode,
1147 XEXP (op, 0), XEXP (op, 1));
1149 /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
1150 if (GET_CODE (op) == XOR
1151 && XEXP (op, 1) == const1_rtx
1152 && nonzero_bits (XEXP (op, 0), mode) == 1)
1153 return plus_constant (mode, XEXP (op, 0), -1);
1155 /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1. */
1156 /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1. */
1157 if (GET_CODE (op) == LT
1158 && XEXP (op, 1) == const0_rtx
1159 && SCALAR_INT_MODE_P (GET_MODE (XEXP (op, 0))))
1161 machine_mode inner = GET_MODE (XEXP (op, 0));
1162 int isize = GET_MODE_PRECISION (inner);
1163 if (STORE_FLAG_VALUE == 1)
1165 temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
1166 GEN_INT (isize - 1));
1167 if (mode == inner)
1168 return temp;
1169 if (GET_MODE_PRECISION (mode) > isize)
1170 return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
1171 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
1173 else if (STORE_FLAG_VALUE == -1)
1175 temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
1176 GEN_INT (isize - 1));
1177 if (mode == inner)
1178 return temp;
1179 if (GET_MODE_PRECISION (mode) > isize)
1180 return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
1181 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
1184 break;
1186 case TRUNCATE:
1187 /* Don't optimize (lshiftrt (mult ...)) as it would interfere
1188 with the umulXi3_highpart patterns. */
1189 if (GET_CODE (op) == LSHIFTRT
1190 && GET_CODE (XEXP (op, 0)) == MULT)
1191 break;
1193 if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
1195 if (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op)))
1197 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1198 if (temp)
1199 return temp;
1201 /* We can't handle truncation to a partial integer mode here
1202 because we don't know the real bitsize of the partial
1203 integer mode. */
1204 break;
1207 if (GET_MODE (op) != VOIDmode)
1209 temp = simplify_truncation (mode, op, GET_MODE (op));
1210 if (temp)
1211 return temp;
1214 /* If we know that the value is already truncated, we can
1215 replace the TRUNCATE with a SUBREG. */
1216 if (GET_MODE_NUNITS (mode) == 1
1217 && (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op))
1218 || truncated_to_mode (mode, op)))
1220 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1221 if (temp)
1222 return temp;
1225 /* A truncate of a comparison can be replaced with a subreg if
1226 STORE_FLAG_VALUE permits. This is like the previous test,
1227 but it works even if the comparison is done in a mode larger
1228 than HOST_BITS_PER_WIDE_INT. */
1229 if (HWI_COMPUTABLE_MODE_P (mode)
1230 && COMPARISON_P (op)
1231 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
1233 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1234 if (temp)
1235 return temp;
1238 /* A truncate of a memory is just loading the low part of the memory
1239 if we are not changing the meaning of the address. */
1240 if (GET_CODE (op) == MEM
1241 && !VECTOR_MODE_P (mode)
1242 && !MEM_VOLATILE_P (op)
1243 && !mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op)))
1245 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1246 if (temp)
1247 return temp;
1250 break;
1252 case FLOAT_TRUNCATE:
1253 if (DECIMAL_FLOAT_MODE_P (mode))
1254 break;
1256 /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */
1257 if (GET_CODE (op) == FLOAT_EXTEND
1258 && GET_MODE (XEXP (op, 0)) == mode)
1259 return XEXP (op, 0);
1261 /* (float_truncate:SF (float_truncate:DF foo:XF))
1262 = (float_truncate:SF foo:XF).
1263 This may eliminate double rounding, so it is unsafe.
1265 (float_truncate:SF (float_extend:XF foo:DF))
1266 = (float_truncate:SF foo:DF).
1268 (float_truncate:DF (float_extend:XF foo:SF))
1269 = (float_extend:DF foo:SF). */
1270 if ((GET_CODE (op) == FLOAT_TRUNCATE
1271 && flag_unsafe_math_optimizations)
1272 || GET_CODE (op) == FLOAT_EXTEND)
1273 return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op,
1274 0)))
1275 > GET_MODE_SIZE (mode)
1276 ? FLOAT_TRUNCATE : FLOAT_EXTEND,
1277 mode,
1278 XEXP (op, 0), mode);
1280 /* (float_truncate (float x)) is (float x) */
1281 if ((GET_CODE (op) == FLOAT || GET_CODE (op) == UNSIGNED_FLOAT)
1282 && (flag_unsafe_math_optimizations
1283 || exact_int_to_float_conversion_p (op)))
1284 return simplify_gen_unary (GET_CODE (op), mode,
1285 XEXP (op, 0),
1286 GET_MODE (XEXP (op, 0)));
1288 /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
1289 (OP:SF foo:SF) if OP is NEG or ABS. */
1290 if ((GET_CODE (op) == ABS
1291 || GET_CODE (op) == NEG)
1292 && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
1293 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
1294 return simplify_gen_unary (GET_CODE (op), mode,
1295 XEXP (XEXP (op, 0), 0), mode);
1297 /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
1298 is (float_truncate:SF x). */
1299 if (GET_CODE (op) == SUBREG
1300 && subreg_lowpart_p (op)
1301 && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
1302 return SUBREG_REG (op);
1303 break;
1305 case FLOAT_EXTEND:
1306 if (DECIMAL_FLOAT_MODE_P (mode))
1307 break;
1309 /* (float_extend (float_extend x)) is (float_extend x)
1311 (float_extend (float x)) is (float x) assuming that double
 1312          rounding can't happen.  */
1314 if (GET_CODE (op) == FLOAT_EXTEND
1315 || ((GET_CODE (op) == FLOAT || GET_CODE (op) == UNSIGNED_FLOAT)
1316 && exact_int_to_float_conversion_p (op)))
1317 return simplify_gen_unary (GET_CODE (op), mode,
1318 XEXP (op, 0),
1319 GET_MODE (XEXP (op, 0)));
1321 break;
1323 case ABS:
1324 /* (abs (neg <foo>)) -> (abs <foo>) */
1325 if (GET_CODE (op) == NEG)
1326 return simplify_gen_unary (ABS, mode, XEXP (op, 0),
1327 GET_MODE (XEXP (op, 0)));
1329 /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
1330 do nothing. */
1331 if (GET_MODE (op) == VOIDmode)
1332 break;
1334 /* If operand is something known to be positive, ignore the ABS. */
1335 if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
1336 || val_signbit_known_clear_p (GET_MODE (op),
1337 nonzero_bits (op, GET_MODE (op))))
1338 return op;
1340 /* If operand is known to be only -1 or 0, convert ABS to NEG. */
1341 if (num_sign_bit_copies (op, mode) == GET_MODE_PRECISION (mode))
1342 return gen_rtx_NEG (mode, op);
1344 break;
1346 case FFS:
1347 /* (ffs (*_extend <X>)) = (ffs <X>) */
1348 if (GET_CODE (op) == SIGN_EXTEND
1349 || GET_CODE (op) == ZERO_EXTEND)
1350 return simplify_gen_unary (FFS, mode, XEXP (op, 0),
1351 GET_MODE (XEXP (op, 0)));
1352 break;
1354 case POPCOUNT:
1355 switch (GET_CODE (op))
1357 case BSWAP:
1358 case ZERO_EXTEND:
1359 /* (popcount (zero_extend <X>)) = (popcount <X>) */
1360 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1361 GET_MODE (XEXP (op, 0)));
1363 case ROTATE:
1364 case ROTATERT:
1365 /* Rotations don't affect popcount. */
1366 if (!side_effects_p (XEXP (op, 1)))
1367 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1368 GET_MODE (XEXP (op, 0)));
1369 break;
1371 default:
1372 break;
1374 break;
1376 case PARITY:
1377 switch (GET_CODE (op))
1379 case NOT:
1380 case BSWAP:
1381 case ZERO_EXTEND:
1382 case SIGN_EXTEND:
1383 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1384 GET_MODE (XEXP (op, 0)));
1386 case ROTATE:
1387 case ROTATERT:
1388 /* Rotations don't affect parity. */
1389 if (!side_effects_p (XEXP (op, 1)))
1390 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1391 GET_MODE (XEXP (op, 0)));
1392 break;
1394 default:
1395 break;
1397 break;
1399 case BSWAP:
1400 /* (bswap (bswap x)) -> x. */
1401 if (GET_CODE (op) == BSWAP)
1402 return XEXP (op, 0);
1403 break;
1405 case FLOAT:
1406 /* (float (sign_extend <X>)) = (float <X>). */
1407 if (GET_CODE (op) == SIGN_EXTEND)
1408 return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
1409 GET_MODE (XEXP (op, 0)));
1410 break;
1412 case SIGN_EXTEND:
1413 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
1414 becomes just the MINUS if its mode is MODE. This allows
1415 folding switch statements on machines using casesi (such as
1416 the VAX). */
1417 if (GET_CODE (op) == TRUNCATE
1418 && GET_MODE (XEXP (op, 0)) == mode
1419 && GET_CODE (XEXP (op, 0)) == MINUS
1420 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
1421 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
1422 return XEXP (op, 0);
1424 /* Extending a widening multiplication should be canonicalized to
1425 a wider widening multiplication. */
1426 if (GET_CODE (op) == MULT)
1428 rtx lhs = XEXP (op, 0);
1429 rtx rhs = XEXP (op, 1);
1430 enum rtx_code lcode = GET_CODE (lhs);
1431 enum rtx_code rcode = GET_CODE (rhs);
1433 /* Widening multiplies usually extend both operands, but sometimes
1434 they use a shift to extract a portion of a register. */
1435 if ((lcode == SIGN_EXTEND
1436 || (lcode == ASHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1437 && (rcode == SIGN_EXTEND
1438 || (rcode == ASHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1440 machine_mode lmode = GET_MODE (lhs);
1441 machine_mode rmode = GET_MODE (rhs);
1442 int bits;
1444 if (lcode == ASHIFTRT)
1445 /* Number of bits not shifted off the end. */
1446 bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
1447 else /* lcode == SIGN_EXTEND */
1448 /* Size of inner mode. */
1449 bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
1451 if (rcode == ASHIFTRT)
1452 bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
1453 else /* rcode == SIGN_EXTEND */
1454 bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
 1456             /* We can only widen multiplies if the result is mathematically
1457 equivalent. I.e. if overflow was impossible. */
1458 if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
1459 return simplify_gen_binary
1460 (MULT, mode,
1461 simplify_gen_unary (SIGN_EXTEND, mode, lhs, lmode),
1462 simplify_gen_unary (SIGN_EXTEND, mode, rhs, rmode));
1466 /* Check for a sign extension of a subreg of a promoted
1467 variable, where the promotion is sign-extended, and the
1468 target mode is the same as the variable's promotion. */
1469 if (GET_CODE (op) == SUBREG
1470 && SUBREG_PROMOTED_VAR_P (op)
1471 && SUBREG_PROMOTED_SIGNED_P (op)
1472 && !paradoxical_subreg_p (mode, GET_MODE (SUBREG_REG (op))))
1474 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1475 if (temp)
1476 return temp;
1479 /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
1480 (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1481 if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
1483 gcc_assert (GET_MODE_PRECISION (mode)
1484 > GET_MODE_PRECISION (GET_MODE (op)));
1485 return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
1486 GET_MODE (XEXP (op, 0)));
1489 /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
1490 is (sign_extend:M (subreg:O <X>)) if there is mode with
1491 GET_MODE_BITSIZE (N) - I bits.
1492 (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1493 is similarly (zero_extend:M (subreg:O <X>)). */
1494 if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT)
1495 && GET_CODE (XEXP (op, 0)) == ASHIFT
1496 && CONST_INT_P (XEXP (op, 1))
1497 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1498 && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
1500 scalar_int_mode tmode;
1501 gcc_assert (GET_MODE_BITSIZE (mode)
1502 > GET_MODE_BITSIZE (GET_MODE (op)));
1503 if (int_mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
1504 - INTVAL (XEXP (op, 1)), 1).exists (&tmode))
1506 rtx inner =
1507 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1508 if (inner)
1509 return simplify_gen_unary (GET_CODE (op) == ASHIFTRT
1510 ? SIGN_EXTEND : ZERO_EXTEND,
1511 mode, inner, tmode);
1515 /* (sign_extend:M (lshiftrt:N <X> (const_int I))) is better as
1516 (zero_extend:M (lshiftrt:N <X> (const_int I))) if I is not 0. */
1517 if (GET_CODE (op) == LSHIFTRT
1518 && CONST_INT_P (XEXP (op, 1))
1519 && XEXP (op, 1) != const0_rtx)
1520 return simplify_gen_unary (ZERO_EXTEND, mode, op, GET_MODE (op));
1522 #if defined(POINTERS_EXTEND_UNSIGNED)
1523 /* As we do not know which address space the pointer is referring to,
1524 we can do this only if the target does not support different pointer
1525 or address modes depending on the address space. */
1526 if (target_default_pointer_address_modes_p ()
1527 && ! POINTERS_EXTEND_UNSIGNED
1528 && mode == Pmode && GET_MODE (op) == ptr_mode
1529 && (CONSTANT_P (op)
1530 || (GET_CODE (op) == SUBREG
1531 && REG_P (SUBREG_REG (op))
1532 && REG_POINTER (SUBREG_REG (op))
1533 && GET_MODE (SUBREG_REG (op)) == Pmode))
1534 && !targetm.have_ptr_extend ())
1536 temp
1537 = convert_memory_address_addr_space_1 (Pmode, op,
1538 ADDR_SPACE_GENERIC, false,
1539 true);
1540 if (temp)
1541 return temp;
1543 #endif
1544 break;
1546 case ZERO_EXTEND:
1547 /* Check for a zero extension of a subreg of a promoted
1548 variable, where the promotion is zero-extended, and the
1549 target mode is the same as the variable's promotion. */
1550 if (GET_CODE (op) == SUBREG
1551 && SUBREG_PROMOTED_VAR_P (op)
1552 && SUBREG_PROMOTED_UNSIGNED_P (op)
1553 && !paradoxical_subreg_p (mode, GET_MODE (SUBREG_REG (op))))
1555 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1556 if (temp)
1557 return temp;
1560 /* Extending a widening multiplication should be canonicalized to
1561 a wider widening multiplication. */
1562 if (GET_CODE (op) == MULT)
1564 rtx lhs = XEXP (op, 0);
1565 rtx rhs = XEXP (op, 1);
1566 enum rtx_code lcode = GET_CODE (lhs);
1567 enum rtx_code rcode = GET_CODE (rhs);
1569 /* Widening multiplies usually extend both operands, but sometimes
1570 they use a shift to extract a portion of a register. */
1571 if ((lcode == ZERO_EXTEND
1572 || (lcode == LSHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1573 && (rcode == ZERO_EXTEND
1574 || (rcode == LSHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1576 machine_mode lmode = GET_MODE (lhs);
1577 machine_mode rmode = GET_MODE (rhs);
1578 int bits;
1580 if (lcode == LSHIFTRT)
1581 /* Number of bits not shifted off the end. */
1582 bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
1583 else /* lcode == ZERO_EXTEND */
1584 /* Size of inner mode. */
1585 bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
1587 if (rcode == LSHIFTRT)
1588 bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
1589 else /* rcode == ZERO_EXTEND */
1590 bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
 1592             /* We can only widen multiplies if the result is mathematically
1593 equivalent. I.e. if overflow was impossible. */
1594 if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
1595 return simplify_gen_binary
1596 (MULT, mode,
1597 simplify_gen_unary (ZERO_EXTEND, mode, lhs, lmode),
1598 simplify_gen_unary (ZERO_EXTEND, mode, rhs, rmode));
1602 /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1603 if (GET_CODE (op) == ZERO_EXTEND)
1604 return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0),
1605 GET_MODE (XEXP (op, 0)));
1607 /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1608 is (zero_extend:M (subreg:O <X>)) if there is mode with
1609 GET_MODE_PRECISION (N) - I bits. */
1610 if (GET_CODE (op) == LSHIFTRT
1611 && GET_CODE (XEXP (op, 0)) == ASHIFT
1612 && CONST_INT_P (XEXP (op, 1))
1613 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1614 && GET_MODE_PRECISION (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
1616 scalar_int_mode tmode;
1617 if (int_mode_for_size (GET_MODE_PRECISION (GET_MODE (op))
1618 - INTVAL (XEXP (op, 1)), 1).exists (&tmode))
1620 rtx inner =
1621 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1622 if (inner)
1623 return simplify_gen_unary (ZERO_EXTEND, mode, inner, tmode);
1627 /* (zero_extend:M (subreg:N <X:O>)) is <X:O> (for M == O) or
1628 (zero_extend:M <X:O>), if X doesn't have any non-zero bits outside
1629 of mode N. E.g.
1630 (zero_extend:SI (subreg:QI (and:SI (reg:SI) (const_int 63)) 0)) is
1631 (and:SI (reg:SI) (const_int 63)). */
1632 if (GET_CODE (op) == SUBREG
1633 && GET_MODE_PRECISION (GET_MODE (op))
1634 < GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
1635 && GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
1636 <= HOST_BITS_PER_WIDE_INT
1637 && GET_MODE_PRECISION (mode)
1638 >= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
1639 && subreg_lowpart_p (op)
1640 && (nonzero_bits (SUBREG_REG (op), GET_MODE (SUBREG_REG (op)))
1641 & ~GET_MODE_MASK (GET_MODE (op))) == 0)
1643 if (GET_MODE_PRECISION (mode)
1644 == GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op))))
1645 return SUBREG_REG (op);
1646 return simplify_gen_unary (ZERO_EXTEND, mode, SUBREG_REG (op),
1647 GET_MODE (SUBREG_REG (op)));
1650 #if defined(POINTERS_EXTEND_UNSIGNED)
1651 /* As we do not know which address space the pointer is referring to,
1652 we can do this only if the target does not support different pointer
1653 or address modes depending on the address space. */
1654 if (target_default_pointer_address_modes_p ()
1655 && POINTERS_EXTEND_UNSIGNED > 0
1656 && mode == Pmode && GET_MODE (op) == ptr_mode
1657 && (CONSTANT_P (op)
1658 || (GET_CODE (op) == SUBREG
1659 && REG_P (SUBREG_REG (op))
1660 && REG_POINTER (SUBREG_REG (op))
1661 && GET_MODE (SUBREG_REG (op)) == Pmode))
1662 && !targetm.have_ptr_extend ())
1664 temp
1665 = convert_memory_address_addr_space_1 (Pmode, op,
1666 ADDR_SPACE_GENERIC, false,
1667 true);
1668 if (temp)
1669 return temp;
1671 #endif
1672 break;
1674 default:
1675 break;
1678 return 0;
1681 /* Try to compute the value of a unary operation CODE whose output mode is to
1682 be MODE with input operand OP whose mode was originally OP_MODE.
1683 Return zero if the value cannot be computed. */
1685 simplify_const_unary_operation (enum rtx_code code, machine_mode mode,
1686 rtx op, machine_mode op_mode)
1688 unsigned int width = GET_MODE_PRECISION (mode);
1690 if (code == VEC_DUPLICATE)
1692 gcc_assert (VECTOR_MODE_P (mode));
1693 if (GET_MODE (op) != VOIDmode)
1695 if (!VECTOR_MODE_P (GET_MODE (op)))
1696 gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
1697 else
1698 gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
1699 (GET_MODE (op)));
1701 if (CONST_SCALAR_INT_P (op) || CONST_DOUBLE_AS_FLOAT_P (op)
1702 || GET_CODE (op) == CONST_VECTOR)
1704 int elt_size = GET_MODE_UNIT_SIZE (mode);
1705 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1706 rtvec v = rtvec_alloc (n_elts);
1707 unsigned int i;
1709 if (GET_CODE (op) != CONST_VECTOR)
1710 for (i = 0; i < n_elts; i++)
1711 RTVEC_ELT (v, i) = op;
1712 else
1714 machine_mode inmode = GET_MODE (op);
1715 int in_elt_size = GET_MODE_UNIT_SIZE (inmode);
1716 unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
1718 gcc_assert (in_n_elts < n_elts);
1719 gcc_assert ((n_elts % in_n_elts) == 0);
1720 for (i = 0; i < n_elts; i++)
1721 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
1723 return gen_rtx_CONST_VECTOR (mode, v);
1727 if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
1729 int elt_size = GET_MODE_UNIT_SIZE (mode);
1730 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1731 machine_mode opmode = GET_MODE (op);
1732 int op_elt_size = GET_MODE_UNIT_SIZE (opmode);
1733 unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
1734 rtvec v = rtvec_alloc (n_elts);
1735 unsigned int i;
1737 gcc_assert (op_n_elts == n_elts);
1738 for (i = 0; i < n_elts; i++)
1740 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
1741 CONST_VECTOR_ELT (op, i),
1742 GET_MODE_INNER (opmode));
1743 if (!x)
1744 return 0;
1745 RTVEC_ELT (v, i) = x;
1747 return gen_rtx_CONST_VECTOR (mode, v);
1750 /* The order of these tests is critical so that, for example, we don't
1751 check the wrong mode (input vs. output) for a conversion operation,
1752 such as FIX. At some point, this should be simplified. */
1754 if (code == FLOAT && CONST_SCALAR_INT_P (op))
1756 REAL_VALUE_TYPE d;
1758 if (op_mode == VOIDmode)
 1760         /* CONST_INTs have VOIDmode as the mode.  We assume that all
 1761            the bits of the constant are significant, though this is
1762 a dangerous assumption as many times CONST_INTs are
1763 created and used with garbage in the bits outside of the
1764 precision of the implied mode of the const_int. */
1765 op_mode = MAX_MODE_INT;
1768 real_from_integer (&d, mode, rtx_mode_t (op, op_mode), SIGNED);
1770 /* Avoid the folding if flag_signaling_nans is on and
1771 operand is a signaling NaN. */
1772 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1773 return 0;
1775 d = real_value_truncate (mode, d);
1776 return const_double_from_real_value (d, mode);
1778 else if (code == UNSIGNED_FLOAT && CONST_SCALAR_INT_P (op))
1780 REAL_VALUE_TYPE d;
1782 if (op_mode == VOIDmode)
 1784         /* CONST_INTs have VOIDmode as the mode.  We assume that all
 1785            the bits of the constant are significant, though this is
1786 a dangerous assumption as many times CONST_INTs are
1787 created and used with garbage in the bits outside of the
1788 precision of the implied mode of the const_int. */
1789 op_mode = MAX_MODE_INT;
1792 real_from_integer (&d, mode, rtx_mode_t (op, op_mode), UNSIGNED);
1794 /* Avoid the folding if flag_signaling_nans is on and
1795 operand is a signaling NaN. */
1796 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1797 return 0;
1799 d = real_value_truncate (mode, d);
1800 return const_double_from_real_value (d, mode);
1803 if (CONST_SCALAR_INT_P (op) && width > 0)
1805 wide_int result;
1806 machine_mode imode = op_mode == VOIDmode ? mode : op_mode;
1807 rtx_mode_t op0 = rtx_mode_t (op, imode);
1808 int int_value;
1810 #if TARGET_SUPPORTS_WIDE_INT == 0
1811 /* This assert keeps the simplification from producing a result
1812 that cannot be represented in a CONST_DOUBLE, but a lot of
1813 upstream callers expect that this function never fails to
1814 simplify something, so if you added this to the test
1815 above, the code would die later anyway. If this assert
1816 fires, you just need to make the port support wide int. */
1817 gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT);
1818 #endif
1820 switch (code)
1822 case NOT:
1823 result = wi::bit_not (op0);
1824 break;
1826 case NEG:
1827 result = wi::neg (op0);
1828 break;
1830 case ABS:
1831 result = wi::abs (op0);
1832 break;
1834 case FFS:
1835 result = wi::shwi (wi::ffs (op0), mode);
1836 break;
1838 case CLZ:
1839 if (wi::ne_p (op0, 0))
1840 int_value = wi::clz (op0);
1841 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, int_value))
1842 int_value = GET_MODE_PRECISION (mode);
1843 result = wi::shwi (int_value, mode);
1844 break;
1846 case CLRSB:
1847 result = wi::shwi (wi::clrsb (op0), mode);
1848 break;
1850 case CTZ:
1851 if (wi::ne_p (op0, 0))
1852 int_value = wi::ctz (op0);
1853 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, int_value))
1854 int_value = GET_MODE_PRECISION (mode);
1855 result = wi::shwi (int_value, mode);
1856 break;
1858 case POPCOUNT:
1859 result = wi::shwi (wi::popcount (op0), mode);
1860 break;
1862 case PARITY:
1863 result = wi::shwi (wi::parity (op0), mode);
1864 break;
1866 case BSWAP:
1867 result = wide_int (op0).bswap ();
1868 break;
1870 case TRUNCATE:
1871 case ZERO_EXTEND:
1872 result = wide_int::from (op0, width, UNSIGNED);
1873 break;
1875 case SIGN_EXTEND:
1876 result = wide_int::from (op0, width, SIGNED);
1877 break;
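      /* For example, extending the QImode value 0x80 to SImode gives
         0x00000080 for ZERO_EXTEND above but 0xffffff80 for SIGN_EXTEND.  */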
1879 case SQRT:
1880 default:
1881 return 0;
1884 return immed_wide_int_const (result, mode);
1887 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1888 && SCALAR_FLOAT_MODE_P (mode)
1889 && SCALAR_FLOAT_MODE_P (GET_MODE (op)))
1891 REAL_VALUE_TYPE d = *CONST_DOUBLE_REAL_VALUE (op);
1892 switch (code)
1894 case SQRT:
1895 return 0;
1896 case ABS:
1897 d = real_value_abs (&d);
1898 break;
1899 case NEG:
1900 d = real_value_negate (&d);
1901 break;
1902 case FLOAT_TRUNCATE:
1903 /* Don't perform the operation if flag_signaling_nans is on
1904 and the operand is a signaling NaN. */
1905 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1906 return NULL_RTX;
1907 d = real_value_truncate (mode, d);
1908 break;
1909 case FLOAT_EXTEND:
1910 /* Don't perform the operation if flag_signaling_nans is on
1911 and the operand is a signaling NaN. */
1912 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1913 return NULL_RTX;
1914 /* All this does is change the mode, unless we are
1915 changing the mode class. */
1916 if (GET_MODE_CLASS (mode) != GET_MODE_CLASS (GET_MODE (op)))
1917 real_convert (&d, mode, &d);
1918 break;
1919 case FIX:
1920 /* Don't perform the operation if flag_signaling_nans is on
1921 and the operand is a signaling NaN. */
1922 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1923 return NULL_RTX;
1924 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
1925 break;
1926 case NOT:
1928 long tmp[4];
1929 int i;
1931 real_to_target (tmp, &d, GET_MODE (op));
1932 for (i = 0; i < 4; i++)
1933 tmp[i] = ~tmp[i];
1934 real_from_target (&d, tmp, mode);
1935 break;
1937 default:
1938 gcc_unreachable ();
1940 return const_double_from_real_value (d, mode);
1942 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1943 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1944 && GET_MODE_CLASS (mode) == MODE_INT
1945 && width > 0)
1947 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1948 operators are intentionally left unspecified (to ease implementation
1949 by target backends), for consistency, this routine implements the
1950 same semantics for constant folding as used by the middle-end. */
1952 /* This was formerly used only for non-IEEE float.
1953 eggert@twinsun.com says it is safe for IEEE also. */
1954 REAL_VALUE_TYPE t;
1955 const REAL_VALUE_TYPE *x = CONST_DOUBLE_REAL_VALUE (op);
1956 wide_int wmax, wmin;
1957 /* This is part of the ABI of real_to_integer, but we check
1958 things before making this call. */
1959 bool fail;
1961 switch (code)
1963 case FIX:
1964 if (REAL_VALUE_ISNAN (*x))
1965 return const0_rtx;
1967 /* Test against the signed upper bound. */
1968 wmax = wi::max_value (width, SIGNED);
1969 real_from_integer (&t, VOIDmode, wmax, SIGNED);
1970 if (real_less (&t, x))
1971 return immed_wide_int_const (wmax, mode);
1973 /* Test against the signed lower bound. */
1974 wmin = wi::min_value (width, SIGNED);
1975 real_from_integer (&t, VOIDmode, wmin, SIGNED);
1976 if (real_less (x, &t))
1977 return immed_wide_int_const (wmin, mode);
1979 return immed_wide_int_const (real_to_integer (x, &fail, width),
1980 mode);
1982 case UNSIGNED_FIX:
1983 if (REAL_VALUE_ISNAN (*x) || REAL_VALUE_NEGATIVE (*x))
1984 return const0_rtx;
1986 /* Test against the unsigned upper bound. */
1987 wmax = wi::max_value (width, UNSIGNED);
1988 real_from_integer (&t, VOIDmode, wmax, UNSIGNED);
1989 if (real_less (&t, x))
1990 return immed_wide_int_const (wmax, mode);
1992 return immed_wide_int_const (real_to_integer (x, &fail, width),
1993 mode);
1995 default:
1996 gcc_unreachable ();
2000 return NULL_RTX;
2003 /* Subroutine of simplify_binary_operation to simplify a binary operation
2004 CODE that can commute with byte swapping, with result mode MODE and
2005 operating on OP0 and OP1. CODE is currently one of AND, IOR or XOR.
2006 Return zero if no simplification or canonicalization is possible. */
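   /* For example, in SImode (and (bswap x) (const_int 0xff)) can be
      rewritten as (bswap (and x (const_int 0xff000000))), since
      applying BSWAP to the constant commutes with the bitwise AND.  */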
2008 static rtx
2009 simplify_byte_swapping_operation (enum rtx_code code, machine_mode mode,
2010 rtx op0, rtx op1)
2012 rtx tem;
2014 /* (op (bswap x) C1) -> (bswap (op x C2)) with C2 swapped. */
2015 if (GET_CODE (op0) == BSWAP && CONST_SCALAR_INT_P (op1))
2017 tem = simplify_gen_binary (code, mode, XEXP (op0, 0),
2018 simplify_gen_unary (BSWAP, mode, op1, mode));
2019 return simplify_gen_unary (BSWAP, mode, tem, mode);
2022 /* (op (bswap x) (bswap y)) -> (bswap (op x y)). */
2023 if (GET_CODE (op0) == BSWAP && GET_CODE (op1) == BSWAP)
2025 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), XEXP (op1, 0));
2026 return simplify_gen_unary (BSWAP, mode, tem, mode);
2029 return NULL_RTX;
2032 /* Subroutine of simplify_binary_operation to simplify a commutative,
2033 associative binary operation CODE with result mode MODE, operating
2034 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
2035 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
2036 canonicalization is possible. */
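   /* For example, (plus (plus x (const_int 4)) y) is canonicalized
      as (plus (plus x y) (const_int 4)), moving the constant to the
      outermost operation.  */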
2038 static rtx
2039 simplify_associative_operation (enum rtx_code code, machine_mode mode,
2040 rtx op0, rtx op1)
2042 rtx tem;
2044 /* Linearize the operator to the left. */
2045 if (GET_CODE (op1) == code)
2047 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d". */
2048 if (GET_CODE (op0) == code)
2050 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
2051 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
2054 /* "a op (b op c)" becomes "(b op c) op a". */
2055 if (! swap_commutative_operands_p (op1, op0))
2056 return simplify_gen_binary (code, mode, op1, op0);
2058 std::swap (op0, op1);
2061 if (GET_CODE (op0) == code)
2063 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
2064 if (swap_commutative_operands_p (XEXP (op0, 1), op1))
2066 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
2067 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
2070 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
2071 tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
2072 if (tem != 0)
2073 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
2075 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
2076 tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
2077 if (tem != 0)
2078 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
2081 return 0;
2085 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
2086 and OP1. Return 0 if no simplification is possible.
2088 Don't use this for relational operations such as EQ or LT.
2089 Use simplify_relational_operation instead. */
2091 simplify_binary_operation (enum rtx_code code, machine_mode mode,
2092 rtx op0, rtx op1)
2094 rtx trueop0, trueop1;
2095 rtx tem;
2097 /* Relational operations don't work here. We must know the mode
2098 of the operands in order to do the comparison correctly.
2099 Assuming a full word can give incorrect results.
2100 Consider comparing 128 with -128 in QImode. */
2101 gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
2102 gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
2104 /* Make sure the constant is second. */
2105 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
2106 && swap_commutative_operands_p (op0, op1))
2107 std::swap (op0, op1);
2109 trueop0 = avoid_constant_pool_reference (op0);
2110 trueop1 = avoid_constant_pool_reference (op1);
2112 tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
2113 if (tem)
2114 return tem;
2115 tem = simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
2117 if (tem)
2118 return tem;
2120 /* If the above steps did not result in a simplification and op0 or op1
2121 were constant pool references, use the referenced constants directly. */
2122 if (trueop0 != op0 || trueop1 != op1)
2123 return simplify_gen_binary (code, mode, trueop0, trueop1);
2125 return NULL_RTX;
2128 /* Subroutine of simplify_binary_operation. Simplify a binary operation
2129 CODE with result mode MODE, operating on OP0 and OP1. If OP0 and/or
2130 OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
2131 actual constants. */
2133 static rtx
2134 simplify_binary_operation_1 (enum rtx_code code, machine_mode mode,
2135 rtx op0, rtx op1, rtx trueop0, rtx trueop1)
2137 rtx tem, reversed, opleft, opright;
2138 HOST_WIDE_INT val;
2139 unsigned int width = GET_MODE_PRECISION (mode);
2141 /* Even if we can't compute a constant result,
2142 there are some cases worth simplifying. */
2144 switch (code)
2146 case PLUS:
2147 /* Maybe simplify x + 0 to x. The two expressions are equivalent
2148 when x is NaN, infinite, or finite and nonzero. They aren't
2149 when x is -0 and the rounding mode is not towards -infinity,
2150 since (-0) + 0 is then 0. */
2151 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
2152 return op0;
2154 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
2155 transformations are safe even for IEEE. */
2156 if (GET_CODE (op0) == NEG)
2157 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
2158 else if (GET_CODE (op1) == NEG)
2159 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
2161 /* (~a) + 1 -> -a */
2162 if (INTEGRAL_MODE_P (mode)
2163 && GET_CODE (op0) == NOT
2164 && trueop1 == const1_rtx)
2165 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
2167 /* Handle both-operands-constant cases. We can only add
2168 CONST_INTs to constants since the sum of relocatable symbols
2169 can't be handled by most assemblers. Don't add CONST_INT
2170 to CONST_INT since overflow won't be computed properly if wider
2171 than HOST_BITS_PER_WIDE_INT. */
2173 if ((GET_CODE (op0) == CONST
2174 || GET_CODE (op0) == SYMBOL_REF
2175 || GET_CODE (op0) == LABEL_REF)
2176 && CONST_INT_P (op1))
2177 return plus_constant (mode, op0, INTVAL (op1));
2178 else if ((GET_CODE (op1) == CONST
2179 || GET_CODE (op1) == SYMBOL_REF
2180 || GET_CODE (op1) == LABEL_REF)
2181 && CONST_INT_P (op0))
2182 return plus_constant (mode, op1, INTVAL (op0));
2184 /* See if this is something like X * C - X or vice versa or
2185 if the multiplication is written as a shift. If so, we can
2186 distribute and make a new multiply, shift, or maybe just
2187 have X (if C is 2 in the example above). But don't make
2188 something more expensive than we had before. */
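      /* For example, (plus (mult x (const_int 3)) x) can become
         (mult x (const_int 4)), and (plus (ashift x (const_int 2)) x)
         can become (mult x (const_int 5)), provided the new form is
         no more expensive than the old one.  */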
2190 if (SCALAR_INT_MODE_P (mode))
2192 rtx lhs = op0, rhs = op1;
2194 wide_int coeff0 = wi::one (GET_MODE_PRECISION (mode));
2195 wide_int coeff1 = wi::one (GET_MODE_PRECISION (mode));
2197 if (GET_CODE (lhs) == NEG)
2199 coeff0 = wi::minus_one (GET_MODE_PRECISION (mode));
2200 lhs = XEXP (lhs, 0);
2202 else if (GET_CODE (lhs) == MULT
2203 && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
2205 coeff0 = rtx_mode_t (XEXP (lhs, 1), mode);
2206 lhs = XEXP (lhs, 0);
2208 else if (GET_CODE (lhs) == ASHIFT
2209 && CONST_INT_P (XEXP (lhs, 1))
2210 && INTVAL (XEXP (lhs, 1)) >= 0
2211 && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (mode))
2213 coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
2214 GET_MODE_PRECISION (mode));
2215 lhs = XEXP (lhs, 0);
2218 if (GET_CODE (rhs) == NEG)
2220 coeff1 = wi::minus_one (GET_MODE_PRECISION (mode));
2221 rhs = XEXP (rhs, 0);
2223 else if (GET_CODE (rhs) == MULT
2224 && CONST_INT_P (XEXP (rhs, 1)))
2226 coeff1 = rtx_mode_t (XEXP (rhs, 1), mode);
2227 rhs = XEXP (rhs, 0);
2229 else if (GET_CODE (rhs) == ASHIFT
2230 && CONST_INT_P (XEXP (rhs, 1))
2231 && INTVAL (XEXP (rhs, 1)) >= 0
2232 && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (mode))
2234 coeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
2235 GET_MODE_PRECISION (mode));
2236 rhs = XEXP (rhs, 0);
2239 if (rtx_equal_p (lhs, rhs))
2241 rtx orig = gen_rtx_PLUS (mode, op0, op1);
2242 rtx coeff;
2243 bool speed = optimize_function_for_speed_p (cfun);
2245 coeff = immed_wide_int_const (coeff0 + coeff1, mode);
2247 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2248 return (set_src_cost (tem, mode, speed)
2249 <= set_src_cost (orig, mode, speed) ? tem : 0);
2253 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
2254 if (CONST_SCALAR_INT_P (op1)
2255 && GET_CODE (op0) == XOR
2256 && CONST_SCALAR_INT_P (XEXP (op0, 1))
2257 && mode_signbit_p (mode, op1))
2258 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2259 simplify_gen_binary (XOR, mode, op1,
2260 XEXP (op0, 1)));
2262 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
2263 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2264 && GET_CODE (op0) == MULT
2265 && GET_CODE (XEXP (op0, 0)) == NEG)
2267 rtx in1, in2;
2269 in1 = XEXP (XEXP (op0, 0), 0);
2270 in2 = XEXP (op0, 1);
2271 return simplify_gen_binary (MINUS, mode, op1,
2272 simplify_gen_binary (MULT, mode,
2273 in1, in2));
2276 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
2277 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
2278 is 1. */
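      /* For example, with STORE_FLAG_VALUE == 1,
         (plus (eq a b) (const_int -1)) becomes (neg (ne a b)).  */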
2279 if (COMPARISON_P (op0)
2280 && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
2281 || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
2282 && (reversed = reversed_comparison (op0, mode)))
2283 return
2284 simplify_gen_unary (NEG, mode, reversed, mode);
2286 /* If one of the operands is a PLUS or a MINUS, see if we can
2287 simplify this by the associative law.
2288 Don't use the associative law for floating point.
2289 The inaccuracy makes it nonassociative,
2290 and subtle programs can break if operations are associated. */
2292 if (INTEGRAL_MODE_P (mode)
2293 && (plus_minus_operand_p (op0)
2294 || plus_minus_operand_p (op1))
2295 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2296 return tem;
2298 /* Reassociate floating point addition only when the user
2299 specifies associative math operations. */
2300 if (FLOAT_MODE_P (mode)
2301 && flag_associative_math)
2303 tem = simplify_associative_operation (code, mode, op0, op1);
2304 if (tem)
2305 return tem;
2307 break;
2309 case COMPARE:
2310 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
2311 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
2312 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
2313 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
2315 rtx xop00 = XEXP (op0, 0);
2316 rtx xop10 = XEXP (op1, 0);
2318 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
2319 return xop00;
2321 if (REG_P (xop00) && REG_P (xop10)
2322 && REGNO (xop00) == REGNO (xop10)
2323 && GET_MODE (xop00) == mode
2324 && GET_MODE (xop10) == mode
2325 && GET_MODE_CLASS (mode) == MODE_CC)
2326 return xop00;
2328 break;
2330 case MINUS:
2331 /* We can't assume x-x is 0 even with non-IEEE floating point,
2332 but since it is zero except in very strange circumstances, we
2333 will treat it as zero with -ffinite-math-only. */
2334 if (rtx_equal_p (trueop0, trueop1)
2335 && ! side_effects_p (op0)
2336 && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
2337 return CONST0_RTX (mode);
2339 /* Change subtraction from zero into negation. (0 - x) is the
2340 same as -x when x is NaN, infinite, or finite and nonzero.
2341 But if the mode has signed zeros, and does not round towards
2342 -infinity, then 0 - 0 is 0, not -0. */
2343 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
2344 return simplify_gen_unary (NEG, mode, op1, mode);
2346 /* (-1 - a) is ~a, unless the expression contains symbolic
2347 constants, in which case not retaining additions and
2348 subtractions could cause invalid assembly to be produced. */
2349 if (trueop0 == constm1_rtx
2350 && !contains_symbolic_reference_p (op1))
2351 return simplify_gen_unary (NOT, mode, op1, mode);
2353 /* Subtracting 0 has no effect unless the mode has signed zeros
2354 and supports rounding towards -infinity. In such a case,
2355 0 - 0 is -0. */
2356 if (!(HONOR_SIGNED_ZEROS (mode)
2357 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
2358 && trueop1 == CONST0_RTX (mode))
2359 return op0;
2361 /* See if this is something like X * C - X or vice versa or
2362 if the multiplication is written as a shift. If so, we can
2363 distribute and make a new multiply, shift, or maybe just
2364 have X (if C is 2 in the example above). But don't make
2365 something more expensive than we had before. */
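      /* For example, (minus (mult x (const_int 3)) x) can become
         (mult x (const_int 2)), again subject to the cost check below.  */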
2367 if (SCALAR_INT_MODE_P (mode))
2369 rtx lhs = op0, rhs = op1;
2371 wide_int coeff0 = wi::one (GET_MODE_PRECISION (mode));
2372 wide_int negcoeff1 = wi::minus_one (GET_MODE_PRECISION (mode));
2374 if (GET_CODE (lhs) == NEG)
2376 coeff0 = wi::minus_one (GET_MODE_PRECISION (mode));
2377 lhs = XEXP (lhs, 0);
2379 else if (GET_CODE (lhs) == MULT
2380 && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
2382 coeff0 = rtx_mode_t (XEXP (lhs, 1), mode);
2383 lhs = XEXP (lhs, 0);
2385 else if (GET_CODE (lhs) == ASHIFT
2386 && CONST_INT_P (XEXP (lhs, 1))
2387 && INTVAL (XEXP (lhs, 1)) >= 0
2388 && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (mode))
2390 coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
2391 GET_MODE_PRECISION (mode));
2392 lhs = XEXP (lhs, 0);
2395 if (GET_CODE (rhs) == NEG)
2397 negcoeff1 = wi::one (GET_MODE_PRECISION (mode));
2398 rhs = XEXP (rhs, 0);
2400 else if (GET_CODE (rhs) == MULT
2401 && CONST_INT_P (XEXP (rhs, 1)))
2403 negcoeff1 = wi::neg (rtx_mode_t (XEXP (rhs, 1), mode));
2404 rhs = XEXP (rhs, 0);
2406 else if (GET_CODE (rhs) == ASHIFT
2407 && CONST_INT_P (XEXP (rhs, 1))
2408 && INTVAL (XEXP (rhs, 1)) >= 0
2409 && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (mode))
2411 negcoeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
2412 GET_MODE_PRECISION (mode));
2413 negcoeff1 = -negcoeff1;
2414 rhs = XEXP (rhs, 0);
2417 if (rtx_equal_p (lhs, rhs))
2419 rtx orig = gen_rtx_MINUS (mode, op0, op1);
2420 rtx coeff;
2421 bool speed = optimize_function_for_speed_p (cfun);
2423 coeff = immed_wide_int_const (coeff0 + negcoeff1, mode);
2425 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2426 return (set_src_cost (tem, mode, speed)
2427 <= set_src_cost (orig, mode, speed) ? tem : 0);
2431 /* (a - (-b)) -> (a + b). True even for IEEE. */
2432 if (GET_CODE (op1) == NEG)
2433 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
2435 /* (-x - c) may be simplified as (-c - x). */
2436 if (GET_CODE (op0) == NEG
2437 && (CONST_SCALAR_INT_P (op1) || CONST_DOUBLE_AS_FLOAT_P (op1)))
2439 tem = simplify_unary_operation (NEG, mode, op1, mode);
2440 if (tem)
2441 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
2444 /* Don't let a relocatable value get a negative coeff. */
2445 if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode)
2446 return simplify_gen_binary (PLUS, mode,
2447 op0,
2448 neg_const_int (mode, op1));
2450 /* (x - (x & y)) -> (x & ~y) */
2451 if (INTEGRAL_MODE_P (mode) && GET_CODE (op1) == AND)
2453 if (rtx_equal_p (op0, XEXP (op1, 0)))
2455 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
2456 GET_MODE (XEXP (op1, 1)));
2457 return simplify_gen_binary (AND, mode, op0, tem);
2459 if (rtx_equal_p (op0, XEXP (op1, 1)))
2461 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
2462 GET_MODE (XEXP (op1, 0)));
2463 return simplify_gen_binary (AND, mode, op0, tem);
2467 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
2468 by reversing the comparison code if valid. */
2469 if (STORE_FLAG_VALUE == 1
2470 && trueop0 == const1_rtx
2471 && COMPARISON_P (op1)
2472 && (reversed = reversed_comparison (op1, mode)))
2473 return reversed;
2475 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
2476 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2477 && GET_CODE (op1) == MULT
2478 && GET_CODE (XEXP (op1, 0)) == NEG)
2480 rtx in1, in2;
2482 in1 = XEXP (XEXP (op1, 0), 0);
2483 in2 = XEXP (op1, 1);
2484 return simplify_gen_binary (PLUS, mode,
2485 simplify_gen_binary (MULT, mode,
2486 in1, in2),
2487 op0);
2490 /* Canonicalize (minus (neg A) (mult B C)) to
2491 (minus (mult (neg B) C) A). */
2492 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2493 && GET_CODE (op1) == MULT
2494 && GET_CODE (op0) == NEG)
2496 rtx in1, in2;
2498 in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
2499 in2 = XEXP (op1, 1);
2500 return simplify_gen_binary (MINUS, mode,
2501 simplify_gen_binary (MULT, mode,
2502 in1, in2),
2503 XEXP (op0, 0));
2506 /* If one of the operands is a PLUS or a MINUS, see if we can
2507 simplify this by the associative law. This will, for example,
2508 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
2509 Don't use the associative law for floating point.
2510 The inaccuracy makes it nonassociative,
2511 and subtle programs can break if operations are associated. */
2513 if (INTEGRAL_MODE_P (mode)
2514 && (plus_minus_operand_p (op0)
2515 || plus_minus_operand_p (op1))
2516 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2517 return tem;
2518 break;
2520 case MULT:
2521 if (trueop1 == constm1_rtx)
2522 return simplify_gen_unary (NEG, mode, op0, mode);
2524 if (GET_CODE (op0) == NEG)
2526 rtx temp = simplify_unary_operation (NEG, mode, op1, mode);
2527 /* If op1 is a MULT as well and simplify_unary_operation
2528 just moved the NEG to the second operand, simplify_gen_binary
2529 below could, through simplify_associative_operation, move
2530 the NEG around again and recurse endlessly. */
2531 if (temp
2532 && GET_CODE (op1) == MULT
2533 && GET_CODE (temp) == MULT
2534 && XEXP (op1, 0) == XEXP (temp, 0)
2535 && GET_CODE (XEXP (temp, 1)) == NEG
2536 && XEXP (op1, 1) == XEXP (XEXP (temp, 1), 0))
2537 temp = NULL_RTX;
2538 if (temp)
2539 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), temp);
2541 if (GET_CODE (op1) == NEG)
2543 rtx temp = simplify_unary_operation (NEG, mode, op0, mode);
2544 /* If op0 is a MULT as well and simplify_unary_operation
2545 just moved the NEG to the second operand, simplify_gen_binary
2546 below could, through simplify_associative_operation, move
2547 the NEG around again and recurse endlessly. */
2548 if (temp
2549 && GET_CODE (op0) == MULT
2550 && GET_CODE (temp) == MULT
2551 && XEXP (op0, 0) == XEXP (temp, 0)
2552 && GET_CODE (XEXP (temp, 1)) == NEG
2553 && XEXP (op0, 1) == XEXP (XEXP (temp, 1), 0))
2554 temp = NULL_RTX;
2555 if (temp)
2556 return simplify_gen_binary (MULT, mode, temp, XEXP (op1, 0));
2559 /* Maybe simplify x * 0 to 0. The reduction is not valid if
2560 x is NaN, since x * 0 is then also NaN. Nor is it valid
2561 when the mode has signed zeros, since multiplying a negative
2562 number by 0 will give -0, not 0. */
2563 if (!HONOR_NANS (mode)
2564 && !HONOR_SIGNED_ZEROS (mode)
2565 && trueop1 == CONST0_RTX (mode)
2566 && ! side_effects_p (op0))
2567 return op1;
2569 /* In IEEE floating point, x*1 is not equivalent to x for
2570 signalling NaNs. */
2571 if (!HONOR_SNANS (mode)
2572 && trueop1 == CONST1_RTX (mode))
2573 return op0;
2575 /* Convert multiply by constant power of two into shift. */
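      /* E.g. (mult x (const_int 8)) becomes (ashift x (const_int 3)).  */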
2576 if (CONST_SCALAR_INT_P (trueop1))
2578 val = wi::exact_log2 (rtx_mode_t (trueop1, mode));
2579 if (val >= 0)
2580 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
2583 /* x*2 is x+x and x*(-1) is -x */
2584 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
2585 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
2586 && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
2587 && GET_MODE (op0) == mode)
2589 const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);
2591 if (real_equal (d1, &dconst2))
2592 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
2594 if (!HONOR_SNANS (mode)
2595 && real_equal (d1, &dconstm1))
2596 return simplify_gen_unary (NEG, mode, op0, mode);
2599 /* Optimize -x * -x as x * x. */
2600 if (FLOAT_MODE_P (mode)
2601 && GET_CODE (op0) == NEG
2602 && GET_CODE (op1) == NEG
2603 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2604 && !side_effects_p (XEXP (op0, 0)))
2605 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2607 /* Likewise, optimize abs(x) * abs(x) as x * x. */
2608 if (SCALAR_FLOAT_MODE_P (mode)
2609 && GET_CODE (op0) == ABS
2610 && GET_CODE (op1) == ABS
2611 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2612 && !side_effects_p (XEXP (op0, 0)))
2613 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2615 /* Reassociate multiplication, but for floating point MULTs
2616 only when the user specifies unsafe math optimizations. */
2617 if (! FLOAT_MODE_P (mode)
2618 || flag_unsafe_math_optimizations)
2620 tem = simplify_associative_operation (code, mode, op0, op1);
2621 if (tem)
2622 return tem;
2624 break;
2626 case IOR:
2627 if (trueop1 == CONST0_RTX (mode))
2628 return op0;
2629 if (INTEGRAL_MODE_P (mode)
2630 && trueop1 == CONSTM1_RTX (mode)
2631 && !side_effects_p (op0))
2632 return op1;
2633 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2634 return op0;
2635 /* A | (~A) -> -1 */
2636 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2637 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2638 && ! side_effects_p (op0)
2639 && SCALAR_INT_MODE_P (mode))
2640 return constm1_rtx;
2642 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
2643 if (CONST_INT_P (op1)
2644 && HWI_COMPUTABLE_MODE_P (mode)
2645 && (nonzero_bits (op0, mode) & ~UINTVAL (op1)) == 0
2646 && !side_effects_p (op0))
2647 return op1;
2649 /* Canonicalize (X & C1) | C2. */
2650 if (GET_CODE (op0) == AND
2651 && CONST_INT_P (trueop1)
2652 && CONST_INT_P (XEXP (op0, 1)))
2654 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
2655 HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
2656 HOST_WIDE_INT c2 = INTVAL (trueop1);
2658 /* If (C1&C2) == C1, then (X&C1)|C2 becomes C2. */
2659 if ((c1 & c2) == c1
2660 && !side_effects_p (XEXP (op0, 0)))
2661 return trueop1;
2663 /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
2664 if (((c1|c2) & mask) == mask)
2665 return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);
2667 /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2. */
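          /* E.g. with C1 == 0x3c and C2 == 0x0f,
             (ior (and X (const_int 0x3c)) (const_int 0x0f)) is rewritten
             as (ior (and X (const_int 0x30)) (const_int 0x0f)).  */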
2668 if (((c1 & ~c2) & mask) != (c1 & mask))
2670 tem = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2671 gen_int_mode (c1 & ~c2, mode));
2672 return simplify_gen_binary (IOR, mode, tem, op1);
2676 /* Convert (A & B) | A to A. */
2677 if (GET_CODE (op0) == AND
2678 && (rtx_equal_p (XEXP (op0, 0), op1)
2679 || rtx_equal_p (XEXP (op0, 1), op1))
2680 && ! side_effects_p (XEXP (op0, 0))
2681 && ! side_effects_p (XEXP (op0, 1)))
2682 return op1;
2684 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2685 mode size to (rotate A CX). */
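      /* E.g. in SImode, (ior (ashift x (const_int 24))
         (lshiftrt x (const_int 8))) becomes (rotate x (const_int 24)).  */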
2687 if (GET_CODE (op1) == ASHIFT
2688 || GET_CODE (op1) == SUBREG)
2690 opleft = op1;
2691 opright = op0;
2693 else
2695 opright = op1;
2696 opleft = op0;
2699 if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
2700 && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
2701 && CONST_INT_P (XEXP (opleft, 1))
2702 && CONST_INT_P (XEXP (opright, 1))
2703 && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
2704 == GET_MODE_PRECISION (mode)))
2705 return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
2707 /* Same, but for ashift that has been "simplified" to a wider mode
2708 by simplify_shift_const. */
2710 if (GET_CODE (opleft) == SUBREG
2711 && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
2712 && GET_CODE (opright) == LSHIFTRT
2713 && GET_CODE (XEXP (opright, 0)) == SUBREG
2714 && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
2715 && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
2716 && (GET_MODE_SIZE (GET_MODE (opleft))
2717 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
2718 && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
2719 SUBREG_REG (XEXP (opright, 0)))
2720 && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
2721 && CONST_INT_P (XEXP (opright, 1))
2722 && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
2723 == GET_MODE_PRECISION (mode)))
2724 return gen_rtx_ROTATE (mode, XEXP (opright, 0),
2725 XEXP (SUBREG_REG (opleft), 1));
2727 /* If we have (ior (and X C1) C2), simplify this by making
2728 C1 as small as possible if C1 actually changes. */
2729 if (CONST_INT_P (op1)
2730 && (HWI_COMPUTABLE_MODE_P (mode)
2731 || INTVAL (op1) > 0)
2732 && GET_CODE (op0) == AND
2733 && CONST_INT_P (XEXP (op0, 1))
2734 && CONST_INT_P (op1)
2735 && (UINTVAL (XEXP (op0, 1)) & UINTVAL (op1)) != 0)
2737 rtx tmp = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2738 gen_int_mode (UINTVAL (XEXP (op0, 1))
2739 & ~UINTVAL (op1),
2740 mode));
2741 return simplify_gen_binary (IOR, mode, tmp, op1);
2744 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2745 a (sign_extend (plus ...)). If so, check whether OP1 is a CONST_INT
2746 and the PLUS does not affect any of the bits in OP1; in that case
2747 we can do the IOR as a PLUS and we can associate. This is valid if
2748 OP1 can be safely shifted left C bits. */
2749 if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
2750 && GET_CODE (XEXP (op0, 0)) == PLUS
2751 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
2752 && CONST_INT_P (XEXP (op0, 1))
2753 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
2755 int count = INTVAL (XEXP (op0, 1));
2756 HOST_WIDE_INT mask = UINTVAL (trueop1) << count;
2758 if (mask >> count == INTVAL (trueop1)
2759 && trunc_int_for_mode (mask, mode) == mask
2760 && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
2761 return simplify_gen_binary (ASHIFTRT, mode,
2762 plus_constant (mode, XEXP (op0, 0),
2763 mask),
2764 XEXP (op0, 1));
2767 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
2768 if (tem)
2769 return tem;
2771 tem = simplify_associative_operation (code, mode, op0, op1);
2772 if (tem)
2773 return tem;
2774 break;
2776 case XOR:
2777 if (trueop1 == CONST0_RTX (mode))
2778 return op0;
2779 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2780 return simplify_gen_unary (NOT, mode, op0, mode);
2781 if (rtx_equal_p (trueop0, trueop1)
2782 && ! side_effects_p (op0)
2783 && GET_MODE_CLASS (mode) != MODE_CC)
2784 return CONST0_RTX (mode);
2786 /* Canonicalize XOR of the most significant bit to PLUS. */
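      /* E.g. in QImode, (xor x C) with C the sign-bit constant 0x80
         becomes (plus x C); adding the sign bit and XOR-ing it agree
         modulo the mode width.  */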
2787 if (CONST_SCALAR_INT_P (op1)
2788 && mode_signbit_p (mode, op1))
2789 return simplify_gen_binary (PLUS, mode, op0, op1);
2790 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2791 if (CONST_SCALAR_INT_P (op1)
2792 && GET_CODE (op0) == PLUS
2793 && CONST_SCALAR_INT_P (XEXP (op0, 1))
2794 && mode_signbit_p (mode, XEXP (op0, 1)))
2795 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2796 simplify_gen_binary (XOR, mode, op1,
2797 XEXP (op0, 1)));
2799 /* If we are XORing two things that have no bits in common,
2800 convert them into an IOR. This helps to detect rotation encoded
2801 using those methods and possibly other simplifications. */
2803 if (HWI_COMPUTABLE_MODE_P (mode)
2804 && (nonzero_bits (op0, mode)
2805 & nonzero_bits (op1, mode)) == 0)
2806 return (simplify_gen_binary (IOR, mode, op0, op1));
2808 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2809 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2810 (NOT y). */
2812 int num_negated = 0;
2814 if (GET_CODE (op0) == NOT)
2815 num_negated++, op0 = XEXP (op0, 0);
2816 if (GET_CODE (op1) == NOT)
2817 num_negated++, op1 = XEXP (op1, 0);
2819 if (num_negated == 2)
2820 return simplify_gen_binary (XOR, mode, op0, op1);
2821 else if (num_negated == 1)
2822 return simplify_gen_unary (NOT, mode,
2823 simplify_gen_binary (XOR, mode, op0, op1),
2824 mode);
2827 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2828 correspond to a machine insn or result in further simplifications
2829 if B is a constant. */
2831 if (GET_CODE (op0) == AND
2832 && rtx_equal_p (XEXP (op0, 1), op1)
2833 && ! side_effects_p (op1))
2834 return simplify_gen_binary (AND, mode,
2835 simplify_gen_unary (NOT, mode,
2836 XEXP (op0, 0), mode),
2837 op1);
2839 else if (GET_CODE (op0) == AND
2840 && rtx_equal_p (XEXP (op0, 0), op1)
2841 && ! side_effects_p (op1))
2842 return simplify_gen_binary (AND, mode,
2843 simplify_gen_unary (NOT, mode,
2844 XEXP (op0, 1), mode),
2845 op1);
2847 /* Given (xor (ior (xor A B) C) D), where B, C and D are
2848 constants, simplify to (xor (ior A C) (B&~C)^D), canceling
2849 out bits inverted twice and not set by C. Similarly, given
2850 (xor (and (xor A B) C) D), simplify without inverting C in
2851 the xor operand: (xor (and A C) (B&C)^D). */
2853 else if ((GET_CODE (op0) == IOR || GET_CODE (op0) == AND)
2854 && GET_CODE (XEXP (op0, 0)) == XOR
2855 && CONST_INT_P (op1)
2856 && CONST_INT_P (XEXP (op0, 1))
2857 && CONST_INT_P (XEXP (XEXP (op0, 0), 1)))
2859 enum rtx_code op = GET_CODE (op0);
2860 rtx a = XEXP (XEXP (op0, 0), 0);
2861 rtx b = XEXP (XEXP (op0, 0), 1);
2862 rtx c = XEXP (op0, 1);
2863 rtx d = op1;
2864 HOST_WIDE_INT bval = INTVAL (b);
2865 HOST_WIDE_INT cval = INTVAL (c);
2866 HOST_WIDE_INT dval = INTVAL (d);
2867 HOST_WIDE_INT xcval;
2869 if (op == IOR)
2870 xcval = ~cval;
2871 else
2872 xcval = cval;
2874 return simplify_gen_binary (XOR, mode,
2875 simplify_gen_binary (op, mode, a, c),
2876 gen_int_mode ((bval & xcval) ^ dval,
2877 mode));
2880 /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
2881 we can transform like this:
2882 (A&B)^C == ~(A&B)&C | ~C&(A&B)
2883 == (~A|~B)&C | ~C&(A&B) * DeMorgan's Law
2884 == ~A&C | ~B&C | A&(~C&B) * Distribute and re-order
2885 Attempt a few simplifications when B and C are both constants. */
2886 if (GET_CODE (op0) == AND
2887 && CONST_INT_P (op1)
2888 && CONST_INT_P (XEXP (op0, 1)))
2890 rtx a = XEXP (op0, 0);
2891 rtx b = XEXP (op0, 1);
2892 rtx c = op1;
2893 HOST_WIDE_INT bval = INTVAL (b);
2894 HOST_WIDE_INT cval = INTVAL (c);
2896 /* Instead of computing ~A&C, we compute its negated value,
2897 ~(A|~C). If it yields -1, ~A&C is zero, so we can
2898 optimize for sure. If it does not simplify, we still try
2899 to compute ~A&C below, but since that always allocates
2900 RTL, we don't try that before committing to returning a
2901 simplified expression. */
2902 rtx n_na_c = simplify_binary_operation (IOR, mode, a,
2903 GEN_INT (~cval));
2905 if ((~cval & bval) == 0)
2907 rtx na_c = NULL_RTX;
2908 if (n_na_c)
2909 na_c = simplify_gen_unary (NOT, mode, n_na_c, mode);
2910 else
2912 /* If ~A does not simplify, don't bother: we don't
2913 want to simplify 2 operations into 3, and if na_c
2914 were to simplify with na, n_na_c would have
2915 simplified as well. */
2916 rtx na = simplify_unary_operation (NOT, mode, a, mode);
2917 if (na)
2918 na_c = simplify_gen_binary (AND, mode, na, c);
2921 /* Try to simplify ~A&C | ~B&C. */
2922 if (na_c != NULL_RTX)
2923 return simplify_gen_binary (IOR, mode, na_c,
2924 gen_int_mode (~bval & cval, mode));
2926 else
2928 /* If ~A&C is zero, simplify A&(~C&B) | ~B&C. */
2929 if (n_na_c == CONSTM1_RTX (mode))
2931 rtx a_nc_b = simplify_gen_binary (AND, mode, a,
2932 gen_int_mode (~cval & bval,
2933 mode));
2934 return simplify_gen_binary (IOR, mode, a_nc_b,
2935 gen_int_mode (~bval & cval,
2936 mode));
2941 /* If we have (xor (and (xor A B) C) A) with C a constant we can instead
2942 do (ior (and A ~C) (and B C)) which is a machine instruction on some
2943 machines, and also has shorter instruction path length. */
2944 if (GET_CODE (op0) == AND
2945 && GET_CODE (XEXP (op0, 0)) == XOR
2946 && CONST_INT_P (XEXP (op0, 1))
2947 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), trueop1))
2949 rtx a = trueop1;
2950 rtx b = XEXP (XEXP (op0, 0), 1);
2951 rtx c = XEXP (op0, 1);
2952 rtx nc = simplify_gen_unary (NOT, mode, c, mode);
2953 rtx a_nc = simplify_gen_binary (AND, mode, a, nc);
2954 rtx bc = simplify_gen_binary (AND, mode, b, c);
2955 return simplify_gen_binary (IOR, mode, a_nc, bc);
2957 /* Similarly, (xor (and (xor A B) C) B) as (ior (and A C) (and B ~C)) */
2958 else if (GET_CODE (op0) == AND
2959 && GET_CODE (XEXP (op0, 0)) == XOR
2960 && CONST_INT_P (XEXP (op0, 1))
2961 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), trueop1))
2963 rtx a = XEXP (XEXP (op0, 0), 0);
2964 rtx b = trueop1;
2965 rtx c = XEXP (op0, 1);
2966 rtx nc = simplify_gen_unary (NOT, mode, c, mode);
2967 rtx b_nc = simplify_gen_binary (AND, mode, b, nc);
2968 rtx ac = simplify_gen_binary (AND, mode, a, c);
2969 return simplify_gen_binary (IOR, mode, ac, b_nc);
2972 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2973 comparison if STORE_FLAG_VALUE is 1. */
2974 if (STORE_FLAG_VALUE == 1
2975 && trueop1 == const1_rtx
2976 && COMPARISON_P (op0)
2977 && (reversed = reversed_comparison (op0, mode)))
2978 return reversed;
2980 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2981 is (lt foo (const_int 0)), so we can perform the above
2982 simplification if STORE_FLAG_VALUE is 1. */
2984 if (STORE_FLAG_VALUE == 1
2985 && trueop1 == const1_rtx
2986 && GET_CODE (op0) == LSHIFTRT
2987 && CONST_INT_P (XEXP (op0, 1))
2988 && INTVAL (XEXP (op0, 1)) == GET_MODE_PRECISION (mode) - 1)
2989 return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);
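      /* E.g. in SImode with STORE_FLAG_VALUE == 1,
         (xor (lshiftrt x (const_int 31)) (const_int 1))
         becomes (ge x (const_int 0)).  */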
2991 /* (xor (comparison foo bar) (const_int sign-bit))
2992 when STORE_FLAG_VALUE is the sign bit. */
2993 if (val_signbit_p (mode, STORE_FLAG_VALUE)
2994 && trueop1 == const_true_rtx
2995 && COMPARISON_P (op0)
2996 && (reversed = reversed_comparison (op0, mode)))
2997 return reversed;
2999 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
3000 if (tem)
3001 return tem;
3003 tem = simplify_associative_operation (code, mode, op0, op1);
3004 if (tem)
3005 return tem;
3006 break;
3008 case AND:
3009 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
3010 return trueop1;
3011 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
3012 return op0;
3013 if (HWI_COMPUTABLE_MODE_P (mode))
3015 HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
3016 HOST_WIDE_INT nzop1;
3017 if (CONST_INT_P (trueop1))
3019 HOST_WIDE_INT val1 = INTVAL (trueop1);
3020 /* If we are turning off bits already known off in OP0, we need
3021 not do an AND. */
3022 if ((nzop0 & ~val1) == 0)
3023 return op0;
3025 nzop1 = nonzero_bits (trueop1, mode);
3026 /* If we are clearing all the nonzero bits, the result is zero. */
3027 if ((nzop1 & nzop0) == 0
3028 && !side_effects_p (op0) && !side_effects_p (op1))
3029 return CONST0_RTX (mode);
3031 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
3032 && GET_MODE_CLASS (mode) != MODE_CC)
3033 return op0;
3034 /* A & (~A) -> 0 */
3035 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
3036 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
3037 && ! side_effects_p (op0)
3038 && GET_MODE_CLASS (mode) != MODE_CC)
3039 return CONST0_RTX (mode);
3041 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
3042 there are no nonzero bits of C outside of X's mode. */
3043 if ((GET_CODE (op0) == SIGN_EXTEND
3044 || GET_CODE (op0) == ZERO_EXTEND)
3045 && CONST_INT_P (trueop1)
3046 && HWI_COMPUTABLE_MODE_P (mode)
3047 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
3048 & UINTVAL (trueop1)) == 0)
3050 machine_mode imode = GET_MODE (XEXP (op0, 0));
3051 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
3052 gen_int_mode (INTVAL (trueop1),
3053 imode));
3054 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
3057 /* Transform (and (truncate X) C) into (truncate (and X C)). This way
3058 we might be able to further simplify the AND with X and potentially
3059 remove the truncation altogether. */
3060 if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
3062 rtx x = XEXP (op0, 0);
3063 machine_mode xmode = GET_MODE (x);
3064 tem = simplify_gen_binary (AND, xmode, x,
3065 gen_int_mode (INTVAL (trueop1), xmode));
3066 return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
3069 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
3070 if (GET_CODE (op0) == IOR
3071 && CONST_INT_P (trueop1)
3072 && CONST_INT_P (XEXP (op0, 1)))
3074 HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
3075 return simplify_gen_binary (IOR, mode,
3076 simplify_gen_binary (AND, mode,
3077 XEXP (op0, 0), op1),
3078 gen_int_mode (tmp, mode));
3081 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
3082 insn (and may simplify more). */
3083 if (GET_CODE (op0) == XOR
3084 && rtx_equal_p (XEXP (op0, 0), op1)
3085 && ! side_effects_p (op1))
3086 return simplify_gen_binary (AND, mode,
3087 simplify_gen_unary (NOT, mode,
3088 XEXP (op0, 1), mode),
3089 op1);
3091 if (GET_CODE (op0) == XOR
3092 && rtx_equal_p (XEXP (op0, 1), op1)
3093 && ! side_effects_p (op1))
3094 return simplify_gen_binary (AND, mode,
3095 simplify_gen_unary (NOT, mode,
3096 XEXP (op0, 0), mode),
3097 op1);
3099 /* Similarly for (~(A ^ B)) & A. */
3100 if (GET_CODE (op0) == NOT
3101 && GET_CODE (XEXP (op0, 0)) == XOR
3102 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
3103 && ! side_effects_p (op1))
3104 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
3106 if (GET_CODE (op0) == NOT
3107 && GET_CODE (XEXP (op0, 0)) == XOR
3108 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
3109 && ! side_effects_p (op1))
3110 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
3112 /* Convert (A | B) & A to A. */
3113 if (GET_CODE (op0) == IOR
3114 && (rtx_equal_p (XEXP (op0, 0), op1)
3115 || rtx_equal_p (XEXP (op0, 1), op1))
3116 && ! side_effects_p (XEXP (op0, 0))
3117 && ! side_effects_p (XEXP (op0, 1)))
3118 return op1;
3120 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
3121 ((A & N) + B) & M -> (A + B) & M
3122 Similarly if (N & M) == 0,
3123 ((A | N) + B) & M -> (A + B) & M
3124 and for - instead of + and/or ^ instead of |.
3125 Also, if (N & M) == 0, then
3126 (A +- N) & M -> A & M. */
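      /* E.g. with M == 0xff and N == 0x100, ((A | N) + B) & M can be
         simplified to (A + B) & M, since the bits introduced by N
         never reach the masked result.  */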
3127 if (CONST_INT_P (trueop1)
3128 && HWI_COMPUTABLE_MODE_P (mode)
3129 && ~UINTVAL (trueop1)
3130 && (UINTVAL (trueop1) & (UINTVAL (trueop1) + 1)) == 0
3131 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
3133 rtx pmop[2];
3134 int which;
3136 pmop[0] = XEXP (op0, 0);
3137 pmop[1] = XEXP (op0, 1);
3139 if (CONST_INT_P (pmop[1])
3140 && (UINTVAL (pmop[1]) & UINTVAL (trueop1)) == 0)
3141 return simplify_gen_binary (AND, mode, pmop[0], op1);
3143 for (which = 0; which < 2; which++)
3145 tem = pmop[which];
3146 switch (GET_CODE (tem))
3148 case AND:
3149 if (CONST_INT_P (XEXP (tem, 1))
3150 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1))
3151 == UINTVAL (trueop1))
3152 pmop[which] = XEXP (tem, 0);
3153 break;
3154 case IOR:
3155 case XOR:
3156 if (CONST_INT_P (XEXP (tem, 1))
3157 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1)) == 0)
3158 pmop[which] = XEXP (tem, 0);
3159 break;
3160 default:
3161 break;
3165 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
3167 tem = simplify_gen_binary (GET_CODE (op0), mode,
3168 pmop[0], pmop[1]);
3169 return simplify_gen_binary (code, mode, tem, op1);
3173 /* (and X (ior (not X) Y) -> (and X Y) */
3174 if (GET_CODE (op1) == IOR
3175 && GET_CODE (XEXP (op1, 0)) == NOT
3176 && rtx_equal_p (op0, XEXP (XEXP (op1, 0), 0)))
3177 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));
3179 /* (and (ior (not X) Y) X) -> (and X Y) */
3180 if (GET_CODE (op0) == IOR
3181 && GET_CODE (XEXP (op0, 0)) == NOT
3182 && rtx_equal_p (op1, XEXP (XEXP (op0, 0), 0)))
3183 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));
3185 /* (and X (ior Y (not X)) -> (and X Y) */
3186 if (GET_CODE (op1) == IOR
3187 && GET_CODE (XEXP (op1, 1)) == NOT
3188 && rtx_equal_p (op0, XEXP (XEXP (op1, 1), 0)))
3189 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 0));
3191 /* (and (ior Y (not X)) X) -> (and X Y) */
3192 if (GET_CODE (op0) == IOR
3193 && GET_CODE (XEXP (op0, 1)) == NOT
3194 && rtx_equal_p (op1, XEXP (XEXP (op0, 1), 0)))
3195 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 0));
3197 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
3198 if (tem)
3199 return tem;
3201 tem = simplify_associative_operation (code, mode, op0, op1);
3202 if (tem)
3203 return tem;
3204 break;
3206 case UDIV:
3207 /* 0/x is 0 (or x&0 if x has side-effects). */
3208 if (trueop0 == CONST0_RTX (mode)
3209 && !cfun->can_throw_non_call_exceptions)
3211 if (side_effects_p (op1))
3212 return simplify_gen_binary (AND, mode, op1, trueop0);
3213 return trueop0;
3215 /* x/1 is x. */
3216 if (trueop1 == CONST1_RTX (mode))
3218 tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3219 if (tem)
3220 return tem;
3222 /* Convert divide by power of two into shift. */
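      /* E.g. (udiv x (const_int 8)) becomes (lshiftrt x (const_int 3)).  */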
3223 if (CONST_INT_P (trueop1)
3224 && (val = exact_log2 (UINTVAL (trueop1))) > 0)
3225 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
3226 break;
3228 case DIV:
3229 /* Handle floating point and integers separately. */
3230 if (SCALAR_FLOAT_MODE_P (mode))
3232 /* Maybe change 0.0 / x to 0.0. This transformation isn't
3233 safe for modes with NaNs, since 0.0 / 0.0 will then be
3234 NaN rather than 0.0. Nor is it safe for modes with signed
3235 zeros, since dividing 0 by a negative number gives -0.0 */
3236 if (trueop0 == CONST0_RTX (mode)
3237 && !HONOR_NANS (mode)
3238 && !HONOR_SIGNED_ZEROS (mode)
3239 && ! side_effects_p (op1))
3240 return op0;
3241 /* x/1.0 is x. */
3242 if (trueop1 == CONST1_RTX (mode)
3243 && !HONOR_SNANS (mode))
3244 return op0;
3246 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
3247 && trueop1 != CONST0_RTX (mode))
3249 const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);
3251 /* x/-1.0 is -x. */
3252 if (real_equal (d1, &dconstm1)
3253 && !HONOR_SNANS (mode))
3254 return simplify_gen_unary (NEG, mode, op0, mode);
3256 /* Change FP division by a constant into multiplication.
3257 Only do this with -freciprocal-math. */
3258 if (flag_reciprocal_math
3259 && !real_equal (d1, &dconst0))
3261 REAL_VALUE_TYPE d;
3262 real_arithmetic (&d, RDIV_EXPR, &dconst1, d1);
3263 tem = const_double_from_real_value (d, mode);
3264 return simplify_gen_binary (MULT, mode, op0, tem);
3268 else if (SCALAR_INT_MODE_P (mode))
3270 /* 0/x is 0 (or x&0 if x has side-effects). */
3271 if (trueop0 == CONST0_RTX (mode)
3272 && !cfun->can_throw_non_call_exceptions)
3274 if (side_effects_p (op1))
3275 return simplify_gen_binary (AND, mode, op1, trueop0);
3276 return trueop0;
3278 /* x/1 is x. */
3279 if (trueop1 == CONST1_RTX (mode))
3281 tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3282 if (tem)
3283 return tem;
3285 /* x/-1 is -x. */
3286 if (trueop1 == constm1_rtx)
3288 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3289 if (x)
3290 return simplify_gen_unary (NEG, mode, x, mode);
3293 break;
3295 case UMOD:
3296 /* 0%x is 0 (or x&0 if x has side-effects). */
3297 if (trueop0 == CONST0_RTX (mode))
3299 if (side_effects_p (op1))
3300 return simplify_gen_binary (AND, mode, op1, trueop0);
3301 return trueop0;
3303 /* x%1 is 0 (or x&0 if x has side-effects). */
3304 if (trueop1 == CONST1_RTX (mode))
3306 if (side_effects_p (op0))
3307 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3308 return CONST0_RTX (mode);
3310 /* Implement modulus by power of two as AND. */
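      /* E.g. (umod x (const_int 8)) becomes (and x (const_int 7)).  */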
3311 if (CONST_INT_P (trueop1)
3312 && exact_log2 (UINTVAL (trueop1)) > 0)
3313 return simplify_gen_binary (AND, mode, op0,
3314 gen_int_mode (INTVAL (op1) - 1, mode));
3315 break;
3317 case MOD:
3318 /* 0%x is 0 (or x&0 if x has side-effects). */
3319 if (trueop0 == CONST0_RTX (mode))
3321 if (side_effects_p (op1))
3322 return simplify_gen_binary (AND, mode, op1, trueop0);
3323 return trueop0;
3325 /* x%1 and x%-1 is 0 (or x&0 if x has side-effects). */
3326 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
3328 if (side_effects_p (op0))
3329 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3330 return CONST0_RTX (mode);
3332 break;
3334 case ROTATERT:
3335 case ROTATE:
3336 /* Canonicalize rotates by constant amount. If op1 is bitsize / 2,
3337 prefer left rotation; if op1 is from bitsize / 2 + 1 to
3338 bitsize - 1, use the other direction of rotate with a 1 .. bitsize / 2 - 1
3339 amount instead. */
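      /* E.g. in SImode, (rotate x (const_int 24)) is canonicalized
         as (rotatert x (const_int 8)).  */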
3340 #if defined(HAVE_rotate) && defined(HAVE_rotatert)
3341 if (CONST_INT_P (trueop1)
3342 && IN_RANGE (INTVAL (trueop1),
3343 GET_MODE_PRECISION (mode) / 2 + (code == ROTATE),
3344 GET_MODE_PRECISION (mode) - 1))
3345 return simplify_gen_binary (code == ROTATE ? ROTATERT : ROTATE,
3346 mode, op0, GEN_INT (GET_MODE_PRECISION (mode)
3347 - INTVAL (trueop1)));
3348 #endif
3349 /* FALLTHRU */
3350 case ASHIFTRT:
3351 if (trueop1 == CONST0_RTX (mode))
3352 return op0;
3353 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3354 return op0;
3355 /* Rotating ~0 always results in ~0. */
3356 if (CONST_INT_P (trueop0) && width <= HOST_BITS_PER_WIDE_INT
3357 && UINTVAL (trueop0) == GET_MODE_MASK (mode)
3358 && ! side_effects_p (op1))
3359 return op0;
3361 canonicalize_shift:
3362 /* Given:
3363 scalar modes M1, M2
3364 scalar constants c1, c2
3365 size (M2) > size (M1)
3366 c1 == size (M2) - size (M1)
3367 optimize:
3368 ([a|l]shiftrt:M1 (subreg:M1 (lshiftrt:M2 (reg:M2) (const_int <c1>))
3369 <low_part>)
3370 (const_int <c2>))
3372 (subreg:M1 ([a|l]shiftrt:M2 (reg:M2) (const_int <c1 + c2>))
3373 <low_part>). */
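      /* For instance, with M2 == DImode and M1 == SImode (so c1 == 32),
         (lshiftrt:SI (subreg:SI (lshiftrt:DI x (const_int 32)) <low_part>)
                      (const_int 5))
         becomes (subreg:SI (lshiftrt:DI x (const_int 37)) <low_part>).  */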
3374 if ((code == ASHIFTRT || code == LSHIFTRT)
3375 && !VECTOR_MODE_P (mode)
3376 && SUBREG_P (op0)
3377 && CONST_INT_P (op1)
3378 && GET_CODE (SUBREG_REG (op0)) == LSHIFTRT
3379 && !VECTOR_MODE_P (GET_MODE (SUBREG_REG (op0)))
3380 && CONST_INT_P (XEXP (SUBREG_REG (op0), 1))
3381 && (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0)))
3382 > GET_MODE_BITSIZE (mode))
3383 && (INTVAL (XEXP (SUBREG_REG (op0), 1))
3384 == (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0)))
3385 - GET_MODE_BITSIZE (mode)))
3386 && subreg_lowpart_p (op0))
3388 rtx tmp = GEN_INT (INTVAL (XEXP (SUBREG_REG (op0), 1))
3389 + INTVAL (op1));
3390 machine_mode inner_mode = GET_MODE (SUBREG_REG (op0));
3391 tmp = simplify_gen_binary (code,
3392 GET_MODE (SUBREG_REG (op0)),
3393 XEXP (SUBREG_REG (op0), 0),
3394 tmp);
3395 return lowpart_subreg (mode, tmp, inner_mode);
3398 if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
3400 val = INTVAL (op1) & (GET_MODE_PRECISION (mode) - 1);
3401 if (val != INTVAL (op1))
3402 return simplify_gen_binary (code, mode, op0, GEN_INT (val));
3404 break;
3406 case ASHIFT:
3407 case SS_ASHIFT:
3408 case US_ASHIFT:
3409 if (trueop1 == CONST0_RTX (mode))
3410 return op0;
3411 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3412 return op0;
3413 goto canonicalize_shift;
3415 case LSHIFTRT:
3416 if (trueop1 == CONST0_RTX (mode))
3417 return op0;
3418 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3419 return op0;
3420 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
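      /* E.g. in SImode, if CLZ of zero is defined to be 32, then
         (lshiftrt (clz x) (const_int 5)) is 1 exactly when x == 0,
         so it becomes (eq x (const_int 0)).  */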
3421 if (GET_CODE (op0) == CLZ
3422 && CONST_INT_P (trueop1)
3423 && STORE_FLAG_VALUE == 1
3424 && INTVAL (trueop1) < (HOST_WIDE_INT)width)
3426 machine_mode imode = GET_MODE (XEXP (op0, 0));
3427 unsigned HOST_WIDE_INT zero_val = 0;
3429 if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
3430 && zero_val == GET_MODE_PRECISION (imode)
3431 && INTVAL (trueop1) == exact_log2 (zero_val))
3432 return simplify_gen_relational (EQ, mode, imode,
3433 XEXP (op0, 0), const0_rtx);
3435 goto canonicalize_shift;
3437 case SMIN:
3438 if (width <= HOST_BITS_PER_WIDE_INT
3439 && mode_signbit_p (mode, trueop1)
3440 && ! side_effects_p (op0))
3441 return op1;
3442 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3443 return op0;
3444 tem = simplify_associative_operation (code, mode, op0, op1);
3445 if (tem)
3446 return tem;
3447 break;
3449 case SMAX:
3450 if (width <= HOST_BITS_PER_WIDE_INT
3451 && CONST_INT_P (trueop1)
3452 && (UINTVAL (trueop1) == GET_MODE_MASK (mode) >> 1)
3453 && ! side_effects_p (op0))
3454 return op1;
3455 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3456 return op0;
3457 tem = simplify_associative_operation (code, mode, op0, op1);
3458 if (tem)
3459 return tem;
3460 break;
3462 case UMIN:
3463 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
3464 return op1;
3465 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3466 return op0;
3467 tem = simplify_associative_operation (code, mode, op0, op1);
3468 if (tem)
3469 return tem;
3470 break;
3472 case UMAX:
3473 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
3474 return op1;
3475 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3476 return op0;
3477 tem = simplify_associative_operation (code, mode, op0, op1);
3478 if (tem)
3479 return tem;
3480 break;
3482 case SS_PLUS:
3483 case US_PLUS:
3484 case SS_MINUS:
3485 case US_MINUS:
3486 case SS_MULT:
3487 case US_MULT:
3488 case SS_DIV:
3489 case US_DIV:
3490 /* ??? There are simplifications that can be done. */
3491 return 0;
3493 case VEC_SELECT:
3494 if (!VECTOR_MODE_P (mode))
3496 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3497 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
3498 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3499 gcc_assert (XVECLEN (trueop1, 0) == 1);
3500 gcc_assert (CONST_INT_P (XVECEXP (trueop1, 0, 0)));
3502 if (GET_CODE (trueop0) == CONST_VECTOR)
3503 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
3504 (trueop1, 0, 0)));
3506 /* Extract a scalar element from a nested VEC_SELECT expression
3507 (with an optional nested VEC_CONCAT expression). Some targets
3508 (i386) extract a scalar element from a vector using a chain of
3509 nested VEC_SELECT expressions. When the input operand is a memory
3510 operand, this operation can be simplified to a simple scalar
3511 load from an offset memory address. */
3512 if (GET_CODE (trueop0) == VEC_SELECT)
3514 rtx op0 = XEXP (trueop0, 0);
3515 rtx op1 = XEXP (trueop0, 1);
3517 machine_mode opmode = GET_MODE (op0);
3518 int elt_size = GET_MODE_UNIT_SIZE (opmode);
3519 int n_elts = GET_MODE_SIZE (opmode) / elt_size;
3521 int i = INTVAL (XVECEXP (trueop1, 0, 0));
3522 int elem;
3524 rtvec vec;
3525 rtx tmp_op, tmp;
3527 gcc_assert (GET_CODE (op1) == PARALLEL);
3528 gcc_assert (i < n_elts);
3531 /* Select the element pointed to by the nested selector. */
3531 elem = INTVAL (XVECEXP (op1, 0, i));
3533 /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */
3534 if (GET_CODE (op0) == VEC_CONCAT)
3536 rtx op00 = XEXP (op0, 0);
3537 rtx op01 = XEXP (op0, 1);
3539 machine_mode mode00, mode01;
3540 int n_elts00, n_elts01;
3542 mode00 = GET_MODE (op00);
3543 mode01 = GET_MODE (op01);
3545 /* Find out number of elements of each operand. */
3546 if (VECTOR_MODE_P (mode00))
3548 elt_size = GET_MODE_UNIT_SIZE (mode00);
3549 n_elts00 = GET_MODE_SIZE (mode00) / elt_size;
3551 else
3552 n_elts00 = 1;
3554 if (VECTOR_MODE_P (mode01))
3556 elt_size = GET_MODE_UNIT_SIZE (mode01);
3557 n_elts01 = GET_MODE_SIZE (mode01) / elt_size;
3559 else
3560 n_elts01 = 1;
3562 gcc_assert (n_elts == n_elts00 + n_elts01);
3564 /* Select correct operand of VEC_CONCAT
3565 and adjust selector. */
3566 if (elem < n_elts01)
3567 tmp_op = op00;
3568 else
3570 tmp_op = op01;
3571 elem -= n_elts00;
3574 else
3575 tmp_op = op0;
3577 vec = rtvec_alloc (1);
3578 RTVEC_ELT (vec, 0) = GEN_INT (elem);
3580 tmp = gen_rtx_fmt_ee (code, mode,
3581 tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
3582 return tmp;
3584 if (GET_CODE (trueop0) == VEC_DUPLICATE
3585 && GET_MODE (XEXP (trueop0, 0)) == mode)
3586 return XEXP (trueop0, 0);
3588 else
3590 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3591 gcc_assert (GET_MODE_INNER (mode)
3592 == GET_MODE_INNER (GET_MODE (trueop0)));
3593 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3595 if (GET_CODE (trueop0) == CONST_VECTOR)
3597 int elt_size = GET_MODE_UNIT_SIZE (mode);
3598 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3599 rtvec v = rtvec_alloc (n_elts);
3600 unsigned int i;
3602 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
3603 for (i = 0; i < n_elts; i++)
3605 rtx x = XVECEXP (trueop1, 0, i);
3607 gcc_assert (CONST_INT_P (x));
3608 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
3609 INTVAL (x));
3612 return gen_rtx_CONST_VECTOR (mode, v);
3615 /* Recognize the identity. */
3616 if (GET_MODE (trueop0) == mode)
3618 bool maybe_ident = true;
3619 for (int i = 0; i < XVECLEN (trueop1, 0); i++)
3621 rtx j = XVECEXP (trueop1, 0, i);
3622 if (!CONST_INT_P (j) || INTVAL (j) != i)
3624 maybe_ident = false;
3625 break;
3628 if (maybe_ident)
3629 return trueop0;
3632 /* If we build {a,b} then permute it, build the result directly. */
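/* A hypothetical example of the transformation below (not from the
   original source): with mode V2SI,
     (vec_select:V2SI
       (vec_concat:V4SI (vec_concat:V2SI a b) (vec_concat:V2SI c d))
       (parallel [(const_int 3) (const_int 0)]))
   picks scalars d and a directly, giving (vec_concat:V2SI d a). */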
3633 if (XVECLEN (trueop1, 0) == 2
3634 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3635 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3636 && GET_CODE (trueop0) == VEC_CONCAT
3637 && GET_CODE (XEXP (trueop0, 0)) == VEC_CONCAT
3638 && GET_MODE (XEXP (trueop0, 0)) == mode
3639 && GET_CODE (XEXP (trueop0, 1)) == VEC_CONCAT
3640 && GET_MODE (XEXP (trueop0, 1)) == mode)
3642 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3643 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3644 rtx subop0, subop1;
3646 gcc_assert (i0 < 4 && i1 < 4);
3647 subop0 = XEXP (XEXP (trueop0, i0 / 2), i0 % 2);
3648 subop1 = XEXP (XEXP (trueop0, i1 / 2), i1 % 2);
3650 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3653 if (XVECLEN (trueop1, 0) == 2
3654 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3655 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3656 && GET_CODE (trueop0) == VEC_CONCAT
3657 && GET_MODE (trueop0) == mode)
3659 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3660 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3661 rtx subop0, subop1;
3663 gcc_assert (i0 < 2 && i1 < 2);
3664 subop0 = XEXP (trueop0, i0);
3665 subop1 = XEXP (trueop0, i1);
3667 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3670 /* If we select one half of a vec_concat, return that. */
3671 if (GET_CODE (trueop0) == VEC_CONCAT
3672 && CONST_INT_P (XVECEXP (trueop1, 0, 0)))
3674 rtx subop0 = XEXP (trueop0, 0);
3675 rtx subop1 = XEXP (trueop0, 1);
3676 machine_mode mode0 = GET_MODE (subop0);
3677 machine_mode mode1 = GET_MODE (subop1);
3678 int li = GET_MODE_UNIT_SIZE (mode0);
3679 int l0 = GET_MODE_SIZE (mode0) / li;
3680 int l1 = GET_MODE_SIZE (mode1) / li;
3681 int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3682 if (i0 == 0 && !side_effects_p (op1) && mode == mode0)
3684 bool success = true;
3685 for (int i = 1; i < l0; ++i)
3687 rtx j = XVECEXP (trueop1, 0, i);
3688 if (!CONST_INT_P (j) || INTVAL (j) != i)
3690 success = false;
3691 break;
3694 if (success)
3695 return subop0;
3697 if (i0 == l0 && !side_effects_p (op0) && mode == mode1)
3699 bool success = true;
3700 for (int i = 1; i < l1; ++i)
3702 rtx j = XVECEXP (trueop1, 0, i);
3703 if (!CONST_INT_P (j) || INTVAL (j) != i0 + i)
3705 success = false;
3706 break;
3709 if (success)
3710 return subop1;
3715 if (XVECLEN (trueop1, 0) == 1
3716 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3717 && GET_CODE (trueop0) == VEC_CONCAT)
3719 rtx vec = trueop0;
3720 int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
3722 /* Try to find the element in the VEC_CONCAT. */
3723 while (GET_MODE (vec) != mode
3724 && GET_CODE (vec) == VEC_CONCAT)
3726 HOST_WIDE_INT vec_size;
3728 if (CONST_INT_P (XEXP (vec, 0)))
3730 /* vec_concat of two const_ints doesn't make sense with
3731 respect to modes. */
3732 if (CONST_INT_P (XEXP (vec, 1)))
3733 return 0;
3735 vec_size = GET_MODE_SIZE (GET_MODE (trueop0))
3736 - GET_MODE_SIZE (GET_MODE (XEXP (vec, 1)));
3738 else
3739 vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
3741 if (offset < vec_size)
3742 vec = XEXP (vec, 0);
3743 else
3745 offset -= vec_size;
3746 vec = XEXP (vec, 1);
3748 vec = avoid_constant_pool_reference (vec);
3751 if (GET_MODE (vec) == mode)
3752 return vec;
3755 /* If we select elements in a vec_merge that all come from the same
3756 operand, select from that operand directly. */
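/* For example (illustrative only): in
     (vec_select:V2SI (vec_merge:V4SI A B (const_int 5)) (parallel [0 2]))
   the merge mask 5 (binary 0101) takes lanes 0 and 2 from A, so both
   selected lanes come from A and the expression can become
   (vec_select:V2SI A (parallel [0 2])). */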
3757 if (GET_CODE (op0) == VEC_MERGE)
3759 rtx trueop02 = avoid_constant_pool_reference (XEXP (op0, 2));
3760 if (CONST_INT_P (trueop02))
3762 unsigned HOST_WIDE_INT sel = UINTVAL (trueop02);
3763 bool all_operand0 = true;
3764 bool all_operand1 = true;
3765 for (int i = 0; i < XVECLEN (trueop1, 0); i++)
3767 rtx j = XVECEXP (trueop1, 0, i);
3768 if (sel & (HOST_WIDE_INT_1U << UINTVAL (j)))
3769 all_operand1 = false;
3770 else
3771 all_operand0 = false;
3773 if (all_operand0 && !side_effects_p (XEXP (op0, 1)))
3774 return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 0), op1);
3775 if (all_operand1 && !side_effects_p (XEXP (op0, 0)))
3776 return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 1), op1);
3780 /* If we have two nested selects that are inverses of each
3781 other, replace them with the source operand. */
3782 if (GET_CODE (trueop0) == VEC_SELECT
3783 && GET_MODE (XEXP (trueop0, 0)) == mode)
3785 rtx op0_subop1 = XEXP (trueop0, 1);
3786 gcc_assert (GET_CODE (op0_subop1) == PARALLEL);
3787 gcc_assert (XVECLEN (trueop1, 0) == GET_MODE_NUNITS (mode));
3789 /* Apply the outer ordering vector to the inner one. (The inner
3790 ordering vector is expressly permitted to be of a different
3791 length than the outer one.) If the result is { 0, 1, ..., n-1 }
3792 then the two VEC_SELECTs cancel. */
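/* For instance (illustrative): if the inner selector is { 3, 2, 1, 0 }
   and the outer selector is also { 3, 2, 1, 0 } (the vector is reversed
   twice), the composition is { 0, 1, 2, 3 }, so the check below succeeds
   and the original source operand is returned unchanged. */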
3793 for (int i = 0; i < XVECLEN (trueop1, 0); ++i)
3795 rtx x = XVECEXP (trueop1, 0, i);
3796 if (!CONST_INT_P (x))
3797 return 0;
3798 rtx y = XVECEXP (op0_subop1, 0, INTVAL (x));
3799 if (!CONST_INT_P (y) || i != INTVAL (y))
3800 return 0;
3802 return XEXP (trueop0, 0);
3805 return 0;
3806 case VEC_CONCAT:
3808 machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
3809 ? GET_MODE (trueop0)
3810 : GET_MODE_INNER (mode));
3811 machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
3812 ? GET_MODE (trueop1)
3813 : GET_MODE_INNER (mode));
3815 gcc_assert (VECTOR_MODE_P (mode));
3816 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
3817 == GET_MODE_SIZE (mode));
3819 if (VECTOR_MODE_P (op0_mode))
3820 gcc_assert (GET_MODE_INNER (mode)
3821 == GET_MODE_INNER (op0_mode));
3822 else
3823 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
3825 if (VECTOR_MODE_P (op1_mode))
3826 gcc_assert (GET_MODE_INNER (mode)
3827 == GET_MODE_INNER (op1_mode));
3828 else
3829 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
3831 if ((GET_CODE (trueop0) == CONST_VECTOR
3832 || CONST_SCALAR_INT_P (trueop0)
3833 || CONST_DOUBLE_AS_FLOAT_P (trueop0))
3834 && (GET_CODE (trueop1) == CONST_VECTOR
3835 || CONST_SCALAR_INT_P (trueop1)
3836 || CONST_DOUBLE_AS_FLOAT_P (trueop1)))
3838 int elt_size = GET_MODE_UNIT_SIZE (mode);
3839 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3840 rtvec v = rtvec_alloc (n_elts);
3841 unsigned int i;
3842 unsigned in_n_elts = 1;
3844 if (VECTOR_MODE_P (op0_mode))
3845 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
3846 for (i = 0; i < n_elts; i++)
3848 if (i < in_n_elts)
3850 if (!VECTOR_MODE_P (op0_mode))
3851 RTVEC_ELT (v, i) = trueop0;
3852 else
3853 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
3855 else
3857 if (!VECTOR_MODE_P (op1_mode))
3858 RTVEC_ELT (v, i) = trueop1;
3859 else
3860 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
3861 i - in_n_elts);
3865 return gen_rtx_CONST_VECTOR (mode, v);
3868 /* Try to merge two VEC_SELECTs from the same vector into a single one.
3869 Restrict the transformation to avoid generating a VEC_SELECT with a
3870 mode unrelated to its operand. */
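/* E.g. (illustrative): with X of mode V4SI,
     (vec_concat:V4SI (vec_select:V2SI X (parallel [0 1]))
		      (vec_select:V2SI X (parallel [3 2])))
   becomes (vec_select:V4SI X (parallel [0 1 3 2])). */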
3871 if (GET_CODE (trueop0) == VEC_SELECT
3872 && GET_CODE (trueop1) == VEC_SELECT
3873 && rtx_equal_p (XEXP (trueop0, 0), XEXP (trueop1, 0))
3874 && GET_MODE (XEXP (trueop0, 0)) == mode)
3876 rtx par0 = XEXP (trueop0, 1);
3877 rtx par1 = XEXP (trueop1, 1);
3878 int len0 = XVECLEN (par0, 0);
3879 int len1 = XVECLEN (par1, 0);
3880 rtvec vec = rtvec_alloc (len0 + len1);
3881 for (int i = 0; i < len0; i++)
3882 RTVEC_ELT (vec, i) = XVECEXP (par0, 0, i);
3883 for (int i = 0; i < len1; i++)
3884 RTVEC_ELT (vec, len0 + i) = XVECEXP (par1, 0, i);
3885 return simplify_gen_binary (VEC_SELECT, mode, XEXP (trueop0, 0),
3886 gen_rtx_PARALLEL (VOIDmode, vec));
3889 return 0;
3891 default:
3892 gcc_unreachable ();
3895 return 0;
3899 simplify_const_binary_operation (enum rtx_code code, machine_mode mode,
3900 rtx op0, rtx op1)
3902 unsigned int width = GET_MODE_PRECISION (mode);
3904 if (VECTOR_MODE_P (mode)
3905 && code != VEC_CONCAT
3906 && GET_CODE (op0) == CONST_VECTOR
3907 && GET_CODE (op1) == CONST_VECTOR)
3909 unsigned n_elts = GET_MODE_NUNITS (mode);
3910 machine_mode op0mode = GET_MODE (op0);
3911 unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
3912 machine_mode op1mode = GET_MODE (op1);
3913 unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
3914 rtvec v = rtvec_alloc (n_elts);
3915 unsigned int i;
3917 gcc_assert (op0_n_elts == n_elts);
3918 gcc_assert (op1_n_elts == n_elts);
3919 for (i = 0; i < n_elts; i++)
3921 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
3922 CONST_VECTOR_ELT (op0, i),
3923 CONST_VECTOR_ELT (op1, i));
3924 if (!x)
3925 return 0;
3926 RTVEC_ELT (v, i) = x;
3929 return gen_rtx_CONST_VECTOR (mode, v);
3932 if (VECTOR_MODE_P (mode)
3933 && code == VEC_CONCAT
3934 && (CONST_SCALAR_INT_P (op0)
3935 || GET_CODE (op0) == CONST_FIXED
3936 || CONST_DOUBLE_AS_FLOAT_P (op0))
3937 && (CONST_SCALAR_INT_P (op1)
3938 || CONST_DOUBLE_AS_FLOAT_P (op1)
3939 || GET_CODE (op1) == CONST_FIXED))
3941 unsigned n_elts = GET_MODE_NUNITS (mode);
3942 rtvec v = rtvec_alloc (n_elts);
3944 gcc_assert (n_elts >= 2);
3945 if (n_elts == 2)
3947 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
3948 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
3950 RTVEC_ELT (v, 0) = op0;
3951 RTVEC_ELT (v, 1) = op1;
3953 else
3955 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
3956 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
3957 unsigned i;
3959 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
3960 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
3961 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
3963 for (i = 0; i < op0_n_elts; ++i)
3964 RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
3965 for (i = 0; i < op1_n_elts; ++i)
3966 RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
3969 return gen_rtx_CONST_VECTOR (mode, v);
3972 if (SCALAR_FLOAT_MODE_P (mode)
3973 && CONST_DOUBLE_AS_FLOAT_P (op0)
3974 && CONST_DOUBLE_AS_FLOAT_P (op1)
3975 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
3977 if (code == AND
3978 || code == IOR
3979 || code == XOR)
3981 long tmp0[4];
3982 long tmp1[4];
3983 REAL_VALUE_TYPE r;
3984 int i;
3986 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
3987 GET_MODE (op0));
3988 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
3989 GET_MODE (op1));
3990 for (i = 0; i < 4; i++)
3992 switch (code)
3994 case AND:
3995 tmp0[i] &= tmp1[i];
3996 break;
3997 case IOR:
3998 tmp0[i] |= tmp1[i];
3999 break;
4000 case XOR:
4001 tmp0[i] ^= tmp1[i];
4002 break;
4003 default:
4004 gcc_unreachable ();
4007 real_from_target (&r, tmp0, mode);
4008 return const_double_from_real_value (r, mode);
4010 else
4012 REAL_VALUE_TYPE f0, f1, value, result;
4013 const REAL_VALUE_TYPE *opr0, *opr1;
4014 bool inexact;
4016 opr0 = CONST_DOUBLE_REAL_VALUE (op0);
4017 opr1 = CONST_DOUBLE_REAL_VALUE (op1);
4019 if (HONOR_SNANS (mode)
4020 && (REAL_VALUE_ISSIGNALING_NAN (*opr0)
4021 || REAL_VALUE_ISSIGNALING_NAN (*opr1)))
4022 return 0;
4024 real_convert (&f0, mode, opr0);
4025 real_convert (&f1, mode, opr1);
4027 if (code == DIV
4028 && real_equal (&f1, &dconst0)
4029 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
4030 return 0;
4032 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
4033 && flag_trapping_math
4034 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
4036 int s0 = REAL_VALUE_NEGATIVE (f0);
4037 int s1 = REAL_VALUE_NEGATIVE (f1);
4039 switch (code)
4041 case PLUS:
4042 /* Inf + -Inf = NaN plus exception. */
4043 if (s0 != s1)
4044 return 0;
4045 break;
4046 case MINUS:
4047 /* Inf - Inf = NaN plus exception. */
4048 if (s0 == s1)
4049 return 0;
4050 break;
4051 case DIV:
4052 /* Inf / Inf = NaN plus exception. */
4053 return 0;
4054 default:
4055 break;
4059 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
4060 && flag_trapping_math
4061 && ((REAL_VALUE_ISINF (f0) && real_equal (&f1, &dconst0))
4062 || (REAL_VALUE_ISINF (f1)
4063 && real_equal (&f0, &dconst0))))
4064 /* Inf * 0 = NaN plus exception. */
4065 return 0;
4067 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
4068 &f0, &f1);
4069 real_convert (&result, mode, &value);
4071 /* Don't constant fold this floating point operation if
4072 the result has overflowed and flag_trapping_math is set. */
4074 if (flag_trapping_math
4075 && MODE_HAS_INFINITIES (mode)
4076 && REAL_VALUE_ISINF (result)
4077 && !REAL_VALUE_ISINF (f0)
4078 && !REAL_VALUE_ISINF (f1))
4079 /* Overflow plus exception. */
4080 return 0;
4082 /* Don't constant fold this floating point operation if the
4083 result may depend upon the run-time rounding mode and
4084 flag_rounding_math is set, or if GCC's software emulation
4085 is unable to accurately represent the result. */
4087 if ((flag_rounding_math
4088 || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
4089 && (inexact || !real_identical (&result, &value)))
4090 return NULL_RTX;
4092 return const_double_from_real_value (result, mode);
4096 /* We can fold some multi-word operations. */
4097 if ((GET_MODE_CLASS (mode) == MODE_INT
4098 || GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
4099 && CONST_SCALAR_INT_P (op0)
4100 && CONST_SCALAR_INT_P (op1))
4102 wide_int result;
4103 bool overflow;
4104 rtx_mode_t pop0 = rtx_mode_t (op0, mode);
4105 rtx_mode_t pop1 = rtx_mode_t (op1, mode);
4107 #if TARGET_SUPPORTS_WIDE_INT == 0
4108 /* This assert keeps the simplification from producing a result
4109 that cannot be represented in a CONST_DOUBLE. A lot of
4110 upstream callers expect that this function never fails to
4111 simplify something, so if this check were added to the test
4112 above, the code would die later anyway. If this assert
4113 triggers, you just need to make the port support wide int. */
4114 gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT);
4115 #endif
4116 switch (code)
4118 case MINUS:
4119 result = wi::sub (pop0, pop1);
4120 break;
4122 case PLUS:
4123 result = wi::add (pop0, pop1);
4124 break;
4126 case MULT:
4127 result = wi::mul (pop0, pop1);
4128 break;
4130 case DIV:
4131 result = wi::div_trunc (pop0, pop1, SIGNED, &overflow);
4132 if (overflow)
4133 return NULL_RTX;
4134 break;
4136 case MOD:
4137 result = wi::mod_trunc (pop0, pop1, SIGNED, &overflow);
4138 if (overflow)
4139 return NULL_RTX;
4140 break;
4142 case UDIV:
4143 result = wi::div_trunc (pop0, pop1, UNSIGNED, &overflow);
4144 if (overflow)
4145 return NULL_RTX;
4146 break;
4148 case UMOD:
4149 result = wi::mod_trunc (pop0, pop1, UNSIGNED, &overflow);
4150 if (overflow)
4151 return NULL_RTX;
4152 break;
4154 case AND:
4155 result = wi::bit_and (pop0, pop1);
4156 break;
4158 case IOR:
4159 result = wi::bit_or (pop0, pop1);
4160 break;
4162 case XOR:
4163 result = wi::bit_xor (pop0, pop1);
4164 break;
4166 case SMIN:
4167 result = wi::smin (pop0, pop1);
4168 break;
4170 case SMAX:
4171 result = wi::smax (pop0, pop1);
4172 break;
4174 case UMIN:
4175 result = wi::umin (pop0, pop1);
4176 break;
4178 case UMAX:
4179 result = wi::umax (pop0, pop1);
4180 break;
4182 case LSHIFTRT:
4183 case ASHIFTRT:
4184 case ASHIFT:
4186 wide_int wop1 = pop1;
4187 if (SHIFT_COUNT_TRUNCATED)
4188 wop1 = wi::umod_trunc (wop1, width);
4189 else if (wi::geu_p (wop1, width))
4190 return NULL_RTX;
4192 switch (code)
4194 case LSHIFTRT:
4195 result = wi::lrshift (pop0, wop1);
4196 break;
4198 case ASHIFTRT:
4199 result = wi::arshift (pop0, wop1);
4200 break;
4202 case ASHIFT:
4203 result = wi::lshift (pop0, wop1);
4204 break;
4206 default:
4207 gcc_unreachable ();
4209 break;
4211 case ROTATE:
4212 case ROTATERT:
4214 if (wi::neg_p (pop1))
4215 return NULL_RTX;
4217 switch (code)
4219 case ROTATE:
4220 result = wi::lrotate (pop0, pop1);
4221 break;
4223 case ROTATERT:
4224 result = wi::rrotate (pop0, pop1);
4225 break;
4227 default:
4228 gcc_unreachable ();
4230 break;
4232 default:
4233 return NULL_RTX;
4235 return immed_wide_int_const (result, mode);
4238 return NULL_RTX;
4243 /* Return a positive integer if X should sort after Y. The value
4244 returned is 1 if and only if X and Y are both regs. */
4246 static int
4247 simplify_plus_minus_op_data_cmp (rtx x, rtx y)
4249 int result;
4251 result = (commutative_operand_precedence (y)
4252 - commutative_operand_precedence (x));
4253 if (result)
4254 return result + result;
4256 /* Group together equal REGs to do more simplification. */
4257 if (REG_P (x) && REG_P (y))
4258 return REGNO (x) > REGNO (y);
4260 return 0;
4263 /* Simplify and canonicalize a PLUS or MINUS, at least one of whose
4264 operands may be another PLUS or MINUS.
4266 Rather than testing for specific cases, we do this by a brute-force method
4267 and do all possible simplifications until no more changes occur. Then
4268 we rebuild the operation.
4270 May return NULL_RTX when no changes were made. */
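/* An illustrative walk-through (not part of the original source):
   simplifying (minus (plus A B) (neg C)) starts with the two entries
   { (plus A B), + } and { (neg C), - }; the expansion loop below
   flattens them into the three positive entries A, B and C, and after
   sorting and pairwise simplification the result is rebuilt as a nested
   PLUS of the three operands (the exact order follows the canonical
   operand ordering). */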
4272 static rtx
4273 simplify_plus_minus (enum rtx_code code, machine_mode mode, rtx op0,
4274 rtx op1)
4276 struct simplify_plus_minus_op_data
4278 rtx op;
4279 short neg;
4280 } ops[16];
4281 rtx result, tem;
4282 int n_ops = 2;
4283 int changed, n_constants, canonicalized = 0;
4284 int i, j;
4286 memset (ops, 0, sizeof ops);
4288 /* Set up the two operands and then expand them until nothing has been
4289 changed. If we run out of room in our array, give up; this should
4290 almost never happen. */
4292 ops[0].op = op0;
4293 ops[0].neg = 0;
4294 ops[1].op = op1;
4295 ops[1].neg = (code == MINUS);
4299 changed = 0;
4300 n_constants = 0;
4302 for (i = 0; i < n_ops; i++)
4304 rtx this_op = ops[i].op;
4305 int this_neg = ops[i].neg;
4306 enum rtx_code this_code = GET_CODE (this_op);
4308 switch (this_code)
4310 case PLUS:
4311 case MINUS:
4312 if (n_ops == ARRAY_SIZE (ops))
4313 return NULL_RTX;
4315 ops[n_ops].op = XEXP (this_op, 1);
4316 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
4317 n_ops++;
4319 ops[i].op = XEXP (this_op, 0);
4320 changed = 1;
4321 /* If this operand was negated then we will potentially
4322 canonicalize the expression. Similarly, if we don't
4323 place the operands adjacently, we're re-ordering the
4324 expression and thus might be performing a
4325 canonicalization. Ignore register re-ordering.
4326 ??? It might be better to shuffle the ops array here,
4327 but then (plus (plus (A, B), plus (C, D))) wouldn't
4328 be seen as non-canonical. */
4329 if (this_neg
4330 || (i != n_ops - 2
4331 && !(REG_P (ops[i].op) && REG_P (ops[n_ops - 1].op))))
4332 canonicalized = 1;
4333 break;
4335 case NEG:
4336 ops[i].op = XEXP (this_op, 0);
4337 ops[i].neg = ! this_neg;
4338 changed = 1;
4339 canonicalized = 1;
4340 break;
4342 case CONST:
4343 if (n_ops != ARRAY_SIZE (ops)
4344 && GET_CODE (XEXP (this_op, 0)) == PLUS
4345 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
4346 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
4348 ops[i].op = XEXP (XEXP (this_op, 0), 0);
4349 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
4350 ops[n_ops].neg = this_neg;
4351 n_ops++;
4352 changed = 1;
4353 canonicalized = 1;
4355 break;
4357 case NOT:
4358 /* ~a -> (-a - 1) */
4359 if (n_ops != ARRAY_SIZE (ops))
4361 ops[n_ops].op = CONSTM1_RTX (mode);
4362 ops[n_ops++].neg = this_neg;
4363 ops[i].op = XEXP (this_op, 0);
4364 ops[i].neg = !this_neg;
4365 changed = 1;
4366 canonicalized = 1;
4368 break;
4370 case CONST_INT:
4371 n_constants++;
4372 if (this_neg)
4374 ops[i].op = neg_const_int (mode, this_op);
4375 ops[i].neg = 0;
4376 changed = 1;
4377 canonicalized = 1;
4379 break;
4381 default:
4382 break;
4386 while (changed);
4388 if (n_constants > 1)
4389 canonicalized = 1;
4391 gcc_assert (n_ops >= 2);
4393 /* If we only have two operands, we can avoid the loops. */
4394 if (n_ops == 2)
4396 enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
4397 rtx lhs, rhs;
4399 /* Get the two operands. Be careful with the order, especially for
4400 the cases where code == MINUS. */
4401 if (ops[0].neg && ops[1].neg)
4403 lhs = gen_rtx_NEG (mode, ops[0].op);
4404 rhs = ops[1].op;
4406 else if (ops[0].neg)
4408 lhs = ops[1].op;
4409 rhs = ops[0].op;
4411 else
4413 lhs = ops[0].op;
4414 rhs = ops[1].op;
4417 return simplify_const_binary_operation (code, mode, lhs, rhs);
4420 /* Now simplify each pair of operands until nothing changes. */
4421 while (1)
4423 /* Insertion sort is good enough for a small array. */
4424 for (i = 1; i < n_ops; i++)
4426 struct simplify_plus_minus_op_data save;
4427 int cmp;
4429 j = i - 1;
4430 cmp = simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op);
4431 if (cmp <= 0)
4432 continue;
4433 /* Just swapping registers doesn't count as canonicalization. */
4434 if (cmp != 1)
4435 canonicalized = 1;
4437 save = ops[i];
4439 ops[j + 1] = ops[j];
4440 while (j--
4441 && simplify_plus_minus_op_data_cmp (ops[j].op, save.op) > 0);
4442 ops[j + 1] = save;
4445 changed = 0;
4446 for (i = n_ops - 1; i > 0; i--)
4447 for (j = i - 1; j >= 0; j--)
4449 rtx lhs = ops[j].op, rhs = ops[i].op;
4450 int lneg = ops[j].neg, rneg = ops[i].neg;
4452 if (lhs != 0 && rhs != 0)
4454 enum rtx_code ncode = PLUS;
4456 if (lneg != rneg)
4458 ncode = MINUS;
4459 if (lneg)
4460 std::swap (lhs, rhs);
4462 else if (swap_commutative_operands_p (lhs, rhs))
4463 std::swap (lhs, rhs);
4465 if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
4466 && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
4468 rtx tem_lhs, tem_rhs;
4470 tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
4471 tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
4472 tem = simplify_binary_operation (ncode, mode, tem_lhs,
4473 tem_rhs);
4475 if (tem && !CONSTANT_P (tem))
4476 tem = gen_rtx_CONST (GET_MODE (tem), tem);
4478 else
4479 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
4481 if (tem)
4483 /* Reject "simplifications" that just wrap the two
4484 arguments in a CONST. Failure to do so can result
4485 in infinite recursion with simplify_binary_operation
4486 when it calls us to simplify CONST operations.
4487 Also, if we find such a simplification, don't try
4488 any more combinations with this rhs: We must have
4489 something like symbol+offset, i.e. one of the
4490 trivial CONST expressions we handle later. */
4491 if (GET_CODE (tem) == CONST
4492 && GET_CODE (XEXP (tem, 0)) == ncode
4493 && XEXP (XEXP (tem, 0), 0) == lhs
4494 && XEXP (XEXP (tem, 0), 1) == rhs)
4495 break;
4496 lneg &= rneg;
4497 if (GET_CODE (tem) == NEG)
4498 tem = XEXP (tem, 0), lneg = !lneg;
4499 if (CONST_INT_P (tem) && lneg)
4500 tem = neg_const_int (mode, tem), lneg = 0;
4502 ops[i].op = tem;
4503 ops[i].neg = lneg;
4504 ops[j].op = NULL_RTX;
4505 changed = 1;
4506 canonicalized = 1;
4511 if (!changed)
4512 break;
4514 /* Pack all the operands to the lower-numbered entries. */
4515 for (i = 0, j = 0; j < n_ops; j++)
4516 if (ops[j].op)
4518 ops[i] = ops[j];
4519 i++;
4521 n_ops = i;
4524 /* If nothing changed, check that rematerialization of rtl instructions
4525 is still required. */
4526 if (!canonicalized)
4528 /* Perform rematerialization only if all operands are registers and
4529 all operations are PLUS. */
4530 /* ??? Also disallow (non-global, non-frame) fixed registers to work
4531 around rs6000 and how it uses the CA register. See PR67145. */
4532 for (i = 0; i < n_ops; i++)
4533 if (ops[i].neg
4534 || !REG_P (ops[i].op)
4535 || (REGNO (ops[i].op) < FIRST_PSEUDO_REGISTER
4536 && fixed_regs[REGNO (ops[i].op)]
4537 && !global_regs[REGNO (ops[i].op)]
4538 && ops[i].op != frame_pointer_rtx
4539 && ops[i].op != arg_pointer_rtx
4540 && ops[i].op != stack_pointer_rtx))
4541 return NULL_RTX;
4542 goto gen_result;
4545 /* Create (minus -C X) instead of (neg (const (plus X C))). */
4546 if (n_ops == 2
4547 && CONST_INT_P (ops[1].op)
4548 && CONSTANT_P (ops[0].op)
4549 && ops[0].neg)
4550 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
4552 /* We suppressed creation of trivial CONST expressions in the
4553 combination loop to avoid recursion. Create one manually now.
4554 The combination loop should have ensured that there is exactly
4555 one CONST_INT, and the sort will have ensured that it is last
4556 in the array and that any other constant will be next-to-last. */
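/* For example (illustrative): if the array ends with
   { (symbol_ref "x"), + } followed by { (const_int 12), + },
   plus_constant folds them into
   (const (plus (symbol_ref "x") (const_int 12))) and n_ops drops by one. */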
4558 if (n_ops > 1
4559 && CONST_INT_P (ops[n_ops - 1].op)
4560 && CONSTANT_P (ops[n_ops - 2].op))
4562 rtx value = ops[n_ops - 1].op;
4563 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
4564 value = neg_const_int (mode, value);
4565 if (CONST_INT_P (value))
4567 ops[n_ops - 2].op = plus_constant (mode, ops[n_ops - 2].op,
4568 INTVAL (value));
4569 n_ops--;
4573 /* Put a non-negated operand first, if possible. */
4575 for (i = 0; i < n_ops && ops[i].neg; i++)
4576 continue;
4577 if (i == n_ops)
4578 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
4579 else if (i != 0)
4581 tem = ops[0].op;
4582 ops[0] = ops[i];
4583 ops[i].op = tem;
4584 ops[i].neg = 1;
4587 /* Now make the result by performing the requested operations. */
4588 gen_result:
4589 result = ops[0].op;
4590 for (i = 1; i < n_ops; i++)
4591 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
4592 mode, result, ops[i].op);
4594 return result;
4597 /* Check whether an operand is suitable for calling simplify_plus_minus. */
4598 static bool
4599 plus_minus_operand_p (const_rtx x)
4601 return GET_CODE (x) == PLUS
4602 || GET_CODE (x) == MINUS
4603 || (GET_CODE (x) == CONST
4604 && GET_CODE (XEXP (x, 0)) == PLUS
4605 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
4606 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
4609 /* Like simplify_binary_operation except used for relational operators.
4610 MODE is the mode of the result. If MODE is VOIDmode, the operands must
4611 not both be VOIDmode as well.
4613 CMP_MODE specifies the mode in which the comparison is done, so it is
4614 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
4615 the operands or, if both are VOIDmode, the operands are compared in
4616 "infinite precision". */
4618 simplify_relational_operation (enum rtx_code code, machine_mode mode,
4619 machine_mode cmp_mode, rtx op0, rtx op1)
4621 rtx tem, trueop0, trueop1;
4623 if (cmp_mode == VOIDmode)
4624 cmp_mode = GET_MODE (op0);
4625 if (cmp_mode == VOIDmode)
4626 cmp_mode = GET_MODE (op1);
4628 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
4629 if (tem)
4631 if (SCALAR_FLOAT_MODE_P (mode))
4633 if (tem == const0_rtx)
4634 return CONST0_RTX (mode);
4635 #ifdef FLOAT_STORE_FLAG_VALUE
4637 REAL_VALUE_TYPE val;
4638 val = FLOAT_STORE_FLAG_VALUE (mode);
4639 return const_double_from_real_value (val, mode);
4641 #else
4642 return NULL_RTX;
4643 #endif
4645 if (VECTOR_MODE_P (mode))
4647 if (tem == const0_rtx)
4648 return CONST0_RTX (mode);
4649 #ifdef VECTOR_STORE_FLAG_VALUE
4651 int i, units;
4652 rtvec v;
4654 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
4655 if (val == NULL_RTX)
4656 return NULL_RTX;
4657 if (val == const1_rtx)
4658 return CONST1_RTX (mode);
4660 units = GET_MODE_NUNITS (mode);
4661 v = rtvec_alloc (units);
4662 for (i = 0; i < units; i++)
4663 RTVEC_ELT (v, i) = val;
4664 return gen_rtx_raw_CONST_VECTOR (mode, v);
4666 #else
4667 return NULL_RTX;
4668 #endif
4671 return tem;
4674 /* For the following tests, ensure const0_rtx is op1. */
4675 if (swap_commutative_operands_p (op0, op1)
4676 || (op0 == const0_rtx && op1 != const0_rtx))
4677 std::swap (op0, op1), code = swap_condition (code);
4679 /* If op0 is a compare, extract the comparison arguments from it. */
4680 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4681 return simplify_gen_relational (code, mode, VOIDmode,
4682 XEXP (op0, 0), XEXP (op0, 1));
4684 if (GET_MODE_CLASS (cmp_mode) == MODE_CC
4685 || CC0_P (op0))
4686 return NULL_RTX;
4688 trueop0 = avoid_constant_pool_reference (op0);
4689 trueop1 = avoid_constant_pool_reference (op1);
4690 return simplify_relational_operation_1 (code, mode, cmp_mode,
4691 trueop0, trueop1);
4694 /* This part of simplify_relational_operation is only used when CMP_MODE
4695 is not in class MODE_CC (i.e. it is a real comparison).
4697 MODE is the mode of the result, while CMP_MODE specifies the mode
4698 in which the comparison is done, so it is the mode of the operands. */
4700 static rtx
4701 simplify_relational_operation_1 (enum rtx_code code, machine_mode mode,
4702 machine_mode cmp_mode, rtx op0, rtx op1)
4704 enum rtx_code op0code = GET_CODE (op0);
4706 if (op1 == const0_rtx && COMPARISON_P (op0))
4708 /* If op0 is a comparison, extract the comparison arguments
4709 from it. */
4710 if (code == NE)
4712 if (GET_MODE (op0) == mode)
4713 return simplify_rtx (op0);
4714 else
4715 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
4716 XEXP (op0, 0), XEXP (op0, 1));
4718 else if (code == EQ)
4720 enum rtx_code new_code = reversed_comparison_code (op0, NULL);
4721 if (new_code != UNKNOWN)
4722 return simplify_gen_relational (new_code, mode, VOIDmode,
4723 XEXP (op0, 0), XEXP (op0, 1));
4727 /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
4728 (GEU/LTU a -C). Likewise for (LTU/GEU (PLUS a C) a). */
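/* E.g. (illustrative): (ltu:SI (plus:SI a (const_int 5)) (const_int 5))
   is true exactly when the addition wrapped around, i.e. when a is at
   least -5 viewed as an unsigned value, so it becomes
   (geu:SI a (const_int -5)). */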
4729 if ((code == LTU || code == GEU)
4730 && GET_CODE (op0) == PLUS
4731 && CONST_INT_P (XEXP (op0, 1))
4732 && (rtx_equal_p (op1, XEXP (op0, 0))
4733 || rtx_equal_p (op1, XEXP (op0, 1)))
4734 /* (LTU/GEU (PLUS a 0) 0) is not the same as (GEU/LTU a 0). */
4735 && XEXP (op0, 1) != const0_rtx)
4737 rtx new_cmp
4738 = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
4739 return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
4740 cmp_mode, XEXP (op0, 0), new_cmp);
4743 /* (GTU (PLUS a C) (C - 1)) where C is a non-zero constant can be
4744 transformed into (LTU a -C). */
4745 if (code == GTU && GET_CODE (op0) == PLUS && CONST_INT_P (op1)
4746 && CONST_INT_P (XEXP (op0, 1))
4747 && (UINTVAL (op1) == UINTVAL (XEXP (op0, 1)) - 1)
4748 && XEXP (op0, 1) != const0_rtx)
4750 rtx new_cmp
4751 = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
4752 return simplify_gen_relational (LTU, mode, cmp_mode,
4753 XEXP (op0, 0), new_cmp);
4756 /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a). */
4757 if ((code == LTU || code == GEU)
4758 && GET_CODE (op0) == PLUS
4759 && rtx_equal_p (op1, XEXP (op0, 1))
4760 /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b). */
4761 && !rtx_equal_p (op1, XEXP (op0, 0)))
4762 return simplify_gen_relational (code, mode, cmp_mode, op0,
4763 copy_rtx (XEXP (op0, 0)));
4765 if (op1 == const0_rtx)
4767 /* Canonicalize (GTU x 0) as (NE x 0). */
4768 if (code == GTU)
4769 return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
4770 /* Canonicalize (LEU x 0) as (EQ x 0). */
4771 if (code == LEU)
4772 return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
4774 else if (op1 == const1_rtx)
4776 switch (code)
4778 case GE:
4779 /* Canonicalize (GE x 1) as (GT x 0). */
4780 return simplify_gen_relational (GT, mode, cmp_mode,
4781 op0, const0_rtx);
4782 case GEU:
4783 /* Canonicalize (GEU x 1) as (NE x 0). */
4784 return simplify_gen_relational (NE, mode, cmp_mode,
4785 op0, const0_rtx);
4786 case LT:
4787 /* Canonicalize (LT x 1) as (LE x 0). */
4788 return simplify_gen_relational (LE, mode, cmp_mode,
4789 op0, const0_rtx);
4790 case LTU:
4791 /* Canonicalize (LTU x 1) as (EQ x 0). */
4792 return simplify_gen_relational (EQ, mode, cmp_mode,
4793 op0, const0_rtx);
4794 default:
4795 break;
4798 else if (op1 == constm1_rtx)
4800 /* Canonicalize (LE x -1) as (LT x 0). */
4801 if (code == LE)
4802 return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
4803 /* Canonicalize (GT x -1) as (GE x 0). */
4804 if (code == GT)
4805 return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
4808 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
4809 if ((code == EQ || code == NE)
4810 && (op0code == PLUS || op0code == MINUS)
4811 && CONSTANT_P (op1)
4812 && CONSTANT_P (XEXP (op0, 1))
4813 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
4815 rtx x = XEXP (op0, 0);
4816 rtx c = XEXP (op0, 1);
4817 enum rtx_code invcode = op0code == PLUS ? MINUS : PLUS;
4818 rtx tem = simplify_gen_binary (invcode, cmp_mode, op1, c);
4820 /* Detect an infinite recursive condition, where we oscillate at this
4821 simplification case between:
4822 A + B == C <---> C - B == A,
4823 where A, B, and C are all constants with non-simplifiable expressions,
4824 usually SYMBOL_REFs. */
4825 if (GET_CODE (tem) == invcode
4826 && CONSTANT_P (x)
4827 && rtx_equal_p (c, XEXP (tem, 1)))
4828 return NULL_RTX;
4830 return simplify_gen_relational (code, mode, cmp_mode, x, tem);
4833 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0))) is
4834 the same as (zero_extract:SI FOO (const_int 1) BAR). */
4835 scalar_int_mode int_mode;
4836 if (code == NE
4837 && op1 == const0_rtx
4838 && is_int_mode (mode, &int_mode)
4839 && cmp_mode != VOIDmode
4840 /* ??? Work-around BImode bugs in the ia64 backend. */
4841 && int_mode != BImode
4842 && cmp_mode != BImode
4843 && nonzero_bits (op0, cmp_mode) == 1
4844 && STORE_FLAG_VALUE == 1)
4845 return GET_MODE_SIZE (int_mode) > GET_MODE_SIZE (cmp_mode)
4846 ? simplify_gen_unary (ZERO_EXTEND, int_mode, op0, cmp_mode)
4847 : lowpart_subreg (int_mode, op0, cmp_mode);
4849 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
4850 if ((code == EQ || code == NE)
4851 && op1 == const0_rtx
4852 && op0code == XOR)
4853 return simplify_gen_relational (code, mode, cmp_mode,
4854 XEXP (op0, 0), XEXP (op0, 1));
4856 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
4857 if ((code == EQ || code == NE)
4858 && op0code == XOR
4859 && rtx_equal_p (XEXP (op0, 0), op1)
4860 && !side_effects_p (XEXP (op0, 0)))
4861 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 1),
4862 CONST0_RTX (mode));
4864 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
4865 if ((code == EQ || code == NE)
4866 && op0code == XOR
4867 && rtx_equal_p (XEXP (op0, 1), op1)
4868 && !side_effects_p (XEXP (op0, 1)))
4869 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4870 CONST0_RTX (mode));
4872 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
4873 if ((code == EQ || code == NE)
4874 && op0code == XOR
4875 && CONST_SCALAR_INT_P (op1)
4876 && CONST_SCALAR_INT_P (XEXP (op0, 1)))
4877 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4878 simplify_gen_binary (XOR, cmp_mode,
4879 XEXP (op0, 1), op1));
4881 /* (eq/ne (and x y) x) simplifies to (eq/ne (and (not y) x) 0), which
4882 can be implemented with a BICS instruction on some targets, or
4883 constant-folded if y is a constant. */
4884 if ((code == EQ || code == NE)
4885 && op0code == AND
4886 && rtx_equal_p (XEXP (op0, 0), op1)
4887 && !side_effects_p (op1)
4888 && op1 != CONST0_RTX (cmp_mode))
4890 rtx not_y = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 1), cmp_mode);
4891 rtx lhs = simplify_gen_binary (AND, cmp_mode, not_y, XEXP (op0, 0));
4893 return simplify_gen_relational (code, mode, cmp_mode, lhs,
4894 CONST0_RTX (cmp_mode));
4897 /* Likewise for (eq/ne (and x y) y). */
4898 if ((code == EQ || code == NE)
4899 && op0code == AND
4900 && rtx_equal_p (XEXP (op0, 1), op1)
4901 && !side_effects_p (op1)
4902 && op1 != CONST0_RTX (cmp_mode))
4904 rtx not_x = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 0), cmp_mode);
4905 rtx lhs = simplify_gen_binary (AND, cmp_mode, not_x, XEXP (op0, 1));
4907 return simplify_gen_relational (code, mode, cmp_mode, lhs,
4908 CONST0_RTX (cmp_mode));
4911 /* (eq/ne (bswap x) C1) simplifies to (eq/ne x C2) with C2 swapped. */
4912 if ((code == EQ || code == NE)
4913 && GET_CODE (op0) == BSWAP
4914 && CONST_SCALAR_INT_P (op1))
4915 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4916 simplify_gen_unary (BSWAP, cmp_mode,
4917 op1, cmp_mode));
4919 /* (eq/ne (bswap x) (bswap y)) simplifies to (eq/ne x y). */
4920 if ((code == EQ || code == NE)
4921 && GET_CODE (op0) == BSWAP
4922 && GET_CODE (op1) == BSWAP)
4923 return simplify_gen_relational (code, mode, cmp_mode,
4924 XEXP (op0, 0), XEXP (op1, 0));
4926 if (op0code == POPCOUNT && op1 == const0_rtx)
4927 switch (code)
4929 case EQ:
4930 case LE:
4931 case LEU:
4932 /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */
4933 return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
4934 XEXP (op0, 0), const0_rtx);
4936 case NE:
4937 case GT:
4938 case GTU:
4939 /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */
4940 return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
4941 XEXP (op0, 0), const0_rtx);
4943 default:
4944 break;
4947 return NULL_RTX;
4950 enum
4952 CMP_EQ = 1,
4953 CMP_LT = 2,
4954 CMP_GT = 4,
4955 CMP_LTU = 8,
4956 CMP_GTU = 16
4960 /* Convert the known results for EQ, LT, GT, LTU, GTU contained in
4961 KNOWN_RESULTS to a CONST_INT, based on the requested comparison CODE.
4962 For KNOWN_RESULTS to make sense it should be either CMP_EQ, or the
4963 logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
4964 For floating-point comparisons, assume that the operands were ordered. */
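/* For instance (illustrative): comparing the signed value -1 with 1
   gives CMP_LT | CMP_GTU, since -1 is smaller as a signed value but
   larger as an unsigned value; LT then folds to const_true_rtx while
   LTU folds to const0_rtx. */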
4966 static rtx
4967 comparison_result (enum rtx_code code, int known_results)
4969 switch (code)
4971 case EQ:
4972 case UNEQ:
4973 return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
4974 case NE:
4975 case LTGT:
4976 return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;
4978 case LT:
4979 case UNLT:
4980 return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
4981 case GE:
4982 case UNGE:
4983 return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;
4985 case GT:
4986 case UNGT:
4987 return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
4988 case LE:
4989 case UNLE:
4990 return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;
4992 case LTU:
4993 return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
4994 case GEU:
4995 return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;
4997 case GTU:
4998 return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
4999 case LEU:
5000 return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;
5002 case ORDERED:
5003 return const_true_rtx;
5004 case UNORDERED:
5005 return const0_rtx;
5006 default:
5007 gcc_unreachable ();
5011 /* Check if the given comparison (done in the given MODE) is actually
5012 a tautology or a contradiction. If the mode is VOIDmode, the
5013 comparison is done in "infinite precision". If no simplification
5014 is possible, this function returns zero. Otherwise, it returns
5015 either const_true_rtx or const0_rtx. */
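/* Illustrative examples (not from the original source):
   (geu:SI x (const_int 0)) is a tautology and folds to const_true_rtx,
   (ltu:SI x (const_int 0)) is a contradiction and folds to const0_rtx,
   while (lt:SI x (const_int 10)) with nothing known about x yields 0. */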
5018 simplify_const_relational_operation (enum rtx_code code,
5019 machine_mode mode,
5020 rtx op0, rtx op1)
5022 rtx tem;
5023 rtx trueop0;
5024 rtx trueop1;
5026 gcc_assert (mode != VOIDmode
5027 || (GET_MODE (op0) == VOIDmode
5028 && GET_MODE (op1) == VOIDmode));
5030 /* If op0 is a compare, extract the comparison arguments from it. */
5031 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
5033 op1 = XEXP (op0, 1);
5034 op0 = XEXP (op0, 0);
5036 if (GET_MODE (op0) != VOIDmode)
5037 mode = GET_MODE (op0);
5038 else if (GET_MODE (op1) != VOIDmode)
5039 mode = GET_MODE (op1);
5040 else
5041 return 0;
5044 /* We can't simplify MODE_CC values since we don't know what the
5045 actual comparison is. */
5046 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
5047 return 0;
5049 /* Make sure the constant is second. */
5050 if (swap_commutative_operands_p (op0, op1))
5052 std::swap (op0, op1);
5053 code = swap_condition (code);
5056 trueop0 = avoid_constant_pool_reference (op0);
5057 trueop1 = avoid_constant_pool_reference (op1);
5059 /* For integer comparisons of A and B, we may be able to simplify A - B and
5060 then simplify a comparison of that with zero. If A and B are both either
5061 a register or a CONST_INT, this can't help; testing for these cases will
5062 prevent infinite recursion here and speed things up.
5064 We can only do this for EQ and NE comparisons, as otherwise we may
5065 lose or introduce overflow, which we cannot disregard as undefined since
5066 we do not know the signedness of the operation on either the left or
5067 the right-hand side of the comparison. */
5069 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
5070 && (code == EQ || code == NE)
5071 && ! ((REG_P (op0) || CONST_INT_P (trueop0))
5072 && (REG_P (op1) || CONST_INT_P (trueop1)))
5073 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
5074 /* We cannot do this if tem is a nonzero address. */
5075 && ! nonzero_address_p (tem))
5076 return simplify_const_relational_operation (signed_condition (code),
5077 mode, tem, const0_rtx);
5079 if (! HONOR_NANS (mode) && code == ORDERED)
5080 return const_true_rtx;
5082 if (! HONOR_NANS (mode) && code == UNORDERED)
5083 return const0_rtx;
5085 /* For modes without NaNs, if the two operands are equal, we know the
5086 result except if they have side-effects. Even with NaNs we know
5087 the result of unordered comparisons and, if signaling NaNs are
5088 irrelevant, also the result of LT/GT/LTGT. */
5089 if ((! HONOR_NANS (trueop0)
5090 || code == UNEQ || code == UNLE || code == UNGE
5091 || ((code == LT || code == GT || code == LTGT)
5092 && ! HONOR_SNANS (trueop0)))
5093 && rtx_equal_p (trueop0, trueop1)
5094 && ! side_effects_p (trueop0))
5095 return comparison_result (code, CMP_EQ);
5097 /* If the operands are floating-point constants, see if we can fold
5098 the result. */
5099 if (CONST_DOUBLE_AS_FLOAT_P (trueop0)
5100 && CONST_DOUBLE_AS_FLOAT_P (trueop1)
5101 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
5103 const REAL_VALUE_TYPE *d0 = CONST_DOUBLE_REAL_VALUE (trueop0);
5104 const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);
5106 /* Comparisons are unordered iff at least one of the values is NaN. */
5107 if (REAL_VALUE_ISNAN (*d0) || REAL_VALUE_ISNAN (*d1))
5108 switch (code)
5110 case UNEQ:
5111 case UNLT:
5112 case UNGT:
5113 case UNLE:
5114 case UNGE:
5115 case NE:
5116 case UNORDERED:
5117 return const_true_rtx;
5118 case EQ:
5119 case LT:
5120 case GT:
5121 case LE:
5122 case GE:
5123 case LTGT:
5124 case ORDERED:
5125 return const0_rtx;
5126 default:
5127 return 0;
5130 return comparison_result (code,
5131 (real_equal (d0, d1) ? CMP_EQ :
5132 real_less (d0, d1) ? CMP_LT : CMP_GT));
5135 /* Otherwise, see if the operands are both integers. */
5136 if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
5137 && CONST_SCALAR_INT_P (trueop0) && CONST_SCALAR_INT_P (trueop1))
5139 /* It would be nice if we really had a mode here. However, the
5140 largest int representable on the target is as good as
5141 infinite. */
5142 machine_mode cmode = (mode == VOIDmode) ? MAX_MODE_INT : mode;
5143 rtx_mode_t ptrueop0 = rtx_mode_t (trueop0, cmode);
5144 rtx_mode_t ptrueop1 = rtx_mode_t (trueop1, cmode);
5146 if (wi::eq_p (ptrueop0, ptrueop1))
5147 return comparison_result (code, CMP_EQ);
5148 else
5150 int cr = wi::lts_p (ptrueop0, ptrueop1) ? CMP_LT : CMP_GT;
5151 cr |= wi::ltu_p (ptrueop0, ptrueop1) ? CMP_LTU : CMP_GTU;
5152 return comparison_result (code, cr);
5156 /* Optimize comparisons with upper and lower bounds. */
5157 if (HWI_COMPUTABLE_MODE_P (mode)
5158 && CONST_INT_P (trueop1)
5159 && !side_effects_p (trueop0))
5161 int sign;
5162 unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, mode);
5163 HOST_WIDE_INT val = INTVAL (trueop1);
5164 HOST_WIDE_INT mmin, mmax;
5166 if (code == GEU
5167 || code == LEU
5168 || code == GTU
5169 || code == LTU)
5170 sign = 0;
5171 else
5172 sign = 1;
5174 /* Get a reduced range if the sign bit is zero. */
5175 if (nonzero <= (GET_MODE_MASK (mode) >> 1))
5177 mmin = 0;
5178 mmax = nonzero;
5180 else
5182 rtx mmin_rtx, mmax_rtx;
5183 get_mode_bounds (mode, sign, mode, &mmin_rtx, &mmax_rtx);
5185 mmin = INTVAL (mmin_rtx);
5186 mmax = INTVAL (mmax_rtx);
5187 if (sign)
5189 unsigned int sign_copies = num_sign_bit_copies (trueop0, mode);
5191 mmin >>= (sign_copies - 1);
5192 mmax >>= (sign_copies - 1);
5196 switch (code)
5198 /* x >= y is always true for y <= mmin, always false for y > mmax. */
5199 case GEU:
5200 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
5201 return const_true_rtx;
5202 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
5203 return const0_rtx;
5204 break;
5205 case GE:
5206 if (val <= mmin)
5207 return const_true_rtx;
5208 if (val > mmax)
5209 return const0_rtx;
5210 break;
5212 /* x <= y is always true for y >= mmax, always false for y < mmin. */
5213 case LEU:
5214 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
5215 return const_true_rtx;
5216 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
5217 return const0_rtx;
5218 break;
5219 case LE:
5220 if (val >= mmax)
5221 return const_true_rtx;
5222 if (val < mmin)
5223 return const0_rtx;
5224 break;
5226 case EQ:
5227 /* x == y is always false for y out of range. */
5228 if (val < mmin || val > mmax)
5229 return const0_rtx;
5230 break;
5232 /* x > y is always false for y >= mmax, always true for y < mmin. */
5233 case GTU:
5234 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
5235 return const0_rtx;
5236 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
5237 return const_true_rtx;
5238 break;
5239 case GT:
5240 if (val >= mmax)
5241 return const0_rtx;
5242 if (val < mmin)
5243 return const_true_rtx;
5244 break;
5246 /* x < y is always false for y <= mmin, always true for y > mmax. */
5247 case LTU:
5248 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
5249 return const0_rtx;
5250 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
5251 return const_true_rtx;
5252 break;
5253 case LT:
5254 if (val <= mmin)
5255 return const0_rtx;
5256 if (val > mmax)
5257 return const_true_rtx;
5258 break;
5260 case NE:
5261 /* x != y is always true for y out of range. */
5262 if (val < mmin || val > mmax)
5263 return const_true_rtx;
5264 break;
5266 default:
5267 break;
5271 /* Optimize integer comparisons with zero. */
5272 if (trueop1 == const0_rtx && !side_effects_p (trueop0))
5274 /* Some addresses are known to be nonzero. We don't know
5275 their sign, but equality comparisons are known. */
5276 if (nonzero_address_p (trueop0))
5278 if (code == EQ || code == LEU)
5279 return const0_rtx;
5280 if (code == NE || code == GTU)
5281 return const_true_rtx;
5284 /* See if the first operand is an IOR with a constant. If so, we
5285 may be able to determine the result of this comparison. */
5286 if (GET_CODE (op0) == IOR)
5288 rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
5289 if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
5291 int sign_bitnum = GET_MODE_PRECISION (mode) - 1;
5292 int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
5293 && (UINTVAL (inner_const)
5294 & (HOST_WIDE_INT_1U
5295 << sign_bitnum)));
5297 switch (code)
5299 case EQ:
5300 case LEU:
5301 return const0_rtx;
5302 case NE:
5303 case GTU:
5304 return const_true_rtx;
5305 case LT:
5306 case LE:
5307 if (has_sign)
5308 return const_true_rtx;
5309 break;
5310 case GT:
5311 case GE:
5312 if (has_sign)
5313 return const0_rtx;
5314 break;
5315 default:
5316 break;
5322 /* Optimize comparison of ABS with zero. */
5323 if (trueop1 == CONST0_RTX (mode) && !side_effects_p (trueop0)
5324 && (GET_CODE (trueop0) == ABS
5325 || (GET_CODE (trueop0) == FLOAT_EXTEND
5326 && GET_CODE (XEXP (trueop0, 0)) == ABS)))
5328 switch (code)
5330 case LT:
5331 /* Optimize abs(x) < 0.0. */
5332 if (!INTEGRAL_MODE_P (mode) && !HONOR_SNANS (mode))
5333 return const0_rtx;
5334 break;
5336 case GE:
5337 /* Optimize abs(x) >= 0.0. */
5338 if (!INTEGRAL_MODE_P (mode) && !HONOR_NANS (mode))
5339 return const_true_rtx;
5340 break;
5342 case UNGE:
5343 /* Optimize ! (abs(x) < 0.0). */
5344 return const_true_rtx;
5346 default:
5347 break;
5351 return 0;
5354 /* Recognize expressions of the form (X CMP 0) ? VAL : OP (X)
5355 where OP is CLZ or CTZ and VAL is the value from CLZ_DEFINED_VALUE_AT_ZERO
5356 or CTZ_DEFINED_VALUE_AT_ZERO respectively, and return OP (X) if the expression
5357 can be simplified to that, or NULL_RTX if not.
5358 Assume X is compared against zero with CMP_CODE and the true
5359 arm is TRUE_VAL and the false arm is FALSE_VAL. */
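/* A hypothetical example: on a target whose CLZ_DEFINED_VALUE_AT_ZERO
   yields 32 for SImode, the expression (eq x 0) ? 32 : (clz:SI x)
   (CMP_CODE == EQ, TRUE_VAL == (const_int 32), FALSE_VAL == (clz:SI x))
   simplifies to (clz:SI x), because the conditional merely guards the
   value CLZ already produces at zero. */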
5361 static rtx
5362 simplify_cond_clz_ctz (rtx x, rtx_code cmp_code, rtx true_val, rtx false_val)
5364 if (cmp_code != EQ && cmp_code != NE)
5365 return NULL_RTX;
5367 /* Result on X == 0 and X != 0 respectively. */
5368 rtx on_zero, on_nonzero;
5369 if (cmp_code == EQ)
5371 on_zero = true_val;
5372 on_nonzero = false_val;
5374 else
5376 on_zero = false_val;
5377 on_nonzero = true_val;
5380 rtx_code op_code = GET_CODE (on_nonzero);
5381 if ((op_code != CLZ && op_code != CTZ)
5382 || !rtx_equal_p (XEXP (on_nonzero, 0), x)
5383 || !CONST_INT_P (on_zero))
5384 return NULL_RTX;
5386 HOST_WIDE_INT op_val;
5387 if (((op_code == CLZ
5388 && CLZ_DEFINED_VALUE_AT_ZERO (GET_MODE (on_nonzero), op_val))
5389 || (op_code == CTZ
5390 && CTZ_DEFINED_VALUE_AT_ZERO (GET_MODE (on_nonzero), op_val)))
5391 && op_val == INTVAL (on_zero))
5392 return on_nonzero;
5394 return NULL_RTX;
5398 /* Simplify CODE, an operation with result mode MODE and three operands,
5399 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
5400 a constant. Return 0 if no simplification is possible. */
5403 simplify_ternary_operation (enum rtx_code code, machine_mode mode,
5404 machine_mode op0_mode, rtx op0, rtx op1,
5405 rtx op2)
5407 unsigned int width = GET_MODE_PRECISION (mode);
5408 bool any_change = false;
5409 rtx tem, trueop2;
5411 /* VOIDmode means "infinite" precision. */
5412 if (width == 0)
5413 width = HOST_BITS_PER_WIDE_INT;
5415 switch (code)
5417 case FMA:
5418 /* Simplify negations around the multiplication. */
5419 /* -a * -b + c => a * b + c. */
5420 if (GET_CODE (op0) == NEG)
5422 tem = simplify_unary_operation (NEG, mode, op1, mode);
5423 if (tem)
5424 op1 = tem, op0 = XEXP (op0, 0), any_change = true;
5426 else if (GET_CODE (op1) == NEG)
5428 tem = simplify_unary_operation (NEG, mode, op0, mode);
5429 if (tem)
5430 op0 = tem, op1 = XEXP (op1, 0), any_change = true;
5433 /* Canonicalize the two multiplication operands. */
5434 /* a * -b + c => -b * a + c. */
5435 if (swap_commutative_operands_p (op0, op1))
5436 std::swap (op0, op1), any_change = true;
5438 if (any_change)
5439 return gen_rtx_FMA (mode, op0, op1, op2);
5440 return NULL_RTX;
5442 case SIGN_EXTRACT:
5443 case ZERO_EXTRACT:
5444 if (CONST_INT_P (op0)
5445 && CONST_INT_P (op1)
5446 && CONST_INT_P (op2)
5447 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
5448 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
5450 /* Extracting a bit-field from a constant */
5451 unsigned HOST_WIDE_INT val = UINTVAL (op0);
5452 HOST_WIDE_INT op1val = INTVAL (op1);
5453 HOST_WIDE_INT op2val = INTVAL (op2);
5454 if (BITS_BIG_ENDIAN)
5455 val >>= GET_MODE_PRECISION (op0_mode) - op2val - op1val;
5456 else
5457 val >>= op2val;
5459 if (HOST_BITS_PER_WIDE_INT != op1val)
5461 /* First zero-extend. */
5462 val &= (HOST_WIDE_INT_1U << op1val) - 1;
5463 /* If desired, propagate sign bit. */
5464 if (code == SIGN_EXTRACT
5465 && (val & (HOST_WIDE_INT_1U << (op1val - 1)))
5466 != 0)
5467 val |= ~ ((HOST_WIDE_INT_1U << op1val) - 1);
5470 return gen_int_mode (val, mode);
5472 break;
5474 case IF_THEN_ELSE:
5475 if (CONST_INT_P (op0))
5476 return op0 != const0_rtx ? op1 : op2;
5478 /* Convert c ? a : a into "a". */
5479 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
5480 return op1;
5482 /* Convert a != b ? a : b into "a". */
5483 if (GET_CODE (op0) == NE
5484 && ! side_effects_p (op0)
5485 && ! HONOR_NANS (mode)
5486 && ! HONOR_SIGNED_ZEROS (mode)
5487 && ((rtx_equal_p (XEXP (op0, 0), op1)
5488 && rtx_equal_p (XEXP (op0, 1), op2))
5489 || (rtx_equal_p (XEXP (op0, 0), op2)
5490 && rtx_equal_p (XEXP (op0, 1), op1))))
5491 return op1;
5493 /* Convert a == b ? a : b into "b". */
5494 if (GET_CODE (op0) == EQ
5495 && ! side_effects_p (op0)
5496 && ! HONOR_NANS (mode)
5497 && ! HONOR_SIGNED_ZEROS (mode)
5498 && ((rtx_equal_p (XEXP (op0, 0), op1)
5499 && rtx_equal_p (XEXP (op0, 1), op2))
5500 || (rtx_equal_p (XEXP (op0, 0), op2)
5501 && rtx_equal_p (XEXP (op0, 1), op1))))
5502 return op2;
5504 /* Convert (!c) != {0,...,0} ? a : b into
5505 c != {0,...,0} ? b : a for vector modes. */
5506 if (VECTOR_MODE_P (GET_MODE (op1))
5507 && GET_CODE (op0) == NE
5508 && GET_CODE (XEXP (op0, 0)) == NOT
5509 && GET_CODE (XEXP (op0, 1)) == CONST_VECTOR)
5511 rtx cv = XEXP (op0, 1);
5512 int nunits = CONST_VECTOR_NUNITS (cv);
5513 bool ok = true;
5514 for (int i = 0; i < nunits; ++i)
5515 if (CONST_VECTOR_ELT (cv, i) != const0_rtx)
5517 ok = false;
5518 break;
5520 if (ok)
5522 rtx new_op0 = gen_rtx_NE (GET_MODE (op0),
5523 XEXP (XEXP (op0, 0), 0),
5524 XEXP (op0, 1));
5525 rtx retval = gen_rtx_IF_THEN_ELSE (mode, new_op0, op2, op1);
5526 return retval;
5530 /* Convert x == 0 ? N : clz (x) into clz (x) when
5531 CLZ_DEFINED_VALUE_AT_ZERO is defined to N for the mode of x.
5532 Similarly for ctz (x). */
5533 if (COMPARISON_P (op0) && !side_effects_p (op0)
5534 && XEXP (op0, 1) == const0_rtx)
5536 rtx simplified
5537 = simplify_cond_clz_ctz (XEXP (op0, 0), GET_CODE (op0),
5538 op1, op2);
5539 if (simplified)
5540 return simplified;
5543 if (COMPARISON_P (op0) && ! side_effects_p (op0))
5545 machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
5546 ? GET_MODE (XEXP (op0, 1))
5547 : GET_MODE (XEXP (op0, 0)));
5548 rtx temp;
5550 /* Look for happy constants in op1 and op2. */
5551 if (CONST_INT_P (op1) && CONST_INT_P (op2))
5553 HOST_WIDE_INT t = INTVAL (op1);
5554 HOST_WIDE_INT f = INTVAL (op2);
5556 if (t == STORE_FLAG_VALUE && f == 0)
5557 code = GET_CODE (op0);
5558 else if (t == 0 && f == STORE_FLAG_VALUE)
5560 enum rtx_code tmp;
5561 tmp = reversed_comparison_code (op0, NULL);
5562 if (tmp == UNKNOWN)
5563 break;
5564 code = tmp;
5566 else
5567 break;
5569 return simplify_gen_relational (code, mode, cmp_mode,
5570 XEXP (op0, 0), XEXP (op0, 1));
5573 if (cmp_mode == VOIDmode)
5574 cmp_mode = op0_mode;
5575 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
5576 cmp_mode, XEXP (op0, 0),
5577 XEXP (op0, 1));
5579 /* See if any simplifications were possible. */
5580 if (temp)
5582 if (CONST_INT_P (temp))
5583 return temp == const0_rtx ? op2 : op1;
5584 else if (temp)
5585 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
5588 break;
5590 case VEC_MERGE:
5591 gcc_assert (GET_MODE (op0) == mode);
5592 gcc_assert (GET_MODE (op1) == mode);
5593 gcc_assert (VECTOR_MODE_P (mode));
5594 trueop2 = avoid_constant_pool_reference (op2);
5595 if (CONST_INT_P (trueop2))
5597 int elt_size = GET_MODE_UNIT_SIZE (mode);
5598 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
5599 unsigned HOST_WIDE_INT sel = UINTVAL (trueop2);
5600 unsigned HOST_WIDE_INT mask;
5601 if (n_elts == HOST_BITS_PER_WIDE_INT)
5602 mask = -1;
5603 else
5604 mask = (HOST_WIDE_INT_1U << n_elts) - 1;
5606 if (!(sel & mask) && !side_effects_p (op0))
5607 return op1;
5608 if ((sel & mask) == mask && !side_effects_p (op1))
5609 return op0;
5611 rtx trueop0 = avoid_constant_pool_reference (op0);
5612 rtx trueop1 = avoid_constant_pool_reference (op1);
5613 if (GET_CODE (trueop0) == CONST_VECTOR
5614 && GET_CODE (trueop1) == CONST_VECTOR)
5616 rtvec v = rtvec_alloc (n_elts);
5617 unsigned int i;
5619 for (i = 0; i < n_elts; i++)
5620 RTVEC_ELT (v, i) = ((sel & (HOST_WIDE_INT_1U << i))
5621 ? CONST_VECTOR_ELT (trueop0, i)
5622 : CONST_VECTOR_ELT (trueop1, i));
5623 return gen_rtx_CONST_VECTOR (mode, v);
5626 /* Replace (vec_merge (vec_merge a b m) c n) with (vec_merge b c n)
5627 if no element from a appears in the result. */
5628 if (GET_CODE (op0) == VEC_MERGE)
5630 tem = avoid_constant_pool_reference (XEXP (op0, 2));
5631 if (CONST_INT_P (tem))
5633 unsigned HOST_WIDE_INT sel0 = UINTVAL (tem);
5634 if (!(sel & sel0 & mask) && !side_effects_p (XEXP (op0, 0)))
5635 return simplify_gen_ternary (code, mode, mode,
5636 XEXP (op0, 1), op1, op2);
5637 if (!(sel & ~sel0 & mask) && !side_effects_p (XEXP (op0, 1)))
5638 return simplify_gen_ternary (code, mode, mode,
5639 XEXP (op0, 0), op1, op2);
5642 if (GET_CODE (op1) == VEC_MERGE)
5644 tem = avoid_constant_pool_reference (XEXP (op1, 2));
5645 if (CONST_INT_P (tem))
5647 unsigned HOST_WIDE_INT sel1 = UINTVAL (tem);
5648 if (!(~sel & sel1 & mask) && !side_effects_p (XEXP (op1, 0)))
5649 return simplify_gen_ternary (code, mode, mode,
5650 op0, XEXP (op1, 1), op2);
5651 if (!(~sel & ~sel1 & mask) && !side_effects_p (XEXP (op1, 1)))
5652 return simplify_gen_ternary (code, mode, mode,
5653 op0, XEXP (op1, 0), op2);
5657 /* Replace (vec_merge (vec_duplicate (vec_select a parallel (i))) a 1 << i)
5658 with a. */
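/* For instance, (vec_merge:V4SI (vec_duplicate:V4SI (vec_select:SI
(reg:V4SI a) (parallel [(const_int 2)]))) (reg:V4SI a) (const_int 4))
overwrites element 2 with a copy of itself, so it is just (reg:V4SI a). */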
5659 if (GET_CODE (op0) == VEC_DUPLICATE
5660 && GET_CODE (XEXP (op0, 0)) == VEC_SELECT
5661 && GET_CODE (XEXP (XEXP (op0, 0), 1)) == PARALLEL
5662 && mode_nunits[GET_MODE (XEXP (op0, 0))] == 1)
5664 tem = XVECEXP ((XEXP (XEXP (op0, 0), 1)), 0, 0);
5665 if (CONST_INT_P (tem) && CONST_INT_P (op2))
5667 if (XEXP (XEXP (op0, 0), 0) == op1
5668 && UINTVAL (op2) == HOST_WIDE_INT_1U << UINTVAL (tem))
5669 return op1;
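/* A merge of a value with itself is that value, provided neither the
value nor the selector has side effects. */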
5674 if (rtx_equal_p (op0, op1)
5675 && !side_effects_p (op2) && !side_effects_p (op1))
5676 return op0;
5678 break;
5680 default:
5681 gcc_unreachable ();
5684 return 0;
5687 /* Evaluate a SUBREG of a CONST_INT or CONST_WIDE_INT or CONST_DOUBLE
5688 or CONST_FIXED or CONST_VECTOR, returning another CONST_INT or
5689 CONST_WIDE_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.
5691 Works by unpacking OP into a collection of 8-bit values
5692 represented as a little-endian array of 'unsigned char', selecting by BYTE,
5693 and then repacking them again for OUTERMODE. */
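/* For instance, a lowpart HImode SUBREG of the SImode constant 0x12345678
unpacks the bytes 78 56 34 12 (least significant first), keeps the first
two, and repacks them into the HImode constant 0x5678. */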
5695 static rtx
5696 simplify_immed_subreg (machine_mode outermode, rtx op,
5697 machine_mode innermode, unsigned int byte)
5699 enum {
5700 value_bit = 8,
5701 value_mask = (1 << value_bit) - 1
5703 unsigned char value[MAX_BITSIZE_MODE_ANY_MODE / value_bit];
5704 int value_start;
5705 int i;
5706 int elem;
5708 int num_elem;
5709 rtx * elems;
5710 int elem_bitsize;
5711 rtx result_s = NULL;
5712 rtvec result_v = NULL;
5713 enum mode_class outer_class;
5714 machine_mode outer_submode;
5715 int max_bitsize;
5717 /* Some ports misuse CCmode. */
5718 if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
5719 return op;
5721 /* We have no way to represent a complex constant at the rtl level. */
5722 if (COMPLEX_MODE_P (outermode))
5723 return NULL_RTX;
5725 /* We support any size mode. */
5726 max_bitsize = MAX (GET_MODE_BITSIZE (outermode),
5727 GET_MODE_BITSIZE (innermode));
5729 /* Unpack the value. */
5731 if (GET_CODE (op) == CONST_VECTOR)
5733 num_elem = CONST_VECTOR_NUNITS (op);
5734 elems = &CONST_VECTOR_ELT (op, 0);
5735 elem_bitsize = GET_MODE_UNIT_BITSIZE (innermode);
5737 else
5739 num_elem = 1;
5740 elems = &op;
5741 elem_bitsize = max_bitsize;
5743 /* If this asserts, it is too complicated; reducing value_bit may help. */
5744 gcc_assert (BITS_PER_UNIT % value_bit == 0);
5745 /* I don't know how to handle endianness of sub-units. */
5746 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
5748 for (elem = 0; elem < num_elem; elem++)
5750 unsigned char * vp;
5751 rtx el = elems[elem];
5753 /* Vectors are kept in target memory order. (This is probably
5754 a mistake.) */
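/* Compute where this element's bytes start in VALUE. The swizzle below
renumbers bytes so that index 0 always holds the least significant byte;
on a target that is little-endian in both words and bytes, BYTELE is
simply the element's byte offset. */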
5756 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5757 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5758 / BITS_PER_UNIT);
5759 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5760 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5761 unsigned bytele = (subword_byte % UNITS_PER_WORD
5762 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5763 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
5766 switch (GET_CODE (el))
5768 case CONST_INT:
5769 for (i = 0;
5770 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5771 i += value_bit)
5772 *vp++ = INTVAL (el) >> i;
5773 /* CONST_INTs are always logically sign-extended. */
5774 for (; i < elem_bitsize; i += value_bit)
5775 *vp++ = INTVAL (el) < 0 ? -1 : 0;
5776 break;
5778 case CONST_WIDE_INT:
5780 rtx_mode_t val = rtx_mode_t (el, innermode);
5781 unsigned char extend = wi::sign_mask (val);
5782 int prec = wi::get_precision (val);
5784 for (i = 0; i < prec && i < elem_bitsize; i += value_bit)
5785 *vp++ = wi::extract_uhwi (val, i, value_bit);
5786 for (; i < elem_bitsize; i += value_bit)
5787 *vp++ = extend;
5789 break;
5791 case CONST_DOUBLE:
5792 if (TARGET_SUPPORTS_WIDE_INT == 0 && GET_MODE (el) == VOIDmode)
5794 unsigned char extend = 0;
5795 /* If this triggers, someone should have generated a
5796 CONST_INT instead. */
5797 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
5799 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5800 *vp++ = CONST_DOUBLE_LOW (el) >> i;
5801 while (i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize)
5803 *vp++
5804 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
5805 i += value_bit;
5808 if (CONST_DOUBLE_HIGH (el) >> (HOST_BITS_PER_WIDE_INT - 1))
5809 extend = -1;
5810 for (; i < elem_bitsize; i += value_bit)
5811 *vp++ = extend;
5813 else
5815 /* This is big enough for anything on the platform. */
5816 long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32];
5817 scalar_float_mode el_mode;
5819 el_mode = as_a <scalar_float_mode> (GET_MODE (el));
5820 int bitsize = GET_MODE_BITSIZE (el_mode);
5822 gcc_assert (bitsize <= elem_bitsize);
5823 gcc_assert (bitsize % value_bit == 0);
5825 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
5826 GET_MODE (el));
5828 /* real_to_target produces its result in words affected by
5829 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5830 and use WORDS_BIG_ENDIAN instead; see the documentation
5831 of SUBREG in rtl.texi. */
5832 for (i = 0; i < bitsize; i += value_bit)
5834 int ibase;
5835 if (WORDS_BIG_ENDIAN)
5836 ibase = bitsize - 1 - i;
5837 else
5838 ibase = i;
5839 *vp++ = tmp[ibase / 32] >> i % 32;
5842 /* It shouldn't matter what's done here, so fill it with
5843 zero. */
5844 for (; i < elem_bitsize; i += value_bit)
5845 *vp++ = 0;
5847 break;
5849 case CONST_FIXED:
5850 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
5852 for (i = 0; i < elem_bitsize; i += value_bit)
5853 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5855 else
5857 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5858 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5859 for (; i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize;
5860 i += value_bit)
5861 *vp++ = CONST_FIXED_VALUE_HIGH (el)
5862 >> (i - HOST_BITS_PER_WIDE_INT);
5863 for (; i < elem_bitsize; i += value_bit)
5864 *vp++ = 0;
5866 break;
5868 default:
5869 gcc_unreachable ();
5873 /* Now, pick the right byte to start with. */
5874 /* Renumber BYTE so that the least-significant byte is byte 0. A special
5875 case is paradoxical SUBREGs, which shouldn't be adjusted since they
5876 will already have offset 0. */
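/* E.g. for an SImode SUBREG of a DImode constant at byte 4: on a
big-endian target IBYTE is 0, so BYTE is renumbered to 0 (the least
significant word), while on a little-endian target BYTE stays 4. */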
5877 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
5879 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
5880 - byte);
5881 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5882 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5883 byte = (subword_byte % UNITS_PER_WORD
5884 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5887 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
5888 so if it's become negative it will instead be very large.) */
5889 gcc_assert (byte < GET_MODE_SIZE (innermode));
5891 /* Convert from bytes to chunks of size value_bit. */
5892 value_start = byte * (BITS_PER_UNIT / value_bit);
5894 /* Re-pack the value. */
5895 num_elem = GET_MODE_NUNITS (outermode);
5897 if (VECTOR_MODE_P (outermode))
5899 result_v = rtvec_alloc (num_elem);
5900 elems = &RTVEC_ELT (result_v, 0);
5902 else
5903 elems = &result_s;
5905 outer_submode = GET_MODE_INNER (outermode);
5906 outer_class = GET_MODE_CLASS (outer_submode);
5907 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
5909 gcc_assert (elem_bitsize % value_bit == 0);
5910 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
5912 for (elem = 0; elem < num_elem; elem++)
5914 unsigned char *vp;
5916 /* Vectors are stored in target memory order. (This is probably
5917 a mistake.) */
5919 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5920 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5921 / BITS_PER_UNIT);
5922 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5923 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5924 unsigned bytele = (subword_byte % UNITS_PER_WORD
5925 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5926 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
5929 switch (outer_class)
5931 case MODE_INT:
5932 case MODE_PARTIAL_INT:
5934 int u;
5935 int base = 0;
5936 int units
5937 = (GET_MODE_BITSIZE (outer_submode) + HOST_BITS_PER_WIDE_INT - 1)
5938 / HOST_BITS_PER_WIDE_INT;
5939 HOST_WIDE_INT tmp[MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_WIDE_INT];
5940 wide_int r;
5942 if (GET_MODE_PRECISION (outer_submode) > MAX_BITSIZE_MODE_ANY_INT)
5943 return NULL_RTX;
5944 for (u = 0; u < units; u++)
5946 unsigned HOST_WIDE_INT buf = 0;
5947 for (i = 0;
5948 i < HOST_BITS_PER_WIDE_INT && base + i < elem_bitsize;
5949 i += value_bit)
5950 buf |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5952 tmp[u] = buf;
5953 base += HOST_BITS_PER_WIDE_INT;
5955 r = wide_int::from_array (tmp, units,
5956 GET_MODE_PRECISION (outer_submode));
5957 #if TARGET_SUPPORTS_WIDE_INT == 0
5958 /* Make sure r will fit into CONST_INT or CONST_DOUBLE. */
5959 if (wi::min_precision (r, SIGNED) > HOST_BITS_PER_DOUBLE_INT)
5960 return NULL_RTX;
5961 #endif
5962 elems[elem] = immed_wide_int_const (r, outer_submode);
5964 break;
5966 case MODE_FLOAT:
5967 case MODE_DECIMAL_FLOAT:
5969 REAL_VALUE_TYPE r;
5970 long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32] = { 0 };
5972 /* real_from_target wants its input in words affected by
5973 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5974 and use WORDS_BIG_ENDIAN instead; see the documentation
5975 of SUBREG in rtl.texi. */
5976 for (i = 0; i < elem_bitsize; i += value_bit)
5978 int ibase;
5979 if (WORDS_BIG_ENDIAN)
5980 ibase = elem_bitsize - 1 - i;
5981 else
5982 ibase = i;
5983 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
5986 real_from_target (&r, tmp, outer_submode);
5987 elems[elem] = const_double_from_real_value (r, outer_submode);
5989 break;
5991 case MODE_FRACT:
5992 case MODE_UFRACT:
5993 case MODE_ACCUM:
5994 case MODE_UACCUM:
5996 FIXED_VALUE_TYPE f;
5997 f.data.low = 0;
5998 f.data.high = 0;
5999 f.mode = outer_submode;
6001 for (i = 0;
6002 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
6003 i += value_bit)
6004 f.data.low |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
6005 for (; i < elem_bitsize; i += value_bit)
6006 f.data.high |= ((unsigned HOST_WIDE_INT)(*vp++ & value_mask)
6007 << (i - HOST_BITS_PER_WIDE_INT));
6009 elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
6011 break;
6013 default:
6014 gcc_unreachable ();
6017 if (VECTOR_MODE_P (outermode))
6018 return gen_rtx_CONST_VECTOR (outermode, result_v);
6019 else
6020 return result_s;
6023 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
6024 Return 0 if no simplifications are possible. */
6025 rtx
6026 simplify_subreg (machine_mode outermode, rtx op,
6027 machine_mode innermode, unsigned int byte)
6029 /* Little bit of sanity checking. */
6030 gcc_assert (innermode != VOIDmode);
6031 gcc_assert (outermode != VOIDmode);
6032 gcc_assert (innermode != BLKmode);
6033 gcc_assert (outermode != BLKmode);
6035 gcc_assert (GET_MODE (op) == innermode
6036 || GET_MODE (op) == VOIDmode);
6038 if ((byte % GET_MODE_SIZE (outermode)) != 0)
6039 return NULL_RTX;
6041 if (byte >= GET_MODE_SIZE (innermode))
6042 return NULL_RTX;
6044 if (outermode == innermode && !byte)
6045 return op;
6047 if (CONST_SCALAR_INT_P (op)
6048 || CONST_DOUBLE_AS_FLOAT_P (op)
6049 || GET_CODE (op) == CONST_FIXED
6050 || GET_CODE (op) == CONST_VECTOR)
6051 return simplify_immed_subreg (outermode, op, innermode, byte);
6053 /* Changing mode twice with SUBREG => just change it once,
6054 or not at all if changing back to op's starting mode. */
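/* E.g. (subreg:QI (subreg:HI (reg:SI x) 0) 0) typically folds to
(subreg:QI (reg:SI x) 0), and a SUBREG that changes back to the innermost
mode at offset 0 folds to the inner expression itself. */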
6055 if (GET_CODE (op) == SUBREG)
6057 machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
6058 int final_offset = byte + SUBREG_BYTE (op);
6059 rtx newx;
6061 if (outermode == innermostmode
6062 && byte == 0 && SUBREG_BYTE (op) == 0)
6063 return SUBREG_REG (op);
6065 /* The SUBREG_BYTE represents the offset, as if the value were stored
6066 in memory. The irritating exception is the paradoxical subreg,
6067 where we define SUBREG_BYTE to be 0; on big-endian machines this
6068 value would otherwise be negative. For a moment, undo this exception. */
6069 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
6071 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
6072 if (WORDS_BIG_ENDIAN)
6073 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
6074 if (BYTES_BIG_ENDIAN)
6075 final_offset += difference % UNITS_PER_WORD;
6077 if (SUBREG_BYTE (op) == 0
6078 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
6080 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
6081 if (WORDS_BIG_ENDIAN)
6082 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
6083 if (BYTES_BIG_ENDIAN)
6084 final_offset += difference % UNITS_PER_WORD;
6087 /* See whether resulting subreg will be paradoxical. */
6088 if (!paradoxical_subreg_p (outermode, innermostmode))
6090 /* In nonparadoxical subregs we can't handle negative offsets. */
6091 if (final_offset < 0)
6092 return NULL_RTX;
6093 /* Bail out in case resulting subreg would be incorrect. */
6094 if (final_offset % GET_MODE_SIZE (outermode)
6095 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
6096 return NULL_RTX;
6098 else
6100 int offset = 0;
6101 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
6103 /* In a paradoxical subreg, see if we are still looking at the lower
6104 part. If so, our SUBREG_BYTE will be 0. */
6105 if (WORDS_BIG_ENDIAN)
6106 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
6107 if (BYTES_BIG_ENDIAN)
6108 offset += difference % UNITS_PER_WORD;
6109 if (offset == final_offset)
6110 final_offset = 0;
6111 else
6112 return NULL_RTX;
6115 /* Recurse for further possible simplifications. */
6116 newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
6117 final_offset);
6118 if (newx)
6119 return newx;
6120 if (validate_subreg (outermode, innermostmode,
6121 SUBREG_REG (op), final_offset))
6123 newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
6124 if (SUBREG_PROMOTED_VAR_P (op)
6125 && SUBREG_PROMOTED_SIGN (op) >= 0
6126 && GET_MODE_CLASS (outermode) == MODE_INT
6127 && IN_RANGE (GET_MODE_SIZE (outermode),
6128 GET_MODE_SIZE (innermode),
6129 GET_MODE_SIZE (innermostmode))
6130 && subreg_lowpart_p (newx))
6132 SUBREG_PROMOTED_VAR_P (newx) = 1;
6133 SUBREG_PROMOTED_SET (newx, SUBREG_PROMOTED_GET (op));
6135 return newx;
6137 return NULL_RTX;
6140 /* SUBREG of a hard register => just change the register number
6141 and/or mode. If the hard register is not valid in that mode,
6142 suppress this simplification. If the hard register is the stack,
6143 frame, or argument pointer, leave this as a SUBREG. */
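/* For instance, assuming a target with 32-bit hard registers, little-endian
word order, and SImode valid in register 1, (subreg:SI (reg:DI 0) 4) can
become (reg:SI 1); simplify_subreg_regno makes that determination. */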
6145 if (REG_P (op) && HARD_REGISTER_P (op))
6147 unsigned int regno, final_regno;
6149 regno = REGNO (op);
6150 final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
6151 if (HARD_REGISTER_NUM_P (final_regno))
6153 rtx x;
6154 int final_offset = byte;
6156 /* Adjust offset for paradoxical subregs. */
6157 if (byte == 0
6158 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
6160 int difference = (GET_MODE_SIZE (innermode)
6161 - GET_MODE_SIZE (outermode));
6162 if (WORDS_BIG_ENDIAN)
6163 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
6164 if (BYTES_BIG_ENDIAN)
6165 final_offset += difference % UNITS_PER_WORD;
6168 x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);
6170 /* Propagate the original regno. We don't have any way to specify
6171 the offset inside the original regno, so do so only for the lowpart.
6172 The information is used only by alias analysis, which cannot
6173 grok partial registers anyway. */
6175 if (subreg_lowpart_offset (outermode, innermode) == byte)
6176 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
6177 return x;
6181 /* If we have a SUBREG of a register that we are replacing and we are
6182 replacing it with a MEM, make a new MEM and try replacing the
6183 SUBREG with it. Don't do this if the MEM has a mode-dependent address
6184 or if we would be widening it. */
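/* E.g. (subreg:QI (mem:SI addr) 3) can be rewritten as a QImode memory
reference 3 bytes into the original location, which is what
adjust_address_nv produces below. */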
6186 if (MEM_P (op)
6187 && ! mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op))
6188 /* Allow splitting of volatile memory references in case we don't
6189 have an instruction to move the whole thing. */
6190 && (! MEM_VOLATILE_P (op)
6191 || ! have_insn_for (SET, innermode))
6192 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
6193 return adjust_address_nv (op, outermode, byte);
6195 /* Handle complex or vector values represented as CONCAT or VEC_CONCAT
6196 of two parts. */
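/* E.g. for a complex value (concat:SC re:SF im:SF), an SFmode SUBREG at
byte 4 is simply IM, the imaginary part. */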
6197 if (GET_CODE (op) == CONCAT
6198 || GET_CODE (op) == VEC_CONCAT)
6200 unsigned int part_size, final_offset;
6201 rtx part, res;
6203 machine_mode part_mode = GET_MODE (XEXP (op, 0));
6204 if (part_mode == VOIDmode)
6205 part_mode = GET_MODE_INNER (GET_MODE (op));
6206 part_size = GET_MODE_SIZE (part_mode);
6207 if (byte < part_size)
6209 part = XEXP (op, 0);
6210 final_offset = byte;
6212 else
6214 part = XEXP (op, 1);
6215 final_offset = byte - part_size;
6218 if (final_offset + GET_MODE_SIZE (outermode) > part_size)
6219 return NULL_RTX;
6221 part_mode = GET_MODE (part);
6222 if (part_mode == VOIDmode)
6223 part_mode = GET_MODE_INNER (GET_MODE (op));
6224 res = simplify_subreg (outermode, part, part_mode, final_offset);
6225 if (res)
6226 return res;
6227 if (validate_subreg (outermode, part_mode, part, final_offset))
6228 return gen_rtx_SUBREG (outermode, part, final_offset);
6229 return NULL_RTX;
6232 /* A SUBREG resulting from a zero extension may fold to zero if
6233 it extracts higher bits than the ZERO_EXTEND's source bits. */
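/* E.g. (subreg:SI (zero_extend:DI (reg:SI x)) 4) on a little-endian
target reads only the zeroed upper half, so it folds to (const_int 0). */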
6234 if (GET_CODE (op) == ZERO_EXTEND && SCALAR_INT_MODE_P (innermode))
6236 unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
6237 if (bitpos >= GET_MODE_PRECISION (GET_MODE (XEXP (op, 0))))
6238 return CONST0_RTX (outermode);
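/* A SUBREG that selects the low part of a wider scalar integer acts like
a TRUNCATE, so try the truncation simplifications as well. */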
6241 if (SCALAR_INT_MODE_P (outermode)
6242 && SCALAR_INT_MODE_P (innermode)
6243 && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode)
6244 && byte == subreg_lowpart_offset (outermode, innermode))
6246 rtx tem = simplify_truncation (outermode, op, innermode);
6247 if (tem)
6248 return tem;
6251 return NULL_RTX;
6254 /* Make a SUBREG operation or equivalent if it folds. */
6256 rtx
6257 simplify_gen_subreg (machine_mode outermode, rtx op,
6258 machine_mode innermode, unsigned int byte)
6260 rtx newx;
6262 newx = simplify_subreg (outermode, op, innermode, byte);
6263 if (newx)
6264 return newx;
6266 if (GET_CODE (op) == SUBREG
6267 || GET_CODE (op) == CONCAT
6268 || GET_MODE (op) == VOIDmode)
6269 return NULL_RTX;
6271 if (validate_subreg (outermode, innermode, op, byte))
6272 return gen_rtx_SUBREG (outermode, op, byte);
6274 return NULL_RTX;
6277 /* Generate a subreg to get the least significant part of EXPR (in mode
6278 INNER_MODE) in OUTER_MODE. */
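/* E.g. lowpart_subreg (QImode, x, SImode) asks for byte 0 of X on a
little-endian target (byte 3 on a big-endian one) and returns either a
simplified rtx, a plain (subreg:QI ...), or NULL_RTX. */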
6280 rtx
6281 lowpart_subreg (machine_mode outer_mode, rtx expr,
6282 machine_mode inner_mode)
6284 return simplify_gen_subreg (outer_mode, expr, inner_mode,
6285 subreg_lowpart_offset (outer_mode, inner_mode));
6288 /* Simplify X, an rtx expression.
6290 Return the simplified expression or NULL if no simplifications
6291 were possible.
6293 This is the preferred entry point into the simplification routines;
6294 however, we still allow passes to call the more specific routines.
6296 Right now GCC has three (yes, three) major bodies of RTL simplification
6297 code that need to be unified.
6299 1. fold_rtx in cse.c. This code uses various CSE specific
6300 information to aid in RTL simplification.
6302 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
6303 it uses combine specific information to aid in RTL
6304 simplification.
6306 3. The routines in this file.
6309 Long term we want to have only one body of simplification code; to
6310 get to that state I recommend the following steps:
6312 1. Pore over fold_rtx & simplify_rtx and move any simplifications
6313 which do not depend on pass-specific state into these routines.
6315 2. As code is moved by #1, change fold_rtx & simplify_rtx to
6316 use this routine whenever possible.
6318 3. Allow for pass dependent state to be provided to these
6319 routines and add simplifications based on the pass dependent
6320 state. Remove code from cse.c & combine.c that becomes
6321 redundant/dead.
6323 It will take time, but ultimately the compiler will be easier to
6324 maintain and improve. It's totally silly that when we add a
6325 simplification it needs to be added to 4 places (3 for RTL
6326 simplification and 1 for tree simplification). */
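/* For instance, simplify_rtx applied to (plus:SI (const_int 2) (const_int 3))
returns (const_int 5); a NULL return means no simplification was found. */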
6328 rtx
6329 simplify_rtx (const_rtx x)
6331 const enum rtx_code code = GET_CODE (x);
6332 const machine_mode mode = GET_MODE (x);
6334 switch (GET_RTX_CLASS (code))
6336 case RTX_UNARY:
6337 return simplify_unary_operation (code, mode,
6338 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
6339 case RTX_COMM_ARITH:
6340 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
6341 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
6343 /* Fall through. */
6345 case RTX_BIN_ARITH:
6346 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
6348 case RTX_TERNARY:
6349 case RTX_BITFIELD_OPS:
6350 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
6351 XEXP (x, 0), XEXP (x, 1),
6352 XEXP (x, 2));
6354 case RTX_COMPARE:
6355 case RTX_COMM_COMPARE:
6356 return simplify_relational_operation (code, mode,
6357 ((GET_MODE (XEXP (x, 0))
6358 != VOIDmode)
6359 ? GET_MODE (XEXP (x, 0))
6360 : GET_MODE (XEXP (x, 1))),
6361 XEXP (x, 0),
6362 XEXP (x, 1));
6364 case RTX_EXTRA:
6365 if (code == SUBREG)
6366 return simplify_subreg (mode, SUBREG_REG (x),
6367 GET_MODE (SUBREG_REG (x)),
6368 SUBREG_BYTE (x));
6369 break;
6371 case RTX_OBJ:
6372 if (code == LO_SUM)
6374 /* Convert (lo_sum (high FOO) FOO) to FOO. */
6375 if (GET_CODE (XEXP (x, 0)) == HIGH
6376 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
6377 return XEXP (x, 1);
6379 break;
6381 default:
6382 break;
6384 return NULL;