/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987-2017 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "backend.h"
25 #include "target.h"
26 #include "rtl.h"
27 #include "tree.h"
28 #include "predict.h"
29 #include "memmodel.h"
30 #include "optabs.h"
31 #include "emit-rtl.h"
32 #include "recog.h"
33 #include "diagnostic-core.h"
34 #include "varasm.h"
35 #include "flags.h"

/* Simplification and canonicalization of RTL.  */

/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
#define HWI_SIGN_EXTEND(low) \
  ((((HOST_WIDE_INT) low) < 0) ? HOST_WIDE_INT_M1 : HOST_WIDE_INT_0)
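
/* Illustrative sketch (not part of the original GCC source): HWI_SIGN_EXTEND
   gives the value the high half of a (low, high) pair must take when the low
   half is reinterpreted as a signed wide int -- all-ones if the sign bit of
   LOW is set, zero otherwise.  A minimal standalone rendering of the same
   computation, using only names already defined above:  */

static inline HOST_WIDE_INT
example_hwi_sign_extend (unsigned HOST_WIDE_INT low)
{
  /* Equivalent to HWI_SIGN_EXTEND (low).  */
  return ((HOST_WIDE_INT) low < 0) ? HOST_WIDE_INT_M1 : HOST_WIDE_INT_0;
}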

static rtx neg_const_int (machine_mode, const_rtx);
static bool plus_minus_operand_p (const_rtx);
static rtx simplify_plus_minus (enum rtx_code, machine_mode, rtx, rtx);
static rtx simplify_immed_subreg (machine_mode, rtx, machine_mode,
                                  unsigned int);
static rtx simplify_associative_operation (enum rtx_code, machine_mode,
                                           rtx, rtx);
static rtx simplify_relational_operation_1 (enum rtx_code, machine_mode,
                                            machine_mode, rtx, rtx);
static rtx simplify_unary_operation_1 (enum rtx_code, machine_mode, rtx);
static rtx simplify_binary_operation_1 (enum rtx_code, machine_mode,
                                        rtx, rtx, rtx, rtx);

/* Negate a CONST_INT rtx.  */

static rtx
neg_const_int (machine_mode mode, const_rtx i)
{
  unsigned HOST_WIDE_INT val = -UINTVAL (i);

  if (GET_MODE_PRECISION (mode) > HOST_BITS_PER_WIDE_INT
      && val == UINTVAL (i))
    return simplify_const_unary_operation (NEG, mode, CONST_CAST_RTX (i),
                                           mode);
  return gen_int_mode (val, mode);
}
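
/* Illustrative sketch (not part of the original GCC source): the
   VAL == UINTVAL (I) test above catches the values whose negation equals
   themselves under HOST_WIDE_INT wrap-around, i.e. zero and the minimum
   signed value.  For modes wider than HOST_WIDE_INT that bit pattern is
   not its own negation, so the code falls back to the full constant
   folder instead of gen_int_mode.  A standalone check of the underlying
   identity:  */

static inline bool
example_is_self_negating (unsigned HOST_WIDE_INT x)
{
  /* True only for 0 and for the most negative representable value.  */
  return (unsigned HOST_WIDE_INT) -x == x;
}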

/* Test whether expression, X, is an immediate constant that represents
   the most significant bit of machine mode MODE.  */

bool
mode_signbit_p (machine_mode mode, const_rtx x)
{
  unsigned HOST_WIDE_INT val;
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0)
    return false;

  if (width <= HOST_BITS_PER_WIDE_INT
      && CONST_INT_P (x))
    val = INTVAL (x);
#if TARGET_SUPPORTS_WIDE_INT
  else if (CONST_WIDE_INT_P (x))
    {
      unsigned int i;
      unsigned int elts = CONST_WIDE_INT_NUNITS (x);
      if (elts != (width + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT)
        return false;
      for (i = 0; i < elts - 1; i++)
        if (CONST_WIDE_INT_ELT (x, i) != 0)
          return false;
      val = CONST_WIDE_INT_ELT (x, elts - 1);
      width %= HOST_BITS_PER_WIDE_INT;
      if (width == 0)
        width = HOST_BITS_PER_WIDE_INT;
    }
#else
  else if (width <= HOST_BITS_PER_DOUBLE_INT
           && CONST_DOUBLE_AS_INT_P (x)
           && CONST_DOUBLE_LOW (x) == 0)
    {
      val = CONST_DOUBLE_HIGH (x);
      width -= HOST_BITS_PER_WIDE_INT;
    }
#endif
  else
    /* X is not an integer constant.  */
    return false;

  if (width < HOST_BITS_PER_WIDE_INT)
    val &= (HOST_WIDE_INT_1U << width) - 1;
  return val == (HOST_WIDE_INT_1U << (width - 1));
}
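
/* Illustrative sketch (not part of the original GCC source): for a mode of
   precision WIDTH <= HOST_BITS_PER_WIDE_INT, the constant recognized by
   mode_signbit_p is simply 1 << (WIDTH - 1), e.g. 0x80 for an 8-bit mode
   and 0x8000 for a 16-bit mode.  */

static inline unsigned HOST_WIDE_INT
example_mode_signbit (unsigned int width)
{
  /* Assumes 0 < width <= HOST_BITS_PER_WIDE_INT, as in the narrow path
     of mode_signbit_p above.  */
  return HOST_WIDE_INT_1U << (width - 1);
}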

/* Test whether VAL is equal to the most significant bit of mode MODE
   (after masking with the mode mask of MODE).  Returns false if the
   precision of MODE is too large to handle.  */

bool
val_signbit_p (machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= GET_MODE_MASK (mode);
  return val == (HOST_WIDE_INT_1U << (width - 1));
}

/* Test whether the most significant bit of mode MODE is set in VAL.
   Returns false if the precision of MODE is too large to handle.  */
bool
val_signbit_known_set_p (machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= HOST_WIDE_INT_1U << (width - 1);
  return val != 0;
}

/* Test whether the most significant bit of mode MODE is clear in VAL.
   Returns false if the precision of MODE is too large to handle.  */
bool
val_signbit_known_clear_p (machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= HOST_WIDE_INT_1U << (width - 1);
  return val == 0;
}
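
/* Illustrative sketch (not part of the original GCC source): once a mode's
   precision is known, the val_signbit_* predicates above reduce to plain
   bit tests.  For an assumed 8-bit mode, 0x80 has the sign bit set and
   0x7f has it clear:  */

static inline bool
example_val_signbit_known_set_8bit (unsigned HOST_WIDE_INT val)
{
  /* Mirrors val_signbit_known_set_p for a hypothetical 8-bit mode.  */
  return (val & (HOST_WIDE_INT_1U << 7)) != 0;
}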

/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */

rtx
simplify_gen_binary (enum rtx_code code, machine_mode mode, rtx op0,
                     rtx op1)
{
  rtx tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);
  if (tem)
    return tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    std::swap (op0, op1);

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
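
/* Illustrative sketch (not part of the original GCC source): a typical
   caller hands simplify_gen_binary two arbitrary operands and relies on it
   both to fold constants and to place the constant second for commutative
   codes.  The helper below is hypothetical; REG stands for some pseudo
   register supplied by the caller.  */

static inline rtx
example_add_four (rtx reg)
{
  /* Yields (plus:SI reg (const_int 4)), or a folded CONST_INT if REG
     happens to be a constant itself.  */
  return simplify_gen_binary (PLUS, SImode, reg, GEN_INT (4));
}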

/* If X is a MEM referencing the constant pool, return the real value.
   Otherwise return X.  */
rtx
avoid_constant_pool_reference (rtx x)
{
  rtx c, tmp, addr;
  machine_mode cmode;
  HOST_WIDE_INT offset = 0;

  switch (GET_CODE (x))
    {
    case MEM:
      break;

    case FLOAT_EXTEND:
      /* Handle float extensions of constant pool references.  */
      tmp = XEXP (x, 0);
      c = avoid_constant_pool_reference (tmp);
      if (c != tmp && CONST_DOUBLE_AS_FLOAT_P (c))
        return const_double_from_real_value (*CONST_DOUBLE_REAL_VALUE (c),
                                             GET_MODE (x));
      return x;

    default:
      return x;
    }

  if (GET_MODE (x) == BLKmode)
    return x;

  addr = XEXP (x, 0);

  /* Call target hook to avoid the effects of -fpic etc....  */
  addr = targetm.delegitimize_address (addr);

  /* Split the address into a base and integer offset.  */
  if (GET_CODE (addr) == CONST
      && GET_CODE (XEXP (addr, 0)) == PLUS
      && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
    {
      offset = INTVAL (XEXP (XEXP (addr, 0), 1));
      addr = XEXP (XEXP (addr, 0), 0);
    }

  if (GET_CODE (addr) == LO_SUM)
    addr = XEXP (addr, 1);

  /* If this is a constant pool reference, we can turn it into its
     constant and hope that simplifications happen.  */
  if (GET_CODE (addr) == SYMBOL_REF
      && CONSTANT_POOL_ADDRESS_P (addr))
    {
      c = get_pool_constant (addr);
      cmode = get_pool_mode (addr);

      /* If we're accessing the constant in a different mode than it was
         originally stored, attempt to fix that up via subreg simplifications.
         If that fails we have no choice but to return the original memory.  */
      if (offset == 0 && cmode == GET_MODE (x))
        return c;
      else if (offset >= 0 && offset < GET_MODE_SIZE (cmode))
        {
          rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
          if (tem && CONSTANT_P (tem))
            return tem;
        }
    }

  return x;
}

/* Simplify a MEM based on its attributes.  This is the default
   delegitimize_address target hook, and it's recommended that every
   overrider call it.  */

rtx
delegitimize_mem_from_attrs (rtx x)
{
  /* MEMs without MEM_OFFSETs may have been offset, so we can't just
     use their base addresses as equivalent.  */
  if (MEM_P (x)
      && MEM_EXPR (x)
      && MEM_OFFSET_KNOWN_P (x))
    {
      tree decl = MEM_EXPR (x);
      machine_mode mode = GET_MODE (x);
      HOST_WIDE_INT offset = 0;

      switch (TREE_CODE (decl))
        {
        default:
          decl = NULL;
          break;

        case VAR_DECL:
          break;

        case ARRAY_REF:
        case ARRAY_RANGE_REF:
        case COMPONENT_REF:
        case BIT_FIELD_REF:
        case REALPART_EXPR:
        case IMAGPART_EXPR:
        case VIEW_CONVERT_EXPR:
          {
            HOST_WIDE_INT bitsize, bitpos;
            tree toffset;
            int unsignedp, reversep, volatilep = 0;

            decl
              = get_inner_reference (decl, &bitsize, &bitpos, &toffset, &mode,
                                     &unsignedp, &reversep, &volatilep);
            if (bitsize != GET_MODE_BITSIZE (mode)
                || (bitpos % BITS_PER_UNIT)
                || (toffset && !tree_fits_shwi_p (toffset)))
              decl = NULL;
            else
              {
                offset += bitpos / BITS_PER_UNIT;
                if (toffset)
                  offset += tree_to_shwi (toffset);
              }
            break;
          }
        }

      if (decl
          && mode == GET_MODE (x)
          && VAR_P (decl)
          && (TREE_STATIC (decl)
              || DECL_THREAD_LOCAL_P (decl))
          && DECL_RTL_SET_P (decl)
          && MEM_P (DECL_RTL (decl)))
        {
          rtx newx;

          offset += MEM_OFFSET (x);

          newx = DECL_RTL (decl);

          if (MEM_P (newx))
            {
              rtx n = XEXP (newx, 0), o = XEXP (x, 0);

              /* Avoid creating a new MEM needlessly if we already had
                 the same address.  We do if there's no OFFSET and the
                 old address X is identical to NEWX, or if X is of the
                 form (plus NEWX OFFSET), or the NEWX is of the form
                 (plus Y (const_int Z)) and X is that with the offset
                 added: (plus Y (const_int Z+OFFSET)).  */
              if (!((offset == 0
                     || (GET_CODE (o) == PLUS
                         && GET_CODE (XEXP (o, 1)) == CONST_INT
                         && (offset == INTVAL (XEXP (o, 1))
                             || (GET_CODE (n) == PLUS
                                 && GET_CODE (XEXP (n, 1)) == CONST_INT
                                 && (INTVAL (XEXP (n, 1)) + offset
                                     == INTVAL (XEXP (o, 1)))
                                 && (n = XEXP (n, 0))))
                         && (o = XEXP (o, 0))))
                    && rtx_equal_p (o, n)))
                x = adjust_address_nv (newx, mode, offset);
            }
          else if (GET_MODE (x) == GET_MODE (newx)
                   && offset == 0)
            x = newx;
        }
    }

  return x;
}

/* Make a unary operation by first seeing if it folds and otherwise making
   the specified operation.  */

rtx
simplify_gen_unary (enum rtx_code code, machine_mode mode, rtx op,
                    machine_mode op_mode)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
    return tem;

  return gen_rtx_fmt_e (code, mode, op);
}

/* Likewise for ternary operations.  */

rtx
simplify_gen_ternary (enum rtx_code code, machine_mode mode,
                      machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
                                              op0, op1, op2)))
    return tem;

  return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
}

/* Likewise, for relational operations.
   CMP_MODE specifies mode comparison is done in.  */

rtx
simplify_gen_relational (enum rtx_code code, machine_mode mode,
                         machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem;

  if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
                                                 op0, op1)))
    return tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
422 /* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
423 and simplify the result. If FN is non-NULL, call this callback on each
424 X, if it returns non-NULL, replace X with its return value and simplify the
425 result. */
428 simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
429 rtx (*fn) (rtx, const_rtx, void *), void *data)
431 enum rtx_code code = GET_CODE (x);
432 machine_mode mode = GET_MODE (x);
433 machine_mode op_mode;
434 const char *fmt;
435 rtx op0, op1, op2, newx, op;
436 rtvec vec, newvec;
437 int i, j;
439 if (__builtin_expect (fn != NULL, 0))
441 newx = fn (x, old_rtx, data);
442 if (newx)
443 return newx;
445 else if (rtx_equal_p (x, old_rtx))
446 return copy_rtx ((rtx) data);
448 switch (GET_RTX_CLASS (code))
450 case RTX_UNARY:
451 op0 = XEXP (x, 0);
452 op_mode = GET_MODE (op0);
453 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
454 if (op0 == XEXP (x, 0))
455 return x;
456 return simplify_gen_unary (code, mode, op0, op_mode);
458 case RTX_BIN_ARITH:
459 case RTX_COMM_ARITH:
460 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
461 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
462 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
463 return x;
464 return simplify_gen_binary (code, mode, op0, op1);
466 case RTX_COMPARE:
467 case RTX_COMM_COMPARE:
468 op0 = XEXP (x, 0);
469 op1 = XEXP (x, 1);
470 op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
471 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
472 op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
473 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
474 return x;
475 return simplify_gen_relational (code, mode, op_mode, op0, op1);
477 case RTX_TERNARY:
478 case RTX_BITFIELD_OPS:
479 op0 = XEXP (x, 0);
480 op_mode = GET_MODE (op0);
481 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
482 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
483 op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
484 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
485 return x;
486 if (op_mode == VOIDmode)
487 op_mode = GET_MODE (op0);
488 return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
490 case RTX_EXTRA:
491 if (code == SUBREG)
493 op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
494 if (op0 == SUBREG_REG (x))
495 return x;
496 op0 = simplify_gen_subreg (GET_MODE (x), op0,
497 GET_MODE (SUBREG_REG (x)),
498 SUBREG_BYTE (x));
499 return op0 ? op0 : x;
501 break;
503 case RTX_OBJ:
504 if (code == MEM)
506 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
507 if (op0 == XEXP (x, 0))
508 return x;
509 return replace_equiv_address_nv (x, op0);
511 else if (code == LO_SUM)
513 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
514 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
516 /* (lo_sum (high x) y) -> y where x and y have the same base. */
517 if (GET_CODE (op0) == HIGH)
519 rtx base0, base1, offset0, offset1;
520 split_const (XEXP (op0, 0), &base0, &offset0);
521 split_const (op1, &base1, &offset1);
522 if (rtx_equal_p (base0, base1))
523 return op1;
526 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
527 return x;
528 return gen_rtx_LO_SUM (mode, op0, op1);
530 break;
532 default:
533 break;
536 newx = x;
537 fmt = GET_RTX_FORMAT (code);
538 for (i = 0; fmt[i]; i++)
539 switch (fmt[i])
541 case 'E':
542 vec = XVEC (x, i);
543 newvec = XVEC (newx, i);
544 for (j = 0; j < GET_NUM_ELEM (vec); j++)
546 op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
547 old_rtx, fn, data);
548 if (op != RTVEC_ELT (vec, j))
550 if (newvec == vec)
552 newvec = shallow_copy_rtvec (vec);
553 if (x == newx)
554 newx = shallow_copy_rtx (x);
555 XVEC (newx, i) = newvec;
557 RTVEC_ELT (newvec, j) = op;
560 break;
562 case 'e':
563 if (XEXP (x, i))
565 op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
566 if (op != XEXP (x, i))
568 if (x == newx)
569 newx = shallow_copy_rtx (x);
570 XEXP (newx, i) = op;
573 break;
575 return newx;

/* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
   resulting RTX.  Return a new RTX which is as simplified as possible.  */

rtx
simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
{
  return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
}
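
/* Illustrative sketch (not part of the original GCC source): a hypothetical
   use of simplify_replace_rtx, substituting a constant for a register
   throughout an expression and letting the simplifiers fold the result,
   e.g. rewriting (plus:SI (reg) (const_int 1)) with reg := 7 into
   (const_int 8).  */

static inline rtx
example_substitute_reg (rtx expr, rtx reg)
{
  return simplify_replace_rtx (expr, reg, GEN_INT (7));
}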

/* Try to simplify a MODE truncation of OP, which has OP_MODE.
   Only handle cases where the truncated value is inherently an rvalue.

   RTL provides two ways of truncating a value:

   1. a lowpart subreg.  This form is only a truncation when both
      the outer and inner modes (here MODE and OP_MODE respectively)
      are scalar integers, and only then when the subreg is used as
      an rvalue.

      It is only valid to form such truncating subregs if the
      truncation requires no action by the target.  The onus for
      proving this is on the creator of the subreg -- e.g. the
      caller to simplify_subreg or simplify_gen_subreg -- and typically
      involves either TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode.

   2. a TRUNCATE.  This form handles both scalar and compound integers.

   The first form is preferred where valid.  However, the TRUNCATE
   handling in simplify_unary_operation turns the second form into the
   first form when TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode allow,
   so it is generally safe to form rvalue truncations using:

      simplify_gen_unary (TRUNCATE, ...)

   and leave simplify_unary_operation to work out which representation
   should be used.

   Because of the proof requirements on (1), simplify_truncation must
   also use simplify_gen_unary (TRUNCATE, ...) to truncate parts of OP,
   regardless of whether the outer truncation came from a SUBREG or a
   TRUNCATE.  For example, if the caller has proven that an SImode
   truncation of:

      (and:DI X Y)

   is a no-op and can be represented as a subreg, it does not follow
   that SImode truncations of X and Y are also no-ops.  On a target
   like 64-bit MIPS that requires SImode values to be stored in
   sign-extended form, an SImode truncation of:

      (and:DI (reg:DI X) (const_int 63))

   is trivially a no-op because only the lower 6 bits can be set.
   However, X is still an arbitrary 64-bit number and so we cannot
   assume that truncating it too is a no-op.  */
634 static rtx
635 simplify_truncation (machine_mode mode, rtx op,
636 machine_mode op_mode)
638 unsigned int precision = GET_MODE_UNIT_PRECISION (mode);
639 unsigned int op_precision = GET_MODE_UNIT_PRECISION (op_mode);
640 gcc_assert (precision <= op_precision);
642 /* Optimize truncations of zero and sign extended values. */
643 if (GET_CODE (op) == ZERO_EXTEND
644 || GET_CODE (op) == SIGN_EXTEND)
646 /* There are three possibilities. If MODE is the same as the
647 origmode, we can omit both the extension and the subreg.
648 If MODE is not larger than the origmode, we can apply the
649 truncation without the extension. Finally, if the outermode
650 is larger than the origmode, we can just extend to the appropriate
651 mode. */
652 machine_mode origmode = GET_MODE (XEXP (op, 0));
653 if (mode == origmode)
654 return XEXP (op, 0);
655 else if (precision <= GET_MODE_UNIT_PRECISION (origmode))
656 return simplify_gen_unary (TRUNCATE, mode,
657 XEXP (op, 0), origmode);
658 else
659 return simplify_gen_unary (GET_CODE (op), mode,
660 XEXP (op, 0), origmode);
663 /* If the machine can perform operations in the truncated mode, distribute
664 the truncation, i.e. simplify (truncate:QI (op:SI (x:SI) (y:SI))) into
665 (op:QI (truncate:QI (x:SI)) (truncate:QI (y:SI))). */
666 if (1
667 && (!WORD_REGISTER_OPERATIONS || precision >= BITS_PER_WORD)
668 && (GET_CODE (op) == PLUS
669 || GET_CODE (op) == MINUS
670 || GET_CODE (op) == MULT))
672 rtx op0 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0), op_mode);
673 if (op0)
675 rtx op1 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 1), op_mode);
676 if (op1)
677 return simplify_gen_binary (GET_CODE (op), mode, op0, op1);
681 /* Simplify (truncate:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C)) into
682 to (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
683 the outer subreg is effectively a truncation to the original mode. */
684 if ((GET_CODE (op) == LSHIFTRT
685 || GET_CODE (op) == ASHIFTRT)
686 /* Ensure that OP_MODE is at least twice as wide as MODE
687 to avoid the possibility that an outer LSHIFTRT shifts by more
688 than the sign extension's sign_bit_copies and introduces zeros
689 into the high bits of the result. */
690 && 2 * precision <= op_precision
691 && CONST_INT_P (XEXP (op, 1))
692 && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
693 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
694 && UINTVAL (XEXP (op, 1)) < precision)
695 return simplify_gen_binary (ASHIFTRT, mode,
696 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
698 /* Likewise (truncate:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C)) into
699 to (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
700 the outer subreg is effectively a truncation to the original mode. */
701 if ((GET_CODE (op) == LSHIFTRT
702 || GET_CODE (op) == ASHIFTRT)
703 && CONST_INT_P (XEXP (op, 1))
704 && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
705 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
706 && UINTVAL (XEXP (op, 1)) < precision)
707 return simplify_gen_binary (LSHIFTRT, mode,
708 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
710 /* Likewise (truncate:QI (ashift:SI (zero_extend:SI (x:QI)) C)) into
711 to (ashift:QI (x:QI) C), where C is a suitable small constant and
712 the outer subreg is effectively a truncation to the original mode. */
713 if (GET_CODE (op) == ASHIFT
714 && CONST_INT_P (XEXP (op, 1))
715 && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
716 || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
717 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
718 && UINTVAL (XEXP (op, 1)) < precision)
719 return simplify_gen_binary (ASHIFT, mode,
720 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
722 /* Likewise (truncate:QI (and:SI (lshiftrt:SI (x:SI) C) C2)) into
723 (and:QI (lshiftrt:QI (truncate:QI (x:SI)) C) C2) for suitable C
724 and C2. */
725 if (GET_CODE (op) == AND
726 && (GET_CODE (XEXP (op, 0)) == LSHIFTRT
727 || GET_CODE (XEXP (op, 0)) == ASHIFTRT)
728 && CONST_INT_P (XEXP (XEXP (op, 0), 1))
729 && CONST_INT_P (XEXP (op, 1)))
731 rtx op0 = (XEXP (XEXP (op, 0), 0));
732 rtx shift_op = XEXP (XEXP (op, 0), 1);
733 rtx mask_op = XEXP (op, 1);
734 unsigned HOST_WIDE_INT shift = UINTVAL (shift_op);
735 unsigned HOST_WIDE_INT mask = UINTVAL (mask_op);
737 if (shift < precision
738 /* If doing this transform works for an X with all bits set,
739 it works for any X. */
740 && ((GET_MODE_MASK (mode) >> shift) & mask)
741 == ((GET_MODE_MASK (op_mode) >> shift) & mask)
742 && (op0 = simplify_gen_unary (TRUNCATE, mode, op0, op_mode))
743 && (op0 = simplify_gen_binary (LSHIFTRT, mode, op0, shift_op)))
745 mask_op = GEN_INT (trunc_int_for_mode (mask, mode));
746 return simplify_gen_binary (AND, mode, op0, mask_op);
750 /* Turn (truncate:M1 (*_extract:M2 (reg:M2) (len) (pos))) into
751 (*_extract:M1 (truncate:M1 (reg:M2)) (len) (pos')) if possible without
752 changing len. */
753 if ((GET_CODE (op) == ZERO_EXTRACT || GET_CODE (op) == SIGN_EXTRACT)
754 && REG_P (XEXP (op, 0))
755 && GET_MODE (XEXP (op, 0)) == GET_MODE (op)
756 && CONST_INT_P (XEXP (op, 1))
757 && CONST_INT_P (XEXP (op, 2)))
759 rtx op0 = XEXP (op, 0);
760 unsigned HOST_WIDE_INT len = UINTVAL (XEXP (op, 1));
761 unsigned HOST_WIDE_INT pos = UINTVAL (XEXP (op, 2));
762 if (BITS_BIG_ENDIAN && pos >= op_precision - precision)
764 op0 = simplify_gen_unary (TRUNCATE, mode, op0, GET_MODE (op0));
765 if (op0)
767 pos -= op_precision - precision;
768 return simplify_gen_ternary (GET_CODE (op), mode, mode, op0,
769 XEXP (op, 1), GEN_INT (pos));
772 else if (!BITS_BIG_ENDIAN && precision >= len + pos)
774 op0 = simplify_gen_unary (TRUNCATE, mode, op0, GET_MODE (op0));
775 if (op0)
776 return simplify_gen_ternary (GET_CODE (op), mode, mode, op0,
777 XEXP (op, 1), XEXP (op, 2));
781 /* Recognize a word extraction from a multi-word subreg. */
782 if ((GET_CODE (op) == LSHIFTRT
783 || GET_CODE (op) == ASHIFTRT)
784 && SCALAR_INT_MODE_P (mode)
785 && SCALAR_INT_MODE_P (op_mode)
786 && precision >= BITS_PER_WORD
787 && 2 * precision <= op_precision
788 && CONST_INT_P (XEXP (op, 1))
789 && (INTVAL (XEXP (op, 1)) & (precision - 1)) == 0
790 && UINTVAL (XEXP (op, 1)) < op_precision)
792 int byte = subreg_lowpart_offset (mode, op_mode);
793 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
794 return simplify_gen_subreg (mode, XEXP (op, 0), op_mode,
795 (WORDS_BIG_ENDIAN
796 ? byte - shifted_bytes
797 : byte + shifted_bytes));
800 /* If we have a TRUNCATE of a right shift of MEM, make a new MEM
801 and try replacing the TRUNCATE and shift with it. Don't do this
802 if the MEM has a mode-dependent address. */
803 if ((GET_CODE (op) == LSHIFTRT
804 || GET_CODE (op) == ASHIFTRT)
805 && SCALAR_INT_MODE_P (op_mode)
806 && MEM_P (XEXP (op, 0))
807 && CONST_INT_P (XEXP (op, 1))
808 && (INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (mode)) == 0
809 && INTVAL (XEXP (op, 1)) > 0
810 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (op_mode)
811 && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0),
812 MEM_ADDR_SPACE (XEXP (op, 0)))
813 && ! MEM_VOLATILE_P (XEXP (op, 0))
814 && (GET_MODE_SIZE (mode) >= UNITS_PER_WORD
815 || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
817 int byte = subreg_lowpart_offset (mode, op_mode);
818 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
819 return adjust_address_nv (XEXP (op, 0), mode,
820 (WORDS_BIG_ENDIAN
821 ? byte - shifted_bytes
822 : byte + shifted_bytes));
825 /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
826 (OP:SI foo:SI) if OP is NEG or ABS. */
827 if ((GET_CODE (op) == ABS
828 || GET_CODE (op) == NEG)
829 && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
830 || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
831 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
832 return simplify_gen_unary (GET_CODE (op), mode,
833 XEXP (XEXP (op, 0), 0), mode);
835 /* (truncate:A (subreg:B (truncate:C X) 0)) is
836 (truncate:A X). */
837 if (GET_CODE (op) == SUBREG
838 && SCALAR_INT_MODE_P (mode)
839 && SCALAR_INT_MODE_P (op_mode)
840 && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (op)))
841 && GET_CODE (SUBREG_REG (op)) == TRUNCATE
842 && subreg_lowpart_p (op))
844 rtx inner = XEXP (SUBREG_REG (op), 0);
845 if (GET_MODE_PRECISION (mode)
846 <= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op))))
847 return simplify_gen_unary (TRUNCATE, mode, inner, GET_MODE (inner));
848 else
849 /* If subreg above is paradoxical and C is narrower
850 than A, return (subreg:A (truncate:C X) 0). */
851 return simplify_gen_subreg (mode, SUBREG_REG (op),
852 GET_MODE (SUBREG_REG (op)), 0);
855 /* (truncate:A (truncate:B X)) is (truncate:A X). */
856 if (GET_CODE (op) == TRUNCATE)
857 return simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0),
858 GET_MODE (XEXP (op, 0)));
  return NULL_RTX;
}
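
/* Illustrative sketch (not part of the original GCC source): the
   "distribute the truncation" rule above relies on addition, subtraction
   and multiplication commuting with truncation in modular arithmetic.
   A standalone check in plain C, using unsigned char (8 bits on the hosts
   GCC supports) as the narrow mode and unsigned int as the wide one:  */

static inline bool
example_truncation_distributes (unsigned int a, unsigned int b)
{
  unsigned char narrow_then_add = (unsigned char) ((unsigned char) a
                                                   + (unsigned char) b);
  unsigned char add_then_narrow = (unsigned char) (a + b);
  unsigned char narrow_then_mul = (unsigned char) ((unsigned char) a
                                                   * (unsigned char) b);
  unsigned char mul_then_narrow = (unsigned char) (a * b);
  return (narrow_then_add == add_then_narrow
          && narrow_then_mul == mul_then_narrow);
}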

/* Try to simplify a unary operation CODE whose output mode is to be
   MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if no simplification can be made.  */
rtx
simplify_unary_operation (enum rtx_code code, machine_mode mode,
                          rtx op, machine_mode op_mode)
{
  rtx trueop, tem;

  trueop = avoid_constant_pool_reference (op);

  tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
  if (tem)
    return tem;

  return simplify_unary_operation_1 (code, mode, op);
}

/* Return true if FLOAT or UNSIGNED_FLOAT operation OP is known
   to be exact.  */

static bool
exact_int_to_float_conversion_p (const_rtx op)
{
  int out_bits = significand_size (GET_MODE_INNER (GET_MODE (op)));
  machine_mode op0_mode = GET_MODE (XEXP (op, 0));
  /* Constants shouldn't reach here.  */
  gcc_assert (op0_mode != VOIDmode);
  int in_prec = GET_MODE_UNIT_PRECISION (op0_mode);
  int in_bits = in_prec;
  if (HWI_COMPUTABLE_MODE_P (op0_mode))
    {
      unsigned HOST_WIDE_INT nonzero = nonzero_bits (XEXP (op, 0), op0_mode);
      if (GET_CODE (op) == FLOAT)
        in_bits -= num_sign_bit_copies (XEXP (op, 0), op0_mode);
      else if (GET_CODE (op) == UNSIGNED_FLOAT)
        in_bits = wi::min_precision (wi::uhwi (nonzero, in_prec), UNSIGNED);
      else
        gcc_unreachable ();
      in_bits -= wi::ctz (wi::uhwi (nonzero, in_prec));
    }
  return in_bits <= out_bits;
}
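
/* Illustrative note (not part of the original GCC source): for IEEE single
   precision the significand provides 24 bits, so an integer converts
   exactly when its value spans at most 24 significant bits (trailing zero
   bits do not count, which is why the code subtracts wi::ctz).  A
   standalone demonstration of the boundary:  */

static inline bool
example_float_conversion_boundary (void)
{
  /* 2^24 - 1 fits in 24 bits and converts exactly; 2^24 + 1 needs 25
     significant bits and is rounded to 2^24.  */
  return ((float) 16777215 == 16777215.0
          && (float) 16777217 != 16777217.0);
}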
907 /* Perform some simplifications we can do even if the operands
908 aren't constant. */
909 static rtx
910 simplify_unary_operation_1 (enum rtx_code code, machine_mode mode, rtx op)
912 enum rtx_code reversed;
913 rtx temp;
915 switch (code)
917 case NOT:
918 /* (not (not X)) == X. */
919 if (GET_CODE (op) == NOT)
920 return XEXP (op, 0);
922 /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
923 comparison is all ones. */
924 if (COMPARISON_P (op)
925 && (mode == BImode || STORE_FLAG_VALUE == -1)
926 && ((reversed = reversed_comparison_code (op, NULL)) != UNKNOWN))
927 return simplify_gen_relational (reversed, mode, VOIDmode,
928 XEXP (op, 0), XEXP (op, 1));
930 /* (not (plus X -1)) can become (neg X). */
931 if (GET_CODE (op) == PLUS
932 && XEXP (op, 1) == constm1_rtx)
933 return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
935 /* Similarly, (not (neg X)) is (plus X -1). */
936 if (GET_CODE (op) == NEG)
937 return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
938 CONSTM1_RTX (mode));
940 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
941 if (GET_CODE (op) == XOR
942 && CONST_INT_P (XEXP (op, 1))
943 && (temp = simplify_unary_operation (NOT, mode,
944 XEXP (op, 1), mode)) != 0)
945 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
947 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
948 if (GET_CODE (op) == PLUS
949 && CONST_INT_P (XEXP (op, 1))
950 && mode_signbit_p (mode, XEXP (op, 1))
951 && (temp = simplify_unary_operation (NOT, mode,
952 XEXP (op, 1), mode)) != 0)
953 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
956 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
957 operands other than 1, but that is not valid. We could do a
958 similar simplification for (not (lshiftrt C X)) where C is
959 just the sign bit, but this doesn't seem common enough to
960 bother with. */
961 if (GET_CODE (op) == ASHIFT
962 && XEXP (op, 0) == const1_rtx)
964 temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
965 return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
968 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
969 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
970 so we can perform the above simplification. */
971 if (STORE_FLAG_VALUE == -1
972 && GET_CODE (op) == ASHIFTRT
973 && CONST_INT_P (XEXP (op, 1))
974 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
975 return simplify_gen_relational (GE, mode, VOIDmode,
976 XEXP (op, 0), const0_rtx);
979 if (GET_CODE (op) == SUBREG
980 && subreg_lowpart_p (op)
981 && (GET_MODE_SIZE (GET_MODE (op))
982 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
983 && GET_CODE (SUBREG_REG (op)) == ASHIFT
984 && XEXP (SUBREG_REG (op), 0) == const1_rtx)
986 machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
987 rtx x;
989 x = gen_rtx_ROTATE (inner_mode,
990 simplify_gen_unary (NOT, inner_mode, const1_rtx,
991 inner_mode),
992 XEXP (SUBREG_REG (op), 1));
993 temp = rtl_hooks.gen_lowpart_no_emit (mode, x);
994 if (temp)
995 return temp;
998 /* Apply De Morgan's laws to reduce number of patterns for machines
999 with negating logical insns (and-not, nand, etc.). If result has
1000 only one NOT, put it first, since that is how the patterns are
1001 coded. */
1002 if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
1004 rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
1005 machine_mode op_mode;
1007 op_mode = GET_MODE (in1);
1008 in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);
1010 op_mode = GET_MODE (in2);
1011 if (op_mode == VOIDmode)
1012 op_mode = mode;
1013 in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);
1015 if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
1016 std::swap (in1, in2);
1018 return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
1019 mode, in1, in2);
1022 /* (not (bswap x)) -> (bswap (not x)). */
1023 if (GET_CODE (op) == BSWAP)
1025 rtx x = simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
1026 return simplify_gen_unary (BSWAP, mode, x, mode);
1028 break;
1030 case NEG:
1031 /* (neg (neg X)) == X. */
1032 if (GET_CODE (op) == NEG)
1033 return XEXP (op, 0);
1035 /* (neg (x ? (neg y) : y)) == !x ? (neg y) : y.
1036 If comparison is not reversible use
1037 x ? y : (neg y). */
1038 if (GET_CODE (op) == IF_THEN_ELSE)
1040 rtx cond = XEXP (op, 0);
1041 rtx true_rtx = XEXP (op, 1);
1042 rtx false_rtx = XEXP (op, 2);
1044 if ((GET_CODE (true_rtx) == NEG
1045 && rtx_equal_p (XEXP (true_rtx, 0), false_rtx))
1046 || (GET_CODE (false_rtx) == NEG
1047 && rtx_equal_p (XEXP (false_rtx, 0), true_rtx)))
1049 if (reversed_comparison_code (cond, NULL) != UNKNOWN)
1050 temp = reversed_comparison (cond, mode);
1051 else
1053 temp = cond;
1054 std::swap (true_rtx, false_rtx);
1056 return simplify_gen_ternary (IF_THEN_ELSE, mode,
1057 mode, temp, true_rtx, false_rtx);
1061 /* (neg (plus X 1)) can become (not X). */
1062 if (GET_CODE (op) == PLUS
1063 && XEXP (op, 1) == const1_rtx)
1064 return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
1066 /* Similarly, (neg (not X)) is (plus X 1). */
1067 if (GET_CODE (op) == NOT)
1068 return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
1069 CONST1_RTX (mode));
1071 /* (neg (minus X Y)) can become (minus Y X). This transformation
1072 isn't safe for modes with signed zeros, since if X and Y are
1073 both +0, (minus Y X) is the same as (minus X Y). If the
1074 rounding mode is towards +infinity (or -infinity) then the two
1075 expressions will be rounded differently. */
1076 if (GET_CODE (op) == MINUS
1077 && !HONOR_SIGNED_ZEROS (mode)
1078 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1079 return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
1081 if (GET_CODE (op) == PLUS
1082 && !HONOR_SIGNED_ZEROS (mode)
1083 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1085 /* (neg (plus A C)) is simplified to (minus -C A). */
1086 if (CONST_SCALAR_INT_P (XEXP (op, 1))
1087 || CONST_DOUBLE_AS_FLOAT_P (XEXP (op, 1)))
1089 temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
1090 if (temp)
1091 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
1094 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
1095 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
1096 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
1099 /* (neg (mult A B)) becomes (mult A (neg B)).
1100 This works even for floating-point values. */
1101 if (GET_CODE (op) == MULT
1102 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1104 temp = simplify_gen_unary (NEG, mode, XEXP (op, 1), mode);
1105 return simplify_gen_binary (MULT, mode, XEXP (op, 0), temp);
1108 /* NEG commutes with ASHIFT since it is multiplication. Only do
1109 this if we can then eliminate the NEG (e.g., if the operand
1110 is a constant). */
1111 if (GET_CODE (op) == ASHIFT)
1113 temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
1114 if (temp)
1115 return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
1118 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
1119 C is equal to the width of MODE minus 1. */
1120 if (GET_CODE (op) == ASHIFTRT
1121 && CONST_INT_P (XEXP (op, 1))
1122 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
1123 return simplify_gen_binary (LSHIFTRT, mode,
1124 XEXP (op, 0), XEXP (op, 1));
1126 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
1127 C is equal to the width of MODE minus 1. */
1128 if (GET_CODE (op) == LSHIFTRT
1129 && CONST_INT_P (XEXP (op, 1))
1130 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
1131 return simplify_gen_binary (ASHIFTRT, mode,
1132 XEXP (op, 0), XEXP (op, 1));
1134 /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
1135 if (GET_CODE (op) == XOR
1136 && XEXP (op, 1) == const1_rtx
1137 && nonzero_bits (XEXP (op, 0), mode) == 1)
1138 return plus_constant (mode, XEXP (op, 0), -1);
1140 /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1. */
1141 /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1. */
1142 if (GET_CODE (op) == LT
1143 && XEXP (op, 1) == const0_rtx
1144 && SCALAR_INT_MODE_P (GET_MODE (XEXP (op, 0))))
1146 machine_mode inner = GET_MODE (XEXP (op, 0));
1147 int isize = GET_MODE_PRECISION (inner);
1148 if (STORE_FLAG_VALUE == 1)
1150 temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
1151 GEN_INT (isize - 1));
1152 if (mode == inner)
1153 return temp;
1154 if (GET_MODE_PRECISION (mode) > isize)
1155 return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
1156 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
1158 else if (STORE_FLAG_VALUE == -1)
1160 temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
1161 GEN_INT (isize - 1));
1162 if (mode == inner)
1163 return temp;
1164 if (GET_MODE_PRECISION (mode) > isize)
1165 return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
1166 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
1169 break;
1171 case TRUNCATE:
1172 /* Don't optimize (lshiftrt (mult ...)) as it would interfere
1173 with the umulXi3_highpart patterns. */
1174 if (GET_CODE (op) == LSHIFTRT
1175 && GET_CODE (XEXP (op, 0)) == MULT)
1176 break;
1178 if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
1180 if (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op)))
1182 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1183 if (temp)
1184 return temp;
1186 /* We can't handle truncation to a partial integer mode here
1187 because we don't know the real bitsize of the partial
1188 integer mode. */
1189 break;
1192 if (GET_MODE (op) != VOIDmode)
1194 temp = simplify_truncation (mode, op, GET_MODE (op));
1195 if (temp)
1196 return temp;
1199 /* If we know that the value is already truncated, we can
1200 replace the TRUNCATE with a SUBREG. */
1201 if (GET_MODE_NUNITS (mode) == 1
1202 && (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op))
1203 || truncated_to_mode (mode, op)))
1205 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1206 if (temp)
1207 return temp;
1210 /* A truncate of a comparison can be replaced with a subreg if
1211 STORE_FLAG_VALUE permits. This is like the previous test,
1212 but it works even if the comparison is done in a mode larger
1213 than HOST_BITS_PER_WIDE_INT. */
1214 if (HWI_COMPUTABLE_MODE_P (mode)
1215 && COMPARISON_P (op)
1216 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
1218 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1219 if (temp)
1220 return temp;
1223 /* A truncate of a memory is just loading the low part of the memory
1224 if we are not changing the meaning of the address. */
1225 if (GET_CODE (op) == MEM
1226 && !VECTOR_MODE_P (mode)
1227 && !MEM_VOLATILE_P (op)
1228 && !mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op)))
1230 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1231 if (temp)
1232 return temp;
1235 break;
1237 case FLOAT_TRUNCATE:
1238 if (DECIMAL_FLOAT_MODE_P (mode))
1239 break;
1241 /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */
1242 if (GET_CODE (op) == FLOAT_EXTEND
1243 && GET_MODE (XEXP (op, 0)) == mode)
1244 return XEXP (op, 0);
1246 /* (float_truncate:SF (float_truncate:DF foo:XF))
1247 = (float_truncate:SF foo:XF).
1248 This may eliminate double rounding, so it is unsafe.
1250 (float_truncate:SF (float_extend:XF foo:DF))
1251 = (float_truncate:SF foo:DF).
1253 (float_truncate:DF (float_extend:XF foo:SF))
1254 = (float_extend:DF foo:SF). */
1255 if ((GET_CODE (op) == FLOAT_TRUNCATE
1256 && flag_unsafe_math_optimizations)
1257 || GET_CODE (op) == FLOAT_EXTEND)
1258 return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op,
1259 0)))
1260 > GET_MODE_SIZE (mode)
1261 ? FLOAT_TRUNCATE : FLOAT_EXTEND,
1262 mode,
1263 XEXP (op, 0), mode);
1265 /* (float_truncate (float x)) is (float x) */
1266 if ((GET_CODE (op) == FLOAT || GET_CODE (op) == UNSIGNED_FLOAT)
1267 && (flag_unsafe_math_optimizations
1268 || exact_int_to_float_conversion_p (op)))
1269 return simplify_gen_unary (GET_CODE (op), mode,
1270 XEXP (op, 0),
1271 GET_MODE (XEXP (op, 0)));
1273 /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
1274 (OP:SF foo:SF) if OP is NEG or ABS. */
1275 if ((GET_CODE (op) == ABS
1276 || GET_CODE (op) == NEG)
1277 && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
1278 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
1279 return simplify_gen_unary (GET_CODE (op), mode,
1280 XEXP (XEXP (op, 0), 0), mode);
1282 /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
1283 is (float_truncate:SF x). */
1284 if (GET_CODE (op) == SUBREG
1285 && subreg_lowpart_p (op)
1286 && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
1287 return SUBREG_REG (op);
1288 break;
1290 case FLOAT_EXTEND:
1291 if (DECIMAL_FLOAT_MODE_P (mode))
1292 break;
      /* (float_extend (float_extend x)) is (float_extend x)

	 (float_extend (float x)) is (float x) assuming that double
	 rounding can't happen.  */
1299 if (GET_CODE (op) == FLOAT_EXTEND
1300 || ((GET_CODE (op) == FLOAT || GET_CODE (op) == UNSIGNED_FLOAT)
1301 && exact_int_to_float_conversion_p (op)))
1302 return simplify_gen_unary (GET_CODE (op), mode,
1303 XEXP (op, 0),
1304 GET_MODE (XEXP (op, 0)));
1306 break;
1308 case ABS:
1309 /* (abs (neg <foo>)) -> (abs <foo>) */
1310 if (GET_CODE (op) == NEG)
1311 return simplify_gen_unary (ABS, mode, XEXP (op, 0),
1312 GET_MODE (XEXP (op, 0)));
1314 /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
1315 do nothing. */
1316 if (GET_MODE (op) == VOIDmode)
1317 break;
1319 /* If operand is something known to be positive, ignore the ABS. */
1320 if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
1321 || val_signbit_known_clear_p (GET_MODE (op),
1322 nonzero_bits (op, GET_MODE (op))))
1323 return op;
1325 /* If operand is known to be only -1 or 0, convert ABS to NEG. */
1326 if (num_sign_bit_copies (op, mode) == GET_MODE_PRECISION (mode))
1327 return gen_rtx_NEG (mode, op);
1329 break;
1331 case FFS:
1332 /* (ffs (*_extend <X>)) = (ffs <X>) */
1333 if (GET_CODE (op) == SIGN_EXTEND
1334 || GET_CODE (op) == ZERO_EXTEND)
1335 return simplify_gen_unary (FFS, mode, XEXP (op, 0),
1336 GET_MODE (XEXP (op, 0)));
1337 break;
1339 case POPCOUNT:
1340 switch (GET_CODE (op))
1342 case BSWAP:
1343 case ZERO_EXTEND:
1344 /* (popcount (zero_extend <X>)) = (popcount <X>) */
1345 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1346 GET_MODE (XEXP (op, 0)));
1348 case ROTATE:
1349 case ROTATERT:
1350 /* Rotations don't affect popcount. */
1351 if (!side_effects_p (XEXP (op, 1)))
1352 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1353 GET_MODE (XEXP (op, 0)));
1354 break;
1356 default:
1357 break;
1359 break;
1361 case PARITY:
1362 switch (GET_CODE (op))
1364 case NOT:
1365 case BSWAP:
1366 case ZERO_EXTEND:
1367 case SIGN_EXTEND:
1368 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1369 GET_MODE (XEXP (op, 0)));
1371 case ROTATE:
1372 case ROTATERT:
1373 /* Rotations don't affect parity. */
1374 if (!side_effects_p (XEXP (op, 1)))
1375 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1376 GET_MODE (XEXP (op, 0)));
1377 break;
1379 default:
1380 break;
1382 break;
1384 case BSWAP:
1385 /* (bswap (bswap x)) -> x. */
1386 if (GET_CODE (op) == BSWAP)
1387 return XEXP (op, 0);
1388 break;
1390 case FLOAT:
1391 /* (float (sign_extend <X>)) = (float <X>). */
1392 if (GET_CODE (op) == SIGN_EXTEND)
1393 return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
1394 GET_MODE (XEXP (op, 0)));
1395 break;
1397 case SIGN_EXTEND:
1398 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
1399 becomes just the MINUS if its mode is MODE. This allows
1400 folding switch statements on machines using casesi (such as
1401 the VAX). */
1402 if (GET_CODE (op) == TRUNCATE
1403 && GET_MODE (XEXP (op, 0)) == mode
1404 && GET_CODE (XEXP (op, 0)) == MINUS
1405 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
1406 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
1407 return XEXP (op, 0);
1409 /* Extending a widening multiplication should be canonicalized to
1410 a wider widening multiplication. */
1411 if (GET_CODE (op) == MULT)
1413 rtx lhs = XEXP (op, 0);
1414 rtx rhs = XEXP (op, 1);
1415 enum rtx_code lcode = GET_CODE (lhs);
1416 enum rtx_code rcode = GET_CODE (rhs);
1418 /* Widening multiplies usually extend both operands, but sometimes
1419 they use a shift to extract a portion of a register. */
1420 if ((lcode == SIGN_EXTEND
1421 || (lcode == ASHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1422 && (rcode == SIGN_EXTEND
1423 || (rcode == ASHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1425 machine_mode lmode = GET_MODE (lhs);
1426 machine_mode rmode = GET_MODE (rhs);
1427 int bits;
1429 if (lcode == ASHIFTRT)
1430 /* Number of bits not shifted off the end. */
1431 bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
1432 else /* lcode == SIGN_EXTEND */
1433 /* Size of inner mode. */
1434 bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
1436 if (rcode == ASHIFTRT)
1437 bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
1438 else /* rcode == SIGN_EXTEND */
1439 bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
	      /* We can only widen multiplies if the result is mathematically
		 equivalent.  I.e. if overflow was impossible.  */
1443 if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
1444 return simplify_gen_binary
1445 (MULT, mode,
1446 simplify_gen_unary (SIGN_EXTEND, mode, lhs, lmode),
1447 simplify_gen_unary (SIGN_EXTEND, mode, rhs, rmode));
1451 /* Check for a sign extension of a subreg of a promoted
1452 variable, where the promotion is sign-extended, and the
1453 target mode is the same as the variable's promotion. */
1454 if (GET_CODE (op) == SUBREG
1455 && SUBREG_PROMOTED_VAR_P (op)
1456 && SUBREG_PROMOTED_SIGNED_P (op)
1457 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
1459 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1460 if (temp)
1461 return temp;
1464 /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
1465 (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1466 if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
1468 gcc_assert (GET_MODE_PRECISION (mode)
1469 > GET_MODE_PRECISION (GET_MODE (op)));
1470 return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
1471 GET_MODE (XEXP (op, 0)));
1474 /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
1475 is (sign_extend:M (subreg:O <X>)) if there is mode with
1476 GET_MODE_BITSIZE (N) - I bits.
1477 (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1478 is similarly (zero_extend:M (subreg:O <X>)). */
1479 if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT)
1480 && GET_CODE (XEXP (op, 0)) == ASHIFT
1481 && CONST_INT_P (XEXP (op, 1))
1482 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1483 && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
1485 machine_mode tmode
1486 = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
1487 - INTVAL (XEXP (op, 1)), MODE_INT, 1);
1488 gcc_assert (GET_MODE_BITSIZE (mode)
1489 > GET_MODE_BITSIZE (GET_MODE (op)));
1490 if (tmode != BLKmode)
1492 rtx inner =
1493 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1494 if (inner)
1495 return simplify_gen_unary (GET_CODE (op) == ASHIFTRT
1496 ? SIGN_EXTEND : ZERO_EXTEND,
1497 mode, inner, tmode);
1501 /* (sign_extend:M (lshiftrt:N <X> (const_int I))) is better as
1502 (zero_extend:M (lshiftrt:N <X> (const_int I))) if I is not 0. */
1503 if (GET_CODE (op) == LSHIFTRT
1504 && CONST_INT_P (XEXP (op, 1))
1505 && XEXP (op, 1) != const0_rtx)
1506 return simplify_gen_unary (ZERO_EXTEND, mode, op, GET_MODE (op));
1508 #if defined(POINTERS_EXTEND_UNSIGNED)
1509 /* As we do not know which address space the pointer is referring to,
1510 we can do this only if the target does not support different pointer
1511 or address modes depending on the address space. */
1512 if (target_default_pointer_address_modes_p ()
1513 && ! POINTERS_EXTEND_UNSIGNED
1514 && mode == Pmode && GET_MODE (op) == ptr_mode
1515 && (CONSTANT_P (op)
1516 || (GET_CODE (op) == SUBREG
1517 && REG_P (SUBREG_REG (op))
1518 && REG_POINTER (SUBREG_REG (op))
1519 && GET_MODE (SUBREG_REG (op)) == Pmode))
1520 && !targetm.have_ptr_extend ())
1522 temp
1523 = convert_memory_address_addr_space_1 (Pmode, op,
1524 ADDR_SPACE_GENERIC, false,
1525 true);
1526 if (temp)
1527 return temp;
1529 #endif
1530 break;
1532 case ZERO_EXTEND:
1533 /* Check for a zero extension of a subreg of a promoted
1534 variable, where the promotion is zero-extended, and the
1535 target mode is the same as the variable's promotion. */
1536 if (GET_CODE (op) == SUBREG
1537 && SUBREG_PROMOTED_VAR_P (op)
1538 && SUBREG_PROMOTED_UNSIGNED_P (op)
1539 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
1541 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1542 if (temp)
1543 return temp;
1546 /* Extending a widening multiplication should be canonicalized to
1547 a wider widening multiplication. */
1548 if (GET_CODE (op) == MULT)
1550 rtx lhs = XEXP (op, 0);
1551 rtx rhs = XEXP (op, 1);
1552 enum rtx_code lcode = GET_CODE (lhs);
1553 enum rtx_code rcode = GET_CODE (rhs);
1555 /* Widening multiplies usually extend both operands, but sometimes
1556 they use a shift to extract a portion of a register. */
1557 if ((lcode == ZERO_EXTEND
1558 || (lcode == LSHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1559 && (rcode == ZERO_EXTEND
1560 || (rcode == LSHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1562 machine_mode lmode = GET_MODE (lhs);
1563 machine_mode rmode = GET_MODE (rhs);
1564 int bits;
1566 if (lcode == LSHIFTRT)
1567 /* Number of bits not shifted off the end. */
1568 bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
1569 else /* lcode == ZERO_EXTEND */
1570 /* Size of inner mode. */
1571 bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
1573 if (rcode == LSHIFTRT)
1574 bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
1575 else /* rcode == ZERO_EXTEND */
1576 bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
	      /* We can only widen multiplies if the result is mathematically
		 equivalent.  I.e. if overflow was impossible.  */
1580 if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
1581 return simplify_gen_binary
1582 (MULT, mode,
1583 simplify_gen_unary (ZERO_EXTEND, mode, lhs, lmode),
1584 simplify_gen_unary (ZERO_EXTEND, mode, rhs, rmode));
1588 /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1589 if (GET_CODE (op) == ZERO_EXTEND)
1590 return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0),
1591 GET_MODE (XEXP (op, 0)));
1593 /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1594 is (zero_extend:M (subreg:O <X>)) if there is mode with
1595 GET_MODE_PRECISION (N) - I bits. */
1596 if (GET_CODE (op) == LSHIFTRT
1597 && GET_CODE (XEXP (op, 0)) == ASHIFT
1598 && CONST_INT_P (XEXP (op, 1))
1599 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1600 && GET_MODE_PRECISION (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
1602 machine_mode tmode
1603 = mode_for_size (GET_MODE_PRECISION (GET_MODE (op))
1604 - INTVAL (XEXP (op, 1)), MODE_INT, 1);
1605 if (tmode != BLKmode)
1607 rtx inner =
1608 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1609 if (inner)
1610 return simplify_gen_unary (ZERO_EXTEND, mode, inner, tmode);
1614 /* (zero_extend:M (subreg:N <X:O>)) is <X:O> (for M == O) or
1615 (zero_extend:M <X:O>), if X doesn't have any non-zero bits outside
1616 of mode N. E.g.
1617 (zero_extend:SI (subreg:QI (and:SI (reg:SI) (const_int 63)) 0)) is
1618 (and:SI (reg:SI) (const_int 63)). */
1619 if (GET_CODE (op) == SUBREG
1620 && GET_MODE_PRECISION (GET_MODE (op))
1621 < GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
1622 && GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
1623 <= HOST_BITS_PER_WIDE_INT
1624 && GET_MODE_PRECISION (mode)
1625 >= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
1626 && subreg_lowpart_p (op)
1627 && (nonzero_bits (SUBREG_REG (op), GET_MODE (SUBREG_REG (op)))
1628 & ~GET_MODE_MASK (GET_MODE (op))) == 0)
1630 if (GET_MODE_PRECISION (mode)
1631 == GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op))))
1632 return SUBREG_REG (op);
1633 return simplify_gen_unary (ZERO_EXTEND, mode, SUBREG_REG (op),
1634 GET_MODE (SUBREG_REG (op)));
1637 #if defined(POINTERS_EXTEND_UNSIGNED)
1638 /* As we do not know which address space the pointer is referring to,
1639 we can do this only if the target does not support different pointer
1640 or address modes depending on the address space. */
1641 if (target_default_pointer_address_modes_p ()
1642 && POINTERS_EXTEND_UNSIGNED > 0
1643 && mode == Pmode && GET_MODE (op) == ptr_mode
1644 && (CONSTANT_P (op)
1645 || (GET_CODE (op) == SUBREG
1646 && REG_P (SUBREG_REG (op))
1647 && REG_POINTER (SUBREG_REG (op))
1648 && GET_MODE (SUBREG_REG (op)) == Pmode))
1649 && !targetm.have_ptr_extend ())
1651 temp
1652 = convert_memory_address_addr_space_1 (Pmode, op,
1653 ADDR_SPACE_GENERIC, false,
1654 true);
1655 if (temp)
1656 return temp;
1658 #endif
1659 break;
1661 default:
1662 break;
  return 0;
}
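
/* Illustrative sketch (not part of the original GCC source): several of the
   NOT/NEG rules above are the familiar two's-complement identities
   ~X == -X - 1 and -X == ~X + 1, which is why (not (plus X -1)) becomes
   (neg X) and (neg (not X)) becomes (plus X 1).  A standalone check in
   plain C on unsigned (wrapping) arithmetic:  */

static inline bool
example_not_neg_identities (unsigned int x)
{
  return ~(x - 1) == -x && -~x == x + 1;
}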
1668 /* Try to compute the value of a unary operation CODE whose output mode is to
1669 be MODE with input operand OP whose mode was originally OP_MODE.
1670 Return zero if the value cannot be computed. */
1672 simplify_const_unary_operation (enum rtx_code code, machine_mode mode,
1673 rtx op, machine_mode op_mode)
1675 unsigned int width = GET_MODE_PRECISION (mode);
1677 if (code == VEC_DUPLICATE)
1679 gcc_assert (VECTOR_MODE_P (mode));
1680 if (GET_MODE (op) != VOIDmode)
1682 if (!VECTOR_MODE_P (GET_MODE (op)))
1683 gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
1684 else
1685 gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
1686 (GET_MODE (op)));
1688 if (CONST_SCALAR_INT_P (op) || CONST_DOUBLE_AS_FLOAT_P (op)
1689 || GET_CODE (op) == CONST_VECTOR)
1691 int elt_size = GET_MODE_UNIT_SIZE (mode);
1692 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1693 rtvec v = rtvec_alloc (n_elts);
1694 unsigned int i;
1696 if (GET_CODE (op) != CONST_VECTOR)
1697 for (i = 0; i < n_elts; i++)
1698 RTVEC_ELT (v, i) = op;
1699 else
1701 machine_mode inmode = GET_MODE (op);
1702 int in_elt_size = GET_MODE_UNIT_SIZE (inmode);
1703 unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
1705 gcc_assert (in_n_elts < n_elts);
1706 gcc_assert ((n_elts % in_n_elts) == 0);
1707 for (i = 0; i < n_elts; i++)
1708 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
1710 return gen_rtx_CONST_VECTOR (mode, v);
1714 if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
1716 int elt_size = GET_MODE_UNIT_SIZE (mode);
1717 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1718 machine_mode opmode = GET_MODE (op);
1719 int op_elt_size = GET_MODE_UNIT_SIZE (opmode);
1720 unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
1721 rtvec v = rtvec_alloc (n_elts);
1722 unsigned int i;
1724 gcc_assert (op_n_elts == n_elts);
1725 for (i = 0; i < n_elts; i++)
1727 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
1728 CONST_VECTOR_ELT (op, i),
1729 GET_MODE_INNER (opmode));
1730 if (!x)
1731 return 0;
1732 RTVEC_ELT (v, i) = x;
1734 return gen_rtx_CONST_VECTOR (mode, v);
1737 /* The order of these tests is critical so that, for example, we don't
1738 check the wrong mode (input vs. output) for a conversion operation,
1739 such as FIX. At some point, this should be simplified. */
1741 if (code == FLOAT && CONST_SCALAR_INT_P (op))
1743 REAL_VALUE_TYPE d;
      if (op_mode == VOIDmode)
	{
	  /* CONST_INTs have VOIDmode as their mode.  We assume that all
	     the bits of the constant are significant, though this is a
	     dangerous assumption: CONST_INTs are often created and used
	     with garbage in the bits outside the precision of their
	     implied mode.  */
	  op_mode = MAX_MODE_INT;
	}
1755 real_from_integer (&d, mode, rtx_mode_t (op, op_mode), SIGNED);
1757 /* Avoid the folding if flag_signaling_nans is on and
1758 operand is a signaling NaN. */
1759 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1760 return 0;
1762 d = real_value_truncate (mode, d);
1763 return const_double_from_real_value (d, mode);
1765 else if (code == UNSIGNED_FLOAT && CONST_SCALAR_INT_P (op))
1767 REAL_VALUE_TYPE d;
      if (op_mode == VOIDmode)
	{
	  /* CONST_INTs have VOIDmode as their mode.  We assume that all
	     the bits of the constant are significant, though this is a
	     dangerous assumption: CONST_INTs are often created and used
	     with garbage in the bits outside the precision of their
	     implied mode.  */
	  op_mode = MAX_MODE_INT;
	}
1779 real_from_integer (&d, mode, rtx_mode_t (op, op_mode), UNSIGNED);
1781 /* Avoid the folding if flag_signaling_nans is on and
1782 operand is a signaling NaN. */
1783 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1784 return 0;
1786 d = real_value_truncate (mode, d);
1787 return const_double_from_real_value (d, mode);
1790 if (CONST_SCALAR_INT_P (op) && width > 0)
1792 wide_int result;
1793 machine_mode imode = op_mode == VOIDmode ? mode : op_mode;
1794 rtx_mode_t op0 = rtx_mode_t (op, imode);
1795 int int_value;
1797 #if TARGET_SUPPORTS_WIDE_INT == 0
1798 /* This assert keeps the simplification from producing a result
1799 that cannot be represented in a CONST_DOUBLE but a lot of
1800 upstream callers expect that this function never fails to
1801 simplify something, so if you added this to the test
1802 above, the code would die later anyway. If this assert
1803 happens, you just need to make the port support wide int. */
1804 gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT);
1805 #endif
1807 switch (code)
1809 case NOT:
1810 result = wi::bit_not (op0);
1811 break;
1813 case NEG:
1814 result = wi::neg (op0);
1815 break;
1817 case ABS:
1818 result = wi::abs (op0);
1819 break;
1821 case FFS:
1822 result = wi::shwi (wi::ffs (op0), mode);
1823 break;
1825 case CLZ:
1826 if (wi::ne_p (op0, 0))
1827 int_value = wi::clz (op0);
1828 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, int_value))
1829 int_value = GET_MODE_PRECISION (mode);
1830 result = wi::shwi (int_value, mode);
1831 break;
1833 case CLRSB:
1834 result = wi::shwi (wi::clrsb (op0), mode);
1835 break;
1837 case CTZ:
1838 if (wi::ne_p (op0, 0))
1839 int_value = wi::ctz (op0);
1840 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, int_value))
1841 int_value = GET_MODE_PRECISION (mode);
1842 result = wi::shwi (int_value, mode);
1843 break;
1845 case POPCOUNT:
1846 result = wi::shwi (wi::popcount (op0), mode);
1847 break;
1849 case PARITY:
1850 result = wi::shwi (wi::parity (op0), mode);
1851 break;
1853 case BSWAP:
1854 result = wide_int (op0).bswap ();
1855 break;
1857 case TRUNCATE:
1858 case ZERO_EXTEND:
1859 result = wide_int::from (op0, width, UNSIGNED);
1860 break;
1862 case SIGN_EXTEND:
1863 result = wide_int::from (op0, width, SIGNED);
1864 break;
1866 case SQRT:
1867 default:
1868 return 0;
1871 return immed_wide_int_const (result, mode);
1874 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1875 && SCALAR_FLOAT_MODE_P (mode)
1876 && SCALAR_FLOAT_MODE_P (GET_MODE (op)))
1878 REAL_VALUE_TYPE d = *CONST_DOUBLE_REAL_VALUE (op);
1879 switch (code)
1881 case SQRT:
1882 return 0;
1883 case ABS:
1884 d = real_value_abs (&d);
1885 break;
1886 case NEG:
1887 d = real_value_negate (&d);
1888 break;
1889 case FLOAT_TRUNCATE:
1890 /* Don't perform the operation if flag_signaling_nans is on
1891 and the operand is a signaling NaN. */
1892 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1893 return NULL_RTX;
1894 d = real_value_truncate (mode, d);
1895 break;
1896 case FLOAT_EXTEND:
1897 /* Don't perform the operation if flag_signaling_nans is on
1898 and the operand is a signaling NaN. */
1899 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1900 return NULL_RTX;
1901 /* All this does is change the mode, unless changing
1902 mode class. */
1903 if (GET_MODE_CLASS (mode) != GET_MODE_CLASS (GET_MODE (op)))
1904 real_convert (&d, mode, &d);
1905 break;
1906 case FIX:
1907 /* Don't perform the operation if flag_signaling_nans is on
1908 and the operand is a signaling NaN. */
1909 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1910 return NULL_RTX;
1911 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
1912 break;
1913 case NOT:
1915 long tmp[4];
1916 int i;
1918 real_to_target (tmp, &d, GET_MODE (op));
1919 for (i = 0; i < 4; i++)
1920 tmp[i] = ~tmp[i];
1921 real_from_target (&d, tmp, mode);
1922 break;
1924 default:
1925 gcc_unreachable ();
1927 return const_double_from_real_value (d, mode);
1929 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1930 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1931 && GET_MODE_CLASS (mode) == MODE_INT
1932 && width > 0)
1934 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1935 operators are intentionally left unspecified (to ease implementation
1936 by target backends), for consistency, this routine implements the
1937 same semantics for constant folding as used by the middle-end. */
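/* For example, with a 32-bit integer target mode these semantics fold
   (fix:SI X), where X is the double constant 3.0e10, to the saturated
   value 2147483647, and fold a NaN operand to 0.  */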
1939 /* This was formerly used only for non-IEEE float.
1940 eggert@twinsun.com says it is safe for IEEE also. */
1941 REAL_VALUE_TYPE t;
1942 const REAL_VALUE_TYPE *x = CONST_DOUBLE_REAL_VALUE (op);
1943 wide_int wmax, wmin;
1944 /* This is part of the ABI of real_to_integer, but we check
1945 things before making this call. */
1946 bool fail;
1948 switch (code)
1950 case FIX:
1951 if (REAL_VALUE_ISNAN (*x))
1952 return const0_rtx;
1954 /* Test against the signed upper bound. */
1955 wmax = wi::max_value (width, SIGNED);
1956 real_from_integer (&t, VOIDmode, wmax, SIGNED);
1957 if (real_less (&t, x))
1958 return immed_wide_int_const (wmax, mode);
1960 /* Test against the signed lower bound. */
1961 wmin = wi::min_value (width, SIGNED);
1962 real_from_integer (&t, VOIDmode, wmin, SIGNED);
1963 if (real_less (x, &t))
1964 return immed_wide_int_const (wmin, mode);
1966 return immed_wide_int_const (real_to_integer (x, &fail, width),
1967 mode);
1969 case UNSIGNED_FIX:
1970 if (REAL_VALUE_ISNAN (*x) || REAL_VALUE_NEGATIVE (*x))
1971 return const0_rtx;
1973 /* Test against the unsigned upper bound. */
1974 wmax = wi::max_value (width, UNSIGNED);
1975 real_from_integer (&t, VOIDmode, wmax, UNSIGNED);
1976 if (real_less (&t, x))
1977 return immed_wide_int_const (wmax, mode);
1979 return immed_wide_int_const (real_to_integer (x, &fail, width),
1980 mode);
1982 default:
1983 gcc_unreachable ();
1987 return NULL_RTX;
1990 /* Subroutine of simplify_binary_operation to simplify a binary operation
1991 CODE that can commute with byte swapping, with result mode MODE and
1992 operating on OP0 and OP1. CODE is currently one of AND, IOR or XOR.
1993 Return zero if no simplification or canonicalization is possible. */
1995 static rtx
1996 simplify_byte_swapping_operation (enum rtx_code code, machine_mode mode,
1997 rtx op0, rtx op1)
1999 rtx tem;
2001 /* (op (bswap x) C1) -> (bswap (op x C2)) with C2 swapped. */
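/* For example, in SImode (and (bswap x) (const_int 0xff)) becomes
   (bswap (and x (const_int 0xff000000))), since the byte-swapped
   form of 0xff is 0xff000000.  */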
2002 if (GET_CODE (op0) == BSWAP && CONST_SCALAR_INT_P (op1))
2004 tem = simplify_gen_binary (code, mode, XEXP (op0, 0),
2005 simplify_gen_unary (BSWAP, mode, op1, mode));
2006 return simplify_gen_unary (BSWAP, mode, tem, mode);
2009 /* (op (bswap x) (bswap y)) -> (bswap (op x y)). */
2010 if (GET_CODE (op0) == BSWAP && GET_CODE (op1) == BSWAP)
2012 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), XEXP (op1, 0));
2013 return simplify_gen_unary (BSWAP, mode, tem, mode);
2016 return NULL_RTX;
2019 /* Subroutine of simplify_binary_operation to simplify a commutative,
2020 associative binary operation CODE with result mode MODE, operating
2021 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
2022 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
2023 canonicalization is possible. */
2025 static rtx
2026 simplify_associative_operation (enum rtx_code code, machine_mode mode,
2027 rtx op0, rtx op1)
2029 rtx tem;
2031 /* Linearize the operator to the left. */
2032 if (GET_CODE (op1) == code)
2034 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)". */
2035 if (GET_CODE (op0) == code)
2037 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
2038 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
2041 /* "a op (b op c)" becomes "(b op c) op a". */
2042 if (! swap_commutative_operands_p (op1, op0))
2043 return simplify_gen_binary (code, mode, op1, op0);
2045 std::swap (op0, op1);
2048 if (GET_CODE (op0) == code)
2050 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
2051 if (swap_commutative_operands_p (XEXP (op0, 1), op1))
2053 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
2054 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
2057 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
2058 tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
2059 if (tem != 0)
2060 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
2062 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
2063 tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
2064 if (tem != 0)
2065 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
2068 return 0;
2072 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
2073 and OP1. Return 0 if no simplification is possible.
2075 Don't use this for relational operations such as EQ or LT.
2076 Use simplify_relational_operation instead. */
2078 simplify_binary_operation (enum rtx_code code, machine_mode mode,
2079 rtx op0, rtx op1)
2081 rtx trueop0, trueop1;
2082 rtx tem;
2084 /* Relational operations don't work here. We must know the mode
2085 of the operands in order to do the comparison correctly.
2086 Assuming a full word can give incorrect results.
2087 Consider comparing 128 with -128 in QImode. */
2088 gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
2089 gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
2091 /* Make sure the constant is second. */
2092 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
2093 && swap_commutative_operands_p (op0, op1))
2094 std::swap (op0, op1);
2096 trueop0 = avoid_constant_pool_reference (op0);
2097 trueop1 = avoid_constant_pool_reference (op1);
2099 tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
2100 if (tem)
2101 return tem;
2102 tem = simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
2104 if (tem)
2105 return tem;
2107 /* If the above steps did not result in a simplification and op0 or op1
2108 were constant pool references, use the referenced constants directly. */
2109 if (trueop0 != op0 || trueop1 != op1)
2110 return simplify_gen_binary (code, mode, trueop0, trueop1);
2112 return NULL_RTX;
2115 /* Subroutine of simplify_binary_operation. Simplify a binary operation
2116 CODE with result mode MODE, operating on OP0 and OP1. If OP0 and/or
2117 OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
2118 actual constants. */
2120 static rtx
2121 simplify_binary_operation_1 (enum rtx_code code, machine_mode mode,
2122 rtx op0, rtx op1, rtx trueop0, rtx trueop1)
2124 rtx tem, reversed, opleft, opright;
2125 HOST_WIDE_INT val;
2126 unsigned int width = GET_MODE_PRECISION (mode);
2128 /* Even if we can't compute a constant result,
2129 there are some cases worth simplifying. */
2131 switch (code)
2133 case PLUS:
2134 /* Maybe simplify x + 0 to x. The two expressions are equivalent
2135 when x is NaN, infinite, or finite and nonzero. They aren't
2136 when x is -0 and the rounding mode is not towards -infinity,
2137 since (-0) + 0 is then 0. */
2138 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
2139 return op0;
2141 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
2142 transformations are safe even for IEEE. */
2143 if (GET_CODE (op0) == NEG)
2144 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
2145 else if (GET_CODE (op1) == NEG)
2146 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
2148 /* (~a) + 1 -> -a */
2149 if (INTEGRAL_MODE_P (mode)
2150 && GET_CODE (op0) == NOT
2151 && trueop1 == const1_rtx)
2152 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
2154 /* Handle both-operands-constant cases. We can only add
2155 CONST_INTs to constants since the sum of relocatable symbols
2156 can't be handled by most assemblers. Don't add CONST_INT
2157 to CONST_INT since overflow won't be computed properly if wider
2158 than HOST_BITS_PER_WIDE_INT. */
2160 if ((GET_CODE (op0) == CONST
2161 || GET_CODE (op0) == SYMBOL_REF
2162 || GET_CODE (op0) == LABEL_REF)
2163 && CONST_INT_P (op1))
2164 return plus_constant (mode, op0, INTVAL (op1));
2165 else if ((GET_CODE (op1) == CONST
2166 || GET_CODE (op1) == SYMBOL_REF
2167 || GET_CODE (op1) == LABEL_REF)
2168 && CONST_INT_P (op0))
2169 return plus_constant (mode, op1, INTVAL (op0));
2171 /* See if this is something like X * C - X or vice versa or
2172 if the multiplication is written as a shift. If so, we can
2173 distribute and make a new multiply, shift, or maybe just
2174 have X (if C is 2 in the example above). But don't make
2175 something more expensive than we had before. */
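/* For example, (plus (mult x (const_int 3)) x) can become
   (mult x (const_int 4)), and (plus (ashift x (const_int 2)) (neg x))
   can become (mult x (const_int 3)), cost permitting.  */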
2177 if (SCALAR_INT_MODE_P (mode))
2179 rtx lhs = op0, rhs = op1;
2181 wide_int coeff0 = wi::one (GET_MODE_PRECISION (mode));
2182 wide_int coeff1 = wi::one (GET_MODE_PRECISION (mode));
2184 if (GET_CODE (lhs) == NEG)
2186 coeff0 = wi::minus_one (GET_MODE_PRECISION (mode));
2187 lhs = XEXP (lhs, 0);
2189 else if (GET_CODE (lhs) == MULT
2190 && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
2192 coeff0 = rtx_mode_t (XEXP (lhs, 1), mode);
2193 lhs = XEXP (lhs, 0);
2195 else if (GET_CODE (lhs) == ASHIFT
2196 && CONST_INT_P (XEXP (lhs, 1))
2197 && INTVAL (XEXP (lhs, 1)) >= 0
2198 && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (mode))
2200 coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
2201 GET_MODE_PRECISION (mode));
2202 lhs = XEXP (lhs, 0);
2205 if (GET_CODE (rhs) == NEG)
2207 coeff1 = wi::minus_one (GET_MODE_PRECISION (mode));
2208 rhs = XEXP (rhs, 0);
2210 else if (GET_CODE (rhs) == MULT
2211 && CONST_INT_P (XEXP (rhs, 1)))
2213 coeff1 = rtx_mode_t (XEXP (rhs, 1), mode);
2214 rhs = XEXP (rhs, 0);
2216 else if (GET_CODE (rhs) == ASHIFT
2217 && CONST_INT_P (XEXP (rhs, 1))
2218 && INTVAL (XEXP (rhs, 1)) >= 0
2219 && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (mode))
2221 coeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
2222 GET_MODE_PRECISION (mode));
2223 rhs = XEXP (rhs, 0);
2226 if (rtx_equal_p (lhs, rhs))
2228 rtx orig = gen_rtx_PLUS (mode, op0, op1);
2229 rtx coeff;
2230 bool speed = optimize_function_for_speed_p (cfun);
2232 coeff = immed_wide_int_const (coeff0 + coeff1, mode);
2234 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2235 return (set_src_cost (tem, mode, speed)
2236 <= set_src_cost (orig, mode, speed) ? tem : 0);
2240 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
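/* Adding the sign-bit constant can only toggle the sign bit (any carry
   falls off the top), so it behaves like an XOR: e.g. in QImode,
   (plus (xor x (const_int 1)) (const_int -128)) is
   (xor x (const_int -127)).  */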
2241 if (CONST_SCALAR_INT_P (op1)
2242 && GET_CODE (op0) == XOR
2243 && CONST_SCALAR_INT_P (XEXP (op0, 1))
2244 && mode_signbit_p (mode, op1))
2245 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2246 simplify_gen_binary (XOR, mode, op1,
2247 XEXP (op0, 1)));
2249 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
2250 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2251 && GET_CODE (op0) == MULT
2252 && GET_CODE (XEXP (op0, 0)) == NEG)
2254 rtx in1, in2;
2256 in1 = XEXP (XEXP (op0, 0), 0);
2257 in2 = XEXP (op0, 1);
2258 return simplify_gen_binary (MINUS, mode, op1,
2259 simplify_gen_binary (MULT, mode,
2260 in1, in2));
2263 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
2264 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
2265 is 1. */
2266 if (COMPARISON_P (op0)
2267 && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
2268 || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
2269 && (reversed = reversed_comparison (op0, mode)))
2270 return
2271 simplify_gen_unary (NEG, mode, reversed, mode);
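/* For instance, when STORE_FLAG_VALUE is 1, (plus (eq a b) (const_int -1))
   is 0 when a == b and -1 otherwise, i.e. (neg (ne a b)).  */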
2273 /* If one of the operands is a PLUS or a MINUS, see if we can
2274 simplify this by the associative law.
2275 Don't use the associative law for floating point.
2276 The inaccuracy makes it nonassociative,
2277 and subtle programs can break if operations are associated. */
2279 if (INTEGRAL_MODE_P (mode)
2280 && (plus_minus_operand_p (op0)
2281 || plus_minus_operand_p (op1))
2282 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2283 return tem;
2285 /* Reassociate floating point addition only when the user
2286 specifies associative math operations. */
2287 if (FLOAT_MODE_P (mode)
2288 && flag_associative_math)
2290 tem = simplify_associative_operation (code, mode, op0, op1);
2291 if (tem)
2292 return tem;
2294 break;
2296 case COMPARE:
2297 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
2298 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
2299 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
2300 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
2302 rtx xop00 = XEXP (op0, 0);
2303 rtx xop10 = XEXP (op1, 0);
2305 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
2306 return xop00;
2308 if (REG_P (xop00) && REG_P (xop10)
2309 && GET_MODE (xop00) == GET_MODE (xop10)
2310 && REGNO (xop00) == REGNO (xop10)
2311 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
2312 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
2313 return xop00;
2315 break;
2317 case MINUS:
2318 /* We can't assume x-x is 0 even with non-IEEE floating point,
2319 but since it is zero except in very strange circumstances, we
2320 will treat it as zero with -ffinite-math-only. */
2321 if (rtx_equal_p (trueop0, trueop1)
2322 && ! side_effects_p (op0)
2323 && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
2324 return CONST0_RTX (mode);
2326 /* Change subtraction from zero into negation. (0 - x) is the
2327 same as -x when x is NaN, infinite, or finite and nonzero.
2328 But if the mode has signed zeros, and does not round towards
2329 -infinity, then 0 - 0 is 0, not -0. */
2330 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
2331 return simplify_gen_unary (NEG, mode, op1, mode);
2333 /* (-1 - a) is ~a, unless the expression contains symbolic
2334 constants, in which case not retaining additions and
2335 subtractions could cause invalid assembly to be produced. */
2336 if (trueop0 == constm1_rtx
2337 && !contains_symbolic_reference_p (op1))
2338 return simplify_gen_unary (NOT, mode, op1, mode);
2340 /* Subtracting 0 has no effect unless the mode has signed zeros
2341 and supports rounding towards -infinity. In such a case,
2342 0 - 0 is -0. */
2343 if (!(HONOR_SIGNED_ZEROS (mode)
2344 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
2345 && trueop1 == CONST0_RTX (mode))
2346 return op0;
2348 /* See if this is something like X * C - X or vice versa or
2349 if the multiplication is written as a shift. If so, we can
2350 distribute and make a new multiply, shift, or maybe just
2351 have X (if C is 2 in the example above). But don't make
2352 something more expensive than we had before. */
2354 if (SCALAR_INT_MODE_P (mode))
2356 rtx lhs = op0, rhs = op1;
2358 wide_int coeff0 = wi::one (GET_MODE_PRECISION (mode));
2359 wide_int negcoeff1 = wi::minus_one (GET_MODE_PRECISION (mode));
2361 if (GET_CODE (lhs) == NEG)
2363 coeff0 = wi::minus_one (GET_MODE_PRECISION (mode));
2364 lhs = XEXP (lhs, 0);
2366 else if (GET_CODE (lhs) == MULT
2367 && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
2369 coeff0 = rtx_mode_t (XEXP (lhs, 1), mode);
2370 lhs = XEXP (lhs, 0);
2372 else if (GET_CODE (lhs) == ASHIFT
2373 && CONST_INT_P (XEXP (lhs, 1))
2374 && INTVAL (XEXP (lhs, 1)) >= 0
2375 && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (mode))
2377 coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
2378 GET_MODE_PRECISION (mode));
2379 lhs = XEXP (lhs, 0);
2382 if (GET_CODE (rhs) == NEG)
2384 negcoeff1 = wi::one (GET_MODE_PRECISION (mode));
2385 rhs = XEXP (rhs, 0);
2387 else if (GET_CODE (rhs) == MULT
2388 && CONST_INT_P (XEXP (rhs, 1)))
2390 negcoeff1 = wi::neg (rtx_mode_t (XEXP (rhs, 1), mode));
2391 rhs = XEXP (rhs, 0);
2393 else if (GET_CODE (rhs) == ASHIFT
2394 && CONST_INT_P (XEXP (rhs, 1))
2395 && INTVAL (XEXP (rhs, 1)) >= 0
2396 && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (mode))
2398 negcoeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
2399 GET_MODE_PRECISION (mode));
2400 negcoeff1 = -negcoeff1;
2401 rhs = XEXP (rhs, 0);
2404 if (rtx_equal_p (lhs, rhs))
2406 rtx orig = gen_rtx_MINUS (mode, op0, op1);
2407 rtx coeff;
2408 bool speed = optimize_function_for_speed_p (cfun);
2410 coeff = immed_wide_int_const (coeff0 + negcoeff1, mode);
2412 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2413 return (set_src_cost (tem, mode, speed)
2414 <= set_src_cost (orig, mode, speed) ? tem : 0);
2418 /* (a - (-b)) -> (a + b). True even for IEEE. */
2419 if (GET_CODE (op1) == NEG)
2420 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
2422 /* (-x - c) may be simplified as (-c - x). */
2423 if (GET_CODE (op0) == NEG
2424 && (CONST_SCALAR_INT_P (op1) || CONST_DOUBLE_AS_FLOAT_P (op1)))
2426 tem = simplify_unary_operation (NEG, mode, op1, mode);
2427 if (tem)
2428 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
2431 /* Don't let a relocatable value get a negative coeff. */
2432 if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode)
2433 return simplify_gen_binary (PLUS, mode,
2434 op0,
2435 neg_const_int (mode, op1));
2437 /* (x - (x & y)) -> (x & ~y) */
2438 if (INTEGRAL_MODE_P (mode) && GET_CODE (op1) == AND)
2440 if (rtx_equal_p (op0, XEXP (op1, 0)))
2442 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
2443 GET_MODE (XEXP (op1, 1)));
2444 return simplify_gen_binary (AND, mode, op0, tem);
2446 if (rtx_equal_p (op0, XEXP (op1, 1)))
2448 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
2449 GET_MODE (XEXP (op1, 0)));
2450 return simplify_gen_binary (AND, mode, op0, tem);
2454 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
2455 by reversing the comparison code if valid. */
2456 if (STORE_FLAG_VALUE == 1
2457 && trueop0 == const1_rtx
2458 && COMPARISON_P (op1)
2459 && (reversed = reversed_comparison (op1, mode)))
2460 return reversed;
2462 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
2463 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2464 && GET_CODE (op1) == MULT
2465 && GET_CODE (XEXP (op1, 0)) == NEG)
2467 rtx in1, in2;
2469 in1 = XEXP (XEXP (op1, 0), 0);
2470 in2 = XEXP (op1, 1);
2471 return simplify_gen_binary (PLUS, mode,
2472 simplify_gen_binary (MULT, mode,
2473 in1, in2),
2474 op0);
2477 /* Canonicalize (minus (neg A) (mult B C)) to
2478 (minus (mult (neg B) C) A). */
2479 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2480 && GET_CODE (op1) == MULT
2481 && GET_CODE (op0) == NEG)
2483 rtx in1, in2;
2485 in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
2486 in2 = XEXP (op1, 1);
2487 return simplify_gen_binary (MINUS, mode,
2488 simplify_gen_binary (MULT, mode,
2489 in1, in2),
2490 XEXP (op0, 0));
2493 /* If one of the operands is a PLUS or a MINUS, see if we can
2494 simplify this by the associative law. This will, for example,
2495 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
2496 Don't use the associative law for floating point.
2497 The inaccuracy makes it nonassociative,
2498 and subtle programs can break if operations are associated. */
2500 if (INTEGRAL_MODE_P (mode)
2501 && (plus_minus_operand_p (op0)
2502 || plus_minus_operand_p (op1))
2503 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2504 return tem;
2505 break;
2507 case MULT:
2508 if (trueop1 == constm1_rtx)
2509 return simplify_gen_unary (NEG, mode, op0, mode);
2511 if (GET_CODE (op0) == NEG)
2513 rtx temp = simplify_unary_operation (NEG, mode, op1, mode);
2514 /* If op1 is a MULT as well and simplify_unary_operation
2515 just moved the NEG to the second operand, simplify_gen_binary
2516 below could, through simplify_associative_operation, move
2517 the NEG around again and recurse endlessly. */
2518 if (temp
2519 && GET_CODE (op1) == MULT
2520 && GET_CODE (temp) == MULT
2521 && XEXP (op1, 0) == XEXP (temp, 0)
2522 && GET_CODE (XEXP (temp, 1)) == NEG
2523 && XEXP (op1, 1) == XEXP (XEXP (temp, 1), 0))
2524 temp = NULL_RTX;
2525 if (temp)
2526 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), temp);
2528 if (GET_CODE (op1) == NEG)
2530 rtx temp = simplify_unary_operation (NEG, mode, op0, mode);
2531 /* If op0 is a MULT as well and simplify_unary_operation
2532 just moved the NEG to the second operand, simplify_gen_binary
2533 below could, through simplify_associative_operation, move
2534 the NEG around again and recurse endlessly. */
2535 if (temp
2536 && GET_CODE (op0) == MULT
2537 && GET_CODE (temp) == MULT
2538 && XEXP (op0, 0) == XEXP (temp, 0)
2539 && GET_CODE (XEXP (temp, 1)) == NEG
2540 && XEXP (op0, 1) == XEXP (XEXP (temp, 1), 0))
2541 temp = NULL_RTX;
2542 if (temp)
2543 return simplify_gen_binary (MULT, mode, temp, XEXP (op1, 0));
2546 /* Maybe simplify x * 0 to 0. The reduction is not valid if
2547 x is NaN, since x * 0 is then also NaN. Nor is it valid
2548 when the mode has signed zeros, since multiplying a negative
2549 number by 0 will give -0, not 0. */
2550 if (!HONOR_NANS (mode)
2551 && !HONOR_SIGNED_ZEROS (mode)
2552 && trueop1 == CONST0_RTX (mode)
2553 && ! side_effects_p (op0))
2554 return op1;
2556 /* In IEEE floating point, x*1 is not equivalent to x for
2557 signalling NaNs. */
2558 if (!HONOR_SNANS (mode)
2559 && trueop1 == CONST1_RTX (mode))
2560 return op0;
2562 /* Convert multiply by constant power of two into shift. */
2563 if (CONST_SCALAR_INT_P (trueop1))
2565 val = wi::exact_log2 (rtx_mode_t (trueop1, mode));
2566 if (val >= 0)
2567 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
2570 /* x*2 is x+x and x*(-1) is -x */
2571 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
2572 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
2573 && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
2574 && GET_MODE (op0) == mode)
2576 const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);
2578 if (real_equal (d1, &dconst2))
2579 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
2581 if (!HONOR_SNANS (mode)
2582 && real_equal (d1, &dconstm1))
2583 return simplify_gen_unary (NEG, mode, op0, mode);
2586 /* Optimize -x * -x as x * x. */
2587 if (FLOAT_MODE_P (mode)
2588 && GET_CODE (op0) == NEG
2589 && GET_CODE (op1) == NEG
2590 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2591 && !side_effects_p (XEXP (op0, 0)))
2592 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2594 /* Likewise, optimize abs(x) * abs(x) as x * x. */
2595 if (SCALAR_FLOAT_MODE_P (mode)
2596 && GET_CODE (op0) == ABS
2597 && GET_CODE (op1) == ABS
2598 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2599 && !side_effects_p (XEXP (op0, 0)))
2600 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2602 /* Reassociate multiplication, but for floating point MULTs
2603 only when the user specifies unsafe math optimizations. */
2604 if (! FLOAT_MODE_P (mode)
2605 || flag_unsafe_math_optimizations)
2607 tem = simplify_associative_operation (code, mode, op0, op1);
2608 if (tem)
2609 return tem;
2611 break;
2613 case IOR:
2614 if (trueop1 == CONST0_RTX (mode))
2615 return op0;
2616 if (INTEGRAL_MODE_P (mode)
2617 && trueop1 == CONSTM1_RTX (mode)
2618 && !side_effects_p (op0))
2619 return op1;
2620 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2621 return op0;
2622 /* A | (~A) -> -1 */
2623 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2624 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2625 && ! side_effects_p (op0)
2626 && SCALAR_INT_MODE_P (mode))
2627 return constm1_rtx;
2629 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
2630 if (CONST_INT_P (op1)
2631 && HWI_COMPUTABLE_MODE_P (mode)
2632 && (nonzero_bits (op0, mode) & ~UINTVAL (op1)) == 0
2633 && !side_effects_p (op0))
2634 return op1;
2636 /* Canonicalize (X & C1) | C2. */
2637 if (GET_CODE (op0) == AND
2638 && CONST_INT_P (trueop1)
2639 && CONST_INT_P (XEXP (op0, 1)))
2641 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
2642 HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
2643 HOST_WIDE_INT c2 = INTVAL (trueop1);
2645 /* If (C1&C2) == C1, then (X&C1)|C2 becomes X. */
2646 if ((c1 & c2) == c1
2647 && !side_effects_p (XEXP (op0, 0)))
2648 return trueop1;
2650 /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
2651 if (((c1|c2) & mask) == mask)
2652 return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);
2654 /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2. */
2655 if (((c1 & ~c2) & mask) != (c1 & mask))
2657 tem = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2658 gen_int_mode (c1 & ~c2, mode));
2659 return simplify_gen_binary (IOR, mode, tem, op1);
2663 /* Convert (A & B) | A to A. */
2664 if (GET_CODE (op0) == AND
2665 && (rtx_equal_p (XEXP (op0, 0), op1)
2666 || rtx_equal_p (XEXP (op0, 1), op1))
2667 && ! side_effects_p (XEXP (op0, 0))
2668 && ! side_effects_p (XEXP (op0, 1)))
2669 return op1;
2671 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2672 mode size to (rotate A CX). */
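/* E.g. in SImode, (ior (ashift a (const_int 8)) (lshiftrt a (const_int 24)))
   becomes (rotate a (const_int 8)).  */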
2674 if (GET_CODE (op1) == ASHIFT
2675 || GET_CODE (op1) == SUBREG)
2677 opleft = op1;
2678 opright = op0;
2680 else
2682 opright = op1;
2683 opleft = op0;
2686 if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
2687 && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
2688 && CONST_INT_P (XEXP (opleft, 1))
2689 && CONST_INT_P (XEXP (opright, 1))
2690 && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
2691 == GET_MODE_PRECISION (mode)))
2692 return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
2694 /* Same, but for ashift that has been "simplified" to a wider mode
2695 by simplify_shift_const. */
2697 if (GET_CODE (opleft) == SUBREG
2698 && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
2699 && GET_CODE (opright) == LSHIFTRT
2700 && GET_CODE (XEXP (opright, 0)) == SUBREG
2701 && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
2702 && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
2703 && (GET_MODE_SIZE (GET_MODE (opleft))
2704 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
2705 && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
2706 SUBREG_REG (XEXP (opright, 0)))
2707 && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
2708 && CONST_INT_P (XEXP (opright, 1))
2709 && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
2710 == GET_MODE_PRECISION (mode)))
2711 return gen_rtx_ROTATE (mode, XEXP (opright, 0),
2712 XEXP (SUBREG_REG (opleft), 1));
2714 /* If we have (ior (and X C1) C2), simplify this by making
2715 C1 as small as possible if C1 actually changes. */
2716 if (CONST_INT_P (op1)
2717 && (HWI_COMPUTABLE_MODE_P (mode)
2718 || INTVAL (op1) > 0)
2719 && GET_CODE (op0) == AND
2720 && CONST_INT_P (XEXP (op0, 1))
2721 && CONST_INT_P (op1)
2722 && (UINTVAL (XEXP (op0, 1)) & UINTVAL (op1)) != 0)
2724 rtx tmp = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2725 gen_int_mode (UINTVAL (XEXP (op0, 1))
2726 & ~UINTVAL (op1),
2727 mode));
2728 return simplify_gen_binary (IOR, mode, tmp, op1);
2731 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2732 a (sign_extend (plus ...)). If OP1 is a CONST_INT and
2733 the PLUS does not affect any of the bits in OP1, we can do
2734 the IOR as a PLUS and we can associate. This is valid if OP1
2735 can be safely shifted left C bits. */
2736 if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
2737 && GET_CODE (XEXP (op0, 0)) == PLUS
2738 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
2739 && CONST_INT_P (XEXP (op0, 1))
2740 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
2742 int count = INTVAL (XEXP (op0, 1));
2743 HOST_WIDE_INT mask = INTVAL (trueop1) << count;
2745 if (mask >> count == INTVAL (trueop1)
2746 && trunc_int_for_mode (mask, mode) == mask
2747 && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
2748 return simplify_gen_binary (ASHIFTRT, mode,
2749 plus_constant (mode, XEXP (op0, 0),
2750 mask),
2751 XEXP (op0, 1));
2754 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
2755 if (tem)
2756 return tem;
2758 tem = simplify_associative_operation (code, mode, op0, op1);
2759 if (tem)
2760 return tem;
2761 break;
2763 case XOR:
2764 if (trueop1 == CONST0_RTX (mode))
2765 return op0;
2766 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2767 return simplify_gen_unary (NOT, mode, op0, mode);
2768 if (rtx_equal_p (trueop0, trueop1)
2769 && ! side_effects_p (op0)
2770 && GET_MODE_CLASS (mode) != MODE_CC)
2771 return CONST0_RTX (mode);
2773 /* Canonicalize XOR of the most significant bit to PLUS. */
2774 if (CONST_SCALAR_INT_P (op1)
2775 && mode_signbit_p (mode, op1))
2776 return simplify_gen_binary (PLUS, mode, op0, op1);
2777 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2778 if (CONST_SCALAR_INT_P (op1)
2779 && GET_CODE (op0) == PLUS
2780 && CONST_SCALAR_INT_P (XEXP (op0, 1))
2781 && mode_signbit_p (mode, XEXP (op0, 1)))
2782 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2783 simplify_gen_binary (XOR, mode, op1,
2784 XEXP (op0, 1)));
2786 /* If we are XORing two things that have no bits in common,
2787 convert them into an IOR. This helps to detect rotation encoded
2788 using those methods and possibly other simplifications. */
2790 if (HWI_COMPUTABLE_MODE_P (mode)
2791 && (nonzero_bits (op0, mode)
2792 & nonzero_bits (op1, mode)) == 0)
2793 return (simplify_gen_binary (IOR, mode, op0, op1));
2795 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2796 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2797 (NOT y). */
2799 int num_negated = 0;
2801 if (GET_CODE (op0) == NOT)
2802 num_negated++, op0 = XEXP (op0, 0);
2803 if (GET_CODE (op1) == NOT)
2804 num_negated++, op1 = XEXP (op1, 0);
2806 if (num_negated == 2)
2807 return simplify_gen_binary (XOR, mode, op0, op1);
2808 else if (num_negated == 1)
2809 return simplify_gen_unary (NOT, mode,
2810 simplify_gen_binary (XOR, mode, op0, op1),
2811 mode);
2814 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2815 correspond to a machine insn or result in further simplifications
2816 if B is a constant. */
2818 if (GET_CODE (op0) == AND
2819 && rtx_equal_p (XEXP (op0, 1), op1)
2820 && ! side_effects_p (op1))
2821 return simplify_gen_binary (AND, mode,
2822 simplify_gen_unary (NOT, mode,
2823 XEXP (op0, 0), mode),
2824 op1);
2826 else if (GET_CODE (op0) == AND
2827 && rtx_equal_p (XEXP (op0, 0), op1)
2828 && ! side_effects_p (op1))
2829 return simplify_gen_binary (AND, mode,
2830 simplify_gen_unary (NOT, mode,
2831 XEXP (op0, 1), mode),
2832 op1);
2834 /* Given (xor (ior (xor A B) C) D), where B, C and D are
2835 constants, simplify to (xor (ior A C) (B&~C)^D), canceling
2836 out bits inverted twice and not set by C. Similarly, given
2837 (xor (and (xor A B) C) D), simplify without inverting C in
2838 the xor operand: (xor (and A C) (B&C)^D).
2839 */
2840 else if ((GET_CODE (op0) == IOR || GET_CODE (op0) == AND)
2841 && GET_CODE (XEXP (op0, 0)) == XOR
2842 && CONST_INT_P (op1)
2843 && CONST_INT_P (XEXP (op0, 1))
2844 && CONST_INT_P (XEXP (XEXP (op0, 0), 1)))
2846 enum rtx_code op = GET_CODE (op0);
2847 rtx a = XEXP (XEXP (op0, 0), 0);
2848 rtx b = XEXP (XEXP (op0, 0), 1);
2849 rtx c = XEXP (op0, 1);
2850 rtx d = op1;
2851 HOST_WIDE_INT bval = INTVAL (b);
2852 HOST_WIDE_INT cval = INTVAL (c);
2853 HOST_WIDE_INT dval = INTVAL (d);
2854 HOST_WIDE_INT xcval;
2856 if (op == IOR)
2857 xcval = ~cval;
2858 else
2859 xcval = cval;
2861 return simplify_gen_binary (XOR, mode,
2862 simplify_gen_binary (op, mode, a, c),
2863 gen_int_mode ((bval & xcval) ^ dval,
2864 mode));
2867 /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
2868 we can transform like this:
2869 (A&B)^C == ~(A&B)&C | ~C&(A&B)
2870 == (~A|~B)&C | ~C&(A&B) * DeMorgan's Law
2871 == ~A&C | ~B&C | A&(~C&B) * Distribute and re-order
2872 Attempt a few simplifications when B and C are both constants. */
2873 if (GET_CODE (op0) == AND
2874 && CONST_INT_P (op1)
2875 && CONST_INT_P (XEXP (op0, 1)))
2877 rtx a = XEXP (op0, 0);
2878 rtx b = XEXP (op0, 1);
2879 rtx c = op1;
2880 HOST_WIDE_INT bval = INTVAL (b);
2881 HOST_WIDE_INT cval = INTVAL (c);
2883 /* Instead of computing ~A&C, we compute its negated value,
2884 ~(A|~C). If it yields -1, ~A&C is zero, so we can
2885 optimize for sure. If it does not simplify, we still try
2886 to compute ~A&C below, but since that always allocates
2887 RTL, we don't try that before committing to returning a
2888 simplified expression. */
2889 rtx n_na_c = simplify_binary_operation (IOR, mode, a,
2890 GEN_INT (~cval));
2892 if ((~cval & bval) == 0)
2894 rtx na_c = NULL_RTX;
2895 if (n_na_c)
2896 na_c = simplify_gen_unary (NOT, mode, n_na_c, mode);
2897 else
2899 /* If ~A does not simplify, don't bother: we don't
2900 want to simplify 2 operations into 3, and if na_c
2901 were to simplify with na, n_na_c would have
2902 simplified as well. */
2903 rtx na = simplify_unary_operation (NOT, mode, a, mode);
2904 if (na)
2905 na_c = simplify_gen_binary (AND, mode, na, c);
2908 /* Try to simplify ~A&C | ~B&C. */
2909 if (na_c != NULL_RTX)
2910 return simplify_gen_binary (IOR, mode, na_c,
2911 gen_int_mode (~bval & cval, mode));
2913 else
2915 /* If ~A&C is zero, simplify A&(~C&B) | ~B&C. */
2916 if (n_na_c == CONSTM1_RTX (mode))
2918 rtx a_nc_b = simplify_gen_binary (AND, mode, a,
2919 gen_int_mode (~cval & bval,
2920 mode));
2921 return simplify_gen_binary (IOR, mode, a_nc_b,
2922 gen_int_mode (~bval & cval,
2923 mode));
2928 /* If we have (xor (and (xor A B) C) A) with C a constant, we can instead
2929 do (ior (and A ~C) (and B C)), which matches a machine instruction on
2930 some machines and also has a shorter instruction path length. */
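/* Bit by bit, ((A ^ B) & C) ^ A selects a bit of B where C is 1 and a
   bit of A where C is 0, which is exactly (ior (and A ~C) (and B C)).  */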
2931 if (GET_CODE (op0) == AND
2932 && GET_CODE (XEXP (op0, 0)) == XOR
2933 && CONST_INT_P (XEXP (op0, 1))
2934 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), trueop1))
2936 rtx a = trueop1;
2937 rtx b = XEXP (XEXP (op0, 0), 1);
2938 rtx c = XEXP (op0, 1);
2939 rtx nc = simplify_gen_unary (NOT, mode, c, mode);
2940 rtx a_nc = simplify_gen_binary (AND, mode, a, nc);
2941 rtx bc = simplify_gen_binary (AND, mode, b, c);
2942 return simplify_gen_binary (IOR, mode, a_nc, bc);
2944 /* Similarly, (xor (and (xor A B) C) B) as (ior (and A C) (and B ~C)) */
2945 else if (GET_CODE (op0) == AND
2946 && GET_CODE (XEXP (op0, 0)) == XOR
2947 && CONST_INT_P (XEXP (op0, 1))
2948 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), trueop1))
2950 rtx a = XEXP (XEXP (op0, 0), 0);
2951 rtx b = trueop1;
2952 rtx c = XEXP (op0, 1);
2953 rtx nc = simplify_gen_unary (NOT, mode, c, mode);
2954 rtx b_nc = simplify_gen_binary (AND, mode, b, nc);
2955 rtx ac = simplify_gen_binary (AND, mode, a, c);
2956 return simplify_gen_binary (IOR, mode, ac, b_nc);
2959 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2960 comparison if STORE_FLAG_VALUE is 1. */
2961 if (STORE_FLAG_VALUE == 1
2962 && trueop1 == const1_rtx
2963 && COMPARISON_P (op0)
2964 && (reversed = reversed_comparison (op0, mode)))
2965 return reversed;
2967 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2968 is (lt foo (const_int 0)), so we can perform the above
2969 simplification if STORE_FLAG_VALUE is 1. */
2971 if (STORE_FLAG_VALUE == 1
2972 && trueop1 == const1_rtx
2973 && GET_CODE (op0) == LSHIFTRT
2974 && CONST_INT_P (XEXP (op0, 1))
2975 && INTVAL (XEXP (op0, 1)) == GET_MODE_PRECISION (mode) - 1)
2976 return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);
2978 /* (xor (comparison foo bar) (const_int sign-bit))
2979 when STORE_FLAG_VALUE is the sign bit. */
2980 if (val_signbit_p (mode, STORE_FLAG_VALUE)
2981 && trueop1 == const_true_rtx
2982 && COMPARISON_P (op0)
2983 && (reversed = reversed_comparison (op0, mode)))
2984 return reversed;
2986 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
2987 if (tem)
2988 return tem;
2990 tem = simplify_associative_operation (code, mode, op0, op1);
2991 if (tem)
2992 return tem;
2993 break;
2995 case AND:
2996 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2997 return trueop1;
2998 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2999 return op0;
3000 if (HWI_COMPUTABLE_MODE_P (mode))
3002 HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
3003 HOST_WIDE_INT nzop1;
3004 if (CONST_INT_P (trueop1))
3006 HOST_WIDE_INT val1 = INTVAL (trueop1);
3007 /* If we are turning off bits already known off in OP0, we need
3008 not do an AND. */
3009 if ((nzop0 & ~val1) == 0)
3010 return op0;
3012 nzop1 = nonzero_bits (trueop1, mode);
3013 /* If we are clearing all the nonzero bits, the result is zero. */
3014 if ((nzop1 & nzop0) == 0
3015 && !side_effects_p (op0) && !side_effects_p (op1))
3016 return CONST0_RTX (mode);
3018 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
3019 && GET_MODE_CLASS (mode) != MODE_CC)
3020 return op0;
3021 /* A & (~A) -> 0 */
3022 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
3023 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
3024 && ! side_effects_p (op0)
3025 && GET_MODE_CLASS (mode) != MODE_CC)
3026 return CONST0_RTX (mode);
3028 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
3029 there are no nonzero bits of C outside of X's mode. */
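/* For example, (and:SI (sign_extend:SI (reg:QI r)) (const_int 0x4c))
   can become (zero_extend:SI (and:QI (reg:QI r) (const_int 0x4c))),
   since 0x4c has no bits outside QImode's mask.  */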
3030 if ((GET_CODE (op0) == SIGN_EXTEND
3031 || GET_CODE (op0) == ZERO_EXTEND)
3032 && CONST_INT_P (trueop1)
3033 && HWI_COMPUTABLE_MODE_P (mode)
3034 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
3035 & UINTVAL (trueop1)) == 0)
3037 machine_mode imode = GET_MODE (XEXP (op0, 0));
3038 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
3039 gen_int_mode (INTVAL (trueop1),
3040 imode));
3041 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
3044 /* Transform (and (truncate X) C) into (truncate (and X C)). This way
3045 we might be able to further simplify the AND with X and potentially
3046 remove the truncation altogether. */
3047 if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
3049 rtx x = XEXP (op0, 0);
3050 machine_mode xmode = GET_MODE (x);
3051 tem = simplify_gen_binary (AND, xmode, x,
3052 gen_int_mode (INTVAL (trueop1), xmode));
3053 return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
3056 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
3057 if (GET_CODE (op0) == IOR
3058 && CONST_INT_P (trueop1)
3059 && CONST_INT_P (XEXP (op0, 1)))
3061 HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
3062 return simplify_gen_binary (IOR, mode,
3063 simplify_gen_binary (AND, mode,
3064 XEXP (op0, 0), op1),
3065 gen_int_mode (tmp, mode));
3068 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
3069 insn (and may simplify more). */
3070 if (GET_CODE (op0) == XOR
3071 && rtx_equal_p (XEXP (op0, 0), op1)
3072 && ! side_effects_p (op1))
3073 return simplify_gen_binary (AND, mode,
3074 simplify_gen_unary (NOT, mode,
3075 XEXP (op0, 1), mode),
3076 op1);
3078 if (GET_CODE (op0) == XOR
3079 && rtx_equal_p (XEXP (op0, 1), op1)
3080 && ! side_effects_p (op1))
3081 return simplify_gen_binary (AND, mode,
3082 simplify_gen_unary (NOT, mode,
3083 XEXP (op0, 0), mode),
3084 op1);
3086 /* Similarly for (~(A ^ B)) & A. */
3087 if (GET_CODE (op0) == NOT
3088 && GET_CODE (XEXP (op0, 0)) == XOR
3089 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
3090 && ! side_effects_p (op1))
3091 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
3093 if (GET_CODE (op0) == NOT
3094 && GET_CODE (XEXP (op0, 0)) == XOR
3095 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
3096 && ! side_effects_p (op1))
3097 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
3099 /* Convert (A | B) & A to A. */
3100 if (GET_CODE (op0) == IOR
3101 && (rtx_equal_p (XEXP (op0, 0), op1)
3102 || rtx_equal_p (XEXP (op0, 1), op1))
3103 && ! side_effects_p (XEXP (op0, 0))
3104 && ! side_effects_p (XEXP (op0, 1)))
3105 return op1;
3107 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
3108 ((A & N) + B) & M -> (A + B) & M
3109 Similarly if (N & M) == 0,
3110 ((A | N) + B) & M -> (A + B) & M
3111 and for - instead of + and/or ^ instead of |.
3112 Also, if (N & M) == 0, then
3113 (A +- N) & M -> A & M. */
3114 if (CONST_INT_P (trueop1)
3115 && HWI_COMPUTABLE_MODE_P (mode)
3116 && ~UINTVAL (trueop1)
3117 && (UINTVAL (trueop1) & (UINTVAL (trueop1) + 1)) == 0
3118 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
3120 rtx pmop[2];
3121 int which;
3123 pmop[0] = XEXP (op0, 0);
3124 pmop[1] = XEXP (op0, 1);
3126 if (CONST_INT_P (pmop[1])
3127 && (UINTVAL (pmop[1]) & UINTVAL (trueop1)) == 0)
3128 return simplify_gen_binary (AND, mode, pmop[0], op1);
3130 for (which = 0; which < 2; which++)
3132 tem = pmop[which];
3133 switch (GET_CODE (tem))
3135 case AND:
3136 if (CONST_INT_P (XEXP (tem, 1))
3137 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1))
3138 == UINTVAL (trueop1))
3139 pmop[which] = XEXP (tem, 0);
3140 break;
3141 case IOR:
3142 case XOR:
3143 if (CONST_INT_P (XEXP (tem, 1))
3144 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1)) == 0)
3145 pmop[which] = XEXP (tem, 0);
3146 break;
3147 default:
3148 break;
3152 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
3154 tem = simplify_gen_binary (GET_CODE (op0), mode,
3155 pmop[0], pmop[1]);
3156 return simplify_gen_binary (code, mode, tem, op1);
3160 /* (and X (ior (not X) Y)) -> (and X Y) */
3161 if (GET_CODE (op1) == IOR
3162 && GET_CODE (XEXP (op1, 0)) == NOT
3163 && rtx_equal_p (op0, XEXP (XEXP (op1, 0), 0)))
3164 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));
3166 /* (and (ior (not X) Y) X) -> (and X Y) */
3167 if (GET_CODE (op0) == IOR
3168 && GET_CODE (XEXP (op0, 0)) == NOT
3169 && rtx_equal_p (op1, XEXP (XEXP (op0, 0), 0)))
3170 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));
3172 /* (and X (ior Y (not X))) -> (and X Y) */
3173 if (GET_CODE (op1) == IOR
3174 && GET_CODE (XEXP (op1, 1)) == NOT
3175 && rtx_equal_p (op0, XEXP (XEXP (op1, 1), 0)))
3176 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 0));
3178 /* (and (ior Y (not X)) X) -> (and X Y) */
3179 if (GET_CODE (op0) == IOR
3180 && GET_CODE (XEXP (op0, 1)) == NOT
3181 && rtx_equal_p (op1, XEXP (XEXP (op0, 1), 0)))
3182 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 0));
3184 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
3185 if (tem)
3186 return tem;
3188 tem = simplify_associative_operation (code, mode, op0, op1);
3189 if (tem)
3190 return tem;
3191 break;
3193 case UDIV:
3194 /* 0/x is 0 (or x&0 if x has side-effects). */
3195 if (trueop0 == CONST0_RTX (mode))
3197 if (side_effects_p (op1))
3198 return simplify_gen_binary (AND, mode, op1, trueop0);
3199 return trueop0;
3201 /* x/1 is x. */
3202 if (trueop1 == CONST1_RTX (mode))
3204 tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3205 if (tem)
3206 return tem;
3208 /* Convert divide by power of two into shift. */
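/* E.g. (udiv x (const_int 16)) becomes (lshiftrt x (const_int 4)).  */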
3209 if (CONST_INT_P (trueop1)
3210 && (val = exact_log2 (UINTVAL (trueop1))) > 0)
3211 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
3212 break;
3214 case DIV:
3215 /* Handle floating point and integers separately. */
3216 if (SCALAR_FLOAT_MODE_P (mode))
3218 /* Maybe change 0.0 / x to 0.0. This transformation isn't
3219 safe for modes with NaNs, since 0.0 / 0.0 will then be
3220 NaN rather than 0.0. Nor is it safe for modes with signed
3221 zeros, since dividing 0 by a negative number gives -0.0 */
3222 if (trueop0 == CONST0_RTX (mode)
3223 && !HONOR_NANS (mode)
3224 && !HONOR_SIGNED_ZEROS (mode)
3225 && ! side_effects_p (op1))
3226 return op0;
3227 /* x/1.0 is x. */
3228 if (trueop1 == CONST1_RTX (mode)
3229 && !HONOR_SNANS (mode))
3230 return op0;
3232 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
3233 && trueop1 != CONST0_RTX (mode))
3235 const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);
3237 /* x/-1.0 is -x. */
3238 if (real_equal (d1, &dconstm1)
3239 && !HONOR_SNANS (mode))
3240 return simplify_gen_unary (NEG, mode, op0, mode);
3242 /* Change FP division by a constant into multiplication.
3243 Only do this with -freciprocal-math. */
3244 if (flag_reciprocal_math
3245 && !real_equal (d1, &dconst0))
3247 REAL_VALUE_TYPE d;
3248 real_arithmetic (&d, RDIV_EXPR, &dconst1, d1);
3249 tem = const_double_from_real_value (d, mode);
3250 return simplify_gen_binary (MULT, mode, op0, tem);
3254 else if (SCALAR_INT_MODE_P (mode))
3256 /* 0/x is 0 (or x&0 if x has side-effects). */
3257 if (trueop0 == CONST0_RTX (mode)
3258 && !cfun->can_throw_non_call_exceptions)
3260 if (side_effects_p (op1))
3261 return simplify_gen_binary (AND, mode, op1, trueop0);
3262 return trueop0;
3264 /* x/1 is x. */
3265 if (trueop1 == CONST1_RTX (mode))
3267 tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3268 if (tem)
3269 return tem;
3271 /* x/-1 is -x. */
3272 if (trueop1 == constm1_rtx)
3274 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3275 if (x)
3276 return simplify_gen_unary (NEG, mode, x, mode);
3279 break;
3281 case UMOD:
3282 /* 0%x is 0 (or x&0 if x has side-effects). */
3283 if (trueop0 == CONST0_RTX (mode))
3285 if (side_effects_p (op1))
3286 return simplify_gen_binary (AND, mode, op1, trueop0);
3287 return trueop0;
3289 /* x%1 is 0 (or x&0 if x has side-effects). */
3290 if (trueop1 == CONST1_RTX (mode))
3292 if (side_effects_p (op0))
3293 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3294 return CONST0_RTX (mode);
3296 /* Implement modulus by power of two as AND. */
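/* E.g. (umod x (const_int 8)) becomes (and x (const_int 7)).  */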
3297 if (CONST_INT_P (trueop1)
3298 && exact_log2 (UINTVAL (trueop1)) > 0)
3299 return simplify_gen_binary (AND, mode, op0,
3300 gen_int_mode (INTVAL (op1) - 1, mode));
3301 break;
3303 case MOD:
3304 /* 0%x is 0 (or x&0 if x has side-effects). */
3305 if (trueop0 == CONST0_RTX (mode))
3307 if (side_effects_p (op1))
3308 return simplify_gen_binary (AND, mode, op1, trueop0);
3309 return trueop0;
3311 /* x%1 and x%-1 are 0 (or x&0 if x has side-effects). */
3312 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
3314 if (side_effects_p (op0))
3315 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3316 return CONST0_RTX (mode);
3318 break;
3320 case ROTATERT:
3321 case ROTATE:
3322 /* Canonicalize rotates by constant amount. If op1 is bitsize / 2,
3323 prefer left rotation; if op1 is from bitsize / 2 + 1 to
3324 bitsize - 1, use the other direction of rotate with an
3325 amount of 1 .. bitsize / 2 - 1 instead. */
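/* For instance, in SImode (rotate x (const_int 24)) is canonicalized
   to (rotatert x (const_int 8)).  */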
3326 #if defined(HAVE_rotate) && defined(HAVE_rotatert)
3327 if (CONST_INT_P (trueop1)
3328 && IN_RANGE (INTVAL (trueop1),
3329 GET_MODE_PRECISION (mode) / 2 + (code == ROTATE),
3330 GET_MODE_PRECISION (mode) - 1))
3331 return simplify_gen_binary (code == ROTATE ? ROTATERT : ROTATE,
3332 mode, op0, GEN_INT (GET_MODE_PRECISION (mode)
3333 - INTVAL (trueop1)));
3334 #endif
3335 /* FALLTHRU */
3336 case ASHIFTRT:
3337 if (trueop1 == CONST0_RTX (mode))
3338 return op0;
3339 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3340 return op0;
3341 /* Rotating ~0 always results in ~0. */
3342 if (CONST_INT_P (trueop0) && width <= HOST_BITS_PER_WIDE_INT
3343 && UINTVAL (trueop0) == GET_MODE_MASK (mode)
3344 && ! side_effects_p (op1))
3345 return op0;
3346 /* Given:
3347 scalar modes M1, M2
3348 scalar constants c1, c2
3349 size (M2) > size (M1)
3350 c1 == size (M2) - size (M1)
3351 optimize:
3352 (ashiftrt:M1 (subreg:M1 (lshiftrt:M2 (reg:M2) (const_int <c1>))
3353 <low_part>)
3354 (const_int <c2>))
3356 (subreg:M1 (ashiftrt:M2 (reg:M2) (const_int <c1 + c2>))
3357 <low_part>). */
3358 if (code == ASHIFTRT
3359 && !VECTOR_MODE_P (mode)
3360 && SUBREG_P (op0)
3361 && CONST_INT_P (op1)
3362 && GET_CODE (SUBREG_REG (op0)) == LSHIFTRT
3363 && !VECTOR_MODE_P (GET_MODE (SUBREG_REG (op0)))
3364 && CONST_INT_P (XEXP (SUBREG_REG (op0), 1))
3365 && (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0)))
3366 > GET_MODE_BITSIZE (mode))
3367 && (INTVAL (XEXP (SUBREG_REG (op0), 1))
3368 == (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0)))
3369 - GET_MODE_BITSIZE (mode)))
3370 && subreg_lowpart_p (op0))
3372 rtx tmp = GEN_INT (INTVAL (XEXP (SUBREG_REG (op0), 1))
3373 + INTVAL (op1));
3374 machine_mode inner_mode = GET_MODE (SUBREG_REG (op0));
3375 tmp = simplify_gen_binary (ASHIFTRT,
3376 GET_MODE (SUBREG_REG (op0)),
3377 XEXP (SUBREG_REG (op0), 0),
3378 tmp);
3379 return lowpart_subreg (mode, tmp, inner_mode);
3381 canonicalize_shift:
3382 if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
3384 val = INTVAL (op1) & (GET_MODE_PRECISION (mode) - 1);
3385 if (val != INTVAL (op1))
3386 return simplify_gen_binary (code, mode, op0, GEN_INT (val));
3388 break;
3390 case ASHIFT:
3391 case SS_ASHIFT:
3392 case US_ASHIFT:
3393 if (trueop1 == CONST0_RTX (mode))
3394 return op0;
3395 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3396 return op0;
3397 goto canonicalize_shift;
3399 case LSHIFTRT:
3400 if (trueop1 == CONST0_RTX (mode))
3401 return op0;
3402 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3403 return op0;
3404 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
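/* CLZ of a nonzero value is at most the mode precision minus 1, and the
   CLZ_DEFINED_VALUE_AT_ZERO value checked below equals the precision, so
   shifting right by log2 of the precision gives 1 exactly when X is 0
   (with STORE_FLAG_VALUE of 1).  E.g. for SImode that shift count is 5.  */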
3405 if (GET_CODE (op0) == CLZ
3406 && CONST_INT_P (trueop1)
3407 && STORE_FLAG_VALUE == 1
3408 && INTVAL (trueop1) < (HOST_WIDE_INT)width)
3410 machine_mode imode = GET_MODE (XEXP (op0, 0));
3411 unsigned HOST_WIDE_INT zero_val = 0;
3413 if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
3414 && zero_val == GET_MODE_PRECISION (imode)
3415 && INTVAL (trueop1) == exact_log2 (zero_val))
3416 return simplify_gen_relational (EQ, mode, imode,
3417 XEXP (op0, 0), const0_rtx);
3419 goto canonicalize_shift;
3421 case SMIN:
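/* smin of x and the sign-bit constant (the most negative value) is
   that constant.  */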
3422 if (width <= HOST_BITS_PER_WIDE_INT
3423 && mode_signbit_p (mode, trueop1)
3424 && ! side_effects_p (op0))
3425 return op1;
3426 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3427 return op0;
3428 tem = simplify_associative_operation (code, mode, op0, op1);
3429 if (tem)
3430 return tem;
3431 break;
3433 case SMAX:
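/* smax of x and the maximum signed value (GET_MODE_MASK >> 1) is
   that value.  */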
3434 if (width <= HOST_BITS_PER_WIDE_INT
3435 && CONST_INT_P (trueop1)
3436 && (UINTVAL (trueop1) == GET_MODE_MASK (mode) >> 1)
3437 && ! side_effects_p (op0))
3438 return op1;
3439 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3440 return op0;
3441 tem = simplify_associative_operation (code, mode, op0, op1);
3442 if (tem)
3443 return tem;
3444 break;
3446 case UMIN:
3447 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
3448 return op1;
3449 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3450 return op0;
3451 tem = simplify_associative_operation (code, mode, op0, op1);
3452 if (tem)
3453 return tem;
3454 break;
3456 case UMAX:
3457 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
3458 return op1;
3459 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3460 return op0;
3461 tem = simplify_associative_operation (code, mode, op0, op1);
3462 if (tem)
3463 return tem;
3464 break;
3466 case SS_PLUS:
3467 case US_PLUS:
3468 case SS_MINUS:
3469 case US_MINUS:
3470 case SS_MULT:
3471 case US_MULT:
3472 case SS_DIV:
3473 case US_DIV:
3474 /* ??? There are simplifications that can be done. */
3475 return 0;
3477 case VEC_SELECT:
3478 if (!VECTOR_MODE_P (mode))
3480 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3481 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
3482 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3483 gcc_assert (XVECLEN (trueop1, 0) == 1);
3484 gcc_assert (CONST_INT_P (XVECEXP (trueop1, 0, 0)));
3486 if (GET_CODE (trueop0) == CONST_VECTOR)
3487 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
3488 (trueop1, 0, 0)));
3490 /* Extract a scalar element from a nested VEC_SELECT expression
3491 (with optional nested VEC_CONCAT expression). Some targets
3492 (i386) extract a scalar element from a vector using a chain of
3493 nested VEC_SELECT expressions. When the input operand is a memory
3494 operand, this operation can be simplified to a simple scalar
3495 load from an offset memory address. */
3496 if (GET_CODE (trueop0) == VEC_SELECT)
3498 rtx op0 = XEXP (trueop0, 0);
3499 rtx op1 = XEXP (trueop0, 1);
3501 machine_mode opmode = GET_MODE (op0);
3502 int elt_size = GET_MODE_UNIT_SIZE (opmode);
3503 int n_elts = GET_MODE_SIZE (opmode) / elt_size;
3505 int i = INTVAL (XVECEXP (trueop1, 0, 0));
3506 int elem;
3508 rtvec vec;
3509 rtx tmp_op, tmp;
3511 gcc_assert (GET_CODE (op1) == PARALLEL);
3512 gcc_assert (i < n_elts);
3514 /* Select the element pointed to by the nested selector. */
3515 elem = INTVAL (XVECEXP (op1, 0, i));
3517 /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */
3518 if (GET_CODE (op0) == VEC_CONCAT)
3520 rtx op00 = XEXP (op0, 0);
3521 rtx op01 = XEXP (op0, 1);
3523 machine_mode mode00, mode01;
3524 int n_elts00, n_elts01;
3526 mode00 = GET_MODE (op00);
3527 mode01 = GET_MODE (op01);
3529 /* Find out number of elements of each operand. */
3530 if (VECTOR_MODE_P (mode00))
3532 elt_size = GET_MODE_UNIT_SIZE (mode00);
3533 n_elts00 = GET_MODE_SIZE (mode00) / elt_size;
3535 else
3536 n_elts00 = 1;
3538 if (VECTOR_MODE_P (mode01))
3540 elt_size = GET_MODE_UNIT_SIZE (mode01);
3541 n_elts01 = GET_MODE_SIZE (mode01) / elt_size;
3543 else
3544 n_elts01 = 1;
3546 gcc_assert (n_elts == n_elts00 + n_elts01);
3548 /* Select correct operand of VEC_CONCAT
3549 and adjust selector. */
3550 if (elem < n_elts01)
3551 tmp_op = op00;
3552 else
3554 tmp_op = op01;
3555 elem -= n_elts00;
3558 else
3559 tmp_op = op0;
3561 vec = rtvec_alloc (1);
3562 RTVEC_ELT (vec, 0) = GEN_INT (elem);
3564 tmp = gen_rtx_fmt_ee (code, mode,
3565 tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
3566 return tmp;
3568 if (GET_CODE (trueop0) == VEC_DUPLICATE
3569 && GET_MODE (XEXP (trueop0, 0)) == mode)
3570 return XEXP (trueop0, 0);
3572 else
3574 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3575 gcc_assert (GET_MODE_INNER (mode)
3576 == GET_MODE_INNER (GET_MODE (trueop0)));
3577 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3579 if (GET_CODE (trueop0) == CONST_VECTOR)
3581 int elt_size = GET_MODE_UNIT_SIZE (mode);
3582 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3583 rtvec v = rtvec_alloc (n_elts);
3584 unsigned int i;
3586 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
3587 for (i = 0; i < n_elts; i++)
3589 rtx x = XVECEXP (trueop1, 0, i);
3591 gcc_assert (CONST_INT_P (x));
3592 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
3593 INTVAL (x));
3596 return gen_rtx_CONST_VECTOR (mode, v);
3599 /* Recognize the identity. */
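/* For instance, with a hypothetical register x,
   (vec_select:V4SI (reg:V4SI x)
     (parallel [(const_int 0) (const_int 1) (const_int 2) (const_int 3)]))
   is just (reg:V4SI x).  */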
3600 if (GET_MODE (trueop0) == mode)
3602 bool maybe_ident = true;
3603 for (int i = 0; i < XVECLEN (trueop1, 0); i++)
3605 rtx j = XVECEXP (trueop1, 0, i);
3606 if (!CONST_INT_P (j) || INTVAL (j) != i)
3608 maybe_ident = false;
3609 break;
3612 if (maybe_ident)
3613 return trueop0;
3616 /* If we build {a,b} then permute it, build the result directly. */
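/* A sketch with hypothetical operands a, b, c, d: selecting {3, 0} from
   (vec_concat (vec_concat a b) (vec_concat c d)) is rebuilt directly as
   (vec_concat d a).  */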
3617 if (XVECLEN (trueop1, 0) == 2
3618 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3619 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3620 && GET_CODE (trueop0) == VEC_CONCAT
3621 && GET_CODE (XEXP (trueop0, 0)) == VEC_CONCAT
3622 && GET_MODE (XEXP (trueop0, 0)) == mode
3623 && GET_CODE (XEXP (trueop0, 1)) == VEC_CONCAT
3624 && GET_MODE (XEXP (trueop0, 1)) == mode)
3626 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3627 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3628 rtx subop0, subop1;
3630 gcc_assert (i0 < 4 && i1 < 4);
3631 subop0 = XEXP (XEXP (trueop0, i0 / 2), i0 % 2);
3632 subop1 = XEXP (XEXP (trueop0, i1 / 2), i1 % 2);
3634 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3637 if (XVECLEN (trueop1, 0) == 2
3638 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3639 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3640 && GET_CODE (trueop0) == VEC_CONCAT
3641 && GET_MODE (trueop0) == mode)
3643 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3644 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3645 rtx subop0, subop1;
3647 gcc_assert (i0 < 2 && i1 < 2);
3648 subop0 = XEXP (trueop0, i0);
3649 subop1 = XEXP (trueop0, i1);
3651 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3654 /* If we select one half of a vec_concat, return that. */
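/* E.g. with hypothetical registers x and y,
   (vec_select:V2SI (vec_concat:V4SI (reg:V2SI x) (reg:V2SI y))
     (parallel [(const_int 2) (const_int 3)]))
   is simply (reg:V2SI y).  */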
3655 if (GET_CODE (trueop0) == VEC_CONCAT
3656 && CONST_INT_P (XVECEXP (trueop1, 0, 0)))
3658 rtx subop0 = XEXP (trueop0, 0);
3659 rtx subop1 = XEXP (trueop0, 1);
3660 machine_mode mode0 = GET_MODE (subop0);
3661 machine_mode mode1 = GET_MODE (subop1);
3662 int li = GET_MODE_UNIT_SIZE (mode0);
3663 int l0 = GET_MODE_SIZE (mode0) / li;
3664 int l1 = GET_MODE_SIZE (mode1) / li;
3665 int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3666 if (i0 == 0 && !side_effects_p (op1) && mode == mode0)
3668 bool success = true;
3669 for (int i = 1; i < l0; ++i)
3671 rtx j = XVECEXP (trueop1, 0, i);
3672 if (!CONST_INT_P (j) || INTVAL (j) != i)
3674 success = false;
3675 break;
3678 if (success)
3679 return subop0;
3681 if (i0 == l0 && !side_effects_p (op0) && mode == mode1)
3683 bool success = true;
3684 for (int i = 1; i < l1; ++i)
3686 rtx j = XVECEXP (trueop1, 0, i);
3687 if (!CONST_INT_P (j) || INTVAL (j) != i0 + i)
3689 success = false;
3690 break;
3693 if (success)
3694 return subop1;
3699 if (XVECLEN (trueop1, 0) == 1
3700 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3701 && GET_CODE (trueop0) == VEC_CONCAT)
3703 rtx vec = trueop0;
3704 int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
3706 /* Try to find the element in the VEC_CONCAT. */
3707 while (GET_MODE (vec) != mode
3708 && GET_CODE (vec) == VEC_CONCAT)
3710 HOST_WIDE_INT vec_size;
3712 if (CONST_INT_P (XEXP (vec, 0)))
3714 /* vec_concat of two const_ints doesn't make sense with
3715 respect to modes. */
3716 if (CONST_INT_P (XEXP (vec, 1)))
3717 return 0;
3719 vec_size = GET_MODE_SIZE (GET_MODE (trueop0))
3720 - GET_MODE_SIZE (GET_MODE (XEXP (vec, 1)));
3722 else
3723 vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
3725 if (offset < vec_size)
3726 vec = XEXP (vec, 0);
3727 else
3729 offset -= vec_size;
3730 vec = XEXP (vec, 1);
3732 vec = avoid_constant_pool_reference (vec);
3735 if (GET_MODE (vec) == mode)
3736 return vec;
3739 /* If we select elements in a vec_merge that all come from the same
3740 operand, select from that operand directly. */
3741 if (GET_CODE (op0) == VEC_MERGE)
3743 rtx trueop02 = avoid_constant_pool_reference (XEXP (op0, 2));
3744 if (CONST_INT_P (trueop02))
3746 unsigned HOST_WIDE_INT sel = UINTVAL (trueop02);
3747 bool all_operand0 = true;
3748 bool all_operand1 = true;
3749 for (int i = 0; i < XVECLEN (trueop1, 0); i++)
3751 rtx j = XVECEXP (trueop1, 0, i);
3752 if (sel & (HOST_WIDE_INT_1U << UINTVAL (j)))
3753 all_operand1 = false;
3754 else
3755 all_operand0 = false;
3757 if (all_operand0 && !side_effects_p (XEXP (op0, 1)))
3758 return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 0), op1);
3759 if (all_operand1 && !side_effects_p (XEXP (op0, 0)))
3760 return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 1), op1);
3764 /* If we have two nested selects that are inverses of each
3765 other, replace them with the source operand. */
3766 if (GET_CODE (trueop0) == VEC_SELECT
3767 && GET_MODE (XEXP (trueop0, 0)) == mode)
3769 rtx op0_subop1 = XEXP (trueop0, 1);
3770 gcc_assert (GET_CODE (op0_subop1) == PARALLEL);
3771 gcc_assert (XVECLEN (trueop1, 0) == GET_MODE_NUNITS (mode));
3773 /* Apply the outer ordering vector to the inner one. (The inner
3774 ordering vector is expressly permitted to be of a different
3775 length than the outer one.) If the result is { 0, 1, ..., n-1 }
3776 then the two VEC_SELECTs cancel. */
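/* For instance, an inner selector {1, 0} composed with an outer selector
   {1, 0} yields {0, 1}, so the nested selects cancel and the original
   vector is returned.  */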
3777 for (int i = 0; i < XVECLEN (trueop1, 0); ++i)
3779 rtx x = XVECEXP (trueop1, 0, i);
3780 if (!CONST_INT_P (x))
3781 return 0;
3782 rtx y = XVECEXP (op0_subop1, 0, INTVAL (x));
3783 if (!CONST_INT_P (y) || i != INTVAL (y))
3784 return 0;
3786 return XEXP (trueop0, 0);
3789 return 0;
3790 case VEC_CONCAT:
3792 machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
3793 ? GET_MODE (trueop0)
3794 : GET_MODE_INNER (mode));
3795 machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
3796 ? GET_MODE (trueop1)
3797 : GET_MODE_INNER (mode));
3799 gcc_assert (VECTOR_MODE_P (mode));
3800 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
3801 == GET_MODE_SIZE (mode));
3803 if (VECTOR_MODE_P (op0_mode))
3804 gcc_assert (GET_MODE_INNER (mode)
3805 == GET_MODE_INNER (op0_mode));
3806 else
3807 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
3809 if (VECTOR_MODE_P (op1_mode))
3810 gcc_assert (GET_MODE_INNER (mode)
3811 == GET_MODE_INNER (op1_mode));
3812 else
3813 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
3815 if ((GET_CODE (trueop0) == CONST_VECTOR
3816 || CONST_SCALAR_INT_P (trueop0)
3817 || CONST_DOUBLE_AS_FLOAT_P (trueop0))
3818 && (GET_CODE (trueop1) == CONST_VECTOR
3819 || CONST_SCALAR_INT_P (trueop1)
3820 || CONST_DOUBLE_AS_FLOAT_P (trueop1)))
3822 int elt_size = GET_MODE_UNIT_SIZE (mode);
3823 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3824 rtvec v = rtvec_alloc (n_elts);
3825 unsigned int i;
3826 unsigned in_n_elts = 1;
3828 if (VECTOR_MODE_P (op0_mode))
3829 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
3830 for (i = 0; i < n_elts; i++)
3832 if (i < in_n_elts)
3834 if (!VECTOR_MODE_P (op0_mode))
3835 RTVEC_ELT (v, i) = trueop0;
3836 else
3837 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
3839 else
3841 if (!VECTOR_MODE_P (op1_mode))
3842 RTVEC_ELT (v, i) = trueop1;
3843 else
3844 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
3845 i - in_n_elts);
3849 return gen_rtx_CONST_VECTOR (mode, v);
3852 /* Try to merge two VEC_SELECTs from the same vector into a single one.
3853 Restrict the transformation to avoid generating a VEC_SELECT with a
3854 mode unrelated to its operand. */
3855 if (GET_CODE (trueop0) == VEC_SELECT
3856 && GET_CODE (trueop1) == VEC_SELECT
3857 && rtx_equal_p (XEXP (trueop0, 0), XEXP (trueop1, 0))
3858 && GET_MODE (XEXP (trueop0, 0)) == mode)
3860 rtx par0 = XEXP (trueop0, 1);
3861 rtx par1 = XEXP (trueop1, 1);
3862 int len0 = XVECLEN (par0, 0);
3863 int len1 = XVECLEN (par1, 0);
3864 rtvec vec = rtvec_alloc (len0 + len1);
3865 for (int i = 0; i < len0; i++)
3866 RTVEC_ELT (vec, i) = XVECEXP (par0, 0, i);
3867 for (int i = 0; i < len1; i++)
3868 RTVEC_ELT (vec, len0 + i) = XVECEXP (par1, 0, i);
3869 return simplify_gen_binary (VEC_SELECT, mode, XEXP (trueop0, 0),
3870 gen_rtx_PARALLEL (VOIDmode, vec));
3873 return 0;
3875 default:
3876 gcc_unreachable ();
3879 return 0;
3883 simplify_const_binary_operation (enum rtx_code code, machine_mode mode,
3884 rtx op0, rtx op1)
3886 unsigned int width = GET_MODE_PRECISION (mode);
3888 if (VECTOR_MODE_P (mode)
3889 && code != VEC_CONCAT
3890 && GET_CODE (op0) == CONST_VECTOR
3891 && GET_CODE (op1) == CONST_VECTOR)
3893 unsigned n_elts = GET_MODE_NUNITS (mode);
3894 machine_mode op0mode = GET_MODE (op0);
3895 unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
3896 machine_mode op1mode = GET_MODE (op1);
3897 unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
3898 rtvec v = rtvec_alloc (n_elts);
3899 unsigned int i;
3901 gcc_assert (op0_n_elts == n_elts);
3902 gcc_assert (op1_n_elts == n_elts);
3903 for (i = 0; i < n_elts; i++)
3905 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
3906 CONST_VECTOR_ELT (op0, i),
3907 CONST_VECTOR_ELT (op1, i));
3908 if (!x)
3909 return 0;
3910 RTVEC_ELT (v, i) = x;
3913 return gen_rtx_CONST_VECTOR (mode, v);
3916 if (VECTOR_MODE_P (mode)
3917 && code == VEC_CONCAT
3918 && (CONST_SCALAR_INT_P (op0)
3919 || GET_CODE (op0) == CONST_FIXED
3920 || CONST_DOUBLE_AS_FLOAT_P (op0))
3921 && (CONST_SCALAR_INT_P (op1)
3922 || CONST_DOUBLE_AS_FLOAT_P (op1)
3923 || GET_CODE (op1) == CONST_FIXED))
3925 unsigned n_elts = GET_MODE_NUNITS (mode);
3926 rtvec v = rtvec_alloc (n_elts);
3928 gcc_assert (n_elts >= 2);
3929 if (n_elts == 2)
3931 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
3932 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
3934 RTVEC_ELT (v, 0) = op0;
3935 RTVEC_ELT (v, 1) = op1;
3937 else
3939 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
3940 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
3941 unsigned i;
3943 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
3944 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
3945 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
3947 for (i = 0; i < op0_n_elts; ++i)
3948 RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
3949 for (i = 0; i < op1_n_elts; ++i)
3950 RTVEC_ELT (v, op0_n_elts + i) = XVECEXP (op1, 0, i);
3953 return gen_rtx_CONST_VECTOR (mode, v);
3956 if (SCALAR_FLOAT_MODE_P (mode)
3957 && CONST_DOUBLE_AS_FLOAT_P (op0)
3958 && CONST_DOUBLE_AS_FLOAT_P (op1)
3959 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
3961 if (code == AND
3962 || code == IOR
3963 || code == XOR)
3965 long tmp0[4];
3966 long tmp1[4];
3967 REAL_VALUE_TYPE r;
3968 int i;
3970 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
3971 GET_MODE (op0));
3972 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
3973 GET_MODE (op1));
3974 for (i = 0; i < 4; i++)
3976 switch (code)
3978 case AND:
3979 tmp0[i] &= tmp1[i];
3980 break;
3981 case IOR:
3982 tmp0[i] |= tmp1[i];
3983 break;
3984 case XOR:
3985 tmp0[i] ^= tmp1[i];
3986 break;
3987 default:
3988 gcc_unreachable ();
3991 real_from_target (&r, tmp0, mode);
3992 return const_double_from_real_value (r, mode);
3994 else
3996 REAL_VALUE_TYPE f0, f1, value, result;
3997 const REAL_VALUE_TYPE *opr0, *opr1;
3998 bool inexact;
4000 opr0 = CONST_DOUBLE_REAL_VALUE (op0);
4001 opr1 = CONST_DOUBLE_REAL_VALUE (op1);
4003 if (HONOR_SNANS (mode)
4004 && (REAL_VALUE_ISSIGNALING_NAN (*opr0)
4005 || REAL_VALUE_ISSIGNALING_NAN (*opr1)))
4006 return 0;
4008 real_convert (&f0, mode, opr0);
4009 real_convert (&f1, mode, opr1);
4011 if (code == DIV
4012 && real_equal (&f1, &dconst0)
4013 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
4014 return 0;
4016 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
4017 && flag_trapping_math
4018 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
4020 int s0 = REAL_VALUE_NEGATIVE (f0);
4021 int s1 = REAL_VALUE_NEGATIVE (f1);
4023 switch (code)
4025 case PLUS:
4026 /* Inf + -Inf = NaN plus exception. */
4027 if (s0 != s1)
4028 return 0;
4029 break;
4030 case MINUS:
4031 /* Inf - Inf = NaN plus exception. */
4032 if (s0 == s1)
4033 return 0;
4034 break;
4035 case DIV:
4036 /* Inf / Inf = NaN plus exception. */
4037 return 0;
4038 default:
4039 break;
4043 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
4044 && flag_trapping_math
4045 && ((REAL_VALUE_ISINF (f0) && real_equal (&f1, &dconst0))
4046 || (REAL_VALUE_ISINF (f1)
4047 && real_equal (&f0, &dconst0))))
4048 /* Inf * 0 = NaN plus exception. */
4049 return 0;
4051 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
4052 &f0, &f1);
4053 real_convert (&result, mode, &value);
4055 /* Don't constant fold this floating point operation if
4056 the result has overflowed and flag_trapping_math. */
4058 if (flag_trapping_math
4059 && MODE_HAS_INFINITIES (mode)
4060 && REAL_VALUE_ISINF (result)
4061 && !REAL_VALUE_ISINF (f0)
4062 && !REAL_VALUE_ISINF (f1))
4063 /* Overflow plus exception. */
4064 return 0;
4066 /* Don't constant fold this floating point operation if the
4067 result may depend upon the run-time rounding mode and
4068 flag_rounding_math is set, or if GCC's software emulation
4069 is unable to accurately represent the result. */
4071 if ((flag_rounding_math
4072 || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
4073 && (inexact || !real_identical (&result, &value)))
4074 return NULL_RTX;
4076 return const_double_from_real_value (result, mode);
4080 /* We can fold some multi-word operations. */
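/* For instance, on a target with TImode, adding two TImode
   CONST_WIDE_INTs goes through wi::add below and the result is
   re-wrapped with immed_wide_int_const.  */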
4081 if ((GET_MODE_CLASS (mode) == MODE_INT
4082 || GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
4083 && CONST_SCALAR_INT_P (op0)
4084 && CONST_SCALAR_INT_P (op1))
4086 wide_int result;
4087 bool overflow;
4088 rtx_mode_t pop0 = rtx_mode_t (op0, mode);
4089 rtx_mode_t pop1 = rtx_mode_t (op1, mode);
4091 #if TARGET_SUPPORTS_WIDE_INT == 0
4092 /* This assert keeps the simplification from producing a result
4093 that cannot be represented in a CONST_DOUBLE but a lot of
4094 upstream callers expect that this function never fails to
4095 simplify something, and so if you added this to the test
4096 above, the code would die later anyway. If this assert
4097 happens, you just need to make the port support wide int. */
4098 gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT);
4099 #endif
4100 switch (code)
4102 case MINUS:
4103 result = wi::sub (pop0, pop1);
4104 break;
4106 case PLUS:
4107 result = wi::add (pop0, pop1);
4108 break;
4110 case MULT:
4111 result = wi::mul (pop0, pop1);
4112 break;
4114 case DIV:
4115 result = wi::div_trunc (pop0, pop1, SIGNED, &overflow);
4116 if (overflow)
4117 return NULL_RTX;
4118 break;
4120 case MOD:
4121 result = wi::mod_trunc (pop0, pop1, SIGNED, &overflow);
4122 if (overflow)
4123 return NULL_RTX;
4124 break;
4126 case UDIV:
4127 result = wi::div_trunc (pop0, pop1, UNSIGNED, &overflow);
4128 if (overflow)
4129 return NULL_RTX;
4130 break;
4132 case UMOD:
4133 result = wi::mod_trunc (pop0, pop1, UNSIGNED, &overflow);
4134 if (overflow)
4135 return NULL_RTX;
4136 break;
4138 case AND:
4139 result = wi::bit_and (pop0, pop1);
4140 break;
4142 case IOR:
4143 result = wi::bit_or (pop0, pop1);
4144 break;
4146 case XOR:
4147 result = wi::bit_xor (pop0, pop1);
4148 break;
4150 case SMIN:
4151 result = wi::smin (pop0, pop1);
4152 break;
4154 case SMAX:
4155 result = wi::smax (pop0, pop1);
4156 break;
4158 case UMIN:
4159 result = wi::umin (pop0, pop1);
4160 break;
4162 case UMAX:
4163 result = wi::umax (pop0, pop1);
4164 break;
4166 case LSHIFTRT:
4167 case ASHIFTRT:
4168 case ASHIFT:
4170 wide_int wop1 = pop1;
4171 if (SHIFT_COUNT_TRUNCATED)
4172 wop1 = wi::umod_trunc (wop1, width);
4173 else if (wi::geu_p (wop1, width))
4174 return NULL_RTX;
4176 switch (code)
4178 case LSHIFTRT:
4179 result = wi::lrshift (pop0, wop1);
4180 break;
4182 case ASHIFTRT:
4183 result = wi::arshift (pop0, wop1);
4184 break;
4186 case ASHIFT:
4187 result = wi::lshift (pop0, wop1);
4188 break;
4190 default:
4191 gcc_unreachable ();
4193 break;
4195 case ROTATE:
4196 case ROTATERT:
4198 if (wi::neg_p (pop1))
4199 return NULL_RTX;
4201 switch (code)
4203 case ROTATE:
4204 result = wi::lrotate (pop0, pop1);
4205 break;
4207 case ROTATERT:
4208 result = wi::rrotate (pop0, pop1);
4209 break;
4211 default:
4212 gcc_unreachable ();
4214 break;
4216 default:
4217 return NULL_RTX;
4219 return immed_wide_int_const (result, mode);
4222 return NULL_RTX;
4227 /* Return a positive integer if X should sort after Y. The value
4228 returned is 1 if and only if X and Y are both regs. */
4230 static int
4231 simplify_plus_minus_op_data_cmp (rtx x, rtx y)
4233 int result;
4235 result = (commutative_operand_precedence (y)
4236 - commutative_operand_precedence (x));
4237 if (result)
4238 return result + result;
4240 /* Group together equal REGs to do more simplification. */
4241 if (REG_P (x) && REG_P (y))
4242 return REGNO (x) > REGNO (y);
4244 return 0;
4247 /* Simplify and canonicalize a PLUS or MINUS, at least one of whose
4248 operands may be another PLUS or MINUS.
4250 Rather than test for specific case, we do this by a brute-force method
4251 and do all possible simplifications until no more changes occur. Then
4252 we rebuild the operation.
4254 May return NULL_RTX when no changes were made. */
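/* A rough illustration with hypothetical registers a, b, c, d:
   (minus (plus (reg a) (reg b)) (minus (reg c) (reg d))) is first
   flattened into the operand list {+a, +b, -c, +d}; pairs of operands
   are then simplified against each other and the expression is rebuilt
   from whatever remains.  */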
4256 static rtx
4257 simplify_plus_minus (enum rtx_code code, machine_mode mode, rtx op0,
4258 rtx op1)
4260 struct simplify_plus_minus_op_data
4262 rtx op;
4263 short neg;
4264 } ops[16];
4265 rtx result, tem;
4266 int n_ops = 2;
4267 int changed, n_constants, canonicalized = 0;
4268 int i, j;
4270 memset (ops, 0, sizeof ops);
4272 /* Set up the two operands and then expand them until nothing has been
4273 changed. If we run out of room in our array, give up; this should
4274 almost never happen. */
4276 ops[0].op = op0;
4277 ops[0].neg = 0;
4278 ops[1].op = op1;
4279 ops[1].neg = (code == MINUS);
4283 changed = 0;
4284 n_constants = 0;
4286 for (i = 0; i < n_ops; i++)
4288 rtx this_op = ops[i].op;
4289 int this_neg = ops[i].neg;
4290 enum rtx_code this_code = GET_CODE (this_op);
4292 switch (this_code)
4294 case PLUS:
4295 case MINUS:
4296 if (n_ops == ARRAY_SIZE (ops))
4297 return NULL_RTX;
4299 ops[n_ops].op = XEXP (this_op, 1);
4300 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
4301 n_ops++;
4303 ops[i].op = XEXP (this_op, 0);
4304 changed = 1;
4305 /* If this operand was negated then we will potentially
4306 canonicalize the expression. Similarly if we don't
4307 place the operands adjacent we're re-ordering the
4308 expression and thus might be performing a
4309 canonicalization. Ignore register re-ordering.
4310 ??? It might be better to shuffle the ops array here,
4311 but then (plus (plus (A, B), plus (C, D))) wouldn't
4312 be seen as non-canonical. */
4313 if (this_neg
4314 || (i != n_ops - 2
4315 && !(REG_P (ops[i].op) && REG_P (ops[n_ops - 1].op))))
4316 canonicalized = 1;
4317 break;
4319 case NEG:
4320 ops[i].op = XEXP (this_op, 0);
4321 ops[i].neg = ! this_neg;
4322 changed = 1;
4323 canonicalized = 1;
4324 break;
4326 case CONST:
4327 if (n_ops != ARRAY_SIZE (ops)
4328 && GET_CODE (XEXP (this_op, 0)) == PLUS
4329 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
4330 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
4332 ops[i].op = XEXP (XEXP (this_op, 0), 0);
4333 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
4334 ops[n_ops].neg = this_neg;
4335 n_ops++;
4336 changed = 1;
4337 canonicalized = 1;
4339 break;
4341 case NOT:
4342 /* ~a -> (-a - 1) */
4343 if (n_ops != ARRAY_SIZE (ops))
4345 ops[n_ops].op = CONSTM1_RTX (mode);
4346 ops[n_ops++].neg = this_neg;
4347 ops[i].op = XEXP (this_op, 0);
4348 ops[i].neg = !this_neg;
4349 changed = 1;
4350 canonicalized = 1;
4352 break;
4354 case CONST_INT:
4355 n_constants++;
4356 if (this_neg)
4358 ops[i].op = neg_const_int (mode, this_op);
4359 ops[i].neg = 0;
4360 changed = 1;
4361 canonicalized = 1;
4363 break;
4365 default:
4366 break;
4370 while (changed);
4372 if (n_constants > 1)
4373 canonicalized = 1;
4375 gcc_assert (n_ops >= 2);
4377 /* If we only have two operands, we can avoid the loops. */
4378 if (n_ops == 2)
4380 enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
4381 rtx lhs, rhs;
4383 /* Get the two operands. Be careful with the order, especially for
4384 the cases where code == MINUS. */
4385 if (ops[0].neg && ops[1].neg)
4387 lhs = gen_rtx_NEG (mode, ops[0].op);
4388 rhs = ops[1].op;
4390 else if (ops[0].neg)
4392 lhs = ops[1].op;
4393 rhs = ops[0].op;
4395 else
4397 lhs = ops[0].op;
4398 rhs = ops[1].op;
4401 return simplify_const_binary_operation (code, mode, lhs, rhs);
4404 /* Now simplify each pair of operands until nothing changes. */
4405 while (1)
4407 /* Insertion sort is good enough for a small array. */
4408 for (i = 1; i < n_ops; i++)
4410 struct simplify_plus_minus_op_data save;
4411 int cmp;
4413 j = i - 1;
4414 cmp = simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op);
4415 if (cmp <= 0)
4416 continue;
4417 /* Just swapping registers doesn't count as canonicalization. */
4418 if (cmp != 1)
4419 canonicalized = 1;
4421 save = ops[i];
4423 ops[j + 1] = ops[j];
4424 while (j--
4425 && simplify_plus_minus_op_data_cmp (ops[j].op, save.op) > 0);
4426 ops[j + 1] = save;
4429 changed = 0;
4430 for (i = n_ops - 1; i > 0; i--)
4431 for (j = i - 1; j >= 0; j--)
4433 rtx lhs = ops[j].op, rhs = ops[i].op;
4434 int lneg = ops[j].neg, rneg = ops[i].neg;
4436 if (lhs != 0 && rhs != 0)
4438 enum rtx_code ncode = PLUS;
4440 if (lneg != rneg)
4442 ncode = MINUS;
4443 if (lneg)
4444 std::swap (lhs, rhs);
4446 else if (swap_commutative_operands_p (lhs, rhs))
4447 std::swap (lhs, rhs);
4449 if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
4450 && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
4452 rtx tem_lhs, tem_rhs;
4454 tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
4455 tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
4456 tem = simplify_binary_operation (ncode, mode, tem_lhs,
4457 tem_rhs);
4459 if (tem && !CONSTANT_P (tem))
4460 tem = gen_rtx_CONST (GET_MODE (tem), tem);
4462 else
4463 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
4465 if (tem)
4467 /* Reject "simplifications" that just wrap the two
4468 arguments in a CONST. Failure to do so can result
4469 in infinite recursion with simplify_binary_operation
4470 when it calls us to simplify CONST operations.
4471 Also, if we find such a simplification, don't try
4472 any more combinations with this rhs: We must have
4473 something like symbol+offset, i.e. one of the
4474 trivial CONST expressions we handle later. */
4475 if (GET_CODE (tem) == CONST
4476 && GET_CODE (XEXP (tem, 0)) == ncode
4477 && XEXP (XEXP (tem, 0), 0) == lhs
4478 && XEXP (XEXP (tem, 0), 1) == rhs)
4479 break;
4480 lneg &= rneg;
4481 if (GET_CODE (tem) == NEG)
4482 tem = XEXP (tem, 0), lneg = !lneg;
4483 if (CONST_INT_P (tem) && lneg)
4484 tem = neg_const_int (mode, tem), lneg = 0;
4486 ops[i].op = tem;
4487 ops[i].neg = lneg;
4488 ops[j].op = NULL_RTX;
4489 changed = 1;
4490 canonicalized = 1;
4495 if (!changed)
4496 break;
4498 /* Pack all the operands to the lower-numbered entries. */
4499 for (i = 0, j = 0; j < n_ops; j++)
4500 if (ops[j].op)
4502 ops[i] = ops[j];
4503 i++;
4505 n_ops = i;
4508 /* If nothing changed, check whether rematerialization of rtl instructions
4509 is still required. */
4510 if (!canonicalized)
4512 /* Perform rematerialization only if all operands are registers and
4513 all operations are PLUS. */
4514 /* ??? Also disallow (non-global, non-frame) fixed registers to work
4515 around rs6000 and how it uses the CA register. See PR67145. */
4516 for (i = 0; i < n_ops; i++)
4517 if (ops[i].neg
4518 || !REG_P (ops[i].op)
4519 || (REGNO (ops[i].op) < FIRST_PSEUDO_REGISTER
4520 && fixed_regs[REGNO (ops[i].op)]
4521 && !global_regs[REGNO (ops[i].op)]
4522 && ops[i].op != frame_pointer_rtx
4523 && ops[i].op != arg_pointer_rtx
4524 && ops[i].op != stack_pointer_rtx))
4525 return NULL_RTX;
4526 goto gen_result;
4529 /* Create (minus -C X) instead of (neg (const (plus X C))). */
4530 if (n_ops == 2
4531 && CONST_INT_P (ops[1].op)
4532 && CONSTANT_P (ops[0].op)
4533 && ops[0].neg)
4534 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
4536 /* We suppressed creation of trivial CONST expressions in the
4537 combination loop to avoid recursion. Create one manually now.
4538 The combination loop should have ensured that there is exactly
4539 one CONST_INT, and the sort will have ensured that it is last
4540 in the array and that any other constant will be next-to-last. */
4542 if (n_ops > 1
4543 && CONST_INT_P (ops[n_ops - 1].op)
4544 && CONSTANT_P (ops[n_ops - 2].op))
4546 rtx value = ops[n_ops - 1].op;
4547 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
4548 value = neg_const_int (mode, value);
4549 if (CONST_INT_P (value))
4551 ops[n_ops - 2].op = plus_constant (mode, ops[n_ops - 2].op,
4552 INTVAL (value));
4553 n_ops--;
4557 /* Put a non-negated operand first, if possible. */
4559 for (i = 0; i < n_ops && ops[i].neg; i++)
4560 continue;
4561 if (i == n_ops)
4562 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
4563 else if (i != 0)
4565 tem = ops[0].op;
4566 ops[0] = ops[i];
4567 ops[i].op = tem;
4568 ops[i].neg = 1;
4571 /* Now make the result by performing the requested operations. */
4572 gen_result:
4573 result = ops[0].op;
4574 for (i = 1; i < n_ops; i++)
4575 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
4576 mode, result, ops[i].op);
4578 return result;
4581 /* Check whether an operand is suitable for calling simplify_plus_minus. */
4582 static bool
4583 plus_minus_operand_p (const_rtx x)
4585 return GET_CODE (x) == PLUS
4586 || GET_CODE (x) == MINUS
4587 || (GET_CODE (x) == CONST
4588 && GET_CODE (XEXP (x, 0)) == PLUS
4589 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
4590 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
4593 /* Like simplify_binary_operation except used for relational operators.
4594 MODE is the mode of the result. If MODE is VOIDmode, both operands must
4595 not also be VOIDmode.
4597 CMP_MODE specifies in which mode the comparison is done, so it is
4598 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
4599 the operands or, if both are VOIDmode, the operands are compared in
4600 "infinite precision". */
4602 simplify_relational_operation (enum rtx_code code, machine_mode mode,
4603 machine_mode cmp_mode, rtx op0, rtx op1)
4605 rtx tem, trueop0, trueop1;
4607 if (cmp_mode == VOIDmode)
4608 cmp_mode = GET_MODE (op0);
4609 if (cmp_mode == VOIDmode)
4610 cmp_mode = GET_MODE (op1);
4612 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
4613 if (tem)
4615 if (SCALAR_FLOAT_MODE_P (mode))
4617 if (tem == const0_rtx)
4618 return CONST0_RTX (mode);
4619 #ifdef FLOAT_STORE_FLAG_VALUE
4621 REAL_VALUE_TYPE val;
4622 val = FLOAT_STORE_FLAG_VALUE (mode);
4623 return const_double_from_real_value (val, mode);
4625 #else
4626 return NULL_RTX;
4627 #endif
4629 if (VECTOR_MODE_P (mode))
4631 if (tem == const0_rtx)
4632 return CONST0_RTX (mode);
4633 #ifdef VECTOR_STORE_FLAG_VALUE
4635 int i, units;
4636 rtvec v;
4638 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
4639 if (val == NULL_RTX)
4640 return NULL_RTX;
4641 if (val == const1_rtx)
4642 return CONST1_RTX (mode);
4644 units = GET_MODE_NUNITS (mode);
4645 v = rtvec_alloc (units);
4646 for (i = 0; i < units; i++)
4647 RTVEC_ELT (v, i) = val;
4648 return gen_rtx_raw_CONST_VECTOR (mode, v);
4650 #else
4651 return NULL_RTX;
4652 #endif
4655 return tem;
4658 /* For the following tests, ensure const0_rtx is op1. */
4659 if (swap_commutative_operands_p (op0, op1)
4660 || (op0 == const0_rtx && op1 != const0_rtx))
4661 std::swap (op0, op1), code = swap_condition (code);
4663 /* If op0 is a compare, extract the comparison arguments from it. */
4664 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4665 return simplify_gen_relational (code, mode, VOIDmode,
4666 XEXP (op0, 0), XEXP (op0, 1));
4668 if (GET_MODE_CLASS (cmp_mode) == MODE_CC
4669 || CC0_P (op0))
4670 return NULL_RTX;
4672 trueop0 = avoid_constant_pool_reference (op0);
4673 trueop1 = avoid_constant_pool_reference (op1);
4674 return simplify_relational_operation_1 (code, mode, cmp_mode,
4675 trueop0, trueop1);
4678 /* This part of simplify_relational_operation is only used when CMP_MODE
4679 is not in class MODE_CC (i.e. it is a real comparison).
4681 MODE is the mode of the result, while CMP_MODE specifies in which
4682 mode the comparison is done, so it is the mode of the operands. */
4684 static rtx
4685 simplify_relational_operation_1 (enum rtx_code code, machine_mode mode,
4686 machine_mode cmp_mode, rtx op0, rtx op1)
4688 enum rtx_code op0code = GET_CODE (op0);
4690 if (op1 == const0_rtx && COMPARISON_P (op0))
4692 /* If op0 is a comparison, extract the comparison arguments
4693 from it. */
4694 if (code == NE)
4696 if (GET_MODE (op0) == mode)
4697 return simplify_rtx (op0);
4698 else
4699 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
4700 XEXP (op0, 0), XEXP (op0, 1));
4702 else if (code == EQ)
4704 enum rtx_code new_code = reversed_comparison_code (op0, NULL);
4705 if (new_code != UNKNOWN)
4706 return simplify_gen_relational (new_code, mode, VOIDmode,
4707 XEXP (op0, 0), XEXP (op0, 1));
4711 /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
4712 (GEU/LTU a -C). Likewise for (LTU/GEU (PLUS a C) a). */
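/* For instance, with a hypothetical register a,
   (ltu (plus (reg a) (const_int 4)) (const_int 4)), the usual unsigned
   overflow test for a + 4, becomes (geu (reg a) (const_int -4)).  */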
4713 if ((code == LTU || code == GEU)
4714 && GET_CODE (op0) == PLUS
4715 && CONST_INT_P (XEXP (op0, 1))
4716 && (rtx_equal_p (op1, XEXP (op0, 0))
4717 || rtx_equal_p (op1, XEXP (op0, 1)))
4718 /* (LTU/GEU (PLUS a 0) 0) is not the same as (GEU/LTU a 0). */
4719 && XEXP (op0, 1) != const0_rtx)
4721 rtx new_cmp
4722 = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
4723 return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
4724 cmp_mode, XEXP (op0, 0), new_cmp);
4727 /* (GTU (PLUS a C) (C - 1)) where C is a non-zero constant can be
4728 transformed into (LTU a -C). */
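/* For instance, with a hypothetical register a,
   (gtu (plus (reg a) (const_int 4)) (const_int 3)) becomes
   (ltu (reg a) (const_int -4)).  */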
4729 if (code == GTU && GET_CODE (op0) == PLUS && CONST_INT_P (op1)
4730 && CONST_INT_P (XEXP (op0, 1))
4731 && (UINTVAL (op1) == UINTVAL (XEXP (op0, 1)) - 1)
4732 && XEXP (op0, 1) != const0_rtx)
4734 rtx new_cmp
4735 = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
4736 return simplify_gen_relational (LTU, mode, cmp_mode,
4737 XEXP (op0, 0), new_cmp);
4740 /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a). */
4741 if ((code == LTU || code == GEU)
4742 && GET_CODE (op0) == PLUS
4743 && rtx_equal_p (op1, XEXP (op0, 1))
4744 /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b). */
4745 && !rtx_equal_p (op1, XEXP (op0, 0)))
4746 return simplify_gen_relational (code, mode, cmp_mode, op0,
4747 copy_rtx (XEXP (op0, 0)));
4749 if (op1 == const0_rtx)
4751 /* Canonicalize (GTU x 0) as (NE x 0). */
4752 if (code == GTU)
4753 return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
4754 /* Canonicalize (LEU x 0) as (EQ x 0). */
4755 if (code == LEU)
4756 return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
4758 else if (op1 == const1_rtx)
4760 switch (code)
4762 case GE:
4763 /* Canonicalize (GE x 1) as (GT x 0). */
4764 return simplify_gen_relational (GT, mode, cmp_mode,
4765 op0, const0_rtx);
4766 case GEU:
4767 /* Canonicalize (GEU x 1) as (NE x 0). */
4768 return simplify_gen_relational (NE, mode, cmp_mode,
4769 op0, const0_rtx);
4770 case LT:
4771 /* Canonicalize (LT x 1) as (LE x 0). */
4772 return simplify_gen_relational (LE, mode, cmp_mode,
4773 op0, const0_rtx);
4774 case LTU:
4775 /* Canonicalize (LTU x 1) as (EQ x 0). */
4776 return simplify_gen_relational (EQ, mode, cmp_mode,
4777 op0, const0_rtx);
4778 default:
4779 break;
4782 else if (op1 == constm1_rtx)
4784 /* Canonicalize (LE x -1) as (LT x 0). */
4785 if (code == LE)
4786 return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
4787 /* Canonicalize (GT x -1) as (GE x 0). */
4788 if (code == GT)
4789 return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
4792 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
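/* For instance, with a hypothetical register x,
   (eq (plus (reg x) (const_int 3)) (const_int 10)) becomes
   (eq (reg x) (const_int 7)).  */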
4793 if ((code == EQ || code == NE)
4794 && (op0code == PLUS || op0code == MINUS)
4795 && CONSTANT_P (op1)
4796 && CONSTANT_P (XEXP (op0, 1))
4797 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
4799 rtx x = XEXP (op0, 0);
4800 rtx c = XEXP (op0, 1);
4801 enum rtx_code invcode = op0code == PLUS ? MINUS : PLUS;
4802 rtx tem = simplify_gen_binary (invcode, cmp_mode, op1, c);
4804 /* Detect an infinite recursive condition, where we oscillate at this
4805 simplification case between:
4806 A + B == C <---> C - B == A,
4807 where A, B, and C are all constants with non-simplifiable expressions,
4808 usually SYMBOL_REFs. */
4809 if (GET_CODE (tem) == invcode
4810 && CONSTANT_P (x)
4811 && rtx_equal_p (c, XEXP (tem, 1)))
4812 return NULL_RTX;
4814 return simplify_gen_relational (code, mode, cmp_mode, x, tem);
4817 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0))) is
4818 the same as (zero_extract:SI FOO (const_int 1) BAR). */
4819 if (code == NE
4820 && op1 == const0_rtx
4821 && GET_MODE_CLASS (mode) == MODE_INT
4822 && cmp_mode != VOIDmode
4823 /* ??? Work-around BImode bugs in the ia64 backend. */
4824 && mode != BImode
4825 && cmp_mode != BImode
4826 && nonzero_bits (op0, cmp_mode) == 1
4827 && STORE_FLAG_VALUE == 1)
4828 return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
4829 ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
4830 : lowpart_subreg (mode, op0, cmp_mode);
4832 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
4833 if ((code == EQ || code == NE)
4834 && op1 == const0_rtx
4835 && op0code == XOR)
4836 return simplify_gen_relational (code, mode, cmp_mode,
4837 XEXP (op0, 0), XEXP (op0, 1));
4839 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
4840 if ((code == EQ || code == NE)
4841 && op0code == XOR
4842 && rtx_equal_p (XEXP (op0, 0), op1)
4843 && !side_effects_p (XEXP (op0, 0)))
4844 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 1),
4845 CONST0_RTX (mode));
4847 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
4848 if ((code == EQ || code == NE)
4849 && op0code == XOR
4850 && rtx_equal_p (XEXP (op0, 1), op1)
4851 && !side_effects_p (XEXP (op0, 1)))
4852 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4853 CONST0_RTX (mode));
4855 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
4856 if ((code == EQ || code == NE)
4857 && op0code == XOR
4858 && CONST_SCALAR_INT_P (op1)
4859 && CONST_SCALAR_INT_P (XEXP (op0, 1)))
4860 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4861 simplify_gen_binary (XOR, cmp_mode,
4862 XEXP (op0, 1), op1));
4864 /* (eq/ne (and x y) x) simplifies to (eq/ne (and (not y) x) 0), which
4865 can be implemented with a BICS instruction on some targets, or
4866 constant-folded if y is a constant. */
4867 if ((code == EQ || code == NE)
4868 && op0code == AND
4869 && rtx_equal_p (XEXP (op0, 0), op1)
4870 && !side_effects_p (op1)
4871 && op1 != CONST0_RTX (cmp_mode))
4873 rtx not_y = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 1), cmp_mode);
4874 rtx lhs = simplify_gen_binary (AND, cmp_mode, not_y, XEXP (op0, 0));
4876 return simplify_gen_relational (code, mode, cmp_mode, lhs,
4877 CONST0_RTX (cmp_mode));
4880 /* Likewise for (eq/ne (and x y) y). */
4881 if ((code == EQ || code == NE)
4882 && op0code == AND
4883 && rtx_equal_p (XEXP (op0, 1), op1)
4884 && !side_effects_p (op1)
4885 && op1 != CONST0_RTX (cmp_mode))
4887 rtx not_x = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 0), cmp_mode);
4888 rtx lhs = simplify_gen_binary (AND, cmp_mode, not_x, XEXP (op0, 1));
4890 return simplify_gen_relational (code, mode, cmp_mode, lhs,
4891 CONST0_RTX (cmp_mode));
4894 /* (eq/ne (bswap x) C1) simplifies to (eq/ne x C2) with C2 swapped. */
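/* For instance, in SImode with a hypothetical register x,
   (eq (bswap (reg x)) (const_int 0x12345678)) becomes
   (eq (reg x) (const_int 0x78563412)).  */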
4895 if ((code == EQ || code == NE)
4896 && GET_CODE (op0) == BSWAP
4897 && CONST_SCALAR_INT_P (op1))
4898 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4899 simplify_gen_unary (BSWAP, cmp_mode,
4900 op1, cmp_mode));
4902 /* (eq/ne (bswap x) (bswap y)) simplifies to (eq/ne x y). */
4903 if ((code == EQ || code == NE)
4904 && GET_CODE (op0) == BSWAP
4905 && GET_CODE (op1) == BSWAP)
4906 return simplify_gen_relational (code, mode, cmp_mode,
4907 XEXP (op0, 0), XEXP (op1, 0));
4909 if (op0code == POPCOUNT && op1 == const0_rtx)
4910 switch (code)
4912 case EQ:
4913 case LE:
4914 case LEU:
4915 /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */
4916 return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
4917 XEXP (op0, 0), const0_rtx);
4919 case NE:
4920 case GT:
4921 case GTU:
4922 /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */
4923 return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
4924 XEXP (op0, 0), const0_rtx);
4926 default:
4927 break;
4930 return NULL_RTX;
4933 enum
4935 CMP_EQ = 1,
4936 CMP_LT = 2,
4937 CMP_GT = 4,
4938 CMP_LTU = 8,
4939 CMP_GTU = 16
4943 /* Convert the known results for EQ, LT, GT, LTU, GTU contained in
4944 KNOWN_RESULT to a CONST_INT, based on the requested comparison CODE
4945 For KNOWN_RESULT to make sense it should be either CMP_EQ, or the
4946 logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
4947 For floating-point comparisons, assume that the operands were ordered. */
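/* For instance, KNOWN_RESULT == (CMP_LT | CMP_GTU) describes operands
   where op0 is below op1 as a signed value but above it as an unsigned
   value, so LT maps to const_true_rtx while LTU maps to const0_rtx.  */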
4949 static rtx
4950 comparison_result (enum rtx_code code, int known_results)
4952 switch (code)
4954 case EQ:
4955 case UNEQ:
4956 return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
4957 case NE:
4958 case LTGT:
4959 return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;
4961 case LT:
4962 case UNLT:
4963 return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
4964 case GE:
4965 case UNGE:
4966 return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;
4968 case GT:
4969 case UNGT:
4970 return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
4971 case LE:
4972 case UNLE:
4973 return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;
4975 case LTU:
4976 return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
4977 case GEU:
4978 return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;
4980 case GTU:
4981 return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
4982 case LEU:
4983 return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;
4985 case ORDERED:
4986 return const_true_rtx;
4987 case UNORDERED:
4988 return const0_rtx;
4989 default:
4990 gcc_unreachable ();
4994 /* Check if the given comparison (done in the given MODE) is actually
4995 a tautology or a contradiction. If the mode is VOIDmode, the
4996 comparison is done in "infinite precision". If no simplification
4997 is possible, this function returns zero. Otherwise, it returns
4998 either const_true_rtx or const0_rtx. */
5001 simplify_const_relational_operation (enum rtx_code code,
5002 machine_mode mode,
5003 rtx op0, rtx op1)
5005 rtx tem;
5006 rtx trueop0;
5007 rtx trueop1;
5009 gcc_assert (mode != VOIDmode
5010 || (GET_MODE (op0) == VOIDmode
5011 && GET_MODE (op1) == VOIDmode));
5013 /* If op0 is a compare, extract the comparison arguments from it. */
5014 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
5016 op1 = XEXP (op0, 1);
5017 op0 = XEXP (op0, 0);
5019 if (GET_MODE (op0) != VOIDmode)
5020 mode = GET_MODE (op0);
5021 else if (GET_MODE (op1) != VOIDmode)
5022 mode = GET_MODE (op1);
5023 else
5024 return 0;
5027 /* We can't simplify MODE_CC values since we don't know what the
5028 actual comparison is. */
5029 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
5030 return 0;
5032 /* Make sure the constant is second. */
5033 if (swap_commutative_operands_p (op0, op1))
5035 std::swap (op0, op1);
5036 code = swap_condition (code);
5039 trueop0 = avoid_constant_pool_reference (op0);
5040 trueop1 = avoid_constant_pool_reference (op1);
5042 /* For integer comparisons of A and B maybe we can simplify A - B and can
5043 then simplify a comparison of that with zero. If A and B are both either
5044 a register or a CONST_INT, this can't help; testing for these cases will
5045 prevent infinite recursion here and speed things up.
5047 We can only do this for EQ and NE comparisons as otherwise we may
5048 lose or introduce overflow which we cannot disregard as undefined as
5049 we do not know the signedness of the operation on either the left or
5050 the right hand side of the comparison. */
5052 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
5053 && (code == EQ || code == NE)
5054 && ! ((REG_P (op0) || CONST_INT_P (trueop0))
5055 && (REG_P (op1) || CONST_INT_P (trueop1)))
5056 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
5057 /* We cannot do this if tem is a nonzero address. */
5058 && ! nonzero_address_p (tem))
5059 return simplify_const_relational_operation (signed_condition (code),
5060 mode, tem, const0_rtx);
5062 if (! HONOR_NANS (mode) && code == ORDERED)
5063 return const_true_rtx;
5065 if (! HONOR_NANS (mode) && code == UNORDERED)
5066 return const0_rtx;
5068 /* For modes without NaNs, if the two operands are equal, we know the
5069 result except if they have side-effects. Even with NaNs we know
5070 the result of unordered comparisons and, if signaling NaNs are
5071 irrelevant, also the result of LT/GT/LTGT. */
5072 if ((! HONOR_NANS (trueop0)
5073 || code == UNEQ || code == UNLE || code == UNGE
5074 || ((code == LT || code == GT || code == LTGT)
5075 && ! HONOR_SNANS (trueop0)))
5076 && rtx_equal_p (trueop0, trueop1)
5077 && ! side_effects_p (trueop0))
5078 return comparison_result (code, CMP_EQ);
5080 /* If the operands are floating-point constants, see if we can fold
5081 the result. */
5082 if (CONST_DOUBLE_AS_FLOAT_P (trueop0)
5083 && CONST_DOUBLE_AS_FLOAT_P (trueop1)
5084 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
5086 const REAL_VALUE_TYPE *d0 = CONST_DOUBLE_REAL_VALUE (trueop0);
5087 const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);
5089 /* Comparisons are unordered iff at least one of the values is NaN. */
5090 if (REAL_VALUE_ISNAN (*d0) || REAL_VALUE_ISNAN (*d1))
5091 switch (code)
5093 case UNEQ:
5094 case UNLT:
5095 case UNGT:
5096 case UNLE:
5097 case UNGE:
5098 case NE:
5099 case UNORDERED:
5100 return const_true_rtx;
5101 case EQ:
5102 case LT:
5103 case GT:
5104 case LE:
5105 case GE:
5106 case LTGT:
5107 case ORDERED:
5108 return const0_rtx;
5109 default:
5110 return 0;
5113 return comparison_result (code,
5114 (real_equal (d0, d1) ? CMP_EQ :
5115 real_less (d0, d1) ? CMP_LT : CMP_GT));
5118 /* Otherwise, see if the operands are both integers. */
5119 if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
5120 && CONST_SCALAR_INT_P (trueop0) && CONST_SCALAR_INT_P (trueop1))
5122 /* It would be nice if we really had a mode here. However, the
5123 largest int representable on the target is as good as
5124 infinite. */
5125 machine_mode cmode = (mode == VOIDmode) ? MAX_MODE_INT : mode;
5126 rtx_mode_t ptrueop0 = rtx_mode_t (trueop0, cmode);
5127 rtx_mode_t ptrueop1 = rtx_mode_t (trueop1, cmode);
5129 if (wi::eq_p (ptrueop0, ptrueop1))
5130 return comparison_result (code, CMP_EQ);
5131 else
5133 int cr = wi::lts_p (ptrueop0, ptrueop1) ? CMP_LT : CMP_GT;
5134 cr |= wi::ltu_p (ptrueop0, ptrueop1) ? CMP_LTU : CMP_GTU;
5135 return comparison_result (code, cr);
5139 /* Optimize comparisons with upper and lower bounds. */
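/* For instance, if nonzero_bits shows that only the low 16 bits of
   TRUEOP0 can be set in SImode, then mmax is 0xffff and a comparison
   such as (gtu x (const_int 0x10000)) folds to const0_rtx
   (hypothetical operand x).  */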
5140 if (HWI_COMPUTABLE_MODE_P (mode)
5141 && CONST_INT_P (trueop1)
5142 && !side_effects_p (trueop0))
5144 int sign;
5145 unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, mode);
5146 HOST_WIDE_INT val = INTVAL (trueop1);
5147 HOST_WIDE_INT mmin, mmax;
5149 if (code == GEU
5150 || code == LEU
5151 || code == GTU
5152 || code == LTU)
5153 sign = 0;
5154 else
5155 sign = 1;
5157 /* Get a reduced range if the sign bit is zero. */
5158 if (nonzero <= (GET_MODE_MASK (mode) >> 1))
5160 mmin = 0;
5161 mmax = nonzero;
5163 else
5165 rtx mmin_rtx, mmax_rtx;
5166 get_mode_bounds (mode, sign, mode, &mmin_rtx, &mmax_rtx);
5168 mmin = INTVAL (mmin_rtx);
5169 mmax = INTVAL (mmax_rtx);
5170 if (sign)
5172 unsigned int sign_copies = num_sign_bit_copies (trueop0, mode);
5174 mmin >>= (sign_copies - 1);
5175 mmax >>= (sign_copies - 1);
5179 switch (code)
5181 /* x >= y is always true for y <= mmin, always false for y > mmax. */
5182 case GEU:
5183 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
5184 return const_true_rtx;
5185 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
5186 return const0_rtx;
5187 break;
5188 case GE:
5189 if (val <= mmin)
5190 return const_true_rtx;
5191 if (val > mmax)
5192 return const0_rtx;
5193 break;
5195 /* x <= y is always true for y >= mmax, always false for y < mmin. */
5196 case LEU:
5197 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
5198 return const_true_rtx;
5199 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
5200 return const0_rtx;
5201 break;
5202 case LE:
5203 if (val >= mmax)
5204 return const_true_rtx;
5205 if (val < mmin)
5206 return const0_rtx;
5207 break;
5209 case EQ:
5210 /* x == y is always false for y out of range. */
5211 if (val < mmin || val > mmax)
5212 return const0_rtx;
5213 break;
5215 /* x > y is always false for y >= mmax, always true for y < mmin. */
5216 case GTU:
5217 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
5218 return const0_rtx;
5219 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
5220 return const_true_rtx;
5221 break;
5222 case GT:
5223 if (val >= mmax)
5224 return const0_rtx;
5225 if (val < mmin)
5226 return const_true_rtx;
5227 break;
5229 /* x < y is always false for y <= mmin, always true for y > mmax. */
5230 case LTU:
5231 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
5232 return const0_rtx;
5233 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
5234 return const_true_rtx;
5235 break;
5236 case LT:
5237 if (val <= mmin)
5238 return const0_rtx;
5239 if (val > mmax)
5240 return const_true_rtx;
5241 break;
5243 case NE:
5244 /* x != y is always true for y out of range. */
5245 if (val < mmin || val > mmax)
5246 return const_true_rtx;
5247 break;
5249 default:
5250 break;
5254 /* Optimize integer comparisons with zero. */
5255 if (trueop1 == const0_rtx && !side_effects_p (trueop0))
5257 /* Some addresses are known to be nonzero. We don't know
5258 their sign, but equality comparisons are known. */
5259 if (nonzero_address_p (trueop0))
5261 if (code == EQ || code == LEU)
5262 return const0_rtx;
5263 if (code == NE || code == GTU)
5264 return const_true_rtx;
5267 /* See if the first operand is an IOR with a constant. If so, we
5268 may be able to determine the result of this comparison. */
5269 if (GET_CODE (op0) == IOR)
5271 rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
5272 if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
5274 int sign_bitnum = GET_MODE_PRECISION (mode) - 1;
5275 int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
5276 && (UINTVAL (inner_const)
5277 & (HOST_WIDE_INT_1U
5278 << sign_bitnum)));
5280 switch (code)
5282 case EQ:
5283 case LEU:
5284 return const0_rtx;
5285 case NE:
5286 case GTU:
5287 return const_true_rtx;
5288 case LT:
5289 case LE:
5290 if (has_sign)
5291 return const_true_rtx;
5292 break;
5293 case GT:
5294 case GE:
5295 if (has_sign)
5296 return const0_rtx;
5297 break;
5298 default:
5299 break;
5305 /* Optimize comparison of ABS with zero. */
5306 if (trueop1 == CONST0_RTX (mode) && !side_effects_p (trueop0)
5307 && (GET_CODE (trueop0) == ABS
5308 || (GET_CODE (trueop0) == FLOAT_EXTEND
5309 && GET_CODE (XEXP (trueop0, 0)) == ABS)))
5311 switch (code)
5313 case LT:
5314 /* Optimize abs(x) < 0.0. */
5315 if (!HONOR_SNANS (mode)
5316 && (!INTEGRAL_MODE_P (mode)
5317 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
5319 if (INTEGRAL_MODE_P (mode)
5320 && (issue_strict_overflow_warning
5321 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
5322 warning (OPT_Wstrict_overflow,
5323 ("assuming signed overflow does not occur when "
5324 "assuming abs (x) < 0 is false"));
5325 return const0_rtx;
5327 break;
5329 case GE:
5330 /* Optimize abs(x) >= 0.0. */
5331 if (!HONOR_NANS (mode)
5332 && (!INTEGRAL_MODE_P (mode)
5333 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
5335 if (INTEGRAL_MODE_P (mode)
5336 && (issue_strict_overflow_warning
5337 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
5338 warning (OPT_Wstrict_overflow,
5339 ("assuming signed overflow does not occur when "
5340 "assuming abs (x) >= 0 is true"));
5341 return const_true_rtx;
5343 break;
5345 case UNGE:
5346 /* Optimize ! (abs(x) < 0.0). */
5347 return const_true_rtx;
5349 default:
5350 break;
5354 return 0;
5357 /* Recognize expressions of the form (X CMP 0) ? VAL : OP (X)
5358 where OP is CLZ or CTZ and VAL is the value from CLZ_DEFINED_VALUE_AT_ZERO
5359 or CTZ_DEFINED_VALUE_AT_ZERO respectively. Return OP (X) if the expression
5360 can be simplified to that, or NULL_RTX if not.
5361 Assume X is compared against zero with CMP_CODE and the true
5362 arm is TRUE_VAL and the false arm is FALSE_VAL. */
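/* For instance, on a target whose CLZ_DEFINED_VALUE_AT_ZERO yields 32
   for SImode, (x == 0) ? 32 : (clz x) collapses to just (clz x).  */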
5364 static rtx
5365 simplify_cond_clz_ctz (rtx x, rtx_code cmp_code, rtx true_val, rtx false_val)
5367 if (cmp_code != EQ && cmp_code != NE)
5368 return NULL_RTX;
5370 /* Result on X == 0 and X !=0 respectively. */
5371 rtx on_zero, on_nonzero;
5372 if (cmp_code == EQ)
5374 on_zero = true_val;
5375 on_nonzero = false_val;
5377 else
5379 on_zero = false_val;
5380 on_nonzero = true_val;
5383 rtx_code op_code = GET_CODE (on_nonzero);
5384 if ((op_code != CLZ && op_code != CTZ)
5385 || !rtx_equal_p (XEXP (on_nonzero, 0), x)
5386 || !CONST_INT_P (on_zero))
5387 return NULL_RTX;
5389 HOST_WIDE_INT op_val;
5390 if (((op_code == CLZ
5391 && CLZ_DEFINED_VALUE_AT_ZERO (GET_MODE (on_nonzero), op_val))
5392 || (op_code == CTZ
5393 && CTZ_DEFINED_VALUE_AT_ZERO (GET_MODE (on_nonzero), op_val)))
5394 && op_val == INTVAL (on_zero))
5395 return on_nonzero;
5397 return NULL_RTX;
5401 /* Simplify CODE, an operation with result mode MODE and three operands,
5402 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
5403 a constant. Return 0 if no simplification is possible. */
5406 simplify_ternary_operation (enum rtx_code code, machine_mode mode,
5407 machine_mode op0_mode, rtx op0, rtx op1,
5408 rtx op2)
5410 unsigned int width = GET_MODE_PRECISION (mode);
5411 bool any_change = false;
5412 rtx tem, trueop2;
5414 /* VOIDmode means "infinite" precision. */
5415 if (width == 0)
5416 width = HOST_BITS_PER_WIDE_INT;
5418 switch (code)
5420 case FMA:
5421 /* Simplify negations around the multiplication. */
5422 /* -a * -b + c => a * b + c. */
5423 if (GET_CODE (op0) == NEG)
5425 tem = simplify_unary_operation (NEG, mode, op1, mode);
5426 if (tem)
5427 op1 = tem, op0 = XEXP (op0, 0), any_change = true;
5429 else if (GET_CODE (op1) == NEG)
5431 tem = simplify_unary_operation (NEG, mode, op0, mode);
5432 if (tem)
5433 op0 = tem, op1 = XEXP (op1, 0), any_change = true;
5436 /* Canonicalize the two multiplication operands. */
5437 /* a * -b + c => -b * a + c. */
5438 if (swap_commutative_operands_p (op0, op1))
5439 std::swap (op0, op1), any_change = true;
5441 if (any_change)
5442 return gen_rtx_FMA (mode, op0, op1, op2);
5443 return NULL_RTX;
5445 case SIGN_EXTRACT:
5446 case ZERO_EXTRACT:
5447 if (CONST_INT_P (op0)
5448 && CONST_INT_P (op1)
5449 && CONST_INT_P (op2)
5450 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
5451 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
5453 /* Extracting a bit-field from a constant */
5454 unsigned HOST_WIDE_INT val = UINTVAL (op0);
5455 HOST_WIDE_INT op1val = INTVAL (op1);
5456 HOST_WIDE_INT op2val = INTVAL (op2);
5457 if (BITS_BIG_ENDIAN)
5458 val >>= GET_MODE_PRECISION (op0_mode) - op2val - op1val;
5459 else
5460 val >>= op2val;
5462 if (HOST_BITS_PER_WIDE_INT != op1val)
5464 /* First zero-extend. */
5465 val &= (HOST_WIDE_INT_1U << op1val) - 1;
5466 /* If desired, propagate sign bit. */
5467 if (code == SIGN_EXTRACT
5468 && (val & (HOST_WIDE_INT_1U << (op1val - 1)))
5469 != 0)
5470 val |= ~ ((HOST_WIDE_INT_1U << op1val) - 1);
5473 return gen_int_mode (val, mode);
5475 break;
5477 case IF_THEN_ELSE:
5478 if (CONST_INT_P (op0))
5479 return op0 != const0_rtx ? op1 : op2;
5481 /* Convert c ? a : a into "a". */
5482 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
5483 return op1;
5485 /* Convert a != b ? a : b into "a". */
5486 if (GET_CODE (op0) == NE
5487 && ! side_effects_p (op0)
5488 && ! HONOR_NANS (mode)
5489 && ! HONOR_SIGNED_ZEROS (mode)
5490 && ((rtx_equal_p (XEXP (op0, 0), op1)
5491 && rtx_equal_p (XEXP (op0, 1), op2))
5492 || (rtx_equal_p (XEXP (op0, 0), op2)
5493 && rtx_equal_p (XEXP (op0, 1), op1))))
5494 return op1;
5496 /* Convert a == b ? a : b into "b". */
5497 if (GET_CODE (op0) == EQ
5498 && ! side_effects_p (op0)
5499 && ! HONOR_NANS (mode)
5500 && ! HONOR_SIGNED_ZEROS (mode)
5501 && ((rtx_equal_p (XEXP (op0, 0), op1)
5502 && rtx_equal_p (XEXP (op0, 1), op2))
5503 || (rtx_equal_p (XEXP (op0, 0), op2)
5504 && rtx_equal_p (XEXP (op0, 1), op1))))
5505 return op2;
5507 /* Convert (!c) != {0,...,0} ? a : b into
5508 c != {0,...,0} ? b : a for vector modes. */
5509 if (VECTOR_MODE_P (GET_MODE (op1))
5510 && GET_CODE (op0) == NE
5511 && GET_CODE (XEXP (op0, 0)) == NOT
5512 && GET_CODE (XEXP (op0, 1)) == CONST_VECTOR)
5514 rtx cv = XEXP (op0, 1);
5515 int nunits = CONST_VECTOR_NUNITS (cv);
5516 bool ok = true;
5517 for (int i = 0; i < nunits; ++i)
5518 if (CONST_VECTOR_ELT (cv, i) != const0_rtx)
5520 ok = false;
5521 break;
5523 if (ok)
5525 rtx new_op0 = gen_rtx_NE (GET_MODE (op0),
5526 XEXP (XEXP (op0, 0), 0),
5527 XEXP (op0, 1));
5528 rtx retval = gen_rtx_IF_THEN_ELSE (mode, new_op0, op2, op1);
5529 return retval;
5533 /* Convert x == 0 ? N : clz (x) into clz (x) when
5534 CLZ_DEFINED_VALUE_AT_ZERO is defined to N for the mode of x.
5535 Similarly for ctz (x). */
5536 if (COMPARISON_P (op0) && !side_effects_p (op0)
5537 && XEXP (op0, 1) == const0_rtx)
5539 rtx simplified
5540 = simplify_cond_clz_ctz (XEXP (op0, 0), GET_CODE (op0),
5541 op1, op2);
5542 if (simplified)
5543 return simplified;
5546 if (COMPARISON_P (op0) && ! side_effects_p (op0))
5548 machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
5549 ? GET_MODE (XEXP (op0, 1))
5550 : GET_MODE (XEXP (op0, 0)));
5551 rtx temp;
5553 /* Look for happy constants in op1 and op2. */
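/* For instance, assuming STORE_FLAG_VALUE == 1 and an integer
   comparison, (lt a b) ? 1 : 0 is just (lt a b), and (lt a b) ? 0 : 1
   becomes the reversed comparison (ge a b).  */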
5554 if (CONST_INT_P (op1) && CONST_INT_P (op2))
5556 HOST_WIDE_INT t = INTVAL (op1);
5557 HOST_WIDE_INT f = INTVAL (op2);
5559 if (t == STORE_FLAG_VALUE && f == 0)
5560 code = GET_CODE (op0);
5561 else if (t == 0 && f == STORE_FLAG_VALUE)
5563 enum rtx_code tmp;
5564 tmp = reversed_comparison_code (op0, NULL);
5565 if (tmp == UNKNOWN)
5566 break;
5567 code = tmp;
5569 else
5570 break;
5572 return simplify_gen_relational (code, mode, cmp_mode,
5573 XEXP (op0, 0), XEXP (op0, 1));
5576 if (cmp_mode == VOIDmode)
5577 cmp_mode = op0_mode;
5578 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
5579 cmp_mode, XEXP (op0, 0),
5580 XEXP (op0, 1));
5582 /* See if any simplifications were possible. */
5583 if (temp)
5585 if (CONST_INT_P (temp))
5586 return temp == const0_rtx ? op2 : op1;
5587 else if (temp)
5588 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
5591 break;
5593 case VEC_MERGE:
5594 gcc_assert (GET_MODE (op0) == mode);
5595 gcc_assert (GET_MODE (op1) == mode);
5596 gcc_assert (VECTOR_MODE_P (mode));
5597 trueop2 = avoid_constant_pool_reference (op2);
5598 if (CONST_INT_P (trueop2))
5600 int elt_size = GET_MODE_UNIT_SIZE (mode);
5601 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
5602 unsigned HOST_WIDE_INT sel = UINTVAL (trueop2);
5603 unsigned HOST_WIDE_INT mask;
5604 if (n_elts == HOST_BITS_PER_WIDE_INT)
5605 mask = -1;
5606 else
5607 mask = (HOST_WIDE_INT_1U << n_elts) - 1;
5609 if (!(sel & mask) && !side_effects_p (op0))
5610 return op1;
5611 if ((sel & mask) == mask && !side_effects_p (op1))
5612 return op0;
5614 rtx trueop0 = avoid_constant_pool_reference (op0);
5615 rtx trueop1 = avoid_constant_pool_reference (op1);
5616 if (GET_CODE (trueop0) == CONST_VECTOR
5617 && GET_CODE (trueop1) == CONST_VECTOR)
5619 rtvec v = rtvec_alloc (n_elts);
5620 unsigned int i;
5622 for (i = 0; i < n_elts; i++)
5623 RTVEC_ELT (v, i) = ((sel & (HOST_WIDE_INT_1U << i))
5624 ? CONST_VECTOR_ELT (trueop0, i)
5625 : CONST_VECTOR_ELT (trueop1, i));
5626 return gen_rtx_CONST_VECTOR (mode, v);
5629 /* Replace (vec_merge (vec_merge a b m) c n) with (vec_merge b c n)
5630 if no element from a appears in the result. */
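/* For instance, in a four-element mode with hypothetical operands a, b
   and c: if the outer mask selects lanes {0, 1} from the inner merge
   while the inner mask routes lanes {2, 3} to a, no lane of a survives,
   so the inner merge can be replaced by b.  */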
5631 if (GET_CODE (op0) == VEC_MERGE)
5633 tem = avoid_constant_pool_reference (XEXP (op0, 2));
5634 if (CONST_INT_P (tem))
5636 unsigned HOST_WIDE_INT sel0 = UINTVAL (tem);
5637 if (!(sel & sel0 & mask) && !side_effects_p (XEXP (op0, 0)))
5638 return simplify_gen_ternary (code, mode, mode,
5639 XEXP (op0, 1), op1, op2);
5640 if (!(sel & ~sel0 & mask) && !side_effects_p (XEXP (op0, 1)))
5641 return simplify_gen_ternary (code, mode, mode,
5642 XEXP (op0, 0), op1, op2);
5645 if (GET_CODE (op1) == VEC_MERGE)
5647 tem = avoid_constant_pool_reference (XEXP (op1, 2));
5648 if (CONST_INT_P (tem))
5650 unsigned HOST_WIDE_INT sel1 = UINTVAL (tem);
5651 if (!(~sel & sel1 & mask) && !side_effects_p (XEXP (op1, 0)))
5652 return simplify_gen_ternary (code, mode, mode,
5653 op0, XEXP (op1, 1), op2);
5654 if (!(~sel & ~sel1 & mask) && !side_effects_p (XEXP (op1, 1)))
5655 return simplify_gen_ternary (code, mode, mode,
5656 op0, XEXP (op1, 0), op2);
5660 /* Replace (vec_merge (vec_duplicate (vec_select a parallel (i))) a 1 << i)
5661 with a. */
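/* That pattern reinserts element I of A into lane I of A, which leaves
   A unchanged, so the whole VEC_MERGE is just A (op1 here).  */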
5662 if (GET_CODE (op0) == VEC_DUPLICATE
5663 && GET_CODE (XEXP (op0, 0)) == VEC_SELECT
5664 && GET_CODE (XEXP (XEXP (op0, 0), 1)) == PARALLEL
5665 && mode_nunits[GET_MODE (XEXP (op0, 0))] == 1)
5667 tem = XVECEXP ((XEXP (XEXP (op0, 0), 1)), 0, 0);
5668 if (CONST_INT_P (tem) && CONST_INT_P (op2))
5670 if (XEXP (XEXP (op0, 0), 0) == op1
5671 && UINTVAL (op2) == HOST_WIDE_INT_1U << UINTVAL (tem))
5672 return op1;
5677 if (rtx_equal_p (op0, op1)
5678 && !side_effects_p (op2) && !side_effects_p (op1))
5679 return op0;
5681 break;
5683 default:
5684 gcc_unreachable ();
5687 return 0;
5690 /* Evaluate a SUBREG of a CONST_INT or CONST_WIDE_INT or CONST_DOUBLE
5691 or CONST_FIXED or CONST_VECTOR, returning another CONST_INT or
5692 CONST_WIDE_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.
5694 Works by unpacking OP into a collection of 8-bit values
5695 represented as a little-endian array of 'unsigned char', selecting by BYTE,
5696 and then repacking them again for OUTERMODE. */
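/* A small illustration, assuming a little-endian target:
   simplify_immed_subreg (HImode, (const_int 0x12345678), SImode, 0)
   unpacks the value into the byte array {0x78, 0x56, 0x34, 0x12},
   selects the two bytes starting at BYTE 0, and repacks them as
   (const_int 0x5678).  */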
5698 static rtx
5699 simplify_immed_subreg (machine_mode outermode, rtx op,
5700 machine_mode innermode, unsigned int byte)
5702 enum {
5703 value_bit = 8,
5704 value_mask = (1 << value_bit) - 1
5706 unsigned char value[MAX_BITSIZE_MODE_ANY_MODE / value_bit];
5707 int value_start;
5708 int i;
5709 int elem;
5711 int num_elem;
5712 rtx * elems;
5713 int elem_bitsize;
5714 rtx result_s = NULL;
5715 rtvec result_v = NULL;
5716 enum mode_class outer_class;
5717 machine_mode outer_submode;
5718 int max_bitsize;
5720 /* Some ports misuse CCmode. */
5721 if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
5722 return op;
5724 /* We have no way to represent a complex constant at the rtl level. */
5725 if (COMPLEX_MODE_P (outermode))
5726 return NULL_RTX;
5728 /* We support any size mode. */
5729 max_bitsize = MAX (GET_MODE_BITSIZE (outermode),
5730 GET_MODE_BITSIZE (innermode));
5732 /* Unpack the value. */
5734 if (GET_CODE (op) == CONST_VECTOR)
5736 num_elem = CONST_VECTOR_NUNITS (op);
5737 elems = &CONST_VECTOR_ELT (op, 0);
5738 elem_bitsize = GET_MODE_UNIT_BITSIZE (innermode);
5740 else
5742 num_elem = 1;
5743 elems = &op;
5744 elem_bitsize = max_bitsize;
5746 /* If this asserts, it is too complicated; reducing value_bit may help. */
5747 gcc_assert (BITS_PER_UNIT % value_bit == 0);
5748 /* I don't know how to handle endianness of sub-units. */
5749 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
5751 for (elem = 0; elem < num_elem; elem++)
5753 unsigned char * vp;
5754 rtx el = elems[elem];
5756 /* Vectors are kept in target memory order. (This is probably
5757 a mistake.) */
5759 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5760 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5761 / BITS_PER_UNIT);
5762 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5763 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5764 unsigned bytele = (subword_byte % UNITS_PER_WORD
5765 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5766 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
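/* BYTELE is this element's byte offset measured from the least
   significant end of the whole value, so VALUE is built up as a
   little-endian image of OP regardless of the target's byte and
   word order.  */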
5769 switch (GET_CODE (el))
5771 case CONST_INT:
5772 for (i = 0;
5773 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5774 i += value_bit)
5775 *vp++ = INTVAL (el) >> i;
5776 /* CONST_INTs are always logically sign-extended. */
5777 for (; i < elem_bitsize; i += value_bit)
5778 *vp++ = INTVAL (el) < 0 ? -1 : 0;
5779 break;
5781 case CONST_WIDE_INT:
5783 rtx_mode_t val = rtx_mode_t (el, innermode);
5784 unsigned char extend = wi::sign_mask (val);
5785 int prec = wi::get_precision (val);
5787 for (i = 0; i < prec && i < elem_bitsize; i += value_bit)
5788 *vp++ = wi::extract_uhwi (val, i, value_bit);
5789 for (; i < elem_bitsize; i += value_bit)
5790 *vp++ = extend;
5792 break;
5794 case CONST_DOUBLE:
5795 if (TARGET_SUPPORTS_WIDE_INT == 0 && GET_MODE (el) == VOIDmode)
5797 unsigned char extend = 0;
5798 /* If this triggers, someone should have generated a
5799 CONST_INT instead. */
5800 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
5802 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5803 *vp++ = CONST_DOUBLE_LOW (el) >> i;
5804 while (i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize)
5806 *vp++
5807 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
5808 i += value_bit;
5811 if (CONST_DOUBLE_HIGH (el) >> (HOST_BITS_PER_WIDE_INT - 1))
5812 extend = -1;
5813 for (; i < elem_bitsize; i += value_bit)
5814 *vp++ = extend;
5816 else
5818 /* This is big enough for anything on the platform. */
5819 long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32];
5820 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
5822 gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
5823 gcc_assert (bitsize <= elem_bitsize);
5824 gcc_assert (bitsize % value_bit == 0);
5826 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
5827 GET_MODE (el));
5829 /* real_to_target produces its result in words affected by
5830 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5831 and use WORDS_BIG_ENDIAN instead; see the documentation
5832 of SUBREG in rtl.texi. */
5833 for (i = 0; i < bitsize; i += value_bit)
5835 int ibase;
5836 if (WORDS_BIG_ENDIAN)
5837 ibase = bitsize - 1 - i;
5838 else
5839 ibase = i;
5840 *vp++ = tmp[ibase / 32] >> i % 32;
5843 /* It shouldn't matter what's done here, so fill it with
5844 zero. */
5845 for (; i < elem_bitsize; i += value_bit)
5846 *vp++ = 0;
5848 break;
5850 case CONST_FIXED:
5851 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
5853 for (i = 0; i < elem_bitsize; i += value_bit)
5854 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5856 else
5858 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5859 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5860 for (; i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize;
5861 i += value_bit)
5862 *vp++ = CONST_FIXED_VALUE_HIGH (el)
5863 >> (i - HOST_BITS_PER_WIDE_INT);
5864 for (; i < elem_bitsize; i += value_bit)
5865 *vp++ = 0;
5867 break;
5869 default:
5870 gcc_unreachable ();
5874 /* Now, pick the right byte to start with. */
5875 /* Renumber BYTE so that the least-significant byte is byte 0. A special
5876 case is paradoxical SUBREGs, which shouldn't be adjusted since they
5877 will already have offset 0. */
5878 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
5880 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
5881 - byte);
5882 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5883 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5884 byte = (subword_byte % UNITS_PER_WORD
5885 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
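/* E.g. for a 4-byte INNERMODE and a 2-byte OUTERMODE on a big-endian
   target, BYTE == 2 names the least significant half and is
   renumbered to 0 here.  */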
5888 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
5889 so if it's become negative it will instead be very large.) */
5890 gcc_assert (byte < GET_MODE_SIZE (innermode));
5892 /* Convert from bytes to chunks of size value_bit. */
5893 value_start = byte * (BITS_PER_UNIT / value_bit);
5895 /* Re-pack the value. */
5896 num_elem = GET_MODE_NUNITS (outermode);
5898 if (VECTOR_MODE_P (outermode))
5900 result_v = rtvec_alloc (num_elem);
5901 elems = &RTVEC_ELT (result_v, 0);
5903 else
5904 elems = &result_s;
5906 outer_submode = GET_MODE_INNER (outermode);
5907 outer_class = GET_MODE_CLASS (outer_submode);
5908 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
5910 gcc_assert (elem_bitsize % value_bit == 0);
5911 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
5913 for (elem = 0; elem < num_elem; elem++)
5915 unsigned char *vp;
5917 /* Vectors are stored in target memory order. (This is probably
5918 a mistake.) */
5920 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5921 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5922 / BITS_PER_UNIT);
5923 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5924 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5925 unsigned bytele = (subword_byte % UNITS_PER_WORD
5926 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5927 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
5930 switch (outer_class)
5932 case MODE_INT:
5933 case MODE_PARTIAL_INT:
5935 int u;
5936 int base = 0;
5937 int units
5938 = (GET_MODE_BITSIZE (outer_submode) + HOST_BITS_PER_WIDE_INT - 1)
5939 / HOST_BITS_PER_WIDE_INT;
5940 HOST_WIDE_INT tmp[MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_WIDE_INT];
5941 wide_int r;
5943 if (GET_MODE_PRECISION (outer_submode) > MAX_BITSIZE_MODE_ANY_INT)
5944 return NULL_RTX;
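/* Assemble the VALUE_BIT-sized chunks into HOST_WIDE_INT-sized words,
   least significant first; the resulting array then feeds
   wide_int::from_array below.  */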
5945 for (u = 0; u < units; u++)
5947 unsigned HOST_WIDE_INT buf = 0;
5948 for (i = 0;
5949 i < HOST_BITS_PER_WIDE_INT && base + i < elem_bitsize;
5950 i += value_bit)
5951 buf |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5953 tmp[u] = buf;
5954 base += HOST_BITS_PER_WIDE_INT;
5956 r = wide_int::from_array (tmp, units,
5957 GET_MODE_PRECISION (outer_submode));
5958 #if TARGET_SUPPORTS_WIDE_INT == 0
5959 /* Make sure r will fit into CONST_INT or CONST_DOUBLE. */
5960 if (wi::min_precision (r, SIGNED) > HOST_BITS_PER_DOUBLE_INT)
5961 return NULL_RTX;
5962 #endif
5963 elems[elem] = immed_wide_int_const (r, outer_submode);
5965 break;
5967 case MODE_FLOAT:
5968 case MODE_DECIMAL_FLOAT:
5970 REAL_VALUE_TYPE r;
5971 long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32] = { 0 };
5973 /* real_from_target wants its input in words affected by
5974 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5975 and use WORDS_BIG_ENDIAN instead; see the documentation
5976 of SUBREG in rtl.texi. */
5977 for (i = 0; i < elem_bitsize; i += value_bit)
5979 int ibase;
5980 if (WORDS_BIG_ENDIAN)
5981 ibase = elem_bitsize - 1 - i;
5982 else
5983 ibase = i;
5984 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
5987 real_from_target (&r, tmp, outer_submode);
5988 elems[elem] = const_double_from_real_value (r, outer_submode);
5990 break;
5992 case MODE_FRACT:
5993 case MODE_UFRACT:
5994 case MODE_ACCUM:
5995 case MODE_UACCUM:
5997 FIXED_VALUE_TYPE f;
5998 f.data.low = 0;
5999 f.data.high = 0;
6000 f.mode = outer_submode;
6002 for (i = 0;
6003 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
6004 i += value_bit)
6005 f.data.low |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
6006 for (; i < elem_bitsize; i += value_bit)
6007 f.data.high |= ((unsigned HOST_WIDE_INT)(*vp++ & value_mask)
6008 << (i - HOST_BITS_PER_WIDE_INT));
6010 elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
6012 break;
6014 default:
6015 gcc_unreachable ();
6018 if (VECTOR_MODE_P (outermode))
6019 return gen_rtx_CONST_VECTOR (outermode, result_v);
6020 else
6021 return result_s;
6024 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
6025 Return 0 if no simplifications are possible. */
6027 simplify_subreg (machine_mode outermode, rtx op,
6028 machine_mode innermode, unsigned int byte)
6030 /* Little bit of sanity checking. */
6031 gcc_assert (innermode != VOIDmode);
6032 gcc_assert (outermode != VOIDmode);
6033 gcc_assert (innermode != BLKmode);
6034 gcc_assert (outermode != BLKmode);
6036 gcc_assert (GET_MODE (op) == innermode
6037 || GET_MODE (op) == VOIDmode);
6039 if ((byte % GET_MODE_SIZE (outermode)) != 0)
6040 return NULL_RTX;
6042 if (byte >= GET_MODE_SIZE (innermode))
6043 return NULL_RTX;
6045 if (outermode == innermode && !byte)
6046 return op;
6048 if (CONST_SCALAR_INT_P (op)
6049 || CONST_DOUBLE_AS_FLOAT_P (op)
6050 || GET_CODE (op) == CONST_FIXED
6051 || GET_CODE (op) == CONST_VECTOR)
6052 return simplify_immed_subreg (outermode, op, innermode, byte);
6054 /* Changing mode twice with SUBREG => just change it once,
6055 or not at all if changing back to op's starting mode. */
6056 if (GET_CODE (op) == SUBREG)
6058 machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
6059 int final_offset = byte + SUBREG_BYTE (op);
6060 rtx newx;
6062 if (outermode == innermostmode
6063 && byte == 0 && SUBREG_BYTE (op) == 0)
6064 return SUBREG_REG (op);
6066 /* The SUBREG_BYTE represents the offset, as if the value were stored
6067 in memory. The irritating exception is the paradoxical subreg, where
6068 we define SUBREG_BYTE to be 0; on big-endian machines this
6069 value would otherwise be negative. For the moment, undo this exception. */
6070 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
6072 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
6073 if (WORDS_BIG_ENDIAN)
6074 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
6075 if (BYTES_BIG_ENDIAN)
6076 final_offset += difference % UNITS_PER_WORD;
6078 if (SUBREG_BYTE (op) == 0
6079 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
6081 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
6082 if (WORDS_BIG_ENDIAN)
6083 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
6084 if (BYTES_BIG_ENDIAN)
6085 final_offset += difference % UNITS_PER_WORD;
6088 /* See whether resulting subreg will be paradoxical. */
6089 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
6091 /* In nonparadoxical subregs we can't handle negative offsets. */
6092 if (final_offset < 0)
6093 return NULL_RTX;
6094 /* Bail out in case resulting subreg would be incorrect. */
6095 if (final_offset % GET_MODE_SIZE (outermode)
6096 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
6097 return NULL_RTX;
6099 else
6101 int offset = 0;
6102 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
6104 /* In a paradoxical subreg, see if we are still looking at the lower part.
6105 If so, our SUBREG_BYTE will be 0. */
6106 if (WORDS_BIG_ENDIAN)
6107 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
6108 if (BYTES_BIG_ENDIAN)
6109 offset += difference % UNITS_PER_WORD;
6110 if (offset == final_offset)
6111 final_offset = 0;
6112 else
6113 return NULL_RTX;
6116 /* Recurse for further possible simplifications. */
6117 newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
6118 final_offset);
6119 if (newx)
6120 return newx;
6121 if (validate_subreg (outermode, innermostmode,
6122 SUBREG_REG (op), final_offset))
6124 newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
6125 if (SUBREG_PROMOTED_VAR_P (op)
6126 && SUBREG_PROMOTED_SIGN (op) >= 0
6127 && GET_MODE_CLASS (outermode) == MODE_INT
6128 && IN_RANGE (GET_MODE_SIZE (outermode),
6129 GET_MODE_SIZE (innermode),
6130 GET_MODE_SIZE (innermostmode))
6131 && subreg_lowpart_p (newx))
6133 SUBREG_PROMOTED_VAR_P (newx) = 1;
6134 SUBREG_PROMOTED_SET (newx, SUBREG_PROMOTED_GET (op));
6136 return newx;
6138 return NULL_RTX;
6141 /* SUBREG of a hard register => just change the register number
6142 and/or mode. If the hard register is not valid in that mode,
6143 suppress this simplification. If the hard register is the stack,
6144 frame, or argument pointer, leave this as a SUBREG. */
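/* For instance, (subreg:SI (reg:DI <hard reg>) 4) can fold to a plain
   (reg:SI ...) naming whichever hard register holds those four bytes;
   simplify_subreg_regno decides whether the target allows the
   combination, so the exact register is target-specific.  */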
6146 if (REG_P (op) && HARD_REGISTER_P (op))
6148 unsigned int regno, final_regno;
6150 regno = REGNO (op);
6151 final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
6152 if (HARD_REGISTER_NUM_P (final_regno))
6154 rtx x;
6155 int final_offset = byte;
6157 /* Adjust offset for paradoxical subregs. */
6158 if (byte == 0
6159 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
6161 int difference = (GET_MODE_SIZE (innermode)
6162 - GET_MODE_SIZE (outermode));
6163 if (WORDS_BIG_ENDIAN)
6164 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
6165 if (BYTES_BIG_ENDIAN)
6166 final_offset += difference % UNITS_PER_WORD;
6169 x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);
6171 /* Propagate the original regno. We don't have any way to specify
6172 the offset inside the original regno, so do so only for the lowpart.
6173 The information is used only by alias analysis, which cannot
6174 grok partial registers anyway. */
6176 if (subreg_lowpart_offset (outermode, innermode) == byte)
6177 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
6178 return x;
6182 /* If we have a SUBREG of a register that we are replacing with a
6183 MEM, make a new MEM and try replacing the
6184 SUBREG with it. Don't do this if the MEM has a mode-dependent address
6185 or if we would be widening it. */
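/* E.g. (subreg:QI (mem:SI ADDR) 3) is rewritten by adjust_address_nv
   below as a QImode MEM whose address is ADDR offset by 3 bytes.  */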
6187 if (MEM_P (op)
6188 && ! mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op))
6189 /* Allow splitting of volatile memory references in case we don't
6190 have an instruction to move the whole thing. */
6191 && (! MEM_VOLATILE_P (op)
6192 || ! have_insn_for (SET, innermode))
6193 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
6194 return adjust_address_nv (op, outermode, byte);
6196 /* Handle complex or vector values represented as CONCAT or VEC_CONCAT
6197 of two parts. */
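/* E.g. for (subreg:SI (vec_concat:V2SI A B) 4), BYTE selects the
   second part, so this recurses on B at offset 0 and simply
   returns B.  */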
6198 if (GET_CODE (op) == CONCAT
6199 || GET_CODE (op) == VEC_CONCAT)
6201 unsigned int part_size, final_offset;
6202 rtx part, res;
6204 enum machine_mode part_mode = GET_MODE (XEXP (op, 0));
6205 if (part_mode == VOIDmode)
6206 part_mode = GET_MODE_INNER (GET_MODE (op));
6207 part_size = GET_MODE_SIZE (part_mode);
6208 if (byte < part_size)
6210 part = XEXP (op, 0);
6211 final_offset = byte;
6213 else
6215 part = XEXP (op, 1);
6216 final_offset = byte - part_size;
6219 if (final_offset + GET_MODE_SIZE (outermode) > part_size)
6220 return NULL_RTX;
6222 part_mode = GET_MODE (part);
6223 if (part_mode == VOIDmode)
6224 part_mode = GET_MODE_INNER (GET_MODE (op));
6225 res = simplify_subreg (outermode, part, part_mode, final_offset);
6226 if (res)
6227 return res;
6228 if (validate_subreg (outermode, part_mode, part, final_offset))
6229 return gen_rtx_SUBREG (outermode, part, final_offset);
6230 return NULL_RTX;
6233 /* A SUBREG resulting from a zero extension may fold to zero if
6234 it extracts higher bits than the ZERO_EXTEND's source provides. */
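/* E.g. (subreg:SI (zero_extend:DI (reg:QI X)) 4) on a little-endian
   target reads bits 32..63, all of which the ZERO_EXTEND guarantees
   to be zero, so the whole thing is (const_int 0).  */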
6235 if (GET_CODE (op) == ZERO_EXTEND && SCALAR_INT_MODE_P (innermode))
6237 unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
6238 if (bitpos >= GET_MODE_PRECISION (GET_MODE (XEXP (op, 0))))
6239 return CONST0_RTX (outermode);
6242 if (SCALAR_INT_MODE_P (outermode)
6243 && SCALAR_INT_MODE_P (innermode)
6244 && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode)
6245 && byte == subreg_lowpart_offset (outermode, innermode))
6247 rtx tem = simplify_truncation (outermode, op, innermode);
6248 if (tem)
6249 return tem;
6252 return NULL_RTX;
6255 /* Make a SUBREG operation or equivalent if it folds. */
6258 simplify_gen_subreg (machine_mode outermode, rtx op,
6259 machine_mode innermode, unsigned int byte)
6261 rtx newx;
6263 newx = simplify_subreg (outermode, op, innermode, byte);
6264 if (newx)
6265 return newx;
6267 if (GET_CODE (op) == SUBREG
6268 || GET_CODE (op) == CONCAT
6269 || GET_MODE (op) == VOIDmode)
6270 return NULL_RTX;
6272 if (validate_subreg (outermode, innermode, op, byte))
6273 return gen_rtx_SUBREG (outermode, op, byte);
6275 return NULL_RTX;
6278 /* Generates a subreg to get the least significant part of EXPR (in mode
6279 INNER_MODE) as OUTER_MODE. */
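/* For example, lowpart_subreg (QImode, X, SImode) asks for the least
   significant byte of X: offset 0 on a little-endian target, 3 on a
   big-endian one, as computed by subreg_lowpart_offset.  */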
6282 lowpart_subreg (machine_mode outer_mode, rtx expr,
6283 machine_mode inner_mode)
6285 return simplify_gen_subreg (outer_mode, expr, inner_mode,
6286 subreg_lowpart_offset (outer_mode, inner_mode));
6289 /* Simplify X, an rtx expression.
6291 Return the simplified expression or NULL if no simplifications
6292 were possible.
6294 This is the preferred entry point into the simplification routines;
6295 however, we still allow passes to call the more specific routines.
6297 Right now GCC has three (yes, three) major bodies of RTL simplification
6298 code that need to be unified.
6300 1. fold_rtx in cse.c. This code uses various CSE specific
6301 information to aid in RTL simplification.
6303 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
6304 it uses combine specific information to aid in RTL
6305 simplification.
6307 3. The routines in this file.
6310 Long term we want to only have one body of simplification code; to
6311 get to that state I recommend the following steps:
6313 1. Pore over fold_rtx & simplify_rtx and move any simplifications
6314 which do not depend on pass-specific state into these routines.
6316 2. As code is moved by #1, change fold_rtx & simplify_rtx to
6317 use this routine whenever possible.
6319 3. Allow for pass dependent state to be provided to these
6320 routines and add simplifications based on the pass dependent
6321 state. Remove code from cse.c & combine.c that becomes
6322 redundant/dead.
6324 It will take time, but ultimately the compiler will be easier to
6325 maintain and improve. It's totally silly that when we add a
6326 simplification it needs to be added to 4 places (3 for RTL
6327 simplification and 1 for tree simplification). */
6330 simplify_rtx (const_rtx x)
6332 const enum rtx_code code = GET_CODE (x);
6333 const machine_mode mode = GET_MODE (x);
6335 switch (GET_RTX_CLASS (code))
6337 case RTX_UNARY:
6338 return simplify_unary_operation (code, mode,
6339 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
6340 case RTX_COMM_ARITH:
6341 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
6342 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
6344 /* Fall through. */
6346 case RTX_BIN_ARITH:
6347 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
6349 case RTX_TERNARY:
6350 case RTX_BITFIELD_OPS:
6351 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
6352 XEXP (x, 0), XEXP (x, 1),
6353 XEXP (x, 2));
6355 case RTX_COMPARE:
6356 case RTX_COMM_COMPARE:
6357 return simplify_relational_operation (code, mode,
6358 ((GET_MODE (XEXP (x, 0))
6359 != VOIDmode)
6360 ? GET_MODE (XEXP (x, 0))
6361 : GET_MODE (XEXP (x, 1))),
6362 XEXP (x, 0),
6363 XEXP (x, 1));
6365 case RTX_EXTRA:
6366 if (code == SUBREG)
6367 return simplify_subreg (mode, SUBREG_REG (x),
6368 GET_MODE (SUBREG_REG (x)),
6369 SUBREG_BYTE (x));
6370 break;
6372 case RTX_OBJ:
6373 if (code == LO_SUM)
6375 /* Convert (lo_sum (high FOO) FOO) to FOO. */
6376 if (GET_CODE (XEXP (x, 0)) == HIGH
6377 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
6378 return XEXP (x, 1);
6380 break;
6382 default:
6383 break;
6385 return NULL;