PR tree-optimization/78496
[official-gcc.git] / gcc / simplify-rtx.c
blobac85a564b10cf3a6ea16e7d21ef4fe74d86f47b1
1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987-2017 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "backend.h"
25 #include "target.h"
26 #include "rtl.h"
27 #include "tree.h"
28 #include "predict.h"
29 #include "memmodel.h"
30 #include "optabs.h"
31 #include "emit-rtl.h"
32 #include "recog.h"
33 #include "diagnostic-core.h"
34 #include "varasm.h"
35 #include "flags.h"
37 /* Simplification and canonicalization of RTL. */
/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
/* Parenthesize the argument so that expression arguments such as
   "a + b" are cast as a whole rather than having the cast bind to
   only the first operand.  */
#define HWI_SIGN_EXTEND(low) \
  ((((HOST_WIDE_INT) (low)) < 0) ? HOST_WIDE_INT_M1 : HOST_WIDE_INT_0)
46 static rtx neg_const_int (machine_mode, const_rtx);
47 static bool plus_minus_operand_p (const_rtx);
48 static rtx simplify_plus_minus (enum rtx_code, machine_mode, rtx, rtx);
49 static rtx simplify_immed_subreg (machine_mode, rtx, machine_mode,
50 unsigned int);
51 static rtx simplify_associative_operation (enum rtx_code, machine_mode,
52 rtx, rtx);
53 static rtx simplify_relational_operation_1 (enum rtx_code, machine_mode,
54 machine_mode, rtx, rtx);
55 static rtx simplify_unary_operation_1 (enum rtx_code, machine_mode, rtx);
56 static rtx simplify_binary_operation_1 (enum rtx_code, machine_mode,
57 rtx, rtx, rtx, rtx);
59 /* Negate a CONST_INT rtx. */
60 static rtx
61 neg_const_int (machine_mode mode, const_rtx i)
63 unsigned HOST_WIDE_INT val = -UINTVAL (i);
65 if (GET_MODE_PRECISION (mode) > HOST_BITS_PER_WIDE_INT
66 && val == UINTVAL (i))
67 return simplify_const_unary_operation (NEG, mode, CONST_CAST_RTX (i),
68 mode);
69 return gen_int_mode (val, mode);
72 /* Test whether expression, X, is an immediate constant that represents
73 the most significant bit of machine mode MODE. */
75 bool
76 mode_signbit_p (machine_mode mode, const_rtx x)
78 unsigned HOST_WIDE_INT val;
79 unsigned int width;
81 if (GET_MODE_CLASS (mode) != MODE_INT)
82 return false;
84 width = GET_MODE_PRECISION (mode);
85 if (width == 0)
86 return false;
88 if (width <= HOST_BITS_PER_WIDE_INT
89 && CONST_INT_P (x))
90 val = INTVAL (x);
91 #if TARGET_SUPPORTS_WIDE_INT
92 else if (CONST_WIDE_INT_P (x))
94 unsigned int i;
95 unsigned int elts = CONST_WIDE_INT_NUNITS (x);
96 if (elts != (width + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT)
97 return false;
98 for (i = 0; i < elts - 1; i++)
99 if (CONST_WIDE_INT_ELT (x, i) != 0)
100 return false;
101 val = CONST_WIDE_INT_ELT (x, elts - 1);
102 width %= HOST_BITS_PER_WIDE_INT;
103 if (width == 0)
104 width = HOST_BITS_PER_WIDE_INT;
106 #else
107 else if (width <= HOST_BITS_PER_DOUBLE_INT
108 && CONST_DOUBLE_AS_INT_P (x)
109 && CONST_DOUBLE_LOW (x) == 0)
111 val = CONST_DOUBLE_HIGH (x);
112 width -= HOST_BITS_PER_WIDE_INT;
114 #endif
115 else
116 /* X is not an integer constant. */
117 return false;
119 if (width < HOST_BITS_PER_WIDE_INT)
120 val &= (HOST_WIDE_INT_1U << width) - 1;
121 return val == (HOST_WIDE_INT_1U << (width - 1));
124 /* Test whether VAL is equal to the most significant bit of mode MODE
125 (after masking with the mode mask of MODE). Returns false if the
126 precision of MODE is too large to handle. */
128 bool
129 val_signbit_p (machine_mode mode, unsigned HOST_WIDE_INT val)
131 unsigned int width;
133 if (GET_MODE_CLASS (mode) != MODE_INT)
134 return false;
136 width = GET_MODE_PRECISION (mode);
137 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
138 return false;
140 val &= GET_MODE_MASK (mode);
141 return val == (HOST_WIDE_INT_1U << (width - 1));
144 /* Test whether the most significant bit of mode MODE is set in VAL.
145 Returns false if the precision of MODE is too large to handle. */
146 bool
147 val_signbit_known_set_p (machine_mode mode, unsigned HOST_WIDE_INT val)
149 unsigned int width;
151 if (GET_MODE_CLASS (mode) != MODE_INT)
152 return false;
154 width = GET_MODE_PRECISION (mode);
155 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
156 return false;
158 val &= HOST_WIDE_INT_1U << (width - 1);
159 return val != 0;
162 /* Test whether the most significant bit of mode MODE is clear in VAL.
163 Returns false if the precision of MODE is too large to handle. */
164 bool
165 val_signbit_known_clear_p (machine_mode mode, unsigned HOST_WIDE_INT val)
167 unsigned int width;
169 if (GET_MODE_CLASS (mode) != MODE_INT)
170 return false;
172 width = GET_MODE_PRECISION (mode);
173 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
174 return false;
176 val &= HOST_WIDE_INT_1U << (width - 1);
177 return val == 0;
180 /* Make a binary operation by properly ordering the operands and
181 seeing if the expression folds. */
184 simplify_gen_binary (enum rtx_code code, machine_mode mode, rtx op0,
185 rtx op1)
187 rtx tem;
189 /* If this simplifies, do it. */
190 tem = simplify_binary_operation (code, mode, op0, op1);
191 if (tem)
192 return tem;
194 /* Put complex operands first and constants second if commutative. */
195 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
196 && swap_commutative_operands_p (op0, op1))
197 std::swap (op0, op1);
199 return gen_rtx_fmt_ee (code, mode, op0, op1);
202 /* If X is a MEM referencing the constant pool, return the real value.
203 Otherwise return X. */
205 avoid_constant_pool_reference (rtx x)
207 rtx c, tmp, addr;
208 machine_mode cmode;
209 HOST_WIDE_INT offset = 0;
211 switch (GET_CODE (x))
213 case MEM:
214 break;
216 case FLOAT_EXTEND:
217 /* Handle float extensions of constant pool references. */
218 tmp = XEXP (x, 0);
219 c = avoid_constant_pool_reference (tmp);
220 if (c != tmp && CONST_DOUBLE_AS_FLOAT_P (c))
221 return const_double_from_real_value (*CONST_DOUBLE_REAL_VALUE (c),
222 GET_MODE (x));
223 return x;
225 default:
226 return x;
229 if (GET_MODE (x) == BLKmode)
230 return x;
232 addr = XEXP (x, 0);
234 /* Call target hook to avoid the effects of -fpic etc.... */
235 addr = targetm.delegitimize_address (addr);
237 /* Split the address into a base and integer offset. */
238 if (GET_CODE (addr) == CONST
239 && GET_CODE (XEXP (addr, 0)) == PLUS
240 && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
242 offset = INTVAL (XEXP (XEXP (addr, 0), 1));
243 addr = XEXP (XEXP (addr, 0), 0);
246 if (GET_CODE (addr) == LO_SUM)
247 addr = XEXP (addr, 1);
249 /* If this is a constant pool reference, we can turn it into its
250 constant and hope that simplifications happen. */
251 if (GET_CODE (addr) == SYMBOL_REF
252 && CONSTANT_POOL_ADDRESS_P (addr))
254 c = get_pool_constant (addr);
255 cmode = get_pool_mode (addr);
257 /* If we're accessing the constant in a different mode than it was
258 originally stored, attempt to fix that up via subreg simplifications.
259 If that fails we have no choice but to return the original memory. */
260 if (offset == 0 && cmode == GET_MODE (x))
261 return c;
262 else if (offset >= 0 && offset < GET_MODE_SIZE (cmode))
264 rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
265 if (tem && CONSTANT_P (tem))
266 return tem;
270 return x;
273 /* Simplify a MEM based on its attributes. This is the default
274 delegitimize_address target hook, and it's recommended that every
275 overrider call it. */
278 delegitimize_mem_from_attrs (rtx x)
280 /* MEMs without MEM_OFFSETs may have been offset, so we can't just
281 use their base addresses as equivalent. */
282 if (MEM_P (x)
283 && MEM_EXPR (x)
284 && MEM_OFFSET_KNOWN_P (x))
286 tree decl = MEM_EXPR (x);
287 machine_mode mode = GET_MODE (x);
288 HOST_WIDE_INT offset = 0;
290 switch (TREE_CODE (decl))
292 default:
293 decl = NULL;
294 break;
296 case VAR_DECL:
297 break;
299 case ARRAY_REF:
300 case ARRAY_RANGE_REF:
301 case COMPONENT_REF:
302 case BIT_FIELD_REF:
303 case REALPART_EXPR:
304 case IMAGPART_EXPR:
305 case VIEW_CONVERT_EXPR:
307 HOST_WIDE_INT bitsize, bitpos;
308 tree toffset;
309 int unsignedp, reversep, volatilep = 0;
311 decl
312 = get_inner_reference (decl, &bitsize, &bitpos, &toffset, &mode,
313 &unsignedp, &reversep, &volatilep);
314 if (bitsize != GET_MODE_BITSIZE (mode)
315 || (bitpos % BITS_PER_UNIT)
316 || (toffset && !tree_fits_shwi_p (toffset)))
317 decl = NULL;
318 else
320 offset += bitpos / BITS_PER_UNIT;
321 if (toffset)
322 offset += tree_to_shwi (toffset);
324 break;
328 if (decl
329 && mode == GET_MODE (x)
330 && VAR_P (decl)
331 && (TREE_STATIC (decl)
332 || DECL_THREAD_LOCAL_P (decl))
333 && DECL_RTL_SET_P (decl)
334 && MEM_P (DECL_RTL (decl)))
336 rtx newx;
338 offset += MEM_OFFSET (x);
340 newx = DECL_RTL (decl);
342 if (MEM_P (newx))
344 rtx n = XEXP (newx, 0), o = XEXP (x, 0);
346 /* Avoid creating a new MEM needlessly if we already had
347 the same address. We do if there's no OFFSET and the
348 old address X is identical to NEWX, or if X is of the
349 form (plus NEWX OFFSET), or the NEWX is of the form
350 (plus Y (const_int Z)) and X is that with the offset
351 added: (plus Y (const_int Z+OFFSET)). */
352 if (!((offset == 0
353 || (GET_CODE (o) == PLUS
354 && GET_CODE (XEXP (o, 1)) == CONST_INT
355 && (offset == INTVAL (XEXP (o, 1))
356 || (GET_CODE (n) == PLUS
357 && GET_CODE (XEXP (n, 1)) == CONST_INT
358 && (INTVAL (XEXP (n, 1)) + offset
359 == INTVAL (XEXP (o, 1)))
360 && (n = XEXP (n, 0))))
361 && (o = XEXP (o, 0))))
362 && rtx_equal_p (o, n)))
363 x = adjust_address_nv (newx, mode, offset);
365 else if (GET_MODE (x) == GET_MODE (newx)
366 && offset == 0)
367 x = newx;
371 return x;
374 /* Make a unary operation by first seeing if it folds and otherwise making
375 the specified operation. */
378 simplify_gen_unary (enum rtx_code code, machine_mode mode, rtx op,
379 machine_mode op_mode)
381 rtx tem;
383 /* If this simplifies, use it. */
384 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
385 return tem;
387 return gen_rtx_fmt_e (code, mode, op);
390 /* Likewise for ternary operations. */
393 simplify_gen_ternary (enum rtx_code code, machine_mode mode,
394 machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
396 rtx tem;
398 /* If this simplifies, use it. */
399 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
400 op0, op1, op2)))
401 return tem;
403 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
406 /* Likewise, for relational operations.
407 CMP_MODE specifies mode comparison is done in. */
410 simplify_gen_relational (enum rtx_code code, machine_mode mode,
411 machine_mode cmp_mode, rtx op0, rtx op1)
413 rtx tem;
415 if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
416 op0, op1)))
417 return tem;
419 return gen_rtx_fmt_ee (code, mode, op0, op1);
422 /* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
423 and simplify the result. If FN is non-NULL, call this callback on each
424 X, if it returns non-NULL, replace X with its return value and simplify the
425 result. */
428 simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
429 rtx (*fn) (rtx, const_rtx, void *), void *data)
431 enum rtx_code code = GET_CODE (x);
432 machine_mode mode = GET_MODE (x);
433 machine_mode op_mode;
434 const char *fmt;
435 rtx op0, op1, op2, newx, op;
436 rtvec vec, newvec;
437 int i, j;
439 if (__builtin_expect (fn != NULL, 0))
441 newx = fn (x, old_rtx, data);
442 if (newx)
443 return newx;
445 else if (rtx_equal_p (x, old_rtx))
446 return copy_rtx ((rtx) data);
448 switch (GET_RTX_CLASS (code))
450 case RTX_UNARY:
451 op0 = XEXP (x, 0);
452 op_mode = GET_MODE (op0);
453 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
454 if (op0 == XEXP (x, 0))
455 return x;
456 return simplify_gen_unary (code, mode, op0, op_mode);
458 case RTX_BIN_ARITH:
459 case RTX_COMM_ARITH:
460 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
461 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
462 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
463 return x;
464 return simplify_gen_binary (code, mode, op0, op1);
466 case RTX_COMPARE:
467 case RTX_COMM_COMPARE:
468 op0 = XEXP (x, 0);
469 op1 = XEXP (x, 1);
470 op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
471 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
472 op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
473 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
474 return x;
475 return simplify_gen_relational (code, mode, op_mode, op0, op1);
477 case RTX_TERNARY:
478 case RTX_BITFIELD_OPS:
479 op0 = XEXP (x, 0);
480 op_mode = GET_MODE (op0);
481 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
482 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
483 op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
484 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
485 return x;
486 if (op_mode == VOIDmode)
487 op_mode = GET_MODE (op0);
488 return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
490 case RTX_EXTRA:
491 if (code == SUBREG)
493 op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
494 if (op0 == SUBREG_REG (x))
495 return x;
496 op0 = simplify_gen_subreg (GET_MODE (x), op0,
497 GET_MODE (SUBREG_REG (x)),
498 SUBREG_BYTE (x));
499 return op0 ? op0 : x;
501 break;
503 case RTX_OBJ:
504 if (code == MEM)
506 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
507 if (op0 == XEXP (x, 0))
508 return x;
509 return replace_equiv_address_nv (x, op0);
511 else if (code == LO_SUM)
513 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
514 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
516 /* (lo_sum (high x) y) -> y where x and y have the same base. */
517 if (GET_CODE (op0) == HIGH)
519 rtx base0, base1, offset0, offset1;
520 split_const (XEXP (op0, 0), &base0, &offset0);
521 split_const (op1, &base1, &offset1);
522 if (rtx_equal_p (base0, base1))
523 return op1;
526 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
527 return x;
528 return gen_rtx_LO_SUM (mode, op0, op1);
530 break;
532 default:
533 break;
536 newx = x;
537 fmt = GET_RTX_FORMAT (code);
538 for (i = 0; fmt[i]; i++)
539 switch (fmt[i])
541 case 'E':
542 vec = XVEC (x, i);
543 newvec = XVEC (newx, i);
544 for (j = 0; j < GET_NUM_ELEM (vec); j++)
546 op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
547 old_rtx, fn, data);
548 if (op != RTVEC_ELT (vec, j))
550 if (newvec == vec)
552 newvec = shallow_copy_rtvec (vec);
553 if (x == newx)
554 newx = shallow_copy_rtx (x);
555 XVEC (newx, i) = newvec;
557 RTVEC_ELT (newvec, j) = op;
560 break;
562 case 'e':
563 if (XEXP (x, i))
565 op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
566 if (op != XEXP (x, i))
568 if (x == newx)
569 newx = shallow_copy_rtx (x);
570 XEXP (newx, i) = op;
573 break;
575 return newx;
578 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
579 resulting RTX. Return a new RTX which is as simplified as possible. */
582 simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
584 return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
587 /* Try to simplify a MODE truncation of OP, which has OP_MODE.
588 Only handle cases where the truncated value is inherently an rvalue.
590 RTL provides two ways of truncating a value:
592 1. a lowpart subreg. This form is only a truncation when both
593 the outer and inner modes (here MODE and OP_MODE respectively)
594 are scalar integers, and only then when the subreg is used as
595 an rvalue.
597 It is only valid to form such truncating subregs if the
598 truncation requires no action by the target. The onus for
599 proving this is on the creator of the subreg -- e.g. the
600 caller to simplify_subreg or simplify_gen_subreg -- and typically
601 involves either TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode.
603 2. a TRUNCATE. This form handles both scalar and compound integers.
605 The first form is preferred where valid. However, the TRUNCATE
606 handling in simplify_unary_operation turns the second form into the
607 first form when TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode allow,
608 so it is generally safe to form rvalue truncations using:
610 simplify_gen_unary (TRUNCATE, ...)
612 and leave simplify_unary_operation to work out which representation
613 should be used.
615 Because of the proof requirements on (1), simplify_truncation must
616 also use simplify_gen_unary (TRUNCATE, ...) to truncate parts of OP,
617 regardless of whether the outer truncation came from a SUBREG or a
618 TRUNCATE. For example, if the caller has proven that an SImode
619 truncation of:
621 (and:DI X Y)
623 is a no-op and can be represented as a subreg, it does not follow
624 that SImode truncations of X and Y are also no-ops. On a target
625 like 64-bit MIPS that requires SImode values to be stored in
626 sign-extended form, an SImode truncation of:
628 (and:DI (reg:DI X) (const_int 63))
630 is trivially a no-op because only the lower 6 bits can be set.
631 However, X is still an arbitrary 64-bit number and so we cannot
632 assume that truncating it too is a no-op. */
634 static rtx
635 simplify_truncation (machine_mode mode, rtx op,
636 machine_mode op_mode)
638 unsigned int precision = GET_MODE_UNIT_PRECISION (mode);
639 unsigned int op_precision = GET_MODE_UNIT_PRECISION (op_mode);
640 gcc_assert (precision <= op_precision);
642 /* Optimize truncations of zero and sign extended values. */
643 if (GET_CODE (op) == ZERO_EXTEND
644 || GET_CODE (op) == SIGN_EXTEND)
646 /* There are three possibilities. If MODE is the same as the
647 origmode, we can omit both the extension and the subreg.
648 If MODE is not larger than the origmode, we can apply the
649 truncation without the extension. Finally, if the outermode
650 is larger than the origmode, we can just extend to the appropriate
651 mode. */
652 machine_mode origmode = GET_MODE (XEXP (op, 0));
653 if (mode == origmode)
654 return XEXP (op, 0);
655 else if (precision <= GET_MODE_UNIT_PRECISION (origmode))
656 return simplify_gen_unary (TRUNCATE, mode,
657 XEXP (op, 0), origmode);
658 else
659 return simplify_gen_unary (GET_CODE (op), mode,
660 XEXP (op, 0), origmode);
663 /* If the machine can perform operations in the truncated mode, distribute
664 the truncation, i.e. simplify (truncate:QI (op:SI (x:SI) (y:SI))) into
665 (op:QI (truncate:QI (x:SI)) (truncate:QI (y:SI))). */
666 if (1
667 && (!WORD_REGISTER_OPERATIONS || precision >= BITS_PER_WORD)
668 && (GET_CODE (op) == PLUS
669 || GET_CODE (op) == MINUS
670 || GET_CODE (op) == MULT))
672 rtx op0 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0), op_mode);
673 if (op0)
675 rtx op1 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 1), op_mode);
676 if (op1)
677 return simplify_gen_binary (GET_CODE (op), mode, op0, op1);
681 /* Simplify (truncate:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C)) into
682 to (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
683 the outer subreg is effectively a truncation to the original mode. */
684 if ((GET_CODE (op) == LSHIFTRT
685 || GET_CODE (op) == ASHIFTRT)
686 /* Ensure that OP_MODE is at least twice as wide as MODE
687 to avoid the possibility that an outer LSHIFTRT shifts by more
688 than the sign extension's sign_bit_copies and introduces zeros
689 into the high bits of the result. */
690 && 2 * precision <= op_precision
691 && CONST_INT_P (XEXP (op, 1))
692 && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
693 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
694 && UINTVAL (XEXP (op, 1)) < precision)
695 return simplify_gen_binary (ASHIFTRT, mode,
696 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
698 /* Likewise (truncate:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C)) into
699 to (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
700 the outer subreg is effectively a truncation to the original mode. */
701 if ((GET_CODE (op) == LSHIFTRT
702 || GET_CODE (op) == ASHIFTRT)
703 && CONST_INT_P (XEXP (op, 1))
704 && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
705 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
706 && UINTVAL (XEXP (op, 1)) < precision)
707 return simplify_gen_binary (LSHIFTRT, mode,
708 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
710 /* Likewise (truncate:QI (ashift:SI (zero_extend:SI (x:QI)) C)) into
711 to (ashift:QI (x:QI) C), where C is a suitable small constant and
712 the outer subreg is effectively a truncation to the original mode. */
713 if (GET_CODE (op) == ASHIFT
714 && CONST_INT_P (XEXP (op, 1))
715 && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
716 || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
717 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
718 && UINTVAL (XEXP (op, 1)) < precision)
719 return simplify_gen_binary (ASHIFT, mode,
720 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
722 /* Likewise (truncate:QI (and:SI (lshiftrt:SI (x:SI) C) C2)) into
723 (and:QI (lshiftrt:QI (truncate:QI (x:SI)) C) C2) for suitable C
724 and C2. */
725 if (GET_CODE (op) == AND
726 && (GET_CODE (XEXP (op, 0)) == LSHIFTRT
727 || GET_CODE (XEXP (op, 0)) == ASHIFTRT)
728 && CONST_INT_P (XEXP (XEXP (op, 0), 1))
729 && CONST_INT_P (XEXP (op, 1)))
731 rtx op0 = (XEXP (XEXP (op, 0), 0));
732 rtx shift_op = XEXP (XEXP (op, 0), 1);
733 rtx mask_op = XEXP (op, 1);
734 unsigned HOST_WIDE_INT shift = UINTVAL (shift_op);
735 unsigned HOST_WIDE_INT mask = UINTVAL (mask_op);
737 if (shift < precision
738 /* If doing this transform works for an X with all bits set,
739 it works for any X. */
740 && ((GET_MODE_MASK (mode) >> shift) & mask)
741 == ((GET_MODE_MASK (op_mode) >> shift) & mask)
742 && (op0 = simplify_gen_unary (TRUNCATE, mode, op0, op_mode))
743 && (op0 = simplify_gen_binary (LSHIFTRT, mode, op0, shift_op)))
745 mask_op = GEN_INT (trunc_int_for_mode (mask, mode));
746 return simplify_gen_binary (AND, mode, op0, mask_op);
750 /* Turn (truncate:M1 (*_extract:M2 (reg:M2) (len) (pos))) into
751 (*_extract:M1 (truncate:M1 (reg:M2)) (len) (pos')) if possible without
752 changing len. */
753 if ((GET_CODE (op) == ZERO_EXTRACT || GET_CODE (op) == SIGN_EXTRACT)
754 && REG_P (XEXP (op, 0))
755 && GET_MODE (XEXP (op, 0)) == GET_MODE (op)
756 && CONST_INT_P (XEXP (op, 1))
757 && CONST_INT_P (XEXP (op, 2)))
759 rtx op0 = XEXP (op, 0);
760 unsigned HOST_WIDE_INT len = UINTVAL (XEXP (op, 1));
761 unsigned HOST_WIDE_INT pos = UINTVAL (XEXP (op, 2));
762 if (BITS_BIG_ENDIAN && pos >= op_precision - precision)
764 op0 = simplify_gen_unary (TRUNCATE, mode, op0, GET_MODE (op0));
765 if (op0)
767 pos -= op_precision - precision;
768 return simplify_gen_ternary (GET_CODE (op), mode, mode, op0,
769 XEXP (op, 1), GEN_INT (pos));
772 else if (!BITS_BIG_ENDIAN && precision >= len + pos)
774 op0 = simplify_gen_unary (TRUNCATE, mode, op0, GET_MODE (op0));
775 if (op0)
776 return simplify_gen_ternary (GET_CODE (op), mode, mode, op0,
777 XEXP (op, 1), XEXP (op, 2));
781 /* Recognize a word extraction from a multi-word subreg. */
782 if ((GET_CODE (op) == LSHIFTRT
783 || GET_CODE (op) == ASHIFTRT)
784 && SCALAR_INT_MODE_P (mode)
785 && SCALAR_INT_MODE_P (op_mode)
786 && precision >= BITS_PER_WORD
787 && 2 * precision <= op_precision
788 && CONST_INT_P (XEXP (op, 1))
789 && (INTVAL (XEXP (op, 1)) & (precision - 1)) == 0
790 && UINTVAL (XEXP (op, 1)) < op_precision)
792 int byte = subreg_lowpart_offset (mode, op_mode);
793 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
794 return simplify_gen_subreg (mode, XEXP (op, 0), op_mode,
795 (WORDS_BIG_ENDIAN
796 ? byte - shifted_bytes
797 : byte + shifted_bytes));
800 /* If we have a TRUNCATE of a right shift of MEM, make a new MEM
801 and try replacing the TRUNCATE and shift with it. Don't do this
802 if the MEM has a mode-dependent address. */
803 if ((GET_CODE (op) == LSHIFTRT
804 || GET_CODE (op) == ASHIFTRT)
805 && SCALAR_INT_MODE_P (op_mode)
806 && MEM_P (XEXP (op, 0))
807 && CONST_INT_P (XEXP (op, 1))
808 && (INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (mode)) == 0
809 && INTVAL (XEXP (op, 1)) > 0
810 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (op_mode)
811 && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0),
812 MEM_ADDR_SPACE (XEXP (op, 0)))
813 && ! MEM_VOLATILE_P (XEXP (op, 0))
814 && (GET_MODE_SIZE (mode) >= UNITS_PER_WORD
815 || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
817 int byte = subreg_lowpart_offset (mode, op_mode);
818 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
819 return adjust_address_nv (XEXP (op, 0), mode,
820 (WORDS_BIG_ENDIAN
821 ? byte - shifted_bytes
822 : byte + shifted_bytes));
825 /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
826 (OP:SI foo:SI) if OP is NEG or ABS. */
827 if ((GET_CODE (op) == ABS
828 || GET_CODE (op) == NEG)
829 && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
830 || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
831 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
832 return simplify_gen_unary (GET_CODE (op), mode,
833 XEXP (XEXP (op, 0), 0), mode);
835 /* (truncate:A (subreg:B (truncate:C X) 0)) is
836 (truncate:A X). */
837 if (GET_CODE (op) == SUBREG
838 && SCALAR_INT_MODE_P (mode)
839 && SCALAR_INT_MODE_P (op_mode)
840 && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (op)))
841 && GET_CODE (SUBREG_REG (op)) == TRUNCATE
842 && subreg_lowpart_p (op))
844 rtx inner = XEXP (SUBREG_REG (op), 0);
845 if (GET_MODE_PRECISION (mode)
846 <= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op))))
847 return simplify_gen_unary (TRUNCATE, mode, inner, GET_MODE (inner));
848 else
849 /* If subreg above is paradoxical and C is narrower
850 than A, return (subreg:A (truncate:C X) 0). */
851 return simplify_gen_subreg (mode, SUBREG_REG (op),
852 GET_MODE (SUBREG_REG (op)), 0);
855 /* (truncate:A (truncate:B X)) is (truncate:A X). */
856 if (GET_CODE (op) == TRUNCATE)
857 return simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0),
858 GET_MODE (XEXP (op, 0)));
860 return NULL_RTX;
863 /* Try to simplify a unary operation CODE whose output mode is to be
864 MODE with input operand OP whose mode was originally OP_MODE.
865 Return zero if no simplification can be made. */
867 simplify_unary_operation (enum rtx_code code, machine_mode mode,
868 rtx op, machine_mode op_mode)
870 rtx trueop, tem;
872 trueop = avoid_constant_pool_reference (op);
874 tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
875 if (tem)
876 return tem;
878 return simplify_unary_operation_1 (code, mode, op);
881 /* Return true if FLOAT or UNSIGNED_FLOAT operation OP is known
882 to be exact. */
884 static bool
885 exact_int_to_float_conversion_p (const_rtx op)
887 int out_bits = significand_size (GET_MODE_INNER (GET_MODE (op)));
888 machine_mode op0_mode = GET_MODE (XEXP (op, 0));
889 /* Constants shouldn't reach here. */
890 gcc_assert (op0_mode != VOIDmode);
891 int in_prec = GET_MODE_UNIT_PRECISION (op0_mode);
892 int in_bits = in_prec;
893 if (HWI_COMPUTABLE_MODE_P (op0_mode))
895 unsigned HOST_WIDE_INT nonzero = nonzero_bits (XEXP (op, 0), op0_mode);
896 if (GET_CODE (op) == FLOAT)
897 in_bits -= num_sign_bit_copies (XEXP (op, 0), op0_mode);
898 else if (GET_CODE (op) == UNSIGNED_FLOAT)
899 in_bits = wi::min_precision (wi::uhwi (nonzero, in_prec), UNSIGNED);
900 else
901 gcc_unreachable ();
902 in_bits -= wi::ctz (wi::uhwi (nonzero, in_prec));
904 return in_bits <= out_bits;
907 /* Perform some simplifications we can do even if the operands
908 aren't constant. */
909 static rtx
910 simplify_unary_operation_1 (enum rtx_code code, machine_mode mode, rtx op)
912 enum rtx_code reversed;
913 rtx temp;
915 switch (code)
917 case NOT:
918 /* (not (not X)) == X. */
919 if (GET_CODE (op) == NOT)
920 return XEXP (op, 0);
922 /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
923 comparison is all ones. */
924 if (COMPARISON_P (op)
925 && (mode == BImode || STORE_FLAG_VALUE == -1)
926 && ((reversed = reversed_comparison_code (op, NULL)) != UNKNOWN))
927 return simplify_gen_relational (reversed, mode, VOIDmode,
928 XEXP (op, 0), XEXP (op, 1));
930 /* (not (plus X -1)) can become (neg X). */
931 if (GET_CODE (op) == PLUS
932 && XEXP (op, 1) == constm1_rtx)
933 return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
935 /* Similarly, (not (neg X)) is (plus X -1). Only do this for
936 modes that have CONSTM1_RTX, i.e. MODE_INT, MODE_PARTIAL_INT
937 and MODE_VECTOR_INT. */
938 if (GET_CODE (op) == NEG && CONSTM1_RTX (mode))
939 return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
940 CONSTM1_RTX (mode));
942 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
943 if (GET_CODE (op) == XOR
944 && CONST_INT_P (XEXP (op, 1))
945 && (temp = simplify_unary_operation (NOT, mode,
946 XEXP (op, 1), mode)) != 0)
947 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
949 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
950 if (GET_CODE (op) == PLUS
951 && CONST_INT_P (XEXP (op, 1))
952 && mode_signbit_p (mode, XEXP (op, 1))
953 && (temp = simplify_unary_operation (NOT, mode,
954 XEXP (op, 1), mode)) != 0)
955 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
958 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
959 operands other than 1, but that is not valid. We could do a
960 similar simplification for (not (lshiftrt C X)) where C is
961 just the sign bit, but this doesn't seem common enough to
962 bother with. */
963 if (GET_CODE (op) == ASHIFT
964 && XEXP (op, 0) == const1_rtx)
966 temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
967 return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
970 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
971 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
972 so we can perform the above simplification. */
973 if (STORE_FLAG_VALUE == -1
974 && GET_CODE (op) == ASHIFTRT
975 && CONST_INT_P (XEXP (op, 1))
976 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
977 return simplify_gen_relational (GE, mode, VOIDmode,
978 XEXP (op, 0), const0_rtx);
981 if (GET_CODE (op) == SUBREG
982 && subreg_lowpart_p (op)
983 && (GET_MODE_SIZE (GET_MODE (op))
984 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
985 && GET_CODE (SUBREG_REG (op)) == ASHIFT
986 && XEXP (SUBREG_REG (op), 0) == const1_rtx)
988 machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
989 rtx x;
991 x = gen_rtx_ROTATE (inner_mode,
992 simplify_gen_unary (NOT, inner_mode, const1_rtx,
993 inner_mode),
994 XEXP (SUBREG_REG (op), 1));
995 temp = rtl_hooks.gen_lowpart_no_emit (mode, x);
996 if (temp)
997 return temp;
1000 /* Apply De Morgan's laws to reduce number of patterns for machines
1001 with negating logical insns (and-not, nand, etc.). If result has
1002 only one NOT, put it first, since that is how the patterns are
1003 coded. */
1004 if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
1006 rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
1007 machine_mode op_mode;
1009 op_mode = GET_MODE (in1);
1010 in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);
1012 op_mode = GET_MODE (in2);
1013 if (op_mode == VOIDmode)
1014 op_mode = mode;
1015 in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);
1017 if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
1018 std::swap (in1, in2);
1020 return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
1021 mode, in1, in2);
1024 /* (not (bswap x)) -> (bswap (not x)). */
1025 if (GET_CODE (op) == BSWAP)
1027 rtx x = simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
1028 return simplify_gen_unary (BSWAP, mode, x, mode);
1030 break;
1032 case NEG:
1033 /* (neg (neg X)) == X. */
1034 if (GET_CODE (op) == NEG)
1035 return XEXP (op, 0);
1037 /* (neg (x ? (neg y) : y)) == !x ? (neg y) : y.
1038 If comparison is not reversible use
1039 x ? y : (neg y). */
1040 if (GET_CODE (op) == IF_THEN_ELSE)
1042 rtx cond = XEXP (op, 0);
1043 rtx true_rtx = XEXP (op, 1);
1044 rtx false_rtx = XEXP (op, 2);
1046 if ((GET_CODE (true_rtx) == NEG
1047 && rtx_equal_p (XEXP (true_rtx, 0), false_rtx))
1048 || (GET_CODE (false_rtx) == NEG
1049 && rtx_equal_p (XEXP (false_rtx, 0), true_rtx)))
1051 if (reversed_comparison_code (cond, NULL) != UNKNOWN)
1052 temp = reversed_comparison (cond, mode);
1053 else
1055 temp = cond;
1056 std::swap (true_rtx, false_rtx);
1058 return simplify_gen_ternary (IF_THEN_ELSE, mode,
1059 mode, temp, true_rtx, false_rtx);
1063 /* (neg (plus X 1)) can become (not X). */
1064 if (GET_CODE (op) == PLUS
1065 && XEXP (op, 1) == const1_rtx)
1066 return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
1068 /* Similarly, (neg (not X)) is (plus X 1). */
1069 if (GET_CODE (op) == NOT)
1070 return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
1071 CONST1_RTX (mode));
1073 /* (neg (minus X Y)) can become (minus Y X). This transformation
1074 isn't safe for modes with signed zeros, since if X and Y are
1075 both +0, (minus Y X) is the same as (minus X Y). If the
1076 rounding mode is towards +infinity (or -infinity) then the two
1077 expressions will be rounded differently. */
1078 if (GET_CODE (op) == MINUS
1079 && !HONOR_SIGNED_ZEROS (mode)
1080 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1081 return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
1083 if (GET_CODE (op) == PLUS
1084 && !HONOR_SIGNED_ZEROS (mode)
1085 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1087 /* (neg (plus A C)) is simplified to (minus -C A). */
1088 if (CONST_SCALAR_INT_P (XEXP (op, 1))
1089 || CONST_DOUBLE_AS_FLOAT_P (XEXP (op, 1)))
1091 temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
1092 if (temp)
1093 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
1096 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
1097 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
1098 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
1101 /* (neg (mult A B)) becomes (mult A (neg B)).
1102 This works even for floating-point values. */
1103 if (GET_CODE (op) == MULT
1104 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1106 temp = simplify_gen_unary (NEG, mode, XEXP (op, 1), mode);
1107 return simplify_gen_binary (MULT, mode, XEXP (op, 0), temp);
1110 /* NEG commutes with ASHIFT since it is multiplication. Only do
1111 this if we can then eliminate the NEG (e.g., if the operand
1112 is a constant). */
1113 if (GET_CODE (op) == ASHIFT)
1115 temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
1116 if (temp)
1117 return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
1120 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
1121 C is equal to the width of MODE minus 1. */
1122 if (GET_CODE (op) == ASHIFTRT
1123 && CONST_INT_P (XEXP (op, 1))
1124 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
1125 return simplify_gen_binary (LSHIFTRT, mode,
1126 XEXP (op, 0), XEXP (op, 1));
1128 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
1129 C is equal to the width of MODE minus 1. */
1130 if (GET_CODE (op) == LSHIFTRT
1131 && CONST_INT_P (XEXP (op, 1))
1132 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
1133 return simplify_gen_binary (ASHIFTRT, mode,
1134 XEXP (op, 0), XEXP (op, 1));
1136 /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
1137 if (GET_CODE (op) == XOR
1138 && XEXP (op, 1) == const1_rtx
1139 && nonzero_bits (XEXP (op, 0), mode) == 1)
1140 return plus_constant (mode, XEXP (op, 0), -1);
1142 /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1. */
1143 /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1. */
1144 if (GET_CODE (op) == LT
1145 && XEXP (op, 1) == const0_rtx
1146 && SCALAR_INT_MODE_P (GET_MODE (XEXP (op, 0))))
1148 machine_mode inner = GET_MODE (XEXP (op, 0));
1149 int isize = GET_MODE_PRECISION (inner);
1150 if (STORE_FLAG_VALUE == 1)
1152 temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
1153 GEN_INT (isize - 1));
1154 if (mode == inner)
1155 return temp;
1156 if (GET_MODE_PRECISION (mode) > isize)
1157 return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
1158 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
1160 else if (STORE_FLAG_VALUE == -1)
1162 temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
1163 GEN_INT (isize - 1));
1164 if (mode == inner)
1165 return temp;
1166 if (GET_MODE_PRECISION (mode) > isize)
1167 return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
1168 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
1171 break;
1173 case TRUNCATE:
1174 /* Don't optimize (lshiftrt (mult ...)) as it would interfere
1175 with the umulXi3_highpart patterns. */
1176 if (GET_CODE (op) == LSHIFTRT
1177 && GET_CODE (XEXP (op, 0)) == MULT)
1178 break;
1180 if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
1182 if (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op)))
1184 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1185 if (temp)
1186 return temp;
1188 /* We can't handle truncation to a partial integer mode here
1189 because we don't know the real bitsize of the partial
1190 integer mode. */
1191 break;
1194 if (GET_MODE (op) != VOIDmode)
1196 temp = simplify_truncation (mode, op, GET_MODE (op));
1197 if (temp)
1198 return temp;
1201 /* If we know that the value is already truncated, we can
1202 replace the TRUNCATE with a SUBREG. */
1203 if (GET_MODE_NUNITS (mode) == 1
1204 && (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op))
1205 || truncated_to_mode (mode, op)))
1207 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1208 if (temp)
1209 return temp;
1212 /* A truncate of a comparison can be replaced with a subreg if
1213 STORE_FLAG_VALUE permits. This is like the previous test,
1214 but it works even if the comparison is done in a mode larger
1215 than HOST_BITS_PER_WIDE_INT. */
1216 if (HWI_COMPUTABLE_MODE_P (mode)
1217 && COMPARISON_P (op)
1218 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
1220 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1221 if (temp)
1222 return temp;
1225 /* A truncate of a memory is just loading the low part of the memory
1226 if we are not changing the meaning of the address. */
1227 if (GET_CODE (op) == MEM
1228 && !VECTOR_MODE_P (mode)
1229 && !MEM_VOLATILE_P (op)
1230 && !mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op)))
1232 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1233 if (temp)
1234 return temp;
1237 break;
1239 case FLOAT_TRUNCATE:
1240 if (DECIMAL_FLOAT_MODE_P (mode))
1241 break;
1243 /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */
1244 if (GET_CODE (op) == FLOAT_EXTEND
1245 && GET_MODE (XEXP (op, 0)) == mode)
1246 return XEXP (op, 0);
1248 /* (float_truncate:SF (float_truncate:DF foo:XF))
1249 = (float_truncate:SF foo:XF).
1250 This may eliminate double rounding, so it is unsafe.
1252 (float_truncate:SF (float_extend:XF foo:DF))
1253 = (float_truncate:SF foo:DF).
1255 (float_truncate:DF (float_extend:XF foo:SF))
1256 = (float_extend:DF foo:SF). */
1257 if ((GET_CODE (op) == FLOAT_TRUNCATE
1258 && flag_unsafe_math_optimizations)
1259 || GET_CODE (op) == FLOAT_EXTEND)
1260 return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op,
1261 0)))
1262 > GET_MODE_SIZE (mode)
1263 ? FLOAT_TRUNCATE : FLOAT_EXTEND,
1264 mode,
1265 XEXP (op, 0), mode);
1267 /* (float_truncate (float x)) is (float x) */
1268 if ((GET_CODE (op) == FLOAT || GET_CODE (op) == UNSIGNED_FLOAT)
1269 && (flag_unsafe_math_optimizations
1270 || exact_int_to_float_conversion_p (op)))
1271 return simplify_gen_unary (GET_CODE (op), mode,
1272 XEXP (op, 0),
1273 GET_MODE (XEXP (op, 0)));
1275 /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
1276 (OP:SF foo:SF) if OP is NEG or ABS. */
1277 if ((GET_CODE (op) == ABS
1278 || GET_CODE (op) == NEG)
1279 && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
1280 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
1281 return simplify_gen_unary (GET_CODE (op), mode,
1282 XEXP (XEXP (op, 0), 0), mode);
1284 /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
1285 is (float_truncate:SF x). */
1286 if (GET_CODE (op) == SUBREG
1287 && subreg_lowpart_p (op)
1288 && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
1289 return SUBREG_REG (op);
1290 break;
1292 case FLOAT_EXTEND:
1293 if (DECIMAL_FLOAT_MODE_P (mode))
1294 break;
1296 /* (float_extend (float_extend x)) is (float_extend x)
1298 (float_extend (float x)) is (float x) assuming that double
1299 rounding can't happen.  */
1301 if (GET_CODE (op) == FLOAT_EXTEND
1302 || ((GET_CODE (op) == FLOAT || GET_CODE (op) == UNSIGNED_FLOAT)
1303 && exact_int_to_float_conversion_p (op)))
1304 return simplify_gen_unary (GET_CODE (op), mode,
1305 XEXP (op, 0),
1306 GET_MODE (XEXP (op, 0)));
1308 break;
1310 case ABS:
1311 /* (abs (neg <foo>)) -> (abs <foo>) */
1312 if (GET_CODE (op) == NEG)
1313 return simplify_gen_unary (ABS, mode, XEXP (op, 0),
1314 GET_MODE (XEXP (op, 0)));
1316 /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
1317 do nothing. */
1318 if (GET_MODE (op) == VOIDmode)
1319 break;
1321 /* If operand is something known to be positive, ignore the ABS. */
1322 if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
1323 || val_signbit_known_clear_p (GET_MODE (op),
1324 nonzero_bits (op, GET_MODE (op))))
1325 return op;
1327 /* If operand is known to be only -1 or 0, convert ABS to NEG. */
1328 if (num_sign_bit_copies (op, mode) == GET_MODE_PRECISION (mode))
1329 return gen_rtx_NEG (mode, op);
1331 break;
1333 case FFS:
1334 /* (ffs (*_extend <X>)) = (ffs <X>) */
1335 if (GET_CODE (op) == SIGN_EXTEND
1336 || GET_CODE (op) == ZERO_EXTEND)
1337 return simplify_gen_unary (FFS, mode, XEXP (op, 0),
1338 GET_MODE (XEXP (op, 0)));
1339 break;
1341 case POPCOUNT:
1342 switch (GET_CODE (op))
1344 case BSWAP:
1345 case ZERO_EXTEND:
1346 /* (popcount (zero_extend <X>)) = (popcount <X>) */
1347 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1348 GET_MODE (XEXP (op, 0)));
1350 case ROTATE:
1351 case ROTATERT:
1352 /* Rotations don't affect popcount. */
1353 if (!side_effects_p (XEXP (op, 1)))
1354 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1355 GET_MODE (XEXP (op, 0)));
1356 break;
1358 default:
1359 break;
1361 break;
1363 case PARITY:
1364 switch (GET_CODE (op))
1366 case NOT:
1367 case BSWAP:
1368 case ZERO_EXTEND:
1369 case SIGN_EXTEND:
1370 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1371 GET_MODE (XEXP (op, 0)));
1373 case ROTATE:
1374 case ROTATERT:
1375 /* Rotations don't affect parity. */
1376 if (!side_effects_p (XEXP (op, 1)))
1377 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1378 GET_MODE (XEXP (op, 0)));
1379 break;
1381 default:
1382 break;
1384 break;
1386 case BSWAP:
1387 /* (bswap (bswap x)) -> x. */
1388 if (GET_CODE (op) == BSWAP)
1389 return XEXP (op, 0);
1390 break;
1392 case FLOAT:
1393 /* (float (sign_extend <X>)) = (float <X>). */
1394 if (GET_CODE (op) == SIGN_EXTEND)
1395 return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
1396 GET_MODE (XEXP (op, 0)));
1397 break;
1399 case SIGN_EXTEND:
1400 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
1401 becomes just the MINUS if its mode is MODE. This allows
1402 folding switch statements on machines using casesi (such as
1403 the VAX). */
1404 if (GET_CODE (op) == TRUNCATE
1405 && GET_MODE (XEXP (op, 0)) == mode
1406 && GET_CODE (XEXP (op, 0)) == MINUS
1407 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
1408 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
1409 return XEXP (op, 0);
1411 /* Extending a widening multiplication should be canonicalized to
1412 a wider widening multiplication. */
1413 if (GET_CODE (op) == MULT)
1415 rtx lhs = XEXP (op, 0);
1416 rtx rhs = XEXP (op, 1);
1417 enum rtx_code lcode = GET_CODE (lhs);
1418 enum rtx_code rcode = GET_CODE (rhs);
1420 /* Widening multiplies usually extend both operands, but sometimes
1421 they use a shift to extract a portion of a register. */
1422 if ((lcode == SIGN_EXTEND
1423 || (lcode == ASHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1424 && (rcode == SIGN_EXTEND
1425 || (rcode == ASHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1427 machine_mode lmode = GET_MODE (lhs);
1428 machine_mode rmode = GET_MODE (rhs);
1429 int bits;
1431 if (lcode == ASHIFTRT)
1432 /* Number of bits not shifted off the end. */
1433 bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
1434 else /* lcode == SIGN_EXTEND */
1435 /* Size of inner mode. */
1436 bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
1438 if (rcode == ASHIFTRT)
1439 bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
1440 else /* rcode == SIGN_EXTEND */
1441 bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
1443 /* We can only widen multiplies if the result is mathematically
1444 equivalent. I.e. if overflow was impossible. */
1445 if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
1446 return simplify_gen_binary
1447 (MULT, mode,
1448 simplify_gen_unary (SIGN_EXTEND, mode, lhs, lmode),
1449 simplify_gen_unary (SIGN_EXTEND, mode, rhs, rmode));
1453 /* Check for a sign extension of a subreg of a promoted
1454 variable, where the promotion is sign-extended, and the
1455 target mode is the same as the variable's promotion. */
1456 if (GET_CODE (op) == SUBREG
1457 && SUBREG_PROMOTED_VAR_P (op)
1458 && SUBREG_PROMOTED_SIGNED_P (op)
1459 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
1461 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1462 if (temp)
1463 return temp;
1466 /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
1467 (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1468 if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
1470 gcc_assert (GET_MODE_PRECISION (mode)
1471 > GET_MODE_PRECISION (GET_MODE (op)));
1472 return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
1473 GET_MODE (XEXP (op, 0)));
1476 /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
1477 is (sign_extend:M (subreg:O <X>)) if there is mode with
1478 GET_MODE_BITSIZE (N) - I bits.
1479 (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1480 is similarly (zero_extend:M (subreg:O <X>)). */
1481 if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT)
1482 && GET_CODE (XEXP (op, 0)) == ASHIFT
1483 && CONST_INT_P (XEXP (op, 1))
1484 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1485 && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
1487 machine_mode tmode
1488 = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
1489 - INTVAL (XEXP (op, 1)), MODE_INT, 1);
1490 gcc_assert (GET_MODE_BITSIZE (mode)
1491 > GET_MODE_BITSIZE (GET_MODE (op)));
1492 if (tmode != BLKmode)
1494 rtx inner =
1495 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1496 if (inner)
1497 return simplify_gen_unary (GET_CODE (op) == ASHIFTRT
1498 ? SIGN_EXTEND : ZERO_EXTEND,
1499 mode, inner, tmode);
1503 /* (sign_extend:M (lshiftrt:N <X> (const_int I))) is better as
1504 (zero_extend:M (lshiftrt:N <X> (const_int I))) if I is not 0. */
1505 if (GET_CODE (op) == LSHIFTRT
1506 && CONST_INT_P (XEXP (op, 1))
1507 && XEXP (op, 1) != const0_rtx)
1508 return simplify_gen_unary (ZERO_EXTEND, mode, op, GET_MODE (op));
1510 #if defined(POINTERS_EXTEND_UNSIGNED)
1511 /* As we do not know which address space the pointer is referring to,
1512 we can do this only if the target does not support different pointer
1513 or address modes depending on the address space. */
1514 if (target_default_pointer_address_modes_p ()
1515 && ! POINTERS_EXTEND_UNSIGNED
1516 && mode == Pmode && GET_MODE (op) == ptr_mode
1517 && (CONSTANT_P (op)
1518 || (GET_CODE (op) == SUBREG
1519 && REG_P (SUBREG_REG (op))
1520 && REG_POINTER (SUBREG_REG (op))
1521 && GET_MODE (SUBREG_REG (op)) == Pmode))
1522 && !targetm.have_ptr_extend ())
1524 temp
1525 = convert_memory_address_addr_space_1 (Pmode, op,
1526 ADDR_SPACE_GENERIC, false,
1527 true);
1528 if (temp)
1529 return temp;
1531 #endif
1532 break;
1534 case ZERO_EXTEND:
1535 /* Check for a zero extension of a subreg of a promoted
1536 variable, where the promotion is zero-extended, and the
1537 target mode is the same as the variable's promotion. */
1538 if (GET_CODE (op) == SUBREG
1539 && SUBREG_PROMOTED_VAR_P (op)
1540 && SUBREG_PROMOTED_UNSIGNED_P (op)
1541 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
1543 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1544 if (temp)
1545 return temp;
1548 /* Extending a widening multiplication should be canonicalized to
1549 a wider widening multiplication. */
1550 if (GET_CODE (op) == MULT)
1552 rtx lhs = XEXP (op, 0);
1553 rtx rhs = XEXP (op, 1);
1554 enum rtx_code lcode = GET_CODE (lhs);
1555 enum rtx_code rcode = GET_CODE (rhs);
1557 /* Widening multiplies usually extend both operands, but sometimes
1558 they use a shift to extract a portion of a register. */
1559 if ((lcode == ZERO_EXTEND
1560 || (lcode == LSHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1561 && (rcode == ZERO_EXTEND
1562 || (rcode == LSHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1564 machine_mode lmode = GET_MODE (lhs);
1565 machine_mode rmode = GET_MODE (rhs);
1566 int bits;
1568 if (lcode == LSHIFTRT)
1569 /* Number of bits not shifted off the end. */
1570 bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
1571 else /* lcode == ZERO_EXTEND */
1572 /* Size of inner mode. */
1573 bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
1575 if (rcode == LSHIFTRT)
1576 bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
1577 else /* rcode == ZERO_EXTEND */
1578 bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
1580 /* We can only widen multiplies if the result is mathematically
1581 equivalent. I.e. if overflow was impossible. */
1582 if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
1583 return simplify_gen_binary
1584 (MULT, mode,
1585 simplify_gen_unary (ZERO_EXTEND, mode, lhs, lmode),
1586 simplify_gen_unary (ZERO_EXTEND, mode, rhs, rmode));
1590 /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1591 if (GET_CODE (op) == ZERO_EXTEND)
1592 return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0),
1593 GET_MODE (XEXP (op, 0)));
1595 /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1596 is (zero_extend:M (subreg:O <X>)) if there is mode with
1597 GET_MODE_PRECISION (N) - I bits. */
1598 if (GET_CODE (op) == LSHIFTRT
1599 && GET_CODE (XEXP (op, 0)) == ASHIFT
1600 && CONST_INT_P (XEXP (op, 1))
1601 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1602 && GET_MODE_PRECISION (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
1604 machine_mode tmode
1605 = mode_for_size (GET_MODE_PRECISION (GET_MODE (op))
1606 - INTVAL (XEXP (op, 1)), MODE_INT, 1);
1607 if (tmode != BLKmode)
1609 rtx inner =
1610 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1611 if (inner)
1612 return simplify_gen_unary (ZERO_EXTEND, mode, inner, tmode);
1616 /* (zero_extend:M (subreg:N <X:O>)) is <X:O> (for M == O) or
1617 (zero_extend:M <X:O>), if X doesn't have any non-zero bits outside
1618 of mode N. E.g.
1619 (zero_extend:SI (subreg:QI (and:SI (reg:SI) (const_int 63)) 0)) is
1620 (and:SI (reg:SI) (const_int 63)). */
1621 if (GET_CODE (op) == SUBREG
1622 && GET_MODE_PRECISION (GET_MODE (op))
1623 < GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
1624 && GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
1625 <= HOST_BITS_PER_WIDE_INT
1626 && GET_MODE_PRECISION (mode)
1627 >= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
1628 && subreg_lowpart_p (op)
1629 && (nonzero_bits (SUBREG_REG (op), GET_MODE (SUBREG_REG (op)))
1630 & ~GET_MODE_MASK (GET_MODE (op))) == 0)
1632 if (GET_MODE_PRECISION (mode)
1633 == GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op))))
1634 return SUBREG_REG (op);
1635 return simplify_gen_unary (ZERO_EXTEND, mode, SUBREG_REG (op),
1636 GET_MODE (SUBREG_REG (op)));
1639 #if defined(POINTERS_EXTEND_UNSIGNED)
1640 /* As we do not know which address space the pointer is referring to,
1641 we can do this only if the target does not support different pointer
1642 or address modes depending on the address space. */
1643 if (target_default_pointer_address_modes_p ()
1644 && POINTERS_EXTEND_UNSIGNED > 0
1645 && mode == Pmode && GET_MODE (op) == ptr_mode
1646 && (CONSTANT_P (op)
1647 || (GET_CODE (op) == SUBREG
1648 && REG_P (SUBREG_REG (op))
1649 && REG_POINTER (SUBREG_REG (op))
1650 && GET_MODE (SUBREG_REG (op)) == Pmode))
1651 && !targetm.have_ptr_extend ())
1653 temp
1654 = convert_memory_address_addr_space_1 (Pmode, op,
1655 ADDR_SPACE_GENERIC, false,
1656 true);
1657 if (temp)
1658 return temp;
1660 #endif
1661 break;
1663 default:
1664 break;
1667 return 0;
1670 /* Try to compute the value of a unary operation CODE whose output mode is to
1671 be MODE with input operand OP whose mode was originally OP_MODE.
1672 Return zero if the value cannot be computed. */
1674 simplify_const_unary_operation (enum rtx_code code, machine_mode mode,
1675 rtx op, machine_mode op_mode)
1677 unsigned int width = GET_MODE_PRECISION (mode);
1679 if (code == VEC_DUPLICATE)
1681 gcc_assert (VECTOR_MODE_P (mode));
1682 if (GET_MODE (op) != VOIDmode)
1684 if (!VECTOR_MODE_P (GET_MODE (op)))
1685 gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
1686 else
1687 gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
1688 (GET_MODE (op)));
1690 if (CONST_SCALAR_INT_P (op) || CONST_DOUBLE_AS_FLOAT_P (op)
1691 || GET_CODE (op) == CONST_VECTOR)
1693 int elt_size = GET_MODE_UNIT_SIZE (mode);
1694 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1695 rtvec v = rtvec_alloc (n_elts);
1696 unsigned int i;
1698 if (GET_CODE (op) != CONST_VECTOR)
1699 for (i = 0; i < n_elts; i++)
1700 RTVEC_ELT (v, i) = op;
1701 else
1703 machine_mode inmode = GET_MODE (op);
1704 int in_elt_size = GET_MODE_UNIT_SIZE (inmode);
1705 unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
1707 gcc_assert (in_n_elts < n_elts);
1708 gcc_assert ((n_elts % in_n_elts) == 0);
1709 for (i = 0; i < n_elts; i++)
1710 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
1712 return gen_rtx_CONST_VECTOR (mode, v);
1716 if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
1718 int elt_size = GET_MODE_UNIT_SIZE (mode);
1719 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1720 machine_mode opmode = GET_MODE (op);
1721 int op_elt_size = GET_MODE_UNIT_SIZE (opmode);
1722 unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
1723 rtvec v = rtvec_alloc (n_elts);
1724 unsigned int i;
1726 gcc_assert (op_n_elts == n_elts);
1727 for (i = 0; i < n_elts; i++)
1729 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
1730 CONST_VECTOR_ELT (op, i),
1731 GET_MODE_INNER (opmode));
1732 if (!x)
1733 return 0;
1734 RTVEC_ELT (v, i) = x;
1736 return gen_rtx_CONST_VECTOR (mode, v);
1739 /* The order of these tests is critical so that, for example, we don't
1740 check the wrong mode (input vs. output) for a conversion operation,
1741 such as FIX. At some point, this should be simplified. */
1743 if (code == FLOAT && CONST_SCALAR_INT_P (op))
1745 REAL_VALUE_TYPE d;
1747 if (op_mode == VOIDmode)
1749 /* CONST_INT have VOIDmode as the mode. We assume that all
1750 the bits of the constant are significant, though, this is
1751 a dangerous assumption as many times CONST_INTs are
1752 created and used with garbage in the bits outside of the
1753 precision of the implied mode of the const_int. */
1754 op_mode = MAX_MODE_INT;
1757 real_from_integer (&d, mode, rtx_mode_t (op, op_mode), SIGNED);
1759 /* Avoid the folding if flag_signaling_nans is on and
1760 operand is a signaling NaN. */
1761 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1762 return 0;
1764 d = real_value_truncate (mode, d);
1765 return const_double_from_real_value (d, mode);
1767 else if (code == UNSIGNED_FLOAT && CONST_SCALAR_INT_P (op))
1769 REAL_VALUE_TYPE d;
1771 if (op_mode == VOIDmode)
1773 /* CONST_INT have VOIDmode as the mode. We assume that all
1774 the bits of the constant are significant, though, this is
1775 a dangerous assumption as many times CONST_INTs are
1776 created and used with garbage in the bits outside of the
1777 precision of the implied mode of the const_int. */
1778 op_mode = MAX_MODE_INT;
1781 real_from_integer (&d, mode, rtx_mode_t (op, op_mode), UNSIGNED);
1783 /* Avoid the folding if flag_signaling_nans is on and
1784 operand is a signaling NaN. */
1785 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1786 return 0;
1788 d = real_value_truncate (mode, d);
1789 return const_double_from_real_value (d, mode);
1792 if (CONST_SCALAR_INT_P (op) && width > 0)
1794 wide_int result;
1795 machine_mode imode = op_mode == VOIDmode ? mode : op_mode;
1796 rtx_mode_t op0 = rtx_mode_t (op, imode);
1797 int int_value;
1799 #if TARGET_SUPPORTS_WIDE_INT == 0
1800 /* This assert keeps the simplification from producing a result
1801 that cannot be represented in a CONST_DOUBLE but a lot of
1802 upstream callers expect that this function never fails to
1803 simplify something and so you if you added this to the test
1804 above the code would die later anyway. If this assert
1805 happens, you just need to make the port support wide int. */
1806 gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT);
1807 #endif
1809 switch (code)
1811 case NOT:
1812 result = wi::bit_not (op0);
1813 break;
1815 case NEG:
1816 result = wi::neg (op0);
1817 break;
1819 case ABS:
1820 result = wi::abs (op0);
1821 break;
1823 case FFS:
1824 result = wi::shwi (wi::ffs (op0), mode);
1825 break;
1827 case CLZ:
1828 if (wi::ne_p (op0, 0))
1829 int_value = wi::clz (op0);
1830 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, int_value))
1831 int_value = GET_MODE_PRECISION (mode);
1832 result = wi::shwi (int_value, mode);
1833 break;
1835 case CLRSB:
1836 result = wi::shwi (wi::clrsb (op0), mode);
1837 break;
1839 case CTZ:
1840 if (wi::ne_p (op0, 0))
1841 int_value = wi::ctz (op0);
1842 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, int_value))
1843 int_value = GET_MODE_PRECISION (mode);
1844 result = wi::shwi (int_value, mode);
1845 break;
1847 case POPCOUNT:
1848 result = wi::shwi (wi::popcount (op0), mode);
1849 break;
1851 case PARITY:
1852 result = wi::shwi (wi::parity (op0), mode);
1853 break;
1855 case BSWAP:
1856 result = wide_int (op0).bswap ();
1857 break;
1859 case TRUNCATE:
1860 case ZERO_EXTEND:
1861 result = wide_int::from (op0, width, UNSIGNED);
1862 break;
1864 case SIGN_EXTEND:
1865 result = wide_int::from (op0, width, SIGNED);
1866 break;
1868 case SQRT:
1869 default:
1870 return 0;
1873 return immed_wide_int_const (result, mode);
1876 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1877 && SCALAR_FLOAT_MODE_P (mode)
1878 && SCALAR_FLOAT_MODE_P (GET_MODE (op)))
1880 REAL_VALUE_TYPE d = *CONST_DOUBLE_REAL_VALUE (op);
1881 switch (code)
1883 case SQRT:
1884 return 0;
1885 case ABS:
1886 d = real_value_abs (&d);
1887 break;
1888 case NEG:
1889 d = real_value_negate (&d);
1890 break;
1891 case FLOAT_TRUNCATE:
1892 /* Don't perform the operation if flag_signaling_nans is on
1893 and the operand is a signaling NaN. */
1894 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1895 return NULL_RTX;
1896 d = real_value_truncate (mode, d);
1897 break;
1898 case FLOAT_EXTEND:
1899 /* Don't perform the operation if flag_signaling_nans is on
1900 and the operand is a signaling NaN. */
1901 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1902 return NULL_RTX;
1903 /* All this does is change the mode, unless changing
1904 mode class. */
1905 if (GET_MODE_CLASS (mode) != GET_MODE_CLASS (GET_MODE (op)))
1906 real_convert (&d, mode, &d);
1907 break;
1908 case FIX:
1909 /* Don't perform the operation if flag_signaling_nans is on
1910 and the operand is a signaling NaN. */
1911 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1912 return NULL_RTX;
1913 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
1914 break;
1915 case NOT:
1917 long tmp[4];
1918 int i;
1920 real_to_target (tmp, &d, GET_MODE (op));
1921 for (i = 0; i < 4; i++)
1922 tmp[i] = ~tmp[i];
1923 real_from_target (&d, tmp, mode);
1924 break;
1926 default:
1927 gcc_unreachable ();
1929 return const_double_from_real_value (d, mode);
1931 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1932 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1933 && GET_MODE_CLASS (mode) == MODE_INT
1934 && width > 0)
1936 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1937 operators are intentionally left unspecified (to ease implementation
1938 by target backends), for consistency, this routine implements the
1939 same semantics for constant folding as used by the middle-end. */
1941 /* This was formerly used only for non-IEEE float.
1942 eggert@twinsun.com says it is safe for IEEE also. */
1943 REAL_VALUE_TYPE t;
1944 const REAL_VALUE_TYPE *x = CONST_DOUBLE_REAL_VALUE (op);
1945 wide_int wmax, wmin;
1946 /* This is part of the abi to real_to_integer, but we check
1947 things before making this call. */
1948 bool fail;
1950 switch (code)
1952 case FIX:
1953 if (REAL_VALUE_ISNAN (*x))
1954 return const0_rtx;
1956 /* Test against the signed upper bound. */
1957 wmax = wi::max_value (width, SIGNED);
1958 real_from_integer (&t, VOIDmode, wmax, SIGNED);
1959 if (real_less (&t, x))
1960 return immed_wide_int_const (wmax, mode);
1962 /* Test against the signed lower bound. */
1963 wmin = wi::min_value (width, SIGNED);
1964 real_from_integer (&t, VOIDmode, wmin, SIGNED);
1965 if (real_less (x, &t))
1966 return immed_wide_int_const (wmin, mode);
1968 return immed_wide_int_const (real_to_integer (x, &fail, width),
1969 mode);
1971 case UNSIGNED_FIX:
1972 if (REAL_VALUE_ISNAN (*x) || REAL_VALUE_NEGATIVE (*x))
1973 return const0_rtx;
1975 /* Test against the unsigned upper bound. */
1976 wmax = wi::max_value (width, UNSIGNED);
1977 real_from_integer (&t, VOIDmode, wmax, UNSIGNED);
1978 if (real_less (&t, x))
1979 return immed_wide_int_const (wmax, mode);
1981 return immed_wide_int_const (real_to_integer (x, &fail, width),
1982 mode);
1984 default:
1985 gcc_unreachable ();
1989 return NULL_RTX;
1992 /* Subroutine of simplify_binary_operation to simplify a binary operation
1993 CODE that can commute with byte swapping, with result mode MODE and
1994 operating on OP0 and OP1. CODE is currently one of AND, IOR or XOR.
1995 Return zero if no simplification or canonicalization is possible. */
1997 static rtx
1998 simplify_byte_swapping_operation (enum rtx_code code, machine_mode mode,
1999 rtx op0, rtx op1)
2001 rtx tem;
2003 /* (op (bswap x) C1)) -> (bswap (op x C2)) with C2 swapped. */
2004 if (GET_CODE (op0) == BSWAP && CONST_SCALAR_INT_P (op1))
2006 tem = simplify_gen_binary (code, mode, XEXP (op0, 0),
2007 simplify_gen_unary (BSWAP, mode, op1, mode));
2008 return simplify_gen_unary (BSWAP, mode, tem, mode);
2011 /* (op (bswap x) (bswap y)) -> (bswap (op x y)). */
2012 if (GET_CODE (op0) == BSWAP && GET_CODE (op1) == BSWAP)
2014 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), XEXP (op1, 0));
2015 return simplify_gen_unary (BSWAP, mode, tem, mode);
2018 return NULL_RTX;
2021 /* Subroutine of simplify_binary_operation to simplify a commutative,
2022 associative binary operation CODE with result mode MODE, operating
2023 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
2024 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
2025 canonicalization is possible. */
2027 static rtx
2028 simplify_associative_operation (enum rtx_code code, machine_mode mode,
2029 rtx op0, rtx op1)
2031 rtx tem;
2033 /* Linearize the operator to the left. */
2034 if (GET_CODE (op1) == code)
2036 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)". */
2037 if (GET_CODE (op0) == code)
2039 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
2040 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
2043 /* "a op (b op c)" becomes "(b op c) op a". */
2044 if (! swap_commutative_operands_p (op1, op0))
2045 return simplify_gen_binary (code, mode, op1, op0);
2047 std::swap (op0, op1);
2050 if (GET_CODE (op0) == code)
2052 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
2053 if (swap_commutative_operands_p (XEXP (op0, 1), op1))
2055 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
2056 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
2059 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
2060 tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
2061 if (tem != 0)
2062 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
2064 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
2065 tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
2066 if (tem != 0)
2067 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
2070 return 0;
2074 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
2075 and OP1. Return 0 if no simplification is possible.
2077 Don't use this for relational operations such as EQ or LT.
2078 Use simplify_relational_operation instead. */
2080 simplify_binary_operation (enum rtx_code code, machine_mode mode,
2081 rtx op0, rtx op1)
2083 rtx trueop0, trueop1;
2084 rtx tem;
2086 /* Relational operations don't work here. We must know the mode
2087 of the operands in order to do the comparison correctly.
2088 Assuming a full word can give incorrect results.
2089 Consider comparing 128 with -128 in QImode. */
2090 gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
2091 gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
2093 /* Make sure the constant is second. */
2094 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
2095 && swap_commutative_operands_p (op0, op1))
2096 std::swap (op0, op1);
2098 trueop0 = avoid_constant_pool_reference (op0);
2099 trueop1 = avoid_constant_pool_reference (op1);
2101 tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
2102 if (tem)
2103 return tem;
2104 tem = simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
2106 if (tem)
2107 return tem;
2109 /* If the above steps did not result in a simplification and op0 or op1
2110 were constant pool references, use the referenced constants directly. */
2111 if (trueop0 != op0 || trueop1 != op1)
2112 return simplify_gen_binary (code, mode, trueop0, trueop1);
2114 return NULL_RTX;
2117 /* Subroutine of simplify_binary_operation. Simplify a binary operation
2118 CODE with result mode MODE, operating on OP0 and OP1. If OP0 and/or
2119 OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
2120 actual constants. */
2122 static rtx
2123 simplify_binary_operation_1 (enum rtx_code code, machine_mode mode,
2124 rtx op0, rtx op1, rtx trueop0, rtx trueop1)
2126 rtx tem, reversed, opleft, opright;
2127 HOST_WIDE_INT val;
2128 unsigned int width = GET_MODE_PRECISION (mode);
2130 /* Even if we can't compute a constant result,
2131 there are some cases worth simplifying. */
2133 switch (code)
2135 case PLUS:
2136 /* Maybe simplify x + 0 to x. The two expressions are equivalent
2137 when x is NaN, infinite, or finite and nonzero. They aren't
2138 when x is -0 and the rounding mode is not towards -infinity,
2139 since (-0) + 0 is then 0. */
2140 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
2141 return op0;
2143 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
2144 transformations are safe even for IEEE. */
2145 if (GET_CODE (op0) == NEG)
2146 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
2147 else if (GET_CODE (op1) == NEG)
2148 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
2150 /* (~a) + 1 -> -a */
2151 if (INTEGRAL_MODE_P (mode)
2152 && GET_CODE (op0) == NOT
2153 && trueop1 == const1_rtx)
2154 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
2156 /* Handle both-operands-constant cases. We can only add
2157 CONST_INTs to constants since the sum of relocatable symbols
2158 can't be handled by most assemblers. Don't add CONST_INT
2159 to CONST_INT since overflow won't be computed properly if wider
2160 than HOST_BITS_PER_WIDE_INT. */
2162 if ((GET_CODE (op0) == CONST
2163 || GET_CODE (op0) == SYMBOL_REF
2164 || GET_CODE (op0) == LABEL_REF)
2165 && CONST_INT_P (op1))
2166 return plus_constant (mode, op0, INTVAL (op1));
2167 else if ((GET_CODE (op1) == CONST
2168 || GET_CODE (op1) == SYMBOL_REF
2169 || GET_CODE (op1) == LABEL_REF)
2170 && CONST_INT_P (op0))
2171 return plus_constant (mode, op1, INTVAL (op0));
2173 /* See if this is something like X * C - X or vice versa or
2174 if the multiplication is written as a shift. If so, we can
2175 distribute and make a new multiply, shift, or maybe just
2176 have X (if C is 2 in the example above). But don't make
2177 something more expensive than we had before. */
2179 if (SCALAR_INT_MODE_P (mode))
2181 rtx lhs = op0, rhs = op1;
2183 wide_int coeff0 = wi::one (GET_MODE_PRECISION (mode));
2184 wide_int coeff1 = wi::one (GET_MODE_PRECISION (mode));
2186 if (GET_CODE (lhs) == NEG)
2188 coeff0 = wi::minus_one (GET_MODE_PRECISION (mode));
2189 lhs = XEXP (lhs, 0);
2191 else if (GET_CODE (lhs) == MULT
2192 && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
2194 coeff0 = rtx_mode_t (XEXP (lhs, 1), mode);
2195 lhs = XEXP (lhs, 0);
2197 else if (GET_CODE (lhs) == ASHIFT
2198 && CONST_INT_P (XEXP (lhs, 1))
2199 && INTVAL (XEXP (lhs, 1)) >= 0
2200 && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (mode))
2202 coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
2203 GET_MODE_PRECISION (mode));
2204 lhs = XEXP (lhs, 0);
2207 if (GET_CODE (rhs) == NEG)
2209 coeff1 = wi::minus_one (GET_MODE_PRECISION (mode));
2210 rhs = XEXP (rhs, 0);
2212 else if (GET_CODE (rhs) == MULT
2213 && CONST_INT_P (XEXP (rhs, 1)))
2215 coeff1 = rtx_mode_t (XEXP (rhs, 1), mode);
2216 rhs = XEXP (rhs, 0);
2218 else if (GET_CODE (rhs) == ASHIFT
2219 && CONST_INT_P (XEXP (rhs, 1))
2220 && INTVAL (XEXP (rhs, 1)) >= 0
2221 && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (mode))
2223 coeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
2224 GET_MODE_PRECISION (mode));
2225 rhs = XEXP (rhs, 0);
2228 if (rtx_equal_p (lhs, rhs))
2230 rtx orig = gen_rtx_PLUS (mode, op0, op1);
2231 rtx coeff;
2232 bool speed = optimize_function_for_speed_p (cfun);
2234 coeff = immed_wide_int_const (coeff0 + coeff1, mode);
2236 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2237 return (set_src_cost (tem, mode, speed)
2238 <= set_src_cost (orig, mode, speed) ? tem : 0);
2242 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
2243 if (CONST_SCALAR_INT_P (op1)
2244 && GET_CODE (op0) == XOR
2245 && CONST_SCALAR_INT_P (XEXP (op0, 1))
2246 && mode_signbit_p (mode, op1))
2247 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2248 simplify_gen_binary (XOR, mode, op1,
2249 XEXP (op0, 1)));
2251 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
2252 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2253 && GET_CODE (op0) == MULT
2254 && GET_CODE (XEXP (op0, 0)) == NEG)
2256 rtx in1, in2;
2258 in1 = XEXP (XEXP (op0, 0), 0);
2259 in2 = XEXP (op0, 1);
2260 return simplify_gen_binary (MINUS, mode, op1,
2261 simplify_gen_binary (MULT, mode,
2262 in1, in2));
2265 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
2266 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
2267 is 1. */
2268 if (COMPARISON_P (op0)
2269 && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
2270 || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
2271 && (reversed = reversed_comparison (op0, mode)))
2272 return
2273 simplify_gen_unary (NEG, mode, reversed, mode);
2275 /* If one of the operands is a PLUS or a MINUS, see if we can
2276 simplify this by the associative law.
2277 Don't use the associative law for floating point.
2278 The inaccuracy makes it nonassociative,
2279 and subtle programs can break if operations are associated. */
2281 if (INTEGRAL_MODE_P (mode)
2282 && (plus_minus_operand_p (op0)
2283 || plus_minus_operand_p (op1))
2284 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2285 return tem;
2287 /* Reassociate floating point addition only when the user
2288 specifies associative math operations. */
2289 if (FLOAT_MODE_P (mode)
2290 && flag_associative_math)
2292 tem = simplify_associative_operation (code, mode, op0, op1);
2293 if (tem)
2294 return tem;
2296 break;
2298 case COMPARE:
2299 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
2300 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
2301 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
2302 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
2304 rtx xop00 = XEXP (op0, 0);
2305 rtx xop10 = XEXP (op1, 0);
2307 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
2308 return xop00;
2310 if (REG_P (xop00) && REG_P (xop10)
2311 && REGNO (xop00) == REGNO (xop10)
2312 && GET_MODE (xop00) == mode
2313 && GET_MODE (xop10) == mode
2314 && GET_MODE_CLASS (mode) == MODE_CC)
2315 return xop00;
2317 break;
2319 case MINUS:
2320 /* We can't assume x-x is 0 even with non-IEEE floating point,
2321 but since it is zero except in very strange circumstances, we
2322 will treat it as zero with -ffinite-math-only. */
2323 if (rtx_equal_p (trueop0, trueop1)
2324 && ! side_effects_p (op0)
2325 && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
2326 return CONST0_RTX (mode);
2328 /* Change subtraction from zero into negation. (0 - x) is the
2329 same as -x when x is NaN, infinite, or finite and nonzero.
2330 But if the mode has signed zeros, and does not round towards
2331 -infinity, then 0 - 0 is 0, not -0. */
2332 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
2333 return simplify_gen_unary (NEG, mode, op1, mode);
2335 /* (-1 - a) is ~a, unless the expression contains symbolic
2336 constants, in which case not retaining additions and
2337 subtractions could cause invalid assembly to be produced. */
2338 if (trueop0 == constm1_rtx
2339 && !contains_symbolic_reference_p (op1))
2340 return simplify_gen_unary (NOT, mode, op1, mode);
2342 /* Subtracting 0 has no effect unless the mode has signed zeros
2343 and supports rounding towards -infinity. In such a case,
2344 0 - 0 is -0. */
2345 if (!(HONOR_SIGNED_ZEROS (mode)
2346 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
2347 && trueop1 == CONST0_RTX (mode))
2348 return op0;
2350 /* See if this is something like X * C - X or vice versa or
2351 if the multiplication is written as a shift. If so, we can
2352 distribute and make a new multiply, shift, or maybe just
2353 have X (if C is 2 in the example above). But don't make
2354 something more expensive than we had before. */
2356 if (SCALAR_INT_MODE_P (mode))
2358 rtx lhs = op0, rhs = op1;
2360 wide_int coeff0 = wi::one (GET_MODE_PRECISION (mode));
2361 wide_int negcoeff1 = wi::minus_one (GET_MODE_PRECISION (mode));
2363 if (GET_CODE (lhs) == NEG)
2365 coeff0 = wi::minus_one (GET_MODE_PRECISION (mode));
2366 lhs = XEXP (lhs, 0);
2368 else if (GET_CODE (lhs) == MULT
2369 && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
2371 coeff0 = rtx_mode_t (XEXP (lhs, 1), mode);
2372 lhs = XEXP (lhs, 0);
2374 else if (GET_CODE (lhs) == ASHIFT
2375 && CONST_INT_P (XEXP (lhs, 1))
2376 && INTVAL (XEXP (lhs, 1)) >= 0
2377 && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (mode))
2379 coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
2380 GET_MODE_PRECISION (mode));
2381 lhs = XEXP (lhs, 0);
2384 if (GET_CODE (rhs) == NEG)
2386 negcoeff1 = wi::one (GET_MODE_PRECISION (mode));
2387 rhs = XEXP (rhs, 0);
2389 else if (GET_CODE (rhs) == MULT
2390 && CONST_INT_P (XEXP (rhs, 1)))
2392 negcoeff1 = wi::neg (rtx_mode_t (XEXP (rhs, 1), mode));
2393 rhs = XEXP (rhs, 0);
2395 else if (GET_CODE (rhs) == ASHIFT
2396 && CONST_INT_P (XEXP (rhs, 1))
2397 && INTVAL (XEXP (rhs, 1)) >= 0
2398 && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (mode))
2400 negcoeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
2401 GET_MODE_PRECISION (mode));
2402 negcoeff1 = -negcoeff1;
2403 rhs = XEXP (rhs, 0);
2406 if (rtx_equal_p (lhs, rhs))
2408 rtx orig = gen_rtx_MINUS (mode, op0, op1);
2409 rtx coeff;
2410 bool speed = optimize_function_for_speed_p (cfun);
2412 coeff = immed_wide_int_const (coeff0 + negcoeff1, mode);
2414 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2415 return (set_src_cost (tem, mode, speed)
2416 <= set_src_cost (orig, mode, speed) ? tem : 0);
2420 /* (a - (-b)) -> (a + b). True even for IEEE. */
2421 if (GET_CODE (op1) == NEG)
2422 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
2424 /* (-x - c) may be simplified as (-c - x). */
2425 if (GET_CODE (op0) == NEG
2426 && (CONST_SCALAR_INT_P (op1) || CONST_DOUBLE_AS_FLOAT_P (op1)))
2428 tem = simplify_unary_operation (NEG, mode, op1, mode);
2429 if (tem)
2430 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
2433 /* Don't let a relocatable value get a negative coeff. */
2434 if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode)
2435 return simplify_gen_binary (PLUS, mode,
2436 op0,
2437 neg_const_int (mode, op1));
2439 /* (x - (x & y)) -> (x & ~y) */
2440 if (INTEGRAL_MODE_P (mode) && GET_CODE (op1) == AND)
2442 if (rtx_equal_p (op0, XEXP (op1, 0)))
2444 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
2445 GET_MODE (XEXP (op1, 1)));
2446 return simplify_gen_binary (AND, mode, op0, tem);
2448 if (rtx_equal_p (op0, XEXP (op1, 1)))
2450 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
2451 GET_MODE (XEXP (op1, 0)));
2452 return simplify_gen_binary (AND, mode, op0, tem);
2456 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
2457 by reversing the comparison code if valid. */
2458 if (STORE_FLAG_VALUE == 1
2459 && trueop0 == const1_rtx
2460 && COMPARISON_P (op1)
2461 && (reversed = reversed_comparison (op1, mode)))
2462 return reversed;
2464 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
2465 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2466 && GET_CODE (op1) == MULT
2467 && GET_CODE (XEXP (op1, 0)) == NEG)
2469 rtx in1, in2;
2471 in1 = XEXP (XEXP (op1, 0), 0);
2472 in2 = XEXP (op1, 1);
2473 return simplify_gen_binary (PLUS, mode,
2474 simplify_gen_binary (MULT, mode,
2475 in1, in2),
2476 op0);
2479 /* Canonicalize (minus (neg A) (mult B C)) to
2480 (minus (mult (neg B) C) A). */
2481 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2482 && GET_CODE (op1) == MULT
2483 && GET_CODE (op0) == NEG)
2485 rtx in1, in2;
2487 in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
2488 in2 = XEXP (op1, 1);
2489 return simplify_gen_binary (MINUS, mode,
2490 simplify_gen_binary (MULT, mode,
2491 in1, in2),
2492 XEXP (op0, 0));
2495 /* If one of the operands is a PLUS or a MINUS, see if we can
2496 simplify this by the associative law. This will, for example,
2497 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
2498 Don't use the associative law for floating point.
2499 The inaccuracy makes it nonassociative,
2500 and subtle programs can break if operations are associated. */
2502 if (INTEGRAL_MODE_P (mode)
2503 && (plus_minus_operand_p (op0)
2504 || plus_minus_operand_p (op1))
2505 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2506 return tem;
2507 break;
2509 case MULT:
2510 if (trueop1 == constm1_rtx)
2511 return simplify_gen_unary (NEG, mode, op0, mode);
2513 if (GET_CODE (op0) == NEG)
2515 rtx temp = simplify_unary_operation (NEG, mode, op1, mode);
2516 /* If op1 is a MULT as well and simplify_unary_operation
2517 just moved the NEG to the second operand, simplify_gen_binary
2518 below could through simplify_associative_operation move
2519 the NEG around again and recurse endlessly. */
2520 if (temp
2521 && GET_CODE (op1) == MULT
2522 && GET_CODE (temp) == MULT
2523 && XEXP (op1, 0) == XEXP (temp, 0)
2524 && GET_CODE (XEXP (temp, 1)) == NEG
2525 && XEXP (op1, 1) == XEXP (XEXP (temp, 1), 0))
2526 temp = NULL_RTX;
2527 if (temp)
2528 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), temp);
2530 if (GET_CODE (op1) == NEG)
2532 rtx temp = simplify_unary_operation (NEG, mode, op0, mode);
2533 /* If op0 is a MULT as well and simplify_unary_operation
2534 just moved the NEG to the second operand, simplify_gen_binary
2535 below could through simplify_associative_operation move
2536 the NEG around again and recurse endlessly. */
2537 if (temp
2538 && GET_CODE (op0) == MULT
2539 && GET_CODE (temp) == MULT
2540 && XEXP (op0, 0) == XEXP (temp, 0)
2541 && GET_CODE (XEXP (temp, 1)) == NEG
2542 && XEXP (op0, 1) == XEXP (XEXP (temp, 1), 0))
2543 temp = NULL_RTX;
2544 if (temp)
2545 return simplify_gen_binary (MULT, mode, temp, XEXP (op1, 0));
2548 /* Maybe simplify x * 0 to 0. The reduction is not valid if
2549 x is NaN, since x * 0 is then also NaN. Nor is it valid
2550 when the mode has signed zeros, since multiplying a negative
2551 number by 0 will give -0, not 0. */
2552 if (!HONOR_NANS (mode)
2553 && !HONOR_SIGNED_ZEROS (mode)
2554 && trueop1 == CONST0_RTX (mode)
2555 && ! side_effects_p (op0))
2556 return op1;
2558 /* In IEEE floating point, x*1 is not equivalent to x for
2559 signalling NaNs. */
2560 if (!HONOR_SNANS (mode)
2561 && trueop1 == CONST1_RTX (mode))
2562 return op0;
2564 /* Convert multiply by constant power of two into shift. */
2565 if (CONST_SCALAR_INT_P (trueop1))
2567 val = wi::exact_log2 (rtx_mode_t (trueop1, mode));
2568 if (val >= 0)
2569 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
2572 /* x*2 is x+x and x*(-1) is -x */
2573 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
2574 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
2575 && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
2576 && GET_MODE (op0) == mode)
2578 const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);
2580 if (real_equal (d1, &dconst2))
2581 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
2583 if (!HONOR_SNANS (mode)
2584 && real_equal (d1, &dconstm1))
2585 return simplify_gen_unary (NEG, mode, op0, mode);
2588 /* Optimize -x * -x as x * x. */
2589 if (FLOAT_MODE_P (mode)
2590 && GET_CODE (op0) == NEG
2591 && GET_CODE (op1) == NEG
2592 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2593 && !side_effects_p (XEXP (op0, 0)))
2594 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2596 /* Likewise, optimize abs(x) * abs(x) as x * x. */
2597 if (SCALAR_FLOAT_MODE_P (mode)
2598 && GET_CODE (op0) == ABS
2599 && GET_CODE (op1) == ABS
2600 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2601 && !side_effects_p (XEXP (op0, 0)))
2602 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2604 /* Reassociate multiplication, but for floating point MULTs
2605 only when the user specifies unsafe math optimizations. */
2606 if (! FLOAT_MODE_P (mode)
2607 || flag_unsafe_math_optimizations)
2609 tem = simplify_associative_operation (code, mode, op0, op1);
2610 if (tem)
2611 return tem;
2613 break;
2615 case IOR:
2616 if (trueop1 == CONST0_RTX (mode))
2617 return op0;
2618 if (INTEGRAL_MODE_P (mode)
2619 && trueop1 == CONSTM1_RTX (mode)
2620 && !side_effects_p (op0))
2621 return op1;
2622 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2623 return op0;
2624 /* A | (~A) -> -1 */
2625 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2626 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2627 && ! side_effects_p (op0)
2628 && SCALAR_INT_MODE_P (mode))
2629 return constm1_rtx;
2631 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
2632 if (CONST_INT_P (op1)
2633 && HWI_COMPUTABLE_MODE_P (mode)
2634 && (nonzero_bits (op0, mode) & ~UINTVAL (op1)) == 0
2635 && !side_effects_p (op0))
2636 return op1;
2638 /* Canonicalize (X & C1) | C2. */
2639 if (GET_CODE (op0) == AND
2640 && CONST_INT_P (trueop1)
2641 && CONST_INT_P (XEXP (op0, 1)))
2643 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
2644 HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
2645 HOST_WIDE_INT c2 = INTVAL (trueop1);
2647 /* If (C1&C2) == C1, then (X&C1)|C2 becomes X. */
2648 if ((c1 & c2) == c1
2649 && !side_effects_p (XEXP (op0, 0)))
2650 return trueop1;
2652 /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
2653 if (((c1|c2) & mask) == mask)
2654 return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);
2656 /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2. */
2657 if (((c1 & ~c2) & mask) != (c1 & mask))
2659 tem = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2660 gen_int_mode (c1 & ~c2, mode));
2661 return simplify_gen_binary (IOR, mode, tem, op1);
2665 /* Convert (A & B) | A to A. */
2666 if (GET_CODE (op0) == AND
2667 && (rtx_equal_p (XEXP (op0, 0), op1)
2668 || rtx_equal_p (XEXP (op0, 1), op1))
2669 && ! side_effects_p (XEXP (op0, 0))
2670 && ! side_effects_p (XEXP (op0, 1)))
2671 return op1;
2673 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2674 mode size to (rotate A CX). */
2676 if (GET_CODE (op1) == ASHIFT
2677 || GET_CODE (op1) == SUBREG)
2679 opleft = op1;
2680 opright = op0;
2682 else
2684 opright = op1;
2685 opleft = op0;
2688 if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
2689 && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
2690 && CONST_INT_P (XEXP (opleft, 1))
2691 && CONST_INT_P (XEXP (opright, 1))
2692 && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
2693 == GET_MODE_PRECISION (mode)))
2694 return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
2696 /* Same, but for ashift that has been "simplified" to a wider mode
2697 by simplify_shift_const. */
2699 if (GET_CODE (opleft) == SUBREG
2700 && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
2701 && GET_CODE (opright) == LSHIFTRT
2702 && GET_CODE (XEXP (opright, 0)) == SUBREG
2703 && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
2704 && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
2705 && (GET_MODE_SIZE (GET_MODE (opleft))
2706 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
2707 && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
2708 SUBREG_REG (XEXP (opright, 0)))
2709 && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
2710 && CONST_INT_P (XEXP (opright, 1))
2711 && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
2712 == GET_MODE_PRECISION (mode)))
2713 return gen_rtx_ROTATE (mode, XEXP (opright, 0),
2714 XEXP (SUBREG_REG (opleft), 1));
2716 /* If we have (ior (and (X C1) C2)), simplify this by making
2717 C1 as small as possible if C1 actually changes. */
2718 if (CONST_INT_P (op1)
2719 && (HWI_COMPUTABLE_MODE_P (mode)
2720 || INTVAL (op1) > 0)
2721 && GET_CODE (op0) == AND
2722 && CONST_INT_P (XEXP (op0, 1))
2723 && CONST_INT_P (op1)
2724 && (UINTVAL (XEXP (op0, 1)) & UINTVAL (op1)) != 0)
2726 rtx tmp = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2727 gen_int_mode (UINTVAL (XEXP (op0, 1))
2728 & ~UINTVAL (op1),
2729 mode));
2730 return simplify_gen_binary (IOR, mode, tmp, op1);
2733 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2734 a (sign_extend (plus ...)). Then check if OP1 is a CONST_INT and
2735 the PLUS does not affect any of the bits in OP1: then we can do
2736 the IOR as a PLUS and we can associate. This is valid if OP1
2737 can be safely shifted left C bits. */
2738 if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
2739 && GET_CODE (XEXP (op0, 0)) == PLUS
2740 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
2741 && CONST_INT_P (XEXP (op0, 1))
2742 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
2744 int count = INTVAL (XEXP (op0, 1));
2745 HOST_WIDE_INT mask = UINTVAL (trueop1) << count;
2747 if (mask >> count == INTVAL (trueop1)
2748 && trunc_int_for_mode (mask, mode) == mask
2749 && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
2750 return simplify_gen_binary (ASHIFTRT, mode,
2751 plus_constant (mode, XEXP (op0, 0),
2752 mask),
2753 XEXP (op0, 1));
2756 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
2757 if (tem)
2758 return tem;
2760 tem = simplify_associative_operation (code, mode, op0, op1);
2761 if (tem)
2762 return tem;
2763 break;
2765 case XOR:
2766 if (trueop1 == CONST0_RTX (mode))
2767 return op0;
2768 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2769 return simplify_gen_unary (NOT, mode, op0, mode);
2770 if (rtx_equal_p (trueop0, trueop1)
2771 && ! side_effects_p (op0)
2772 && GET_MODE_CLASS (mode) != MODE_CC)
2773 return CONST0_RTX (mode);
2775 /* Canonicalize XOR of the most significant bit to PLUS. */
2776 if (CONST_SCALAR_INT_P (op1)
2777 && mode_signbit_p (mode, op1))
2778 return simplify_gen_binary (PLUS, mode, op0, op1);
2779 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2780 if (CONST_SCALAR_INT_P (op1)
2781 && GET_CODE (op0) == PLUS
2782 && CONST_SCALAR_INT_P (XEXP (op0, 1))
2783 && mode_signbit_p (mode, XEXP (op0, 1)))
2784 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2785 simplify_gen_binary (XOR, mode, op1,
2786 XEXP (op0, 1)));
2788 /* If we are XORing two things that have no bits in common,
2789 convert them into an IOR. This helps to detect rotation encoded
2790 using those methods and possibly other simplifications. */
2792 if (HWI_COMPUTABLE_MODE_P (mode)
2793 && (nonzero_bits (op0, mode)
2794 & nonzero_bits (op1, mode)) == 0)
2795 return (simplify_gen_binary (IOR, mode, op0, op1));
2797 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2798 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2799 (NOT y). */
2801 int num_negated = 0;
2803 if (GET_CODE (op0) == NOT)
2804 num_negated++, op0 = XEXP (op0, 0);
2805 if (GET_CODE (op1) == NOT)
2806 num_negated++, op1 = XEXP (op1, 0);
2808 if (num_negated == 2)
2809 return simplify_gen_binary (XOR, mode, op0, op1);
2810 else if (num_negated == 1)
2811 return simplify_gen_unary (NOT, mode,
2812 simplify_gen_binary (XOR, mode, op0, op1),
2813 mode);
2816 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2817 correspond to a machine insn or result in further simplifications
2818 if B is a constant. */
2820 if (GET_CODE (op0) == AND
2821 && rtx_equal_p (XEXP (op0, 1), op1)
2822 && ! side_effects_p (op1))
2823 return simplify_gen_binary (AND, mode,
2824 simplify_gen_unary (NOT, mode,
2825 XEXP (op0, 0), mode),
2826 op1);
2828 else if (GET_CODE (op0) == AND
2829 && rtx_equal_p (XEXP (op0, 0), op1)
2830 && ! side_effects_p (op1))
2831 return simplify_gen_binary (AND, mode,
2832 simplify_gen_unary (NOT, mode,
2833 XEXP (op0, 1), mode),
2834 op1);
2836 /* Given (xor (ior (xor A B) C) D), where B, C and D are
2837 constants, simplify to (xor (ior A C) (B&~C)^D), canceling
2838 out bits inverted twice and not set by C. Similarly, given
2839 (xor (and (xor A B) C) D), simplify without inverting C in
2840 the xor operand: (xor (and A C) (B&C)^D).
2842 else if ((GET_CODE (op0) == IOR || GET_CODE (op0) == AND)
2843 && GET_CODE (XEXP (op0, 0)) == XOR
2844 && CONST_INT_P (op1)
2845 && CONST_INT_P (XEXP (op0, 1))
2846 && CONST_INT_P (XEXP (XEXP (op0, 0), 1)))
2848 enum rtx_code op = GET_CODE (op0);
2849 rtx a = XEXP (XEXP (op0, 0), 0);
2850 rtx b = XEXP (XEXP (op0, 0), 1);
2851 rtx c = XEXP (op0, 1);
2852 rtx d = op1;
2853 HOST_WIDE_INT bval = INTVAL (b);
2854 HOST_WIDE_INT cval = INTVAL (c);
2855 HOST_WIDE_INT dval = INTVAL (d);
2856 HOST_WIDE_INT xcval;
2858 if (op == IOR)
2859 xcval = ~cval;
2860 else
2861 xcval = cval;
2863 return simplify_gen_binary (XOR, mode,
2864 simplify_gen_binary (op, mode, a, c),
2865 gen_int_mode ((bval & xcval) ^ dval,
2866 mode));
2869 /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
2870 we can transform like this:
2871 (A&B)^C == ~(A&B)&C | ~C&(A&B)
2872 == (~A|~B)&C | ~C&(A&B) * DeMorgan's Law
2873 == ~A&C | ~B&C | A&(~C&B) * Distribute and re-order
2874 Attempt a few simplifications when B and C are both constants. */
2875 if (GET_CODE (op0) == AND
2876 && CONST_INT_P (op1)
2877 && CONST_INT_P (XEXP (op0, 1)))
2879 rtx a = XEXP (op0, 0);
2880 rtx b = XEXP (op0, 1);
2881 rtx c = op1;
2882 HOST_WIDE_INT bval = INTVAL (b);
2883 HOST_WIDE_INT cval = INTVAL (c);
2885 /* Instead of computing ~A&C, we compute its negated value,
2886 ~(A|~C). If it yields -1, ~A&C is zero, so we can
2887 optimize for sure. If it does not simplify, we still try
2888 to compute ~A&C below, but since that always allocates
2889 RTL, we don't try that before committing to returning a
2890 simplified expression. */
2891 rtx n_na_c = simplify_binary_operation (IOR, mode, a,
2892 GEN_INT (~cval));
2894 if ((~cval & bval) == 0)
2896 rtx na_c = NULL_RTX;
2897 if (n_na_c)
2898 na_c = simplify_gen_unary (NOT, mode, n_na_c, mode);
2899 else
2901 /* If ~A does not simplify, don't bother: we don't
2902 want to simplify 2 operations into 3, and if na_c
2903 were to simplify with na, n_na_c would have
2904 simplified as well. */
2905 rtx na = simplify_unary_operation (NOT, mode, a, mode);
2906 if (na)
2907 na_c = simplify_gen_binary (AND, mode, na, c);
2910 /* Try to simplify ~A&C | ~B&C. */
2911 if (na_c != NULL_RTX)
2912 return simplify_gen_binary (IOR, mode, na_c,
2913 gen_int_mode (~bval & cval, mode));
2915 else
2917 /* If ~A&C is zero, simplify A&(~C&B) | ~B&C. */
2918 if (n_na_c == CONSTM1_RTX (mode))
2920 rtx a_nc_b = simplify_gen_binary (AND, mode, a,
2921 gen_int_mode (~cval & bval,
2922 mode));
2923 return simplify_gen_binary (IOR, mode, a_nc_b,
2924 gen_int_mode (~bval & cval,
2925 mode));
2930 /* If we have (xor (and (xor A B) C) A) with C a constant we can instead
2931 do (ior (and A ~C) (and B C)) which is a machine instruction on some
2932 machines, and also has shorter instruction path length. */
2933 if (GET_CODE (op0) == AND
2934 && GET_CODE (XEXP (op0, 0)) == XOR
2935 && CONST_INT_P (XEXP (op0, 1))
2936 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), trueop1))
2938 rtx a = trueop1;
2939 rtx b = XEXP (XEXP (op0, 0), 1);
2940 rtx c = XEXP (op0, 1);
2941 rtx nc = simplify_gen_unary (NOT, mode, c, mode);
2942 rtx a_nc = simplify_gen_binary (AND, mode, a, nc);
2943 rtx bc = simplify_gen_binary (AND, mode, b, c);
2944 return simplify_gen_binary (IOR, mode, a_nc, bc);
2946 /* Similarly, (xor (and (xor A B) C) B) as (ior (and A C) (and B ~C)) */
2947 else if (GET_CODE (op0) == AND
2948 && GET_CODE (XEXP (op0, 0)) == XOR
2949 && CONST_INT_P (XEXP (op0, 1))
2950 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), trueop1))
2952 rtx a = XEXP (XEXP (op0, 0), 0);
2953 rtx b = trueop1;
2954 rtx c = XEXP (op0, 1);
2955 rtx nc = simplify_gen_unary (NOT, mode, c, mode);
2956 rtx b_nc = simplify_gen_binary (AND, mode, b, nc);
2957 rtx ac = simplify_gen_binary (AND, mode, a, c);
2958 return simplify_gen_binary (IOR, mode, ac, b_nc);
2961 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2962 comparison if STORE_FLAG_VALUE is 1. */
2963 if (STORE_FLAG_VALUE == 1
2964 && trueop1 == const1_rtx
2965 && COMPARISON_P (op0)
2966 && (reversed = reversed_comparison (op0, mode)))
2967 return reversed;
2969 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2970 is (lt foo (const_int 0)), so we can perform the above
2971 simplification if STORE_FLAG_VALUE is 1. */
2973 if (STORE_FLAG_VALUE == 1
2974 && trueop1 == const1_rtx
2975 && GET_CODE (op0) == LSHIFTRT
2976 && CONST_INT_P (XEXP (op0, 1))
2977 && INTVAL (XEXP (op0, 1)) == GET_MODE_PRECISION (mode) - 1)
2978 return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);
2980 /* (xor (comparison foo bar) (const_int sign-bit))
2981 when STORE_FLAG_VALUE is the sign bit. */
2982 if (val_signbit_p (mode, STORE_FLAG_VALUE)
2983 && trueop1 == const_true_rtx
2984 && COMPARISON_P (op0)
2985 && (reversed = reversed_comparison (op0, mode)))
2986 return reversed;
2988 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
2989 if (tem)
2990 return tem;
2992 tem = simplify_associative_operation (code, mode, op0, op1);
2993 if (tem)
2994 return tem;
2995 break;
2997 case AND:
2998 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2999 return trueop1;
3000 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
3001 return op0;
3002 if (HWI_COMPUTABLE_MODE_P (mode))
3004 HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
3005 HOST_WIDE_INT nzop1;
3006 if (CONST_INT_P (trueop1))
3008 HOST_WIDE_INT val1 = INTVAL (trueop1);
3009 /* If we are turning off bits already known off in OP0, we need
3010 not do an AND. */
3011 if ((nzop0 & ~val1) == 0)
3012 return op0;
3014 nzop1 = nonzero_bits (trueop1, mode);
3015 /* If we are clearing all the nonzero bits, the result is zero. */
3016 if ((nzop1 & nzop0) == 0
3017 && !side_effects_p (op0) && !side_effects_p (op1))
3018 return CONST0_RTX (mode);
3020 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
3021 && GET_MODE_CLASS (mode) != MODE_CC)
3022 return op0;
3023 /* A & (~A) -> 0 */
3024 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
3025 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
3026 && ! side_effects_p (op0)
3027 && GET_MODE_CLASS (mode) != MODE_CC)
3028 return CONST0_RTX (mode);
3030 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
3031 there are no nonzero bits of C outside of X's mode. */
3032 if ((GET_CODE (op0) == SIGN_EXTEND
3033 || GET_CODE (op0) == ZERO_EXTEND)
3034 && CONST_INT_P (trueop1)
3035 && HWI_COMPUTABLE_MODE_P (mode)
3036 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
3037 & UINTVAL (trueop1)) == 0)
3039 machine_mode imode = GET_MODE (XEXP (op0, 0));
3040 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
3041 gen_int_mode (INTVAL (trueop1),
3042 imode));
3043 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
3046 /* Transform (and (truncate X) C) into (truncate (and X C)). This way
3047 we might be able to further simplify the AND with X and potentially
3048 remove the truncation altogether. */
3049 if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
3051 rtx x = XEXP (op0, 0);
3052 machine_mode xmode = GET_MODE (x);
3053 tem = simplify_gen_binary (AND, xmode, x,
3054 gen_int_mode (INTVAL (trueop1), xmode));
3055 return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
3058 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
3059 if (GET_CODE (op0) == IOR
3060 && CONST_INT_P (trueop1)
3061 && CONST_INT_P (XEXP (op0, 1)))
3063 HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
3064 return simplify_gen_binary (IOR, mode,
3065 simplify_gen_binary (AND, mode,
3066 XEXP (op0, 0), op1),
3067 gen_int_mode (tmp, mode));
3070 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
3071 insn (and may simplify more). */
3072 if (GET_CODE (op0) == XOR
3073 && rtx_equal_p (XEXP (op0, 0), op1)
3074 && ! side_effects_p (op1))
3075 return simplify_gen_binary (AND, mode,
3076 simplify_gen_unary (NOT, mode,
3077 XEXP (op0, 1), mode),
3078 op1);
3080 if (GET_CODE (op0) == XOR
3081 && rtx_equal_p (XEXP (op0, 1), op1)
3082 && ! side_effects_p (op1))
3083 return simplify_gen_binary (AND, mode,
3084 simplify_gen_unary (NOT, mode,
3085 XEXP (op0, 0), mode),
3086 op1);
3088 /* Similarly for (~(A ^ B)) & A. */
3089 if (GET_CODE (op0) == NOT
3090 && GET_CODE (XEXP (op0, 0)) == XOR
3091 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
3092 && ! side_effects_p (op1))
3093 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
3095 if (GET_CODE (op0) == NOT
3096 && GET_CODE (XEXP (op0, 0)) == XOR
3097 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
3098 && ! side_effects_p (op1))
3099 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
3101 /* Convert (A | B) & A to A. */
3102 if (GET_CODE (op0) == IOR
3103 && (rtx_equal_p (XEXP (op0, 0), op1)
3104 || rtx_equal_p (XEXP (op0, 1), op1))
3105 && ! side_effects_p (XEXP (op0, 0))
3106 && ! side_effects_p (XEXP (op0, 1)))
3107 return op1;
3109 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
3110 ((A & N) + B) & M -> (A + B) & M
3111 Similarly if (N & M) == 0,
3112 ((A | N) + B) & M -> (A + B) & M
3113 and for - instead of + and/or ^ instead of |.
3114 Also, if (N & M) == 0, then
3115 (A +- N) & M -> A & M. */
3116 if (CONST_INT_P (trueop1)
3117 && HWI_COMPUTABLE_MODE_P (mode)
3118 && ~UINTVAL (trueop1)
3119 && (UINTVAL (trueop1) & (UINTVAL (trueop1) + 1)) == 0
3120 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
3122 rtx pmop[2];
3123 int which;
3125 pmop[0] = XEXP (op0, 0);
3126 pmop[1] = XEXP (op0, 1);
3128 if (CONST_INT_P (pmop[1])
3129 && (UINTVAL (pmop[1]) & UINTVAL (trueop1)) == 0)
3130 return simplify_gen_binary (AND, mode, pmop[0], op1);
3132 for (which = 0; which < 2; which++)
3134 tem = pmop[which];
3135 switch (GET_CODE (tem))
3137 case AND:
3138 if (CONST_INT_P (XEXP (tem, 1))
3139 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1))
3140 == UINTVAL (trueop1))
3141 pmop[which] = XEXP (tem, 0);
3142 break;
3143 case IOR:
3144 case XOR:
3145 if (CONST_INT_P (XEXP (tem, 1))
3146 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1)) == 0)
3147 pmop[which] = XEXP (tem, 0);
3148 break;
3149 default:
3150 break;
3154 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
3156 tem = simplify_gen_binary (GET_CODE (op0), mode,
3157 pmop[0], pmop[1]);
3158 return simplify_gen_binary (code, mode, tem, op1);
3162 /* (and X (ior (not X) Y) -> (and X Y) */
3163 if (GET_CODE (op1) == IOR
3164 && GET_CODE (XEXP (op1, 0)) == NOT
3165 && rtx_equal_p (op0, XEXP (XEXP (op1, 0), 0)))
3166 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));
3168 /* (and (ior (not X) Y) X) -> (and X Y) */
3169 if (GET_CODE (op0) == IOR
3170 && GET_CODE (XEXP (op0, 0)) == NOT
3171 && rtx_equal_p (op1, XEXP (XEXP (op0, 0), 0)))
3172 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));
3174 /* (and X (ior Y (not X)) -> (and X Y) */
3175 if (GET_CODE (op1) == IOR
3176 && GET_CODE (XEXP (op1, 1)) == NOT
3177 && rtx_equal_p (op0, XEXP (XEXP (op1, 1), 0)))
3178 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 0));
3180 /* (and (ior Y (not X)) X) -> (and X Y) */
3181 if (GET_CODE (op0) == IOR
3182 && GET_CODE (XEXP (op0, 1)) == NOT
3183 && rtx_equal_p (op1, XEXP (XEXP (op0, 1), 0)))
3184 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 0));
3186 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
3187 if (tem)
3188 return tem;
3190 tem = simplify_associative_operation (code, mode, op0, op1);
3191 if (tem)
3192 return tem;
3193 break;
3195 case UDIV:
3196 /* 0/x is 0 (or x&0 if x has side-effects). */
3197 if (trueop0 == CONST0_RTX (mode))
3199 if (side_effects_p (op1))
3200 return simplify_gen_binary (AND, mode, op1, trueop0);
3201 return trueop0;
3203 /* x/1 is x. */
3204 if (trueop1 == CONST1_RTX (mode))
3206 tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3207 if (tem)
3208 return tem;
3210 /* Convert divide by power of two into shift. */
3211 if (CONST_INT_P (trueop1)
3212 && (val = exact_log2 (UINTVAL (trueop1))) > 0)
3213 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
3214 break;
3216 case DIV:
3217 /* Handle floating point and integers separately. */
3218 if (SCALAR_FLOAT_MODE_P (mode))
3220 /* Maybe change 0.0 / x to 0.0. This transformation isn't
3221 safe for modes with NaNs, since 0.0 / 0.0 will then be
3222 NaN rather than 0.0. Nor is it safe for modes with signed
3223 zeros, since dividing 0 by a negative number gives -0.0 */
3224 if (trueop0 == CONST0_RTX (mode)
3225 && !HONOR_NANS (mode)
3226 && !HONOR_SIGNED_ZEROS (mode)
3227 && ! side_effects_p (op1))
3228 return op0;
3229 /* x/1.0 is x. */
3230 if (trueop1 == CONST1_RTX (mode)
3231 && !HONOR_SNANS (mode))
3232 return op0;
3234 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
3235 && trueop1 != CONST0_RTX (mode))
3237 const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);
3239 /* x/-1.0 is -x. */
3240 if (real_equal (d1, &dconstm1)
3241 && !HONOR_SNANS (mode))
3242 return simplify_gen_unary (NEG, mode, op0, mode);
3244 /* Change FP division by a constant into multiplication.
3245 Only do this with -freciprocal-math. */
3246 if (flag_reciprocal_math
3247 && !real_equal (d1, &dconst0))
3249 REAL_VALUE_TYPE d;
3250 real_arithmetic (&d, RDIV_EXPR, &dconst1, d1);
3251 tem = const_double_from_real_value (d, mode);
3252 return simplify_gen_binary (MULT, mode, op0, tem);
3256 else if (SCALAR_INT_MODE_P (mode))
3258 /* 0/x is 0 (or x&0 if x has side-effects). */
3259 if (trueop0 == CONST0_RTX (mode)
3260 && !cfun->can_throw_non_call_exceptions)
3262 if (side_effects_p (op1))
3263 return simplify_gen_binary (AND, mode, op1, trueop0);
3264 return trueop0;
3266 /* x/1 is x. */
3267 if (trueop1 == CONST1_RTX (mode))
3269 tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3270 if (tem)
3271 return tem;
3273 /* x/-1 is -x. */
3274 if (trueop1 == constm1_rtx)
3276 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3277 if (x)
3278 return simplify_gen_unary (NEG, mode, x, mode);
3281 break;
3283 case UMOD:
3284 /* 0%x is 0 (or x&0 if x has side-effects). */
3285 if (trueop0 == CONST0_RTX (mode))
3287 if (side_effects_p (op1))
3288 return simplify_gen_binary (AND, mode, op1, trueop0);
3289 return trueop0;
3291 /* x%1 is 0 (of x&0 if x has side-effects). */
3292 if (trueop1 == CONST1_RTX (mode))
3294 if (side_effects_p (op0))
3295 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3296 return CONST0_RTX (mode);
3298 /* Implement modulus by power of two as AND. */
3299 if (CONST_INT_P (trueop1)
3300 && exact_log2 (UINTVAL (trueop1)) > 0)
3301 return simplify_gen_binary (AND, mode, op0,
3302 gen_int_mode (INTVAL (op1) - 1, mode));
3303 break;
3305 case MOD:
3306 /* 0%x is 0 (or x&0 if x has side-effects). */
3307 if (trueop0 == CONST0_RTX (mode))
3309 if (side_effects_p (op1))
3310 return simplify_gen_binary (AND, mode, op1, trueop0);
3311 return trueop0;
3313 /* x%1 and x%-1 is 0 (or x&0 if x has side-effects). */
3314 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
3316 if (side_effects_p (op0))
3317 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3318 return CONST0_RTX (mode);
3320 break;
3322 case ROTATERT:
3323 case ROTATE:
3324 /* Canonicalize rotates by constant amount. If op1 is bitsize / 2,
3325 prefer left rotation, if op1 is from bitsize / 2 + 1 to
3326 bitsize - 1, use other direction of rotate with 1 .. bitsize / 2 - 1
3327 amount instead. */
3328 #if defined(HAVE_rotate) && defined(HAVE_rotatert)
3329 if (CONST_INT_P (trueop1)
3330 && IN_RANGE (INTVAL (trueop1),
3331 GET_MODE_PRECISION (mode) / 2 + (code == ROTATE),
3332 GET_MODE_PRECISION (mode) - 1))
3333 return simplify_gen_binary (code == ROTATE ? ROTATERT : ROTATE,
3334 mode, op0, GEN_INT (GET_MODE_PRECISION (mode)
3335 - INTVAL (trueop1)));
3336 #endif
3337 /* FALLTHRU */
3338 case ASHIFTRT:
3339 if (trueop1 == CONST0_RTX (mode))
3340 return op0;
3341 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3342 return op0;
3343 /* Rotating ~0 always results in ~0. */
3344 if (CONST_INT_P (trueop0) && width <= HOST_BITS_PER_WIDE_INT
3345 && UINTVAL (trueop0) == GET_MODE_MASK (mode)
3346 && ! side_effects_p (op1))
3347 return op0;
3349 canonicalize_shift:
3350 /* Given:
3351 scalar modes M1, M2
3352 scalar constants c1, c2
3353 size (M2) > size (M1)
3354 c1 == size (M2) - size (M1)
3355 optimize:
3356 ([a|l]shiftrt:M1 (subreg:M1 (lshiftrt:M2 (reg:M2) (const_int <c1>))
3357 <low_part>)
3358 (const_int <c2>))
3360 (subreg:M1 ([a|l]shiftrt:M2 (reg:M2) (const_int <c1 + c2>))
3361 <low_part>). */
3362 if ((code == ASHIFTRT || code == LSHIFTRT)
3363 && !VECTOR_MODE_P (mode)
3364 && SUBREG_P (op0)
3365 && CONST_INT_P (op1)
3366 && GET_CODE (SUBREG_REG (op0)) == LSHIFTRT
3367 && !VECTOR_MODE_P (GET_MODE (SUBREG_REG (op0)))
3368 && CONST_INT_P (XEXP (SUBREG_REG (op0), 1))
3369 && (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0)))
3370 > GET_MODE_BITSIZE (mode))
3371 && (INTVAL (XEXP (SUBREG_REG (op0), 1))
3372 == (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0)))
3373 - GET_MODE_BITSIZE (mode)))
3374 && subreg_lowpart_p (op0))
3376 rtx tmp = GEN_INT (INTVAL (XEXP (SUBREG_REG (op0), 1))
3377 + INTVAL (op1));
3378 machine_mode inner_mode = GET_MODE (SUBREG_REG (op0));
3379 tmp = simplify_gen_binary (code,
3380 GET_MODE (SUBREG_REG (op0)),
3381 XEXP (SUBREG_REG (op0), 0),
3382 tmp);
3383 return lowpart_subreg (mode, tmp, inner_mode);
3386 if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
3388 val = INTVAL (op1) & (GET_MODE_PRECISION (mode) - 1);
3389 if (val != INTVAL (op1))
3390 return simplify_gen_binary (code, mode, op0, GEN_INT (val));
3392 break;
3394 case ASHIFT:
3395 case SS_ASHIFT:
3396 case US_ASHIFT:
3397 if (trueop1 == CONST0_RTX (mode))
3398 return op0;
3399 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3400 return op0;
3401 goto canonicalize_shift;
3403 case LSHIFTRT:
3404 if (trueop1 == CONST0_RTX (mode))
3405 return op0;
3406 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3407 return op0;
3408 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
3409 if (GET_CODE (op0) == CLZ
3410 && CONST_INT_P (trueop1)
3411 && STORE_FLAG_VALUE == 1
3412 && INTVAL (trueop1) < (HOST_WIDE_INT)width)
3414 machine_mode imode = GET_MODE (XEXP (op0, 0));
3415 unsigned HOST_WIDE_INT zero_val = 0;
3417 if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
3418 && zero_val == GET_MODE_PRECISION (imode)
3419 && INTVAL (trueop1) == exact_log2 (zero_val))
3420 return simplify_gen_relational (EQ, mode, imode,
3421 XEXP (op0, 0), const0_rtx);
3423 goto canonicalize_shift;
3425 case SMIN:
3426 if (width <= HOST_BITS_PER_WIDE_INT
3427 && mode_signbit_p (mode, trueop1)
3428 && ! side_effects_p (op0))
3429 return op1;
3430 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3431 return op0;
3432 tem = simplify_associative_operation (code, mode, op0, op1);
3433 if (tem)
3434 return tem;
3435 break;
3437 case SMAX:
3438 if (width <= HOST_BITS_PER_WIDE_INT
3439 && CONST_INT_P (trueop1)
3440 && (UINTVAL (trueop1) == GET_MODE_MASK (mode) >> 1)
3441 && ! side_effects_p (op0))
3442 return op1;
3443 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3444 return op0;
3445 tem = simplify_associative_operation (code, mode, op0, op1);
3446 if (tem)
3447 return tem;
3448 break;
3450 case UMIN:
3451 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
3452 return op1;
3453 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3454 return op0;
3455 tem = simplify_associative_operation (code, mode, op0, op1);
3456 if (tem)
3457 return tem;
3458 break;
3460 case UMAX:
3461 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
3462 return op1;
3463 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3464 return op0;
3465 tem = simplify_associative_operation (code, mode, op0, op1);
3466 if (tem)
3467 return tem;
3468 break;
3470 case SS_PLUS:
3471 case US_PLUS:
3472 case SS_MINUS:
3473 case US_MINUS:
3474 case SS_MULT:
3475 case US_MULT:
3476 case SS_DIV:
3477 case US_DIV:
3478 /* ??? There are simplifications that can be done. */
3479 return 0;
3481 case VEC_SELECT:
3482 if (!VECTOR_MODE_P (mode))
3484 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3485 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
3486 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3487 gcc_assert (XVECLEN (trueop1, 0) == 1);
3488 gcc_assert (CONST_INT_P (XVECEXP (trueop1, 0, 0)));
3490 if (GET_CODE (trueop0) == CONST_VECTOR)
3491 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
3492 (trueop1, 0, 0)));
3494 /* Extract a scalar element from a nested VEC_SELECT expression
3495 (with optional nested VEC_CONCAT expression). Some targets
3496 (i386) extract scalar element from a vector using chain of
3497 nested VEC_SELECT expressions. When input operand is a memory
3498 operand, this operation can be simplified to a simple scalar
3499 load from an offseted memory address. */
3500 if (GET_CODE (trueop0) == VEC_SELECT)
3502 rtx op0 = XEXP (trueop0, 0);
3503 rtx op1 = XEXP (trueop0, 1);
3505 machine_mode opmode = GET_MODE (op0);
3506 int elt_size = GET_MODE_UNIT_SIZE (opmode);
3507 int n_elts = GET_MODE_SIZE (opmode) / elt_size;
3509 int i = INTVAL (XVECEXP (trueop1, 0, 0));
3510 int elem;
3512 rtvec vec;
3513 rtx tmp_op, tmp;
3515 gcc_assert (GET_CODE (op1) == PARALLEL);
3516 gcc_assert (i < n_elts);
3518 /* Select element, pointed by nested selector. */
3519 elem = INTVAL (XVECEXP (op1, 0, i));
3521 /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */
3522 if (GET_CODE (op0) == VEC_CONCAT)
3524 rtx op00 = XEXP (op0, 0);
3525 rtx op01 = XEXP (op0, 1);
3527 machine_mode mode00, mode01;
3528 int n_elts00, n_elts01;
3530 mode00 = GET_MODE (op00);
3531 mode01 = GET_MODE (op01);
3533 /* Find out number of elements of each operand. */
3534 if (VECTOR_MODE_P (mode00))
3536 elt_size = GET_MODE_UNIT_SIZE (mode00);
3537 n_elts00 = GET_MODE_SIZE (mode00) / elt_size;
3539 else
3540 n_elts00 = 1;
3542 if (VECTOR_MODE_P (mode01))
3544 elt_size = GET_MODE_UNIT_SIZE (mode01);
3545 n_elts01 = GET_MODE_SIZE (mode01) / elt_size;
3547 else
3548 n_elts01 = 1;
3550 gcc_assert (n_elts == n_elts00 + n_elts01);
3552 /* Select correct operand of VEC_CONCAT
3553 and adjust selector. */
3554 if (elem < n_elts01)
3555 tmp_op = op00;
3556 else
3558 tmp_op = op01;
3559 elem -= n_elts00;
3562 else
3563 tmp_op = op0;
3565 vec = rtvec_alloc (1);
3566 RTVEC_ELT (vec, 0) = GEN_INT (elem);
3568 tmp = gen_rtx_fmt_ee (code, mode,
3569 tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
3570 return tmp;
3572 if (GET_CODE (trueop0) == VEC_DUPLICATE
3573 && GET_MODE (XEXP (trueop0, 0)) == mode)
3574 return XEXP (trueop0, 0);
3576 else
3578 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3579 gcc_assert (GET_MODE_INNER (mode)
3580 == GET_MODE_INNER (GET_MODE (trueop0)));
3581 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3583 if (GET_CODE (trueop0) == CONST_VECTOR)
3585 int elt_size = GET_MODE_UNIT_SIZE (mode);
3586 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3587 rtvec v = rtvec_alloc (n_elts);
3588 unsigned int i;
3590 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
3591 for (i = 0; i < n_elts; i++)
3593 rtx x = XVECEXP (trueop1, 0, i);
3595 gcc_assert (CONST_INT_P (x));
3596 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
3597 INTVAL (x));
3600 return gen_rtx_CONST_VECTOR (mode, v);
3603 /* Recognize the identity. */
3604 if (GET_MODE (trueop0) == mode)
3606 bool maybe_ident = true;
3607 for (int i = 0; i < XVECLEN (trueop1, 0); i++)
3609 rtx j = XVECEXP (trueop1, 0, i);
3610 if (!CONST_INT_P (j) || INTVAL (j) != i)
3612 maybe_ident = false;
3613 break;
3616 if (maybe_ident)
3617 return trueop0;
3620 /* If we build {a,b} then permute it, build the result directly. */
3621 if (XVECLEN (trueop1, 0) == 2
3622 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3623 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3624 && GET_CODE (trueop0) == VEC_CONCAT
3625 && GET_CODE (XEXP (trueop0, 0)) == VEC_CONCAT
3626 && GET_MODE (XEXP (trueop0, 0)) == mode
3627 && GET_CODE (XEXP (trueop0, 1)) == VEC_CONCAT
3628 && GET_MODE (XEXP (trueop0, 1)) == mode)
3630 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3631 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3632 rtx subop0, subop1;
3634 gcc_assert (i0 < 4 && i1 < 4);
3635 subop0 = XEXP (XEXP (trueop0, i0 / 2), i0 % 2);
3636 subop1 = XEXP (XEXP (trueop0, i1 / 2), i1 % 2);
3638 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3641 if (XVECLEN (trueop1, 0) == 2
3642 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3643 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3644 && GET_CODE (trueop0) == VEC_CONCAT
3645 && GET_MODE (trueop0) == mode)
3647 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3648 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3649 rtx subop0, subop1;
3651 gcc_assert (i0 < 2 && i1 < 2);
3652 subop0 = XEXP (trueop0, i0);
3653 subop1 = XEXP (trueop0, i1);
3655 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3658 /* If we select one half of a vec_concat, return that. */
3659 if (GET_CODE (trueop0) == VEC_CONCAT
3660 && CONST_INT_P (XVECEXP (trueop1, 0, 0)))
3662 rtx subop0 = XEXP (trueop0, 0);
3663 rtx subop1 = XEXP (trueop0, 1);
3664 machine_mode mode0 = GET_MODE (subop0);
3665 machine_mode mode1 = GET_MODE (subop1);
3666 int li = GET_MODE_UNIT_SIZE (mode0);
3667 int l0 = GET_MODE_SIZE (mode0) / li;
3668 int l1 = GET_MODE_SIZE (mode1) / li;
3669 int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3670 if (i0 == 0 && !side_effects_p (op1) && mode == mode0)
3672 bool success = true;
3673 for (int i = 1; i < l0; ++i)
3675 rtx j = XVECEXP (trueop1, 0, i);
3676 if (!CONST_INT_P (j) || INTVAL (j) != i)
3678 success = false;
3679 break;
3682 if (success)
3683 return subop0;
3685 if (i0 == l0 && !side_effects_p (op0) && mode == mode1)
3687 bool success = true;
3688 for (int i = 1; i < l1; ++i)
3690 rtx j = XVECEXP (trueop1, 0, i);
3691 if (!CONST_INT_P (j) || INTVAL (j) != i0 + i)
3693 success = false;
3694 break;
3697 if (success)
3698 return subop1;
3703 if (XVECLEN (trueop1, 0) == 1
3704 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3705 && GET_CODE (trueop0) == VEC_CONCAT)
3707 rtx vec = trueop0;
3708 int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
3710 /* Try to find the element in the VEC_CONCAT. */
3711 while (GET_MODE (vec) != mode
3712 && GET_CODE (vec) == VEC_CONCAT)
3714 HOST_WIDE_INT vec_size;
3716 if (CONST_INT_P (XEXP (vec, 0)))
3718 /* vec_concat of two const_ints doesn't make sense with
3719 respect to modes. */
3720 if (CONST_INT_P (XEXP (vec, 1)))
3721 return 0;
3723 vec_size = GET_MODE_SIZE (GET_MODE (trueop0))
3724 - GET_MODE_SIZE (GET_MODE (XEXP (vec, 1)));
3726 else
3727 vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
3729 if (offset < vec_size)
3730 vec = XEXP (vec, 0);
3731 else
3733 offset -= vec_size;
3734 vec = XEXP (vec, 1);
3736 vec = avoid_constant_pool_reference (vec);
3739 if (GET_MODE (vec) == mode)
3740 return vec;
3743 /* If we select elements in a vec_merge that all come from the same
3744 operand, select from that operand directly. */
3745 if (GET_CODE (op0) == VEC_MERGE)
3747 rtx trueop02 = avoid_constant_pool_reference (XEXP (op0, 2));
3748 if (CONST_INT_P (trueop02))
3750 unsigned HOST_WIDE_INT sel = UINTVAL (trueop02);
3751 bool all_operand0 = true;
3752 bool all_operand1 = true;
3753 for (int i = 0; i < XVECLEN (trueop1, 0); i++)
3755 rtx j = XVECEXP (trueop1, 0, i);
3756 if (sel & (HOST_WIDE_INT_1U << UINTVAL (j)))
3757 all_operand1 = false;
3758 else
3759 all_operand0 = false;
3761 if (all_operand0 && !side_effects_p (XEXP (op0, 1)))
3762 return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 0), op1);
3763 if (all_operand1 && !side_effects_p (XEXP (op0, 0)))
3764 return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 1), op1);
3768 /* If we have two nested selects that are inverses of each
3769 other, replace them with the source operand. */
3770 if (GET_CODE (trueop0) == VEC_SELECT
3771 && GET_MODE (XEXP (trueop0, 0)) == mode)
3773 rtx op0_subop1 = XEXP (trueop0, 1);
3774 gcc_assert (GET_CODE (op0_subop1) == PARALLEL);
3775 gcc_assert (XVECLEN (trueop1, 0) == GET_MODE_NUNITS (mode));
3777 /* Apply the outer ordering vector to the inner one. (The inner
3778 ordering vector is expressly permitted to be of a different
3779 length than the outer one.) If the result is { 0, 1, ..., n-1 }
3780 then the two VEC_SELECTs cancel. */
3781 for (int i = 0; i < XVECLEN (trueop1, 0); ++i)
3783 rtx x = XVECEXP (trueop1, 0, i);
3784 if (!CONST_INT_P (x))
3785 return 0;
3786 rtx y = XVECEXP (op0_subop1, 0, INTVAL (x));
3787 if (!CONST_INT_P (y) || i != INTVAL (y))
3788 return 0;
3790 return XEXP (trueop0, 0);
3793 return 0;
3794 case VEC_CONCAT:
3796 machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
3797 ? GET_MODE (trueop0)
3798 : GET_MODE_INNER (mode));
3799 machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
3800 ? GET_MODE (trueop1)
3801 : GET_MODE_INNER (mode));
3803 gcc_assert (VECTOR_MODE_P (mode));
3804 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
3805 == GET_MODE_SIZE (mode));
3807 if (VECTOR_MODE_P (op0_mode))
3808 gcc_assert (GET_MODE_INNER (mode)
3809 == GET_MODE_INNER (op0_mode));
3810 else
3811 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
3813 if (VECTOR_MODE_P (op1_mode))
3814 gcc_assert (GET_MODE_INNER (mode)
3815 == GET_MODE_INNER (op1_mode));
3816 else
3817 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
3819 if ((GET_CODE (trueop0) == CONST_VECTOR
3820 || CONST_SCALAR_INT_P (trueop0)
3821 || CONST_DOUBLE_AS_FLOAT_P (trueop0))
3822 && (GET_CODE (trueop1) == CONST_VECTOR
3823 || CONST_SCALAR_INT_P (trueop1)
3824 || CONST_DOUBLE_AS_FLOAT_P (trueop1)))
3826 int elt_size = GET_MODE_UNIT_SIZE (mode);
3827 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3828 rtvec v = rtvec_alloc (n_elts);
3829 unsigned int i;
3830 unsigned in_n_elts = 1;
3832 if (VECTOR_MODE_P (op0_mode))
3833 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
3834 for (i = 0; i < n_elts; i++)
3836 if (i < in_n_elts)
3838 if (!VECTOR_MODE_P (op0_mode))
3839 RTVEC_ELT (v, i) = trueop0;
3840 else
3841 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
3843 else
3845 if (!VECTOR_MODE_P (op1_mode))
3846 RTVEC_ELT (v, i) = trueop1;
3847 else
3848 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
3849 i - in_n_elts);
3853 return gen_rtx_CONST_VECTOR (mode, v);
3856 /* Try to merge two VEC_SELECTs from the same vector into a single one.
3857 Restrict the transformation to avoid generating a VEC_SELECT with a
3858 mode unrelated to its operand. */
3859 if (GET_CODE (trueop0) == VEC_SELECT
3860 && GET_CODE (trueop1) == VEC_SELECT
3861 && rtx_equal_p (XEXP (trueop0, 0), XEXP (trueop1, 0))
3862 && GET_MODE (XEXP (trueop0, 0)) == mode)
3864 rtx par0 = XEXP (trueop0, 1);
3865 rtx par1 = XEXP (trueop1, 1);
3866 int len0 = XVECLEN (par0, 0);
3867 int len1 = XVECLEN (par1, 0);
3868 rtvec vec = rtvec_alloc (len0 + len1);
3869 for (int i = 0; i < len0; i++)
3870 RTVEC_ELT (vec, i) = XVECEXP (par0, 0, i);
3871 for (int i = 0; i < len1; i++)
3872 RTVEC_ELT (vec, len0 + i) = XVECEXP (par1, 0, i);
3873 return simplify_gen_binary (VEC_SELECT, mode, XEXP (trueop0, 0),
3874 gen_rtx_PARALLEL (VOIDmode, vec));
3877 return 0;
3879 default:
3880 gcc_unreachable ();
3883 return 0;
/* Try to fold the binary operation CODE applied to constant operands
   OP0 and OP1 in MODE.  Return the folded constant rtx, or 0/NULL_RTX
   when the operation cannot (or must not) be folded at compile time.
   Handles, in order: element-wise folding of two CONST_VECTORs,
   VEC_CONCAT of scalar constants, scalar floating-point folding via
   the REAL_VALUE machinery, and scalar integer folding via wide-int.  */
3887 simplify_const_binary_operation (enum rtx_code code, machine_mode mode,
3888 rtx op0, rtx op1)
3890 unsigned int width = GET_MODE_PRECISION (mode);
/* Vector (op CONST_VECTOR CONST_VECTOR): fold each element pair with a
   recursive call in the inner mode; give up if any element fails.  */
3892 if (VECTOR_MODE_P (mode)
3893 && code != VEC_CONCAT
3894 && GET_CODE (op0) == CONST_VECTOR
3895 && GET_CODE (op1) == CONST_VECTOR)
3897 unsigned n_elts = GET_MODE_NUNITS (mode);
3898 machine_mode op0mode = GET_MODE (op0);
3899 unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
3900 machine_mode op1mode = GET_MODE (op1);
3901 unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
3902 rtvec v = rtvec_alloc (n_elts);
3903 unsigned int i;
3905 gcc_assert (op0_n_elts == n_elts);
3906 gcc_assert (op1_n_elts == n_elts);
3907 for (i = 0; i < n_elts; i++)
3909 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
3910 CONST_VECTOR_ELT (op0, i),
3911 CONST_VECTOR_ELT (op1, i));
3912 if (!x)
3913 return 0;
3914 RTVEC_ELT (v, i) = x;
3917 return gen_rtx_CONST_VECTOR (mode, v);
/* (vec_concat const const): build the CONST_VECTOR directly.  For a
   two-element result the operands are the two scalar elements; for a
   wider result each operand is itself a CONST_VECTOR and the element
   vectors are concatenated.  */
3920 if (VECTOR_MODE_P (mode)
3921 && code == VEC_CONCAT
3922 && (CONST_SCALAR_INT_P (op0)
3923 || GET_CODE (op0) == CONST_FIXED
3924 || CONST_DOUBLE_AS_FLOAT_P (op0))
3925 && (CONST_SCALAR_INT_P (op1)
3926 || CONST_DOUBLE_AS_FLOAT_P (op1)
3927 || GET_CODE (op1) == CONST_FIXED))
3929 unsigned n_elts = GET_MODE_NUNITS (mode);
3930 rtvec v = rtvec_alloc (n_elts);
3932 gcc_assert (n_elts >= 2);
3933 if (n_elts == 2)
3935 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
3936 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
3938 RTVEC_ELT (v, 0) = op0;
3939 RTVEC_ELT (v, 1) = op1;
3941 else
3943 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
3944 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
3945 unsigned i;
3947 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
3948 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
3949 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
3951 for (i = 0; i < op0_n_elts; ++i)
3952 RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
3953 for (i = 0; i < op1_n_elts; ++i)
3954 RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
3957 return gen_rtx_CONST_VECTOR (mode, v);
/* Scalar floating point: two CONST_DOUBLEs in MODE itself.  */
3960 if (SCALAR_FLOAT_MODE_P (mode)
3961 && CONST_DOUBLE_AS_FLOAT_P (op0)
3962 && CONST_DOUBLE_AS_FLOAT_P (op1)
3963 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
/* Bitwise ops on FP constants are done on the target-format bit
   image (via real_to_target / real_from_target), word by word.  */
3965 if (code == AND
3966 || code == IOR
3967 || code == XOR)
3969 long tmp0[4];
3970 long tmp1[4];
3971 REAL_VALUE_TYPE r;
3972 int i;
3974 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
3975 GET_MODE (op0));
3976 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
3977 GET_MODE (op1));
3978 for (i = 0; i < 4; i++)
3980 switch (code)
3982 case AND:
3983 tmp0[i] &= tmp1[i];
3984 break;
3985 case IOR:
3986 tmp0[i] |= tmp1[i];
3987 break;
3988 case XOR:
3989 tmp0[i] ^= tmp1[i];
3990 break;
3991 default:
3992 gcc_unreachable ();
3995 real_from_target (&r, tmp0, mode);
3996 return const_double_from_real_value (r, mode);
3998 else
/* FP arithmetic folding.  Several guards below refuse to fold when
   doing so would lose a run-time exception or a rounding-mode
   dependency the user asked to preserve.  */
4000 REAL_VALUE_TYPE f0, f1, value, result;
4001 const REAL_VALUE_TYPE *opr0, *opr1;
4002 bool inexact;
4004 opr0 = CONST_DOUBLE_REAL_VALUE (op0);
4005 opr1 = CONST_DOUBLE_REAL_VALUE (op1);
/* Never fold signaling NaN operands when SNaNs are honored.  */
4007 if (HONOR_SNANS (mode)
4008 && (REAL_VALUE_ISSIGNALING_NAN (*opr0)
4009 || REAL_VALUE_ISSIGNALING_NAN (*opr1)))
4010 return 0;
4012 real_convert (&f0, mode, opr0);
4013 real_convert (&f1, mode, opr1);
/* x / 0.0 traps (or has no infinity to fold to); leave it alone.  */
4015 if (code == DIV
4016 && real_equal (&f1, &dconst0)
4017 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
4018 return 0;
/* Inf-with-Inf combinations that raise invalid-operation must not be
   folded away when trapping math is on.  */
4020 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
4021 && flag_trapping_math
4022 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
4024 int s0 = REAL_VALUE_NEGATIVE (f0);
4025 int s1 = REAL_VALUE_NEGATIVE (f1);
4027 switch (code)
4029 case PLUS:
4030 /* Inf + -Inf = NaN plus exception. */
4031 if (s0 != s1)
4032 return 0;
4033 break;
4034 case MINUS:
4035 /* Inf - Inf = NaN plus exception. */
4036 if (s0 == s1)
4037 return 0;
4038 break;
4039 case DIV:
4040 /* Inf / Inf = NaN plus exception. */
4041 return 0;
4042 default:
4043 break;
4047 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
4048 && flag_trapping_math
4049 && ((REAL_VALUE_ISINF (f0) && real_equal (&f1, &dconst0))
4050 || (REAL_VALUE_ISINF (f1)
4051 && real_equal (&f0, &dconst0))))
4052 /* Inf * 0 = NaN plus exception. */
4053 return 0;
4055 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
4056 &f0, &f1);
4057 real_convert (&result, mode, &value);
4059 /* Don't constant fold this floating point operation if
4060 the result has overflowed and flag_trapping_math. */
4062 if (flag_trapping_math
4063 && MODE_HAS_INFINITIES (mode)
4064 && REAL_VALUE_ISINF (result)
4065 && !REAL_VALUE_ISINF (f0)
4066 && !REAL_VALUE_ISINF (f1))
4067 /* Overflow plus exception. */
4068 return 0;
4070 /* Don't constant fold this floating point operation if the
4071 result may dependent upon the run-time rounding mode and
4072 flag_rounding_math is set, or if GCC's software emulation
4073 is unable to accurately represent the result. */
4075 if ((flag_rounding_math
4076 || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
4077 && (inexact || !real_identical (&result, &value)))
4078 return NULL_RTX;
4080 return const_double_from_real_value (result, mode);
4084 /* We can fold some multi-word operations. */
4085 if ((GET_MODE_CLASS (mode) == MODE_INT
4086 || GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
4087 && CONST_SCALAR_INT_P (op0)
4088 && CONST_SCALAR_INT_P (op1))
4090 wide_int result;
4091 bool overflow;
4092 rtx_mode_t pop0 = rtx_mode_t (op0, mode);
4093 rtx_mode_t pop1 = rtx_mode_t (op1, mode);
4095 #if TARGET_SUPPORTS_WIDE_INT == 0
4096 /* This assert keeps the simplification from producing a result
4097 that cannot be represented in a CONST_DOUBLE but a lot of
4098 upstream callers expect that this function never fails to
4099 simplify something and so you if you added this to the test
4100 above the code would die later anyway. If this assert
4101 happens, you just need to make the port support wide int. */
4102 gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT);
4103 #endif
/* Integer folding: dispatch to the matching wi:: primitive.
   Division/modulo variants bail out on overflow (e.g. INT_MIN / -1
   or division by zero reported via *overflow).  */
4104 switch (code)
4106 case MINUS:
4107 result = wi::sub (pop0, pop1);
4108 break;
4110 case PLUS:
4111 result = wi::add (pop0, pop1);
4112 break;
4114 case MULT:
4115 result = wi::mul (pop0, pop1);
4116 break;
4118 case DIV:
4119 result = wi::div_trunc (pop0, pop1, SIGNED, &overflow);
4120 if (overflow)
4121 return NULL_RTX;
4122 break;
4124 case MOD:
4125 result = wi::mod_trunc (pop0, pop1, SIGNED, &overflow);
4126 if (overflow)
4127 return NULL_RTX;
4128 break;
4130 case UDIV:
4131 result = wi::div_trunc (pop0, pop1, UNSIGNED, &overflow);
4132 if (overflow)
4133 return NULL_RTX;
4134 break;
4136 case UMOD:
4137 result = wi::mod_trunc (pop0, pop1, UNSIGNED, &overflow);
4138 if (overflow)
4139 return NULL_RTX;
4140 break;
4142 case AND:
4143 result = wi::bit_and (pop0, pop1);
4144 break;
4146 case IOR:
4147 result = wi::bit_or (pop0, pop1);
4148 break;
4150 case XOR:
4151 result = wi::bit_xor (pop0, pop1);
4152 break;
4154 case SMIN:
4155 result = wi::smin (pop0, pop1);
4156 break;
4158 case SMAX:
4159 result = wi::smax (pop0, pop1);
4160 break;
4162 case UMIN:
4163 result = wi::umin (pop0, pop1);
4164 break;
4166 case UMAX:
4167 result = wi::umax (pop0, pop1);
4168 break;
4170 case LSHIFTRT:
4171 case ASHIFTRT:
4172 case ASHIFT:
/* Out-of-range shift counts: reduce modulo the precision when the
   target truncates them (SHIFT_COUNT_TRUNCATED), otherwise refuse
   to fold.  */
4174 wide_int wop1 = pop1;
4175 if (SHIFT_COUNT_TRUNCATED)
4176 wop1 = wi::umod_trunc (wop1, width);
4177 else if (wi::geu_p (wop1, width))
4178 return NULL_RTX;
4180 switch (code)
4182 case LSHIFTRT:
4183 result = wi::lrshift (pop0, wop1);
4184 break;
4186 case ASHIFTRT:
4187 result = wi::arshift (pop0, wop1);
4188 break;
4190 case ASHIFT:
4191 result = wi::lshift (pop0, wop1);
4192 break;
4194 default:
4195 gcc_unreachable ();
4197 break;
4199 case ROTATE:
4200 case ROTATERT:
/* Negative rotate counts are not folded.  */
4202 if (wi::neg_p (pop1))
4203 return NULL_RTX;
4205 switch (code)
4207 case ROTATE:
4208 result = wi::lrotate (pop0, pop1);
4209 break;
4211 case ROTATERT:
4212 result = wi::rrotate (pop0, pop1);
4213 break;
4215 default:
4216 gcc_unreachable ();
4218 break;
4220 default:
4221 return NULL_RTX;
4223 return immed_wide_int_const (result, mode);
/* No rule matched; the caller must keep the original expression.  */
4226 return NULL_RTX;
4231 /* Return a positive integer if X should sort after Y. The value
4232 returned is 1 if and only if X and Y are both regs. */
4234 static int
4235 simplify_plus_minus_op_data_cmp (rtx x, rtx y)
4237 int result;
4239 result = (commutative_operand_precedence (y)
4240 - commutative_operand_precedence (x));
4241 if (result)
4242 return result + result;
4244 /* Group together equal REGs to do more simplification. */
4245 if (REG_P (x) && REG_P (y))
4246 return REGNO (x) > REGNO (y);
4248 return 0;
4251 /* Simplify and canonicalize a PLUS or MINUS, at least one of whose
4252 operands may be another PLUS or MINUS.
4254 Rather than test for specific case, we do this by a brute-force method
4255 and do all possible simplifications until no more changes occur. Then
4256 we rebuild the operation.
4258 May return NULL_RTX when no changes were made. */
4260 static rtx
4261 simplify_plus_minus (enum rtx_code code, machine_mode mode, rtx op0,
4262 rtx op1)
/* Flattened operand list: each entry is one term plus a flag saying
   whether it is subtracted.  Expressions needing more than 16 terms
   make the expansion below give up with NULL_RTX.  */
4264 struct simplify_plus_minus_op_data
4266 rtx op;
4267 short neg;
4268 } ops[16];
4269 rtx result, tem;
4270 int n_ops = 2;
4271 int changed, n_constants, canonicalized = 0;
4272 int i, j;
4274 memset (ops, 0, sizeof ops);
4276 /* Set up the two operands and then expand them until nothing has been
4277 changed. If we run out of room in our array, give up; this should
4278 almost never happen. */
4280 ops[0].op = op0;
4281 ops[0].neg = 0;
4282 ops[1].op = op1;
4283 ops[1].neg = (code == MINUS);
/* Expansion loop: repeatedly pull apart PLUS/MINUS/NEG/NOT/CONST
   operands into flat entries of ops[] until a complete pass over the
   array makes no change (see the matching 'while (changed)' below).  */
4287 changed = 0;
4288 n_constants = 0;
4290 for (i = 0; i < n_ops; i++)
4292 rtx this_op = ops[i].op;
4293 int this_neg = ops[i].neg;
4294 enum rtx_code this_code = GET_CODE (this_op);
4296 switch (this_code)
4298 case PLUS:
4299 case MINUS:
4300 if (n_ops == ARRAY_SIZE (ops))
4301 return NULL_RTX;
4303 ops[n_ops].op = XEXP (this_op, 1);
4304 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
4305 n_ops++;
4307 ops[i].op = XEXP (this_op, 0);
4308 changed = 1;
4309 /* If this operand was negated then we will potentially
4310 canonicalize the expression. Similarly if we don't
4311 place the operands adjacent we're re-ordering the
4312 expression and thus might be performing a
4313 canonicalization. Ignore register re-ordering.
4314 ??? It might be better to shuffle the ops array here,
4315 but then (plus (plus (A, B), plus (C, D))) wouldn't
4316 be seen as non-canonical. */
4317 if (this_neg
4318 || (i != n_ops - 2
4319 && !(REG_P (ops[i].op) && REG_P (ops[n_ops - 1].op))))
4320 canonicalized = 1;
4321 break;
4323 case NEG:
4324 ops[i].op = XEXP (this_op, 0);
4325 ops[i].neg = ! this_neg;
4326 changed = 1;
4327 canonicalized = 1;
4328 break;
4330 case CONST:
4331 if (n_ops != ARRAY_SIZE (ops)
4332 && GET_CODE (XEXP (this_op, 0)) == PLUS
4333 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
4334 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
4336 ops[i].op = XEXP (XEXP (this_op, 0), 0);
4337 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
4338 ops[n_ops].neg = this_neg;
4339 n_ops++;
4340 changed = 1;
4341 canonicalized = 1;
4343 break;
4345 case NOT:
4346 /* ~a -> (-a - 1) */
4347 if (n_ops != ARRAY_SIZE (ops))
4349 ops[n_ops].op = CONSTM1_RTX (mode);
4350 ops[n_ops++].neg = this_neg;
4351 ops[i].op = XEXP (this_op, 0);
4352 ops[i].neg = !this_neg;
4353 changed = 1;
4354 canonicalized = 1;
4356 break;
4358 case CONST_INT:
4359 n_constants++;
4360 if (this_neg)
4362 ops[i].op = neg_const_int (mode, this_op);
4363 ops[i].neg = 0;
4364 changed = 1;
4365 canonicalized = 1;
4367 break;
4369 default:
4370 break;
4374 while (changed);
/* Two or more CONST_INT terms will be merged below, which counts as a
   canonicalization.  */
4376 if (n_constants > 1)
4377 canonicalized = 1;
/* The expansion above can only grow the list, so the original two
   operands are still present in some form.  */
4379 gcc_assert (n_ops >= 2);
4381 /* If we only have two operands, we can avoid the loops. */
4382 if (n_ops == 2)
4384 enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
4385 rtx lhs, rhs;
4387 /* Get the two operands. Be careful with the order, especially for
4388 the cases where code == MINUS. */
4389 if (ops[0].neg && ops[1].neg)
4391 lhs = gen_rtx_NEG (mode, ops[0].op);
4392 rhs = ops[1].op;
4394 else if (ops[0].neg)
4396 lhs = ops[1].op;
4397 rhs = ops[0].op;
4399 else
4401 lhs = ops[0].op;
4402 rhs = ops[1].op;
4405 return simplify_const_binary_operation (code, mode, lhs, rhs);
4408 /* Now simplify each pair of operands until nothing changes. */
4409 while (1)
4411 /* Insertion sort is good enough for a small array. */
4412 for (i = 1; i < n_ops; i++)
4414 struct simplify_plus_minus_op_data save;
4415 int cmp;
4417 j = i - 1;
4418 cmp = simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op);
4419 if (cmp <= 0)
4420 continue;
4421 /* Just swapping registers doesn't count as canonicalization. */
4422 if (cmp != 1)
4423 canonicalized = 1;
4425 save = ops[i];
4427 ops[j + 1] = ops[j];
4428 while (j--
4429 && simplify_plus_minus_op_data_cmp (ops[j].op, save.op) > 0);
4430 ops[j + 1] = save;
4433 changed = 0;
4434 for (i = n_ops - 1; i > 0; i--)
4435 for (j = i - 1; j >= 0; j--)
4437 rtx lhs = ops[j].op, rhs = ops[i].op;
4438 int lneg = ops[j].neg, rneg = ops[i].neg;
4440 if (lhs != 0 && rhs != 0)
4442 enum rtx_code ncode = PLUS;
4444 if (lneg != rneg)
4446 ncode = MINUS;
4447 if (lneg)
4448 std::swap (lhs, rhs);
4450 else if (swap_commutative_operands_p (lhs, rhs))
4451 std::swap (lhs, rhs);
4453 if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
4454 && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
4456 rtx tem_lhs, tem_rhs;
4458 tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
4459 tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
4460 tem = simplify_binary_operation (ncode, mode, tem_lhs,
4461 tem_rhs);
4463 if (tem && !CONSTANT_P (tem))
4464 tem = gen_rtx_CONST (GET_MODE (tem), tem);
4466 else
4467 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
4469 if (tem)
4471 /* Reject "simplifications" that just wrap the two
4472 arguments in a CONST. Failure to do so can result
4473 in infinite recursion with simplify_binary_operation
4474 when it calls us to simplify CONST operations.
4475 Also, if we find such a simplification, don't try
4476 any more combinations with this rhs: We must have
4477 something like symbol+offset, ie. one of the
4478 trivial CONST expressions we handle later. */
4479 if (GET_CODE (tem) == CONST
4480 && GET_CODE (XEXP (tem, 0)) == ncode
4481 && XEXP (XEXP (tem, 0), 0) == lhs
4482 && XEXP (XEXP (tem, 0), 1) == rhs)
4483 break;
/* The combined term replaces ops[i]; it is negated only if both of
   the original terms were negated (when exactly one was, the MINUS
   above already absorbed the sign).  */
4484 lneg &= rneg;
4485 if (GET_CODE (tem) == NEG)
4486 tem = XEXP (tem, 0), lneg = !lneg;
4487 if (CONST_INT_P (tem) && lneg)
4488 tem = neg_const_int (mode, tem), lneg = 0;
4490 ops[i].op = tem;
4491 ops[i].neg = lneg;
4492 ops[j].op = NULL_RTX;
4493 changed = 1;
4494 canonicalized = 1;
4499 if (!changed)
4500 break;
4502 /* Pack all the operands to the lower-numbered entries. */
4503 for (i = 0, j = 0; j < n_ops; j++)
4504 if (ops[j].op)
4506 ops[i] = ops[j];
4507 i++;
4509 n_ops = i;
4512 /* If nothing changed, check that rematerialization of rtl instructions
4513 is still required. */
4514 if (!canonicalized)
4516 /* Perform rematerialization if only all operands are registers and
4517 all operations are PLUS. */
4518 /* ??? Also disallow (non-global, non-frame) fixed registers to work
4519 around rs6000 and how it uses the CA register. See PR67145. */
4520 for (i = 0; i < n_ops; i++)
4521 if (ops[i].neg
4522 || !REG_P (ops[i].op)
4523 || (REGNO (ops[i].op) < FIRST_PSEUDO_REGISTER
4524 && fixed_regs[REGNO (ops[i].op)]
4525 && !global_regs[REGNO (ops[i].op)]
4526 && ops[i].op != frame_pointer_rtx
4527 && ops[i].op != arg_pointer_rtx
4528 && ops[i].op != stack_pointer_rtx))
4529 return NULL_RTX;
4530 goto gen_result;
4533 /* Create (minus -C X) instead of (neg (const (plus X C))). */
4534 if (n_ops == 2
4535 && CONST_INT_P (ops[1].op)
4536 && CONSTANT_P (ops[0].op)
4537 && ops[0].neg)
4538 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
4540 /* We suppressed creation of trivial CONST expressions in the
4541 combination loop to avoid recursion. Create one manually now.
4542 The combination loop should have ensured that there is exactly
4543 one CONST_INT, and the sort will have ensured that it is last
4544 in the array and that any other constant will be next-to-last. */
4546 if (n_ops > 1
4547 && CONST_INT_P (ops[n_ops - 1].op)
4548 && CONSTANT_P (ops[n_ops - 2].op))
4550 rtx value = ops[n_ops - 1].op;
4551 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
4552 value = neg_const_int (mode, value);
4553 if (CONST_INT_P (value))
4555 ops[n_ops - 2].op = plus_constant (mode, ops[n_ops - 2].op,
4556 INTVAL (value));
4557 n_ops--;
4561 /* Put a non-negated operand first, if possible. */
4563 for (i = 0; i < n_ops && ops[i].neg; i++)
4564 continue;
4565 if (i == n_ops)
4566 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
4567 else if (i != 0)
4569 tem = ops[0].op;
4570 ops[0] = ops[i];
4571 ops[i].op = tem;
4572 ops[i].neg = 1;
4575 /* Now make the result by performing the requested operations. */
4576 gen_result:
4577 result = ops[0].op;
4578 for (i = 1; i < n_ops; i++)
4579 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
4580 mode, result, ops[i].op);
4582 return result;
4585 /* Check whether an operand is suitable for calling simplify_plus_minus. */
4586 static bool
4587 plus_minus_operand_p (const_rtx x)
4589 return GET_CODE (x) == PLUS
4590 || GET_CODE (x) == MINUS
4591 || (GET_CODE (x) == CONST
4592 && GET_CODE (XEXP (x, 0)) == PLUS
4593 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
4594 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
4597 /* Like simplify_binary_operation except used for relational operators.
4598 MODE is the mode of the result. If MODE is VOIDmode, both operands must
4599 not also be VOIDmode.
4601 CMP_MODE specifies in which mode the comparison is done in, so it is
4602 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
4603 the operands or, if both are VOIDmode, the operands are compared in
4604 "infinite precision". */
4606 simplify_relational_operation (enum rtx_code code, machine_mode mode,
4607 machine_mode cmp_mode, rtx op0, rtx op1)
4609 rtx tem, trueop0, trueop1;
4611 if (cmp_mode == VOIDmode)
4612 cmp_mode = GET_MODE (op0);
4613 if (cmp_mode == VOIDmode)
4614 cmp_mode = GET_MODE (op1);
4616 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
4617 if (tem)
/* The comparison folded to a constant (const0_rtx or const_true_rtx);
   convert that into MODE's own representation of false/true.  */
4619 if (SCALAR_FLOAT_MODE_P (mode))
4621 if (tem == const0_rtx)
4622 return CONST0_RTX (mode);
4623 #ifdef FLOAT_STORE_FLAG_VALUE
4625 REAL_VALUE_TYPE val;
4626 val = FLOAT_STORE_FLAG_VALUE (mode);
4627 return const_double_from_real_value (val, mode);
4629 #else
4630 return NULL_RTX;
4631 #endif
4633 if (VECTOR_MODE_P (mode))
4635 if (tem == const0_rtx)
4636 return CONST0_RTX (mode);
4637 #ifdef VECTOR_STORE_FLAG_VALUE
4639 int i, units;
4640 rtvec v;
/* Build a CONST_VECTOR with the target's "true" value replicated in
   every element.  */
4642 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
4643 if (val == NULL_RTX)
4644 return NULL_RTX;
4645 if (val == const1_rtx)
4646 return CONST1_RTX (mode);
4648 units = GET_MODE_NUNITS (mode);
4649 v = rtvec_alloc (units);
4650 for (i = 0; i < units; i++)
4651 RTVEC_ELT (v, i) = val;
4652 return gen_rtx_raw_CONST_VECTOR (mode, v);
4654 #else
4655 return NULL_RTX;
4656 #endif
4659 return tem;
4662 /* For the following tests, ensure const0_rtx is op1. */
4663 if (swap_commutative_operands_p (op0, op1)
4664 || (op0 == const0_rtx && op1 != const0_rtx))
4665 std::swap (op0, op1), code = swap_condition (code);
4667 /* If op0 is a compare, extract the comparison arguments from it. */
4668 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4669 return simplify_gen_relational (code, mode, VOIDmode,
4670 XEXP (op0, 0), XEXP (op0, 1));
/* We can't simplify MODE_CC or cc0 values since we don't know what the
   actual comparison is.  */
4672 if (GET_MODE_CLASS (cmp_mode) == MODE_CC
4673 || CC0_P (op0))
4674 return NULL_RTX;
/* Look through constant-pool references so the helper sees the
   underlying constants.  */
4676 trueop0 = avoid_constant_pool_reference (op0);
4677 trueop1 = avoid_constant_pool_reference (op1);
4678 return simplify_relational_operation_1 (code, mode, cmp_mode,
4679 trueop0, trueop1);
4682 /* This part of simplify_relational_operation is only used when CMP_MODE
4683 is not in class MODE_CC (i.e. it is a real comparison).
4685 MODE is the mode of the result, while CMP_MODE specifies in which
4686 mode the comparison is done in, so it is the mode of the operands. */
/* Returns the simplified comparison, or NULL_RTX if no rule applies.  */
4688 static rtx
4689 simplify_relational_operation_1 (enum rtx_code code, machine_mode mode,
4690 machine_mode cmp_mode, rtx op0, rtx op1)
/* Remember OP0's rtx code; most of the pattern matches below test it.  */
4692 enum rtx_code op0code = GET_CODE (op0);
4694 if (op1 == const0_rtx && COMPARISON_P (op0))
4696 /* If op0 is a comparison, extract the comparison arguments
4697 from it. */
4698 if (code == NE)
4700 if (GET_MODE (op0) == mode)
4701 return simplify_rtx (op0);
4702 else
4703 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
4704 XEXP (op0, 0), XEXP (op0, 1));
4706 else if (code == EQ)
4708 enum rtx_code new_code = reversed_comparison_code (op0, NULL);
4709 if (new_code != UNKNOWN)
4710 return simplify_gen_relational (new_code, mode, VOIDmode,
4711 XEXP (op0, 0), XEXP (op0, 1));
4715 /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
4716 (GEU/LTU a -C). Likewise for (LTU/GEU (PLUS a C) a). */
4717 if ((code == LTU || code == GEU)
4718 && GET_CODE (op0) == PLUS
4719 && CONST_INT_P (XEXP (op0, 1))
4720 && (rtx_equal_p (op1, XEXP (op0, 0))
4721 || rtx_equal_p (op1, XEXP (op0, 1)))
4722 /* (LTU/GEU (PLUS a 0) 0) is not the same as (GEU/LTU a 0). */
4723 && XEXP (op0, 1) != const0_rtx)
4725 rtx new_cmp
4726 = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
4727 return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
4728 cmp_mode, XEXP (op0, 0), new_cmp);
4731 /* (GTU (PLUS a C) (C - 1)) where C is a non-zero constant can be
4732 transformed into (LTU a -C). */
4733 if (code == GTU && GET_CODE (op0) == PLUS && CONST_INT_P (op1)
4734 && CONST_INT_P (XEXP (op0, 1))
4735 && (UINTVAL (op1) == UINTVAL (XEXP (op0, 1)) - 1)
4736 && XEXP (op0, 1) != const0_rtx)
4738 rtx new_cmp
4739 = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
4740 return simplify_gen_relational (LTU, mode, cmp_mode,
4741 XEXP (op0, 0), new_cmp);
4744 /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a). */
4745 if ((code == LTU || code == GEU)
4746 && GET_CODE (op0) == PLUS
4747 && rtx_equal_p (op1, XEXP (op0, 1))
4748 /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b). */
4749 && !rtx_equal_p (op1, XEXP (op0, 0)))
4750 return simplify_gen_relational (code, mode, cmp_mode, op0,
4751 copy_rtx (XEXP (op0, 0)));
/* Canonicalizations of comparisons against 0, 1 and -1 keep later
   passes from having to recognize several equivalent forms.  */
4753 if (op1 == const0_rtx)
4755 /* Canonicalize (GTU x 0) as (NE x 0). */
4756 if (code == GTU)
4757 return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
4758 /* Canonicalize (LEU x 0) as (EQ x 0). */
4759 if (code == LEU)
4760 return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
4762 else if (op1 == const1_rtx)
4764 switch (code)
4766 case GE:
4767 /* Canonicalize (GE x 1) as (GT x 0). */
4768 return simplify_gen_relational (GT, mode, cmp_mode,
4769 op0, const0_rtx);
4770 case GEU:
4771 /* Canonicalize (GEU x 1) as (NE x 0). */
4772 return simplify_gen_relational (NE, mode, cmp_mode,
4773 op0, const0_rtx);
4774 case LT:
4775 /* Canonicalize (LT x 1) as (LE x 0). */
4776 return simplify_gen_relational (LE, mode, cmp_mode,
4777 op0, const0_rtx);
4778 case LTU:
4779 /* Canonicalize (LTU x 1) as (EQ x 0). */
4780 return simplify_gen_relational (EQ, mode, cmp_mode,
4781 op0, const0_rtx);
4782 default:
4783 break;
4786 else if (op1 == constm1_rtx)
4788 /* Canonicalize (LE x -1) as (LT x 0). */
4789 if (code == LE)
4790 return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
4791 /* Canonicalize (GT x -1) as (GE x 0). */
4792 if (code == GT)
4793 return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
4796 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
4797 if ((code == EQ || code == NE)
4798 && (op0code == PLUS || op0code == MINUS)
4799 && CONSTANT_P (op1)
4800 && CONSTANT_P (XEXP (op0, 1))
4801 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
4803 rtx x = XEXP (op0, 0);
4804 rtx c = XEXP (op0, 1);
4805 enum rtx_code invcode = op0code == PLUS ? MINUS : PLUS;
4806 rtx tem = simplify_gen_binary (invcode, cmp_mode, op1, c);
4808 /* Detect an infinite recursive condition, where we oscillate at this
4809 simplification case between:
4810 A + B == C <---> C - B == A,
4811 where A, B, and C are all constants with non-simplifiable expressions,
4812 usually SYMBOL_REFs. */
4813 if (GET_CODE (tem) == invcode
4814 && CONSTANT_P (x)
4815 && rtx_equal_p (c, XEXP (tem, 1)))
4816 return NULL_RTX;
4818 return simplify_gen_relational (code, mode, cmp_mode, x, tem);
4821 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0))) is
4822 the same as (zero_extract:SI FOO (const_int 1) BAR). */
4823 if (code == NE
4824 && op1 == const0_rtx
4825 && GET_MODE_CLASS (mode) == MODE_INT
4826 && cmp_mode != VOIDmode
4827 /* ??? Work-around BImode bugs in the ia64 backend. */
4828 && mode != BImode
4829 && cmp_mode != BImode
4830 && nonzero_bits (op0, cmp_mode) == 1
4831 && STORE_FLAG_VALUE == 1)
4832 return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
4833 ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
4834 : lowpart_subreg (mode, op0, cmp_mode);
4836 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
4837 if ((code == EQ || code == NE)
4838 && op1 == const0_rtx
4839 && op0code == XOR)
4840 return simplify_gen_relational (code, mode, cmp_mode,
4841 XEXP (op0, 0), XEXP (op0, 1));
4843 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
4844 if ((code == EQ || code == NE)
4845 && op0code == XOR
4846 && rtx_equal_p (XEXP (op0, 0), op1)
4847 && !side_effects_p (XEXP (op0, 0)))
4848 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 1),
4849 CONST0_RTX (mode));
4851 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
4852 if ((code == EQ || code == NE)
4853 && op0code == XOR
4854 && rtx_equal_p (XEXP (op0, 1), op1)
4855 && !side_effects_p (XEXP (op0, 1)))
4856 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4857 CONST0_RTX (mode));
4859 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
4860 if ((code == EQ || code == NE)
4861 && op0code == XOR
4862 && CONST_SCALAR_INT_P (op1)
4863 && CONST_SCALAR_INT_P (XEXP (op0, 1)))
4864 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4865 simplify_gen_binary (XOR, cmp_mode,
4866 XEXP (op0, 1), op1));
4868 /* (eq/ne (and x y) x) simplifies to (eq/ne (and (not y) x) 0), which
4869 can be implemented with a BICS instruction on some targets, or
4870 constant-folded if y is a constant. */
4871 if ((code == EQ || code == NE)
4872 && op0code == AND
4873 && rtx_equal_p (XEXP (op0, 0), op1)
4874 && !side_effects_p (op1)
4875 && op1 != CONST0_RTX (cmp_mode))
4877 rtx not_y = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 1), cmp_mode);
4878 rtx lhs = simplify_gen_binary (AND, cmp_mode, not_y, XEXP (op0, 0));
4880 return simplify_gen_relational (code, mode, cmp_mode, lhs,
4881 CONST0_RTX (cmp_mode));
4884 /* Likewise for (eq/ne (and x y) y). */
4885 if ((code == EQ || code == NE)
4886 && op0code == AND
4887 && rtx_equal_p (XEXP (op0, 1), op1)
4888 && !side_effects_p (op1)
4889 && op1 != CONST0_RTX (cmp_mode))
4891 rtx not_x = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 0), cmp_mode);
4892 rtx lhs = simplify_gen_binary (AND, cmp_mode, not_x, XEXP (op0, 1));
4894 return simplify_gen_relational (code, mode, cmp_mode, lhs,
4895 CONST0_RTX (cmp_mode));
4898 /* (eq/ne (bswap x) C1) simplifies to (eq/ne x C2) with C2 swapped. */
4899 if ((code == EQ || code == NE)
4900 && GET_CODE (op0) == BSWAP
4901 && CONST_SCALAR_INT_P (op1))
4902 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4903 simplify_gen_unary (BSWAP, cmp_mode,
4904 op1, cmp_mode));
4906 /* (eq/ne (bswap x) (bswap y)) simplifies to (eq/ne x y). */
4907 if ((code == EQ || code == NE)
4908 && GET_CODE (op0) == BSWAP
4909 && GET_CODE (op1) == BSWAP)
4910 return simplify_gen_relational (code, mode, cmp_mode,
4911 XEXP (op0, 0), XEXP (op1, 0));
/* A popcount is zero iff its operand is zero, and nonzero (hence > 0,
   since it cannot be negative) otherwise.  */
4913 if (op0code == POPCOUNT && op1 == const0_rtx)
4914 switch (code)
4916 case EQ:
4917 case LE:
4918 case LEU:
4919 /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */
4920 return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
4921 XEXP (op0, 0), const0_rtx);
4923 case NE:
4924 case GT:
4925 case GTU:
4926 /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */
4927 return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
4928 XEXP (op0, 0), const0_rtx);
4930 default:
4931 break;
4934 return NULL_RTX;
/* Bitmask flags describing which comparison outcomes are known to hold
   for a pair of operands; ORed together and consumed by
   comparison_result.  */
4937 enum
4939 CMP_EQ = 1,
4940 CMP_LT = 2,
4941 CMP_GT = 4,
4942 CMP_LTU = 8,
4943 CMP_GTU = 16
4947 /* Convert the known results for EQ, LT, GT, LTU, GTU contained in
4948 KNOWN_RESULT to a CONST_INT, based on the requested comparison CODE
4949 For KNOWN_RESULT to make sense it should be either CMP_EQ, or the
4950 logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
4951 For floating-point comparisons, assume that the operands were ordered. */
4953 static rtx
4954 comparison_result (enum rtx_code code, int known_results)
4956 switch (code)
4958 case EQ:
4959 case UNEQ:
4960 return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
4961 case NE:
4962 case LTGT:
4963 return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;
4965 case LT:
4966 case UNLT:
4967 return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
4968 case GE:
4969 case UNGE:
4970 return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;
4972 case GT:
4973 case UNGT:
4974 return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
4975 case LE:
4976 case UNLE:
4977 return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;
4979 case LTU:
4980 return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
4981 case GEU:
4982 return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;
4984 case GTU:
4985 return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
4986 case LEU:
4987 return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;
4989 case ORDERED:
4990 return const_true_rtx;
4991 case UNORDERED:
4992 return const0_rtx;
4993 default:
4994 gcc_unreachable ();
4998 /* Check if the given comparison (done in the given MODE) is actually
4999 a tautology or a contradiction. If the mode is VOID_mode, the
5000 comparison is done in "infinite precision". If no simplification
5001 is possible, this function returns zero. Otherwise, it returns
5002 either const_true_rtx or const0_rtx. */
5005 simplify_const_relational_operation (enum rtx_code code,
5006 machine_mode mode,
5007 rtx op0, rtx op1)
5009 rtx tem;
5010 rtx trueop0;
5011 rtx trueop1;
5013 gcc_assert (mode != VOIDmode
5014 || (GET_MODE (op0) == VOIDmode
5015 && GET_MODE (op1) == VOIDmode));
5017 /* If op0 is a compare, extract the comparison arguments from it. */
5018 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
5020 op1 = XEXP (op0, 1);
5021 op0 = XEXP (op0, 0);
5023 if (GET_MODE (op0) != VOIDmode)
5024 mode = GET_MODE (op0);
5025 else if (GET_MODE (op1) != VOIDmode)
5026 mode = GET_MODE (op1);
5027 else
5028 return 0;
5031 /* We can't simplify MODE_CC values since we don't know what the
5032 actual comparison is. */
5033 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
5034 return 0;
5036 /* Make sure the constant is second. */
5037 if (swap_commutative_operands_p (op0, op1))
5039 std::swap (op0, op1);
5040 code = swap_condition (code);
5043 trueop0 = avoid_constant_pool_reference (op0);
5044 trueop1 = avoid_constant_pool_reference (op1);
5046 /* For integer comparisons of A and B maybe we can simplify A - B and can
5047 then simplify a comparison of that with zero. If A and B are both either
5048 a register or a CONST_INT, this can't help; testing for these cases will
5049 prevent infinite recursion here and speed things up.
5051 We can only do this for EQ and NE comparisons as otherwise we may
5052 lose or introduce overflow which we cannot disregard as undefined as
5053 we do not know the signedness of the operation on either the left or
5054 the right hand side of the comparison. */
5056 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
5057 && (code == EQ || code == NE)
5058 && ! ((REG_P (op0) || CONST_INT_P (trueop0))
5059 && (REG_P (op1) || CONST_INT_P (trueop1)))
5060 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
5061 /* We cannot do this if tem is a nonzero address. */
5062 && ! nonzero_address_p (tem))
5063 return simplify_const_relational_operation (signed_condition (code),
5064 mode, tem, const0_rtx);
5066 if (! HONOR_NANS (mode) && code == ORDERED)
5067 return const_true_rtx;
5069 if (! HONOR_NANS (mode) && code == UNORDERED)
5070 return const0_rtx;
5072 /* For modes without NaNs, if the two operands are equal, we know the
5073 result except if they have side-effects. Even with NaNs we know
5074 the result of unordered comparisons and, if signaling NaNs are
5075 irrelevant, also the result of LT/GT/LTGT. */
5076 if ((! HONOR_NANS (trueop0)
5077 || code == UNEQ || code == UNLE || code == UNGE
5078 || ((code == LT || code == GT || code == LTGT)
5079 && ! HONOR_SNANS (trueop0)))
5080 && rtx_equal_p (trueop0, trueop1)
5081 && ! side_effects_p (trueop0))
5082 return comparison_result (code, CMP_EQ);
5084 /* If the operands are floating-point constants, see if we can fold
5085 the result. */
5086 if (CONST_DOUBLE_AS_FLOAT_P (trueop0)
5087 && CONST_DOUBLE_AS_FLOAT_P (trueop1)
5088 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
5090 const REAL_VALUE_TYPE *d0 = CONST_DOUBLE_REAL_VALUE (trueop0);
5091 const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);
5093 /* Comparisons are unordered iff at least one of the values is NaN. */
5094 if (REAL_VALUE_ISNAN (*d0) || REAL_VALUE_ISNAN (*d1))
5095 switch (code)
5097 case UNEQ:
5098 case UNLT:
5099 case UNGT:
5100 case UNLE:
5101 case UNGE:
5102 case NE:
5103 case UNORDERED:
5104 return const_true_rtx;
5105 case EQ:
5106 case LT:
5107 case GT:
5108 case LE:
5109 case GE:
5110 case LTGT:
5111 case ORDERED:
5112 return const0_rtx;
5113 default:
5114 return 0;
5117 return comparison_result (code,
5118 (real_equal (d0, d1) ? CMP_EQ :
5119 real_less (d0, d1) ? CMP_LT : CMP_GT));
5122 /* Otherwise, see if the operands are both integers. */
5123 if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
5124 && CONST_SCALAR_INT_P (trueop0) && CONST_SCALAR_INT_P (trueop1))
5126 /* It would be nice if we really had a mode here. However, the
5127 largest int representable on the target is as good as
5128 infinite. */
5129 machine_mode cmode = (mode == VOIDmode) ? MAX_MODE_INT : mode;
5130 rtx_mode_t ptrueop0 = rtx_mode_t (trueop0, cmode);
5131 rtx_mode_t ptrueop1 = rtx_mode_t (trueop1, cmode);
5133 if (wi::eq_p (ptrueop0, ptrueop1))
5134 return comparison_result (code, CMP_EQ);
5135 else
5137 int cr = wi::lts_p (ptrueop0, ptrueop1) ? CMP_LT : CMP_GT;
5138 cr |= wi::ltu_p (ptrueop0, ptrueop1) ? CMP_LTU : CMP_GTU;
5139 return comparison_result (code, cr);
5143 /* Optimize comparisons with upper and lower bounds. */
5144 if (HWI_COMPUTABLE_MODE_P (mode)
5145 && CONST_INT_P (trueop1)
5146 && !side_effects_p (trueop0))
5148 int sign;
5149 unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, mode);
5150 HOST_WIDE_INT val = INTVAL (trueop1);
5151 HOST_WIDE_INT mmin, mmax;
5153 if (code == GEU
5154 || code == LEU
5155 || code == GTU
5156 || code == LTU)
5157 sign = 0;
5158 else
5159 sign = 1;
5161 /* Get a reduced range if the sign bit is zero. */
5162 if (nonzero <= (GET_MODE_MASK (mode) >> 1))
5164 mmin = 0;
5165 mmax = nonzero;
5167 else
5169 rtx mmin_rtx, mmax_rtx;
5170 get_mode_bounds (mode, sign, mode, &mmin_rtx, &mmax_rtx);
5172 mmin = INTVAL (mmin_rtx);
5173 mmax = INTVAL (mmax_rtx);
5174 if (sign)
5176 unsigned int sign_copies = num_sign_bit_copies (trueop0, mode);
5178 mmin >>= (sign_copies - 1);
5179 mmax >>= (sign_copies - 1);
5183 switch (code)
5185 /* x >= y is always true for y <= mmin, always false for y > mmax. */
5186 case GEU:
5187 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
5188 return const_true_rtx;
5189 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
5190 return const0_rtx;
5191 break;
5192 case GE:
5193 if (val <= mmin)
5194 return const_true_rtx;
5195 if (val > mmax)
5196 return const0_rtx;
5197 break;
5199 /* x <= y is always true for y >= mmax, always false for y < mmin. */
5200 case LEU:
5201 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
5202 return const_true_rtx;
5203 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
5204 return const0_rtx;
5205 break;
5206 case LE:
5207 if (val >= mmax)
5208 return const_true_rtx;
5209 if (val < mmin)
5210 return const0_rtx;
5211 break;
5213 case EQ:
5214 /* x == y is always false for y out of range. */
5215 if (val < mmin || val > mmax)
5216 return const0_rtx;
5217 break;
5219 /* x > y is always false for y >= mmax, always true for y < mmin. */
5220 case GTU:
5221 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
5222 return const0_rtx;
5223 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
5224 return const_true_rtx;
5225 break;
5226 case GT:
5227 if (val >= mmax)
5228 return const0_rtx;
5229 if (val < mmin)
5230 return const_true_rtx;
5231 break;
5233 /* x < y is always false for y <= mmin, always true for y > mmax. */
5234 case LTU:
5235 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
5236 return const0_rtx;
5237 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
5238 return const_true_rtx;
5239 break;
5240 case LT:
5241 if (val <= mmin)
5242 return const0_rtx;
5243 if (val > mmax)
5244 return const_true_rtx;
5245 break;
5247 case NE:
5248 /* x != y is always true for y out of range. */
5249 if (val < mmin || val > mmax)
5250 return const_true_rtx;
5251 break;
5253 default:
5254 break;
5258 /* Optimize integer comparisons with zero. */
5259 if (trueop1 == const0_rtx && !side_effects_p (trueop0))
5261 /* Some addresses are known to be nonzero. We don't know
5262 their sign, but equality comparisons are known. */
5263 if (nonzero_address_p (trueop0))
5265 if (code == EQ || code == LEU)
5266 return const0_rtx;
5267 if (code == NE || code == GTU)
5268 return const_true_rtx;
5271 /* See if the first operand is an IOR with a constant. If so, we
5272 may be able to determine the result of this comparison. */
5273 if (GET_CODE (op0) == IOR)
5275 rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
5276 if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
5278 int sign_bitnum = GET_MODE_PRECISION (mode) - 1;
5279 int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
5280 && (UINTVAL (inner_const)
5281 & (HOST_WIDE_INT_1U
5282 << sign_bitnum)));
5284 switch (code)
5286 case EQ:
5287 case LEU:
5288 return const0_rtx;
5289 case NE:
5290 case GTU:
5291 return const_true_rtx;
5292 case LT:
5293 case LE:
5294 if (has_sign)
5295 return const_true_rtx;
5296 break;
5297 case GT:
5298 case GE:
5299 if (has_sign)
5300 return const0_rtx;
5301 break;
5302 default:
5303 break;
5309 /* Optimize comparison of ABS with zero. */
5310 if (trueop1 == CONST0_RTX (mode) && !side_effects_p (trueop0)
5311 && (GET_CODE (trueop0) == ABS
5312 || (GET_CODE (trueop0) == FLOAT_EXTEND
5313 && GET_CODE (XEXP (trueop0, 0)) == ABS)))
5315 switch (code)
5317 case LT:
5318 /* Optimize abs(x) < 0.0. */
5319 if (!HONOR_SNANS (mode)
5320 && (!INTEGRAL_MODE_P (mode)
5321 || (!flag_wrapv && !flag_trapv)))
5323 if (INTEGRAL_MODE_P (mode)
5324 && (issue_strict_overflow_warning
5325 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
5326 warning (OPT_Wstrict_overflow,
5327 ("assuming signed overflow does not occur when "
5328 "assuming abs (x) < 0 is false"));
5329 return const0_rtx;
5331 break;
5333 case GE:
5334 /* Optimize abs(x) >= 0.0. */
5335 if (!HONOR_NANS (mode)
5336 && (!INTEGRAL_MODE_P (mode)
5337 || (!flag_wrapv && !flag_trapv)))
5339 if (INTEGRAL_MODE_P (mode)
5340 && (issue_strict_overflow_warning
5341 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
5342 warning (OPT_Wstrict_overflow,
5343 ("assuming signed overflow does not occur when "
5344 "assuming abs (x) >= 0 is true"));
5345 return const_true_rtx;
5347 break;
5349 case UNGE:
5350 /* Optimize ! (abs(x) < 0.0). */
5351 return const_true_rtx;
5353 default:
5354 break;
5358 return 0;
5361 /* Recognize expressions of the form (X CMP 0) ? VAL : OP (X)
5362 where OP is CLZ or CTZ and VAL is the value from CLZ_DEFINED_VALUE_AT_ZERO
5363 or CTZ_DEFINED_VALUE_AT_ZERO respectively and return OP (X) if the expression
5364 can be simplified to that or NULL_RTX if not.
5365 Assume X is compared against zero with CMP_CODE and the true
5366 arm is TRUE_VAL and the false arm is FALSE_VAL. */
5368 static rtx
5369 simplify_cond_clz_ctz (rtx x, rtx_code cmp_code, rtx true_val, rtx false_val)
5371 if (cmp_code != EQ && cmp_code != NE)
5372 return NULL_RTX;
5374 /* Result on X == 0 and X !=0 respectively. */
5375 rtx on_zero, on_nonzero;
5376 if (cmp_code == EQ)
5378 on_zero = true_val;
5379 on_nonzero = false_val;
5381 else
5383 on_zero = false_val;
5384 on_nonzero = true_val;
5387 rtx_code op_code = GET_CODE (on_nonzero);
5388 if ((op_code != CLZ && op_code != CTZ)
5389 || !rtx_equal_p (XEXP (on_nonzero, 0), x)
5390 || !CONST_INT_P (on_zero))
5391 return NULL_RTX;
5393 HOST_WIDE_INT op_val;
5394 if (((op_code == CLZ
5395 && CLZ_DEFINED_VALUE_AT_ZERO (GET_MODE (on_nonzero), op_val))
5396 || (op_code == CTZ
5397 && CTZ_DEFINED_VALUE_AT_ZERO (GET_MODE (on_nonzero), op_val)))
5398 && op_val == INTVAL (on_zero))
5399 return on_nonzero;
5401 return NULL_RTX;
/* Simplify CODE, an operation with result mode MODE and three operands,
   OP0, OP1, and OP2.  OP0_MODE was the mode of OP0 before it became
   a constant.  Return 0 if no simplifications is possible.  */

rtx
simplify_ternary_operation (enum rtx_code code, machine_mode mode,
			    machine_mode op0_mode, rtx op0, rtx op1,
			    rtx op2)
{
  unsigned int width = GET_MODE_PRECISION (mode);
  bool any_change = false;
  rtx tem, trueop2;

  /* VOIDmode means "infinite" precision.  */
  if (width == 0)
    width = HOST_BITS_PER_WIDE_INT;

  switch (code)
    {
    case FMA:
      /* Simplify negations around the multiplication.  */
      /* -a * -b + c  =>  a * b + c.  */
      if (GET_CODE (op0) == NEG)
	{
	  tem = simplify_unary_operation (NEG, mode, op1, mode);
	  if (tem)
	    op1 = tem, op0 = XEXP (op0, 0), any_change = true;
	}
      else if (GET_CODE (op1) == NEG)
	{
	  tem = simplify_unary_operation (NEG, mode, op0, mode);
	  if (tem)
	    op0 = tem, op1 = XEXP (op1, 0), any_change = true;
	}

      /* Canonicalize the two multiplication operands.  */
      /* a * -b + c  =>  -b * a + c.  */
      if (swap_commutative_operands_p (op0, op1))
	std::swap (op0, op1), any_change = true;

      /* Only rebuild the FMA if one of the transforms above fired.  */
      if (any_change)
	return gen_rtx_FMA (mode, op0, op1, op2);
      return NULL_RTX;

    case SIGN_EXTRACT:
    case ZERO_EXTRACT:
      if (CONST_INT_P (op0)
	  && CONST_INT_P (op1)
	  && CONST_INT_P (op2)
	  && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
	  && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
	{
	  /* Extracting a bit-field from a constant */
	  unsigned HOST_WIDE_INT val = UINTVAL (op0);
	  HOST_WIDE_INT op1val = INTVAL (op1);	/* field width in bits */
	  HOST_WIDE_INT op2val = INTVAL (op2);	/* field start position */
	  /* When BITS_BIG_ENDIAN, the start position counts from the
	     most-significant end, so convert to a shift from the LSB.  */
	  if (BITS_BIG_ENDIAN)
	    val >>= GET_MODE_PRECISION (op0_mode) - op2val - op1val;
	  else
	    val >>= op2val;

	  if (HOST_BITS_PER_WIDE_INT != op1val)
	    {
	      /* First zero-extend.  */
	      val &= (HOST_WIDE_INT_1U << op1val) - 1;
	      /* If desired, propagate sign bit.  */
	      if (code == SIGN_EXTRACT
		  && (val & (HOST_WIDE_INT_1U << (op1val - 1)))
		     != 0)
		val |= ~ ((HOST_WIDE_INT_1U << op1val) - 1);
	    }

	  return gen_int_mode (val, mode);
	}
      break;

    case IF_THEN_ELSE:
      /* A constant condition selects one arm outright.  */
      if (CONST_INT_P (op0))
	return op0 != const0_rtx ? op1 : op2;

      /* Convert c ? a : a into "a".  */
      if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
	return op1;

      /* Convert a != b ? a : b into "a".  */
      if (GET_CODE (op0) == NE
	  && ! side_effects_p (op0)
	  && ! HONOR_NANS (mode)
	  && ! HONOR_SIGNED_ZEROS (mode)
	  && ((rtx_equal_p (XEXP (op0, 0), op1)
	       && rtx_equal_p (XEXP (op0, 1), op2))
	      || (rtx_equal_p (XEXP (op0, 0), op2)
		  && rtx_equal_p (XEXP (op0, 1), op1))))
	return op1;

      /* Convert a == b ? a : b into "b".  */
      if (GET_CODE (op0) == EQ
	  && ! side_effects_p (op0)
	  && ! HONOR_NANS (mode)
	  && ! HONOR_SIGNED_ZEROS (mode)
	  && ((rtx_equal_p (XEXP (op0, 0), op1)
	       && rtx_equal_p (XEXP (op0, 1), op2))
	      || (rtx_equal_p (XEXP (op0, 0), op2)
		  && rtx_equal_p (XEXP (op0, 1), op1))))
	return op2;

      /* Convert (!c) != {0,...,0} ? a : b into
	 c != {0,...,0} ? b : a for vector modes.  */
      if (VECTOR_MODE_P (GET_MODE (op1))
	  && GET_CODE (op0) == NE
	  && GET_CODE (XEXP (op0, 0)) == NOT
	  && GET_CODE (XEXP (op0, 1)) == CONST_VECTOR)
	{
	  rtx cv = XEXP (op0, 1);
	  int nunits = CONST_VECTOR_NUNITS (cv);
	  bool ok = true;
	  /* The transform is only valid when the constant vector is
	     all-zeros; check every element.  */
	  for (int i = 0; i < nunits; ++i)
	    if (CONST_VECTOR_ELT (cv, i) != const0_rtx)
	      {
		ok = false;
		break;
	      }
	  if (ok)
	    {
	      /* Drop the NOT and swap the two arms instead.  */
	      rtx new_op0 = gen_rtx_NE (GET_MODE (op0),
					XEXP (XEXP (op0, 0), 0),
					XEXP (op0, 1));
	      rtx retval = gen_rtx_IF_THEN_ELSE (mode, new_op0, op2, op1);
	      return retval;
	    }
	}

      /* Convert x == 0 ? N : clz (x) into clz (x) when
	 CLZ_DEFINED_VALUE_AT_ZERO is defined to N for the mode of x.
	 Similarly for ctz (x).  */
      if (COMPARISON_P (op0) && !side_effects_p (op0)
	  && XEXP (op0, 1) == const0_rtx)
	{
	  rtx simplified
	    = simplify_cond_clz_ctz (XEXP (op0, 0), GET_CODE (op0),
				     op1, op2);
	  if (simplified)
	    return simplified;
	}

      if (COMPARISON_P (op0) && ! side_effects_p (op0))
	{
	  machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
				   ? GET_MODE (XEXP (op0, 1))
				   : GET_MODE (XEXP (op0, 0)));
	  rtx temp;

	  /* Look for happy constants in op1 and op2.  */
	  if (CONST_INT_P (op1) && CONST_INT_P (op2))
	    {
	      HOST_WIDE_INT t = INTVAL (op1);
	      HOST_WIDE_INT f = INTVAL (op2);

	      /* cond ? STORE_FLAG_VALUE : 0 is just the comparison itself;
		 the swapped pair needs the reversed comparison.  */
	      if (t == STORE_FLAG_VALUE && f == 0)
	        code = GET_CODE (op0);
	      else if (t == 0 && f == STORE_FLAG_VALUE)
		{
		  enum rtx_code tmp;
		  tmp = reversed_comparison_code (op0, NULL);
		  if (tmp == UNKNOWN)
		    break;
		  code = tmp;
		}
	      else
		break;

	      return simplify_gen_relational (code, mode, cmp_mode,
					      XEXP (op0, 0), XEXP (op0, 1));
	    }

	  if (cmp_mode == VOIDmode)
	    cmp_mode = op0_mode;
	  temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
						cmp_mode, XEXP (op0, 0),
						XEXP (op0, 1));

	  /* See if any simplifications were possible.  */
	  if (temp)
	    {
	      if (CONST_INT_P (temp))
		return temp == const0_rtx ? op2 : op1;
	      else if (temp)
		return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
	    }
	}
      break;

    case VEC_MERGE:
      gcc_assert (GET_MODE (op0) == mode);
      gcc_assert (GET_MODE (op1) == mode);
      gcc_assert (VECTOR_MODE_P (mode));
      trueop2 = avoid_constant_pool_reference (op2);
      if (CONST_INT_P (trueop2))
	{
	  int elt_size = GET_MODE_UNIT_SIZE (mode);
	  unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
	  /* Bit I of SEL set means element I comes from OP0, clear means
	     from OP1 (see the CONST_VECTOR loop below).  */
	  unsigned HOST_WIDE_INT sel = UINTVAL (trueop2);
	  unsigned HOST_WIDE_INT mask;
	  if (n_elts == HOST_BITS_PER_WIDE_INT)
	    mask = -1;
	  else
	    mask = (HOST_WIDE_INT_1U << n_elts) - 1;

	  /* All elements from one operand: drop the merge entirely.  */
	  if (!(sel & mask) && !side_effects_p (op0))
	    return op1;
	  if ((sel & mask) == mask && !side_effects_p (op1))
	    return op0;

	  rtx trueop0 = avoid_constant_pool_reference (op0);
	  rtx trueop1 = avoid_constant_pool_reference (op1);
	  if (GET_CODE (trueop0) == CONST_VECTOR
	      && GET_CODE (trueop1) == CONST_VECTOR)
	    {
	      rtvec v = rtvec_alloc (n_elts);
	      unsigned int i;

	      for (i = 0; i < n_elts; i++)
		RTVEC_ELT (v, i) = ((sel & (HOST_WIDE_INT_1U << i))
				    ? CONST_VECTOR_ELT (trueop0, i)
				    : CONST_VECTOR_ELT (trueop1, i));
	      return gen_rtx_CONST_VECTOR (mode, v);
	    }

	  /* Replace (vec_merge (vec_merge a b m) c n) with (vec_merge b c n)
	     if no element from a appears in the result.  */
	  if (GET_CODE (op0) == VEC_MERGE)
	    {
	      tem = avoid_constant_pool_reference (XEXP (op0, 2));
	      if (CONST_INT_P (tem))
		{
		  unsigned HOST_WIDE_INT sel0 = UINTVAL (tem);
		  if (!(sel & sel0 & mask) && !side_effects_p (XEXP (op0, 0)))
		    return simplify_gen_ternary (code, mode, mode,
						 XEXP (op0, 1), op1, op2);
		  if (!(sel & ~sel0 & mask) && !side_effects_p (XEXP (op0, 1)))
		    return simplify_gen_ternary (code, mode, mode,
						 XEXP (op0, 0), op1, op2);
		}
	    }
	  /* Mirror image of the above for an inner merge in OP1.  */
	  if (GET_CODE (op1) == VEC_MERGE)
	    {
	      tem = avoid_constant_pool_reference (XEXP (op1, 2));
	      if (CONST_INT_P (tem))
		{
		  unsigned HOST_WIDE_INT sel1 = UINTVAL (tem);
		  if (!(~sel & sel1 & mask) && !side_effects_p (XEXP (op1, 0)))
		    return simplify_gen_ternary (code, mode, mode,
						 op0, XEXP (op1, 1), op2);
		  if (!(~sel & ~sel1 & mask) && !side_effects_p (XEXP (op1, 1)))
		    return simplify_gen_ternary (code, mode, mode,
						 op0, XEXP (op1, 0), op2);
		}
	    }

	  /* Replace (vec_merge (vec_duplicate (vec_select a parallel (i))) a 1 << i)
	     with a.  */
	  if (GET_CODE (op0) == VEC_DUPLICATE
	      && GET_CODE (XEXP (op0, 0)) == VEC_SELECT
	      && GET_CODE (XEXP (XEXP (op0, 0), 1)) == PARALLEL
	      /* The vec_select must produce a single-element vector.  */
	      && mode_nunits[GET_MODE (XEXP (op0, 0))] == 1)
	    {
	      tem = XVECEXP ((XEXP (XEXP (op0, 0), 1)), 0, 0);
	      if (CONST_INT_P (tem) && CONST_INT_P (op2))
		{
		  if (XEXP (XEXP (op0, 0), 0) == op1
		      && UINTVAL (op2) == HOST_WIDE_INT_1U << UINTVAL (tem))
		    return op1;
		}
	    }
	}

      /* Merging identical operands yields either one of them.  */
      if (rtx_equal_p (op0, op1)
	  && !side_effects_p (op2) && !side_effects_p (op1))
	return op0;

      break;

    default:
      gcc_unreachable ();
    }

  return 0;
}
/* Evaluate a SUBREG of a CONST_INT or CONST_WIDE_INT or CONST_DOUBLE
   or CONST_FIXED or CONST_VECTOR, returning another CONST_INT or
   CONST_WIDE_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.

   Works by unpacking OP into a collection of 8-bit values
   represented as a little-endian array of 'unsigned char', selecting by BYTE,
   and then repacking them again for OUTERMODE.  */

static rtx
simplify_immed_subreg (machine_mode outermode, rtx op,
		       machine_mode innermode, unsigned int byte)
{
  enum {
    value_bit = 8,
    value_mask = (1 << value_bit) - 1
  };
  /* Scratch buffer holding OP decomposed into value_bit-sized chunks.  */
  unsigned char value[MAX_BITSIZE_MODE_ANY_MODE / value_bit];
  int value_start;
  int i;
  int elem;

  int num_elem;
  rtx * elems;
  int elem_bitsize;
  rtx result_s = NULL;
  rtvec result_v = NULL;
  enum mode_class outer_class;
  machine_mode outer_submode;
  int max_bitsize;

  /* Some ports misuse CCmode.  */
  if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
    return op;

  /* We have no way to represent a complex constant at the rtl level.  */
  if (COMPLEX_MODE_P (outermode))
    return NULL_RTX;

  /* We support any size mode.  */
  max_bitsize = MAX (GET_MODE_BITSIZE (outermode),
		     GET_MODE_BITSIZE (innermode));

  /* Unpack the value.  */

  if (GET_CODE (op) == CONST_VECTOR)
    {
      num_elem = CONST_VECTOR_NUNITS (op);
      elems = &CONST_VECTOR_ELT (op, 0);
      elem_bitsize = GET_MODE_UNIT_BITSIZE (innermode);
    }
  else
    {
      num_elem = 1;
      elems = &op;
      elem_bitsize = max_bitsize;
    }
  /* If this asserts, it is too complicated; reducing value_bit may help.  */
  gcc_assert (BITS_PER_UNIT % value_bit == 0);
  /* I don't know how to handle endianness of sub-units.  */
  gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);

  for (elem = 0; elem < num_elem; elem++)
    {
      unsigned char * vp;
      rtx el = elems[elem];

      /* Vectors are kept in target memory order.  (This is probably
	 a mistake.)  */
      {
	/* Note: this BYTE deliberately shadows the parameter; it is the
	   element's offset in target memory order.  */
	unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
	unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
			  / BITS_PER_UNIT);
	unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
	unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
	unsigned bytele = (subword_byte % UNITS_PER_WORD
			 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
	vp = value + (bytele * BITS_PER_UNIT) / value_bit;
      }

      switch (GET_CODE (el))
	{
	case CONST_INT:
	  for (i = 0;
	       i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
	       i += value_bit)
	    *vp++ = INTVAL (el) >> i;
	  /* CONST_INTs are always logically sign-extended.  */
	  for (; i < elem_bitsize; i += value_bit)
	    *vp++ = INTVAL (el) < 0 ? -1 : 0;
	  break;

	case CONST_WIDE_INT:
	  {
	    rtx_mode_t val = rtx_mode_t (el, innermode);
	    unsigned char extend = wi::sign_mask (val);
	    int prec = wi::get_precision (val);

	    for (i = 0; i < prec && i < elem_bitsize; i += value_bit)
	      *vp++ = wi::extract_uhwi (val, i, value_bit);
	    /* Sign-extend the remaining chunks.  */
	    for (; i < elem_bitsize; i += value_bit)
	      *vp++ = extend;
	  }
	  break;

	case CONST_DOUBLE:
	  if (TARGET_SUPPORTS_WIDE_INT == 0 && GET_MODE (el) == VOIDmode)
	    {
	      /* VOIDmode CONST_DOUBLE: a double-word integer constant.  */
	      unsigned char extend = 0;
	      /* If this triggers, someone should have generated a
		 CONST_INT instead.  */
	      gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);

	      for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
		*vp++ = CONST_DOUBLE_LOW (el) >> i;
	      while (i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize)
		{
		  *vp++
		    = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
		  i += value_bit;
		}

	      if (CONST_DOUBLE_HIGH (el) >> (HOST_BITS_PER_WIDE_INT - 1))
		extend = -1;
	      for (; i < elem_bitsize; i += value_bit)
		*vp++ = extend;
	    }
	  else
	    {
	      /* Floating-point CONST_DOUBLE: go through the target's
		 external representation.  */
	      /* This is big enough for anything on the platform.  */
	      long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32];
	      int bitsize = GET_MODE_BITSIZE (GET_MODE (el));

	      gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
	      gcc_assert (bitsize <= elem_bitsize);
	      gcc_assert (bitsize % value_bit == 0);

	      real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
			      GET_MODE (el));

	      /* real_to_target produces its result in words affected by
		 FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
		 and use WORDS_BIG_ENDIAN instead; see the documentation
	         of SUBREG in rtl.texi.  */
	      for (i = 0; i < bitsize; i += value_bit)
		{
		  int ibase;
		  if (WORDS_BIG_ENDIAN)
		    ibase = bitsize - 1 - i;
		  else
		    ibase = i;
		  *vp++ = tmp[ibase / 32] >> i % 32;
		}

	      /* It shouldn't matter what's done here, so fill it with
		 zero.  */
	      for (; i < elem_bitsize; i += value_bit)
		*vp++ = 0;
	    }
	  break;

	case CONST_FIXED:
	  if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
	    {
	      for (i = 0; i < elem_bitsize; i += value_bit)
		*vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
	    }
	  else
	    {
	      for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
		*vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
	      for (; i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize;
		   i += value_bit)
		*vp++ = CONST_FIXED_VALUE_HIGH (el)
			>> (i - HOST_BITS_PER_WIDE_INT);
	      for (; i < elem_bitsize; i += value_bit)
		*vp++ = 0;
	    }
          break;

	default:
	  gcc_unreachable ();
	}
    }

  /* Now, pick the right byte to start with.  */
  /* Renumber BYTE so that the least-significant byte is byte 0.  A special
     case is paradoxical SUBREGs, which shouldn't be adjusted since they
     will already have offset 0.  */
  if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
    {
      unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
			- byte);
      unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
      unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
      byte = (subword_byte % UNITS_PER_WORD
	      + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
    }

  /* BYTE should still be inside OP.  (Note that BYTE is unsigned,
     so if it's become negative it will instead be very large.)  */
  gcc_assert (byte < GET_MODE_SIZE (innermode));

  /* Convert from bytes to chunks of size value_bit.  */
  value_start = byte * (BITS_PER_UNIT / value_bit);

  /* Re-pack the value.  */
  num_elem = GET_MODE_NUNITS (outermode);

  if (VECTOR_MODE_P (outermode))
    {
      result_v = rtvec_alloc (num_elem);
      elems = &RTVEC_ELT (result_v, 0);
    }
  else
    elems = &result_s;

  outer_submode = GET_MODE_INNER (outermode);
  outer_class = GET_MODE_CLASS (outer_submode);
  elem_bitsize = GET_MODE_BITSIZE (outer_submode);

  gcc_assert (elem_bitsize % value_bit == 0);
  gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);

  for (elem = 0; elem < num_elem; elem++)
    {
      unsigned char *vp;

      /* Vectors are stored in target memory order.  (This is probably
	 a mistake.)  */
      {
	/* As in the unpacking loop, this BYTE shadows the parameter.  */
	unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
	unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
			  / BITS_PER_UNIT);
	unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
	unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
	unsigned bytele = (subword_byte % UNITS_PER_WORD
			 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
	vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
      }

      switch (outer_class)
	{
	case MODE_INT:
	case MODE_PARTIAL_INT:
	  {
	    int u;
	    int base = 0;
	    int units
	      = (GET_MODE_BITSIZE (outer_submode) + HOST_BITS_PER_WIDE_INT - 1)
	      / HOST_BITS_PER_WIDE_INT;
	    HOST_WIDE_INT tmp[MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_WIDE_INT];
	    wide_int r;

	    if (GET_MODE_PRECISION (outer_submode) > MAX_BITSIZE_MODE_ANY_INT)
	      return NULL_RTX;
	    for (u = 0; u < units; u++)
	      {
		unsigned HOST_WIDE_INT buf = 0;
		for (i = 0;
		     i < HOST_BITS_PER_WIDE_INT && base + i < elem_bitsize;
		     i += value_bit)
		  buf |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;

		tmp[u] = buf;
		base += HOST_BITS_PER_WIDE_INT;
	      }
	    r = wide_int::from_array (tmp, units,
				      GET_MODE_PRECISION (outer_submode));
#if TARGET_SUPPORTS_WIDE_INT == 0
	    /* Make sure r will fit into CONST_INT or CONST_DOUBLE.  */
	    if (wi::min_precision (r, SIGNED) > HOST_BITS_PER_DOUBLE_INT)
	      return NULL_RTX;
#endif
	    elems[elem] = immed_wide_int_const (r, outer_submode);
	  }
	  break;

	case MODE_FLOAT:
	case MODE_DECIMAL_FLOAT:
	  {
	    REAL_VALUE_TYPE r;
	    long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32] = { 0 };

	    /* real_from_target wants its input in words affected by
	       FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
	       and use WORDS_BIG_ENDIAN instead; see the documentation
	       of SUBREG in rtl.texi.  */
	    for (i = 0; i < elem_bitsize; i += value_bit)
	      {
		int ibase;
		if (WORDS_BIG_ENDIAN)
		  ibase = elem_bitsize - 1 - i;
		else
		  ibase = i;
		tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
	      }

	    real_from_target (&r, tmp, outer_submode);
	    elems[elem] = const_double_from_real_value (r, outer_submode);
	  }
	  break;

	case MODE_FRACT:
	case MODE_UFRACT:
	case MODE_ACCUM:
	case MODE_UACCUM:
	  {
	    FIXED_VALUE_TYPE f;
	    f.data.low = 0;
	    f.data.high = 0;
	    f.mode = outer_submode;

	    for (i = 0;
		 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
		 i += value_bit)
	      f.data.low |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
	    for (; i < elem_bitsize; i += value_bit)
	      f.data.high |= ((unsigned HOST_WIDE_INT)(*vp++ & value_mask)
			      << (i - HOST_BITS_PER_WIDE_INT));

	    elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
          }
	  break;

	default:
	  gcc_unreachable ();
	}
    }
  if (VECTOR_MODE_P (outermode))
    return gen_rtx_CONST_VECTOR (outermode, result_v);
  else
    return result_s;
}
/* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
   Return 0 if no simplifications are possible.  */
rtx
simplify_subreg (machine_mode outermode, rtx op,
		 machine_mode innermode, unsigned int byte)
{
  /* Little bit of sanity checking.  */
  gcc_assert (innermode != VOIDmode);
  gcc_assert (outermode != VOIDmode);
  gcc_assert (innermode != BLKmode);
  gcc_assert (outermode != BLKmode);

  gcc_assert (GET_MODE (op) == innermode
	      || GET_MODE (op) == VOIDmode);

  /* The offset must be a multiple of the outer mode's size and lie
     within the inner value.  */
  if ((byte % GET_MODE_SIZE (outermode)) != 0)
    return NULL_RTX;

  if (byte >= GET_MODE_SIZE (innermode))
    return NULL_RTX;

  /* An identity subreg is just the operand itself.  */
  if (outermode == innermode && !byte)
    return op;

  /* Constants are handled by byte-level unpacking/repacking.  */
  if (CONST_SCALAR_INT_P (op)
      || CONST_DOUBLE_AS_FLOAT_P (op)
      || GET_CODE (op) == CONST_FIXED
      || GET_CODE (op) == CONST_VECTOR)
    return simplify_immed_subreg (outermode, op, innermode, byte);

  /* Changing mode twice with SUBREG => just change it once,
     or not at all if changing back op starting mode.  */
  if (GET_CODE (op) == SUBREG)
    {
      machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
      int final_offset = byte + SUBREG_BYTE (op);
      rtx newx;

      if (outermode == innermostmode
	  && byte == 0 && SUBREG_BYTE (op) == 0)
	return SUBREG_REG (op);

      /* The SUBREG_BYTE represents offset, as if the value were stored
	 in memory.  Irritating exception is paradoxical subreg, where
	 we define SUBREG_BYTE to be 0.  On big endian machines, this
	 value should be negative.  For a moment, undo this exception.  */
      if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
	{
	  int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
	  if (WORDS_BIG_ENDIAN)
	    final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    final_offset += difference % UNITS_PER_WORD;
	}

      if (SUBREG_BYTE (op) == 0
	  && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
	{
	  int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
	  if (WORDS_BIG_ENDIAN)
	    final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    final_offset += difference % UNITS_PER_WORD;
	}

      /* See whether resulting subreg will be paradoxical.  */
      if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
	{
	  /* In nonparadoxical subregs we can't handle negative offsets.  */
	  if (final_offset < 0)
	    return NULL_RTX;
	  /* Bail out in case resulting subreg would be incorrect.  */
	  if (final_offset % GET_MODE_SIZE (outermode)
	      || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
	    return NULL_RTX;
	}
      else
	{
	  int offset = 0;
	  int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));

	  /* In paradoxical subreg, see if we are still looking on lower part.
	     If so, our SUBREG_BYTE will be 0.  */
	  if (WORDS_BIG_ENDIAN)
	    offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    offset += difference % UNITS_PER_WORD;
	  if (offset == final_offset)
	    final_offset = 0;
	  else
	    return NULL_RTX;
	}

      /* Recurse for further possible simplifications.  */
      newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
			      final_offset);
      if (newx)
	return newx;
      if (validate_subreg (outermode, innermostmode,
			   SUBREG_REG (op), final_offset))
	{
	  newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
	  /* Preserve promotion information when the collapsed subreg is
	     still a lowpart of the (promoted) inner register.  */
	  if (SUBREG_PROMOTED_VAR_P (op)
	      && SUBREG_PROMOTED_SIGN (op) >= 0
	      && GET_MODE_CLASS (outermode) == MODE_INT
	      && IN_RANGE (GET_MODE_SIZE (outermode),
			   GET_MODE_SIZE (innermode),
			   GET_MODE_SIZE (innermostmode))
	      && subreg_lowpart_p (newx))
	    {
	      SUBREG_PROMOTED_VAR_P (newx) = 1;
	      SUBREG_PROMOTED_SET (newx, SUBREG_PROMOTED_GET (op));
	    }
	  return newx;
	}
      return NULL_RTX;
    }

  /* SUBREG of a hard register => just change the register number
     and/or mode.  If the hard register is not valid in that mode,
     suppress this simplification.  If the hard register is the stack,
     frame, or argument pointer, leave this as a SUBREG.  */

  if (REG_P (op) && HARD_REGISTER_P (op))
    {
      unsigned int regno, final_regno;

      regno = REGNO (op);
      final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
      if (HARD_REGISTER_NUM_P (final_regno))
	{
	  rtx x;
	  int final_offset = byte;

	  /* Adjust offset for paradoxical subregs.  */
	  if (byte == 0
	      && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
	    {
	      int difference = (GET_MODE_SIZE (innermode)
				- GET_MODE_SIZE (outermode));
	      if (WORDS_BIG_ENDIAN)
		final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	      if (BYTES_BIG_ENDIAN)
		final_offset += difference % UNITS_PER_WORD;
	    }

	  x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);

	  /* Propagate original regno.  We don't have any way to specify
	     the offset inside original regno, so do so only for lowpart.
	     The information is used only by alias analysis that can not
	     grog partial register anyway.  */

	  if (subreg_lowpart_offset (outermode, innermode) == byte)
	    ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
	  return x;
	}
    }

  /* If we have a SUBREG of a register that we are replacing and we are
     replacing it with a MEM, make a new MEM and try replacing the
     SUBREG with it.  Don't do this if the MEM has a mode-dependent address
     or if we would be widening it.  */

  if (MEM_P (op)
      && ! mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op))
      /* Allow splitting of volatile memory references in case we don't
         have instruction to move the whole thing.  */
      && (! MEM_VOLATILE_P (op)
	  || ! have_insn_for (SET, innermode))
      && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
    return adjust_address_nv (op, outermode, byte);

  /* Handle complex or vector values represented as CONCAT or VEC_CONCAT
     of two parts.  */
  if (GET_CODE (op) == CONCAT
      || GET_CODE (op) == VEC_CONCAT)
    {
      unsigned int part_size, final_offset;
      rtx part, res;

      enum machine_mode part_mode = GET_MODE (XEXP (op, 0));
      if (part_mode == VOIDmode)
	part_mode = GET_MODE_INNER (GET_MODE (op));
      part_size = GET_MODE_SIZE (part_mode);
      /* Select the half of the concatenation that BYTE falls in.  */
      if (byte < part_size)
	{
	  part = XEXP (op, 0);
	  final_offset = byte;
	}
      else
	{
	  part = XEXP (op, 1);
	  final_offset = byte - part_size;
	}

      /* Give up if the requested piece straddles both halves.  */
      if (final_offset + GET_MODE_SIZE (outermode) > part_size)
	return NULL_RTX;

      part_mode = GET_MODE (part);
      if (part_mode == VOIDmode)
	part_mode = GET_MODE_INNER (GET_MODE (op));
      res = simplify_subreg (outermode, part, part_mode, final_offset);
      if (res)
	return res;
      if (validate_subreg (outermode, part_mode, part, final_offset))
	return gen_rtx_SUBREG (outermode, part, final_offset);
      return NULL_RTX;
    }

  /* A SUBREG resulting from a zero extension may fold to zero if
     it extracts higher bits that the ZERO_EXTEND's source bits.  */
  if (GET_CODE (op) == ZERO_EXTEND && SCALAR_INT_MODE_P (innermode))
    {
      unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
      if (bitpos >= GET_MODE_PRECISION (GET_MODE (XEXP (op, 0))))
	return CONST0_RTX (outermode);
    }

  /* A lowpart narrowing SUBREG is equivalent to TRUNCATE; try the
     truncation simplifier.  */
  if (SCALAR_INT_MODE_P (outermode)
      && SCALAR_INT_MODE_P (innermode)
      && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode)
      && byte == subreg_lowpart_offset (outermode, innermode))
    {
      rtx tem = simplify_truncation (outermode, op, innermode);
      if (tem)
	return tem;
    }

  return NULL_RTX;
}
6259 /* Make a SUBREG operation or equivalent if it folds. */
6262 simplify_gen_subreg (machine_mode outermode, rtx op,
6263 machine_mode innermode, unsigned int byte)
6265 rtx newx;
6267 newx = simplify_subreg (outermode, op, innermode, byte);
6268 if (newx)
6269 return newx;
6271 if (GET_CODE (op) == SUBREG
6272 || GET_CODE (op) == CONCAT
6273 || GET_MODE (op) == VOIDmode)
6274 return NULL_RTX;
6276 if (validate_subreg (outermode, innermode, op, byte))
6277 return gen_rtx_SUBREG (outermode, op, byte);
6279 return NULL_RTX;
6282 /* Generates a subreg to get the least significant part of EXPR (in mode
6283 INNER_MODE) to OUTER_MODE. */
6286 lowpart_subreg (machine_mode outer_mode, rtx expr,
6287 machine_mode inner_mode)
6289 return simplify_gen_subreg (outer_mode, expr, inner_mode,
6290 subreg_lowpart_offset (outer_mode, inner_mode));
/* Simplify X, an rtx expression.

   Return the simplified expression or NULL if no simplifications
   were possible.

   This is the preferred entry point into the simplification routines;
   however, we still allow passes to call the more specific routines.

   Right now GCC has three (yes, three) major bodies of RTL simplification
   code that need to be unified.

	1. fold_rtx in cse.c.  This code uses various CSE specific
	   information to aid in RTL simplification.

	2. simplify_rtx in combine.c.  Similar to fold_rtx, except that
	   it uses combine specific information to aid in RTL
	   simplification.

	3. The routines in this file.


   Long term we want to only have one body of simplification code; to
   get to that state I recommend the following steps:

	1. Pour over fold_rtx & simplify_rtx and move any simplifications
	   which are not pass dependent state into these routines.

	2. As code is moved by #1, change fold_rtx & simplify_rtx to
	   use this routine whenever possible.

	3. Allow for pass dependent state to be provided to these
	   routines and add simplifications based on the pass dependent
	   state.  Remove code from cse.c & combine.c that becomes
	   redundant/dead.

    It will take time, but ultimately the compiler will be easier to
    maintain and improve.  It's totally silly that when we add a
    simplification that it needs to be added to 4 places (3 for RTL
    simplification and 1 for tree simplification.  */

rtx
simplify_rtx (const_rtx x)
{
  const enum rtx_code code = GET_CODE (x);
  const machine_mode mode = GET_MODE (x);

  /* Dispatch on the rtx class to the matching specific simplifier.  */
  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      return simplify_unary_operation (code, mode,
				       XEXP (x, 0), GET_MODE (XEXP (x, 0)));
    case RTX_COMM_ARITH:
      /* Canonicalize operand order for commutative operations first.  */
      if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
	return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));

      /* Fall through.  */

    case RTX_BIN_ARITH:
      return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
					 XEXP (x, 0), XEXP (x, 1),
					 XEXP (x, 2));

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      /* Use operand 1's mode when operand 0 is mode-less (a constant).  */
      return simplify_relational_operation (code, mode,
					    ((GET_MODE (XEXP (x, 0))
					      != VOIDmode)
					     ? GET_MODE (XEXP (x, 0))
					     : GET_MODE (XEXP (x, 1))),
					    XEXP (x, 0),
					    XEXP (x, 1));

    case RTX_EXTRA:
      if (code == SUBREG)
	return simplify_subreg (mode, SUBREG_REG (x),
				GET_MODE (SUBREG_REG (x)),
				SUBREG_BYTE (x));
      break;

    case RTX_OBJ:
      if (code == LO_SUM)
	{
	  /* Convert (lo_sum (high FOO) FOO) to FOO.  */
	  if (GET_CODE (XEXP (x, 0)) == HIGH
	      && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
	  return XEXP (x, 1);
	}
      break;

    default:
      break;
    }
  return NULL;
}