/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987-2015 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "predict.h"
#include "optabs.h"
#include "emit-rtl.h"
#include "recog.h"
#include "diagnostic-core.h"
#include "varasm.h"
#include "flags.h"
/* Simplification and canonicalization of RTL.  */

/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
#define HWI_SIGN_EXTEND(low) \
  ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
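
/* For example, HWI_SIGN_EXTEND (-5) is (HOST_WIDE_INT) -1 and
   HWI_SIGN_EXTEND (42) is 0, i.e. the high half that would result from
   sign extending LOW.  */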
static rtx neg_const_int (machine_mode, const_rtx);
static bool plus_minus_operand_p (const_rtx);
static rtx simplify_plus_minus (enum rtx_code, machine_mode, rtx, rtx);
static rtx simplify_immed_subreg (machine_mode, rtx, machine_mode,
                                  unsigned int);
static rtx simplify_associative_operation (enum rtx_code, machine_mode,
                                           rtx, rtx);
static rtx simplify_relational_operation_1 (enum rtx_code, machine_mode,
                                            machine_mode, rtx, rtx);
static rtx simplify_unary_operation_1 (enum rtx_code, machine_mode, rtx);
static rtx simplify_binary_operation_1 (enum rtx_code, machine_mode,
                                        rtx, rtx, rtx, rtx);
/* Negate a CONST_INT rtx, truncating (because a conversion from a
   maximally negative number can overflow).  */

static rtx
neg_const_int (machine_mode mode, const_rtx i)
{
  return gen_int_mode (-(unsigned HOST_WIDE_INT) INTVAL (i), mode);
}
/* Test whether expression, X, is an immediate constant that represents
   the most significant bit of machine mode MODE.  */

bool
mode_signbit_p (machine_mode mode, const_rtx x)
{
  unsigned HOST_WIDE_INT val;
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0)
    return false;

  if (width <= HOST_BITS_PER_WIDE_INT
      && CONST_INT_P (x))
    val = INTVAL (x);
#if TARGET_SUPPORTS_WIDE_INT
  else if (CONST_WIDE_INT_P (x))
    {
      unsigned int i;
      unsigned int elts = CONST_WIDE_INT_NUNITS (x);
      if (elts != (width + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT)
        return false;
      for (i = 0; i < elts - 1; i++)
        if (CONST_WIDE_INT_ELT (x, i) != 0)
          return false;
      val = CONST_WIDE_INT_ELT (x, elts - 1);
      width %= HOST_BITS_PER_WIDE_INT;
      if (width == 0)
        width = HOST_BITS_PER_WIDE_INT;
    }
#else
  else if (width <= HOST_BITS_PER_DOUBLE_INT
           && CONST_DOUBLE_AS_INT_P (x)
           && CONST_DOUBLE_LOW (x) == 0)
    {
      val = CONST_DOUBLE_HIGH (x);
      width -= HOST_BITS_PER_WIDE_INT;
    }
#endif
  else
    /* X is not an integer constant.  */
    return false;

  if (width < HOST_BITS_PER_WIDE_INT)
    val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
  return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
}
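
/* For example, for SImode (32-bit precision) the CONST_INT whose only set
   bit within the mode is bit 31 (the value 0x80000000, stored sign-extended
   in the CONST_INT) satisfies mode_signbit_p, while 0x40000000 does not.  */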
/* Test whether VAL is equal to the most significant bit of mode MODE
   (after masking with the mode mask of MODE).  Returns false if the
   precision of MODE is too large to handle.  */

bool
val_signbit_p (machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= GET_MODE_MASK (mode);
  return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
}

/* Test whether the most significant bit of mode MODE is set in VAL.
   Returns false if the precision of MODE is too large to handle.  */
bool
val_signbit_known_set_p (machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
  return val != 0;
}

/* Test whether the most significant bit of mode MODE is clear in VAL.
   Returns false if the precision of MODE is too large to handle.  */
bool
val_signbit_known_clear_p (machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
  return val == 0;
}
/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */

rtx
simplify_gen_binary (enum rtx_code code, machine_mode mode, rtx op0,
                     rtx op1)
{
  rtx tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);
  if (tem)
    return tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    std::swap (op0, op1);

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
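
/* For example, simplify_gen_binary (PLUS, SImode, x, const0_rtx) typically
   folds to X itself, while simplify_gen_binary (PLUS, SImode, const1_rtx, x)
   returns (plus:SI x (const_int 1)) with the constant canonicalized into
   the second operand.  */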
/* If X is a MEM referencing the constant pool, return the real value.
   Otherwise return X.  */
rtx
avoid_constant_pool_reference (rtx x)
{
  rtx c, tmp, addr;
  machine_mode cmode;
  HOST_WIDE_INT offset = 0;

  switch (GET_CODE (x))
    {
    case MEM:
      break;

    case FLOAT_EXTEND:
      /* Handle float extensions of constant pool references.  */
      tmp = XEXP (x, 0);
      c = avoid_constant_pool_reference (tmp);
      if (c != tmp && CONST_DOUBLE_AS_FLOAT_P (c))
        return const_double_from_real_value (*CONST_DOUBLE_REAL_VALUE (c),
                                             GET_MODE (x));
      return x;

    default:
      return x;
    }

  if (GET_MODE (x) == BLKmode)
    return x;

  addr = XEXP (x, 0);

  /* Call target hook to avoid the effects of -fpic etc....  */
  addr = targetm.delegitimize_address (addr);

  /* Split the address into a base and integer offset.  */
  if (GET_CODE (addr) == CONST
      && GET_CODE (XEXP (addr, 0)) == PLUS
      && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
    {
      offset = INTVAL (XEXP (XEXP (addr, 0), 1));
      addr = XEXP (XEXP (addr, 0), 0);
    }

  if (GET_CODE (addr) == LO_SUM)
    addr = XEXP (addr, 1);

  /* If this is a constant pool reference, we can turn it into its
     constant and hope that simplifications happen.  */
  if (GET_CODE (addr) == SYMBOL_REF
      && CONSTANT_POOL_ADDRESS_P (addr))
    {
      c = get_pool_constant (addr);
      cmode = get_pool_mode (addr);

      /* If we're accessing the constant in a different mode than it was
         originally stored, attempt to fix that up via subreg simplifications.
         If that fails we have no choice but to return the original memory.  */
      if ((offset != 0 || cmode != GET_MODE (x))
          && offset >= 0 && offset < GET_MODE_SIZE (cmode))
        {
          rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
          if (tem && CONSTANT_P (tem))
            return tem;
        }
      else
        return c;
    }

  return x;
}
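
/* For example, a MEM:SF whose address is a SYMBOL_REF into the constant
   pool holding the value 1.0 is replaced by the corresponding CONST_DOUBLE,
   which later simplifications can then fold.  */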
268 /* Simplify a MEM based on its attributes. This is the default
269 delegitimize_address target hook, and it's recommended that every
270 overrider call it. */
273 delegitimize_mem_from_attrs (rtx x)
275 /* MEMs without MEM_OFFSETs may have been offset, so we can't just
276 use their base addresses as equivalent. */
277 if (MEM_P (x)
278 && MEM_EXPR (x)
279 && MEM_OFFSET_KNOWN_P (x))
281 tree decl = MEM_EXPR (x);
282 machine_mode mode = GET_MODE (x);
283 HOST_WIDE_INT offset = 0;
285 switch (TREE_CODE (decl))
287 default:
288 decl = NULL;
289 break;
291 case VAR_DECL:
292 break;
294 case ARRAY_REF:
295 case ARRAY_RANGE_REF:
296 case COMPONENT_REF:
297 case BIT_FIELD_REF:
298 case REALPART_EXPR:
299 case IMAGPART_EXPR:
300 case VIEW_CONVERT_EXPR:
302 HOST_WIDE_INT bitsize, bitpos;
303 tree toffset;
304 int unsignedp, reversep, volatilep = 0;
306 decl
307 = get_inner_reference (decl, &bitsize, &bitpos, &toffset, &mode,
308 &unsignedp, &reversep, &volatilep, false);
309 if (bitsize != GET_MODE_BITSIZE (mode)
310 || (bitpos % BITS_PER_UNIT)
311 || (toffset && !tree_fits_shwi_p (toffset)))
312 decl = NULL;
313 else
315 offset += bitpos / BITS_PER_UNIT;
316 if (toffset)
317 offset += tree_to_shwi (toffset);
319 break;
323 if (decl
324 && mode == GET_MODE (x)
325 && TREE_CODE (decl) == VAR_DECL
326 && (TREE_STATIC (decl)
327 || DECL_THREAD_LOCAL_P (decl))
328 && DECL_RTL_SET_P (decl)
329 && MEM_P (DECL_RTL (decl)))
331 rtx newx;
333 offset += MEM_OFFSET (x);
335 newx = DECL_RTL (decl);
337 if (MEM_P (newx))
339 rtx n = XEXP (newx, 0), o = XEXP (x, 0);
341 /* Avoid creating a new MEM needlessly if we already had
342 the same address. We do if there's no OFFSET and the
343 old address X is identical to NEWX, or if X is of the
344 form (plus NEWX OFFSET), or the NEWX is of the form
345 (plus Y (const_int Z)) and X is that with the offset
346 added: (plus Y (const_int Z+OFFSET)). */
347 if (!((offset == 0
348 || (GET_CODE (o) == PLUS
349 && GET_CODE (XEXP (o, 1)) == CONST_INT
350 && (offset == INTVAL (XEXP (o, 1))
351 || (GET_CODE (n) == PLUS
352 && GET_CODE (XEXP (n, 1)) == CONST_INT
353 && (INTVAL (XEXP (n, 1)) + offset
354 == INTVAL (XEXP (o, 1)))
355 && (n = XEXP (n, 0))))
356 && (o = XEXP (o, 0))))
357 && rtx_equal_p (o, n)))
358 x = adjust_address_nv (newx, mode, offset);
360 else if (GET_MODE (x) == GET_MODE (newx)
361 && offset == 0)
362 x = newx;
366 return x;
369 /* Make a unary operation by first seeing if it folds and otherwise making
370 the specified operation. */
373 simplify_gen_unary (enum rtx_code code, machine_mode mode, rtx op,
374 machine_mode op_mode)
376 rtx tem;
378 /* If this simplifies, use it. */
379 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
380 return tem;
382 return gen_rtx_fmt_e (code, mode, op);
385 /* Likewise for ternary operations. */
388 simplify_gen_ternary (enum rtx_code code, machine_mode mode,
389 machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
391 rtx tem;
393 /* If this simplifies, use it. */
394 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
395 op0, op1, op2)))
396 return tem;
398 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
401 /* Likewise, for relational operations.
402 CMP_MODE specifies mode comparison is done in. */
405 simplify_gen_relational (enum rtx_code code, machine_mode mode,
406 machine_mode cmp_mode, rtx op0, rtx op1)
408 rtx tem;
410 if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
411 op0, op1)))
412 return tem;
414 return gen_rtx_fmt_ee (code, mode, op0, op1);
417 /* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
418 and simplify the result. If FN is non-NULL, call this callback on each
419 X, if it returns non-NULL, replace X with its return value and simplify the
420 result. */
423 simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
424 rtx (*fn) (rtx, const_rtx, void *), void *data)
426 enum rtx_code code = GET_CODE (x);
427 machine_mode mode = GET_MODE (x);
428 machine_mode op_mode;
429 const char *fmt;
430 rtx op0, op1, op2, newx, op;
431 rtvec vec, newvec;
432 int i, j;
434 if (__builtin_expect (fn != NULL, 0))
436 newx = fn (x, old_rtx, data);
437 if (newx)
438 return newx;
440 else if (rtx_equal_p (x, old_rtx))
441 return copy_rtx ((rtx) data);
443 switch (GET_RTX_CLASS (code))
445 case RTX_UNARY:
446 op0 = XEXP (x, 0);
447 op_mode = GET_MODE (op0);
448 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
449 if (op0 == XEXP (x, 0))
450 return x;
451 return simplify_gen_unary (code, mode, op0, op_mode);
453 case RTX_BIN_ARITH:
454 case RTX_COMM_ARITH:
455 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
456 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
457 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
458 return x;
459 return simplify_gen_binary (code, mode, op0, op1);
461 case RTX_COMPARE:
462 case RTX_COMM_COMPARE:
463 op0 = XEXP (x, 0);
464 op1 = XEXP (x, 1);
465 op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
466 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
467 op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
468 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
469 return x;
470 return simplify_gen_relational (code, mode, op_mode, op0, op1);
472 case RTX_TERNARY:
473 case RTX_BITFIELD_OPS:
474 op0 = XEXP (x, 0);
475 op_mode = GET_MODE (op0);
476 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
477 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
478 op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
479 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
480 return x;
481 if (op_mode == VOIDmode)
482 op_mode = GET_MODE (op0);
483 return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
485 case RTX_EXTRA:
486 if (code == SUBREG)
488 op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
489 if (op0 == SUBREG_REG (x))
490 return x;
491 op0 = simplify_gen_subreg (GET_MODE (x), op0,
492 GET_MODE (SUBREG_REG (x)),
493 SUBREG_BYTE (x));
494 return op0 ? op0 : x;
496 break;
498 case RTX_OBJ:
499 if (code == MEM)
501 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
502 if (op0 == XEXP (x, 0))
503 return x;
504 return replace_equiv_address_nv (x, op0);
506 else if (code == LO_SUM)
508 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
509 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
511 /* (lo_sum (high x) y) -> y where x and y have the same base. */
512 if (GET_CODE (op0) == HIGH)
514 rtx base0, base1, offset0, offset1;
515 split_const (XEXP (op0, 0), &base0, &offset0);
516 split_const (op1, &base1, &offset1);
517 if (rtx_equal_p (base0, base1))
518 return op1;
521 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
522 return x;
523 return gen_rtx_LO_SUM (mode, op0, op1);
525 break;
527 default:
528 break;
531 newx = x;
532 fmt = GET_RTX_FORMAT (code);
533 for (i = 0; fmt[i]; i++)
534 switch (fmt[i])
536 case 'E':
537 vec = XVEC (x, i);
538 newvec = XVEC (newx, i);
539 for (j = 0; j < GET_NUM_ELEM (vec); j++)
541 op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
542 old_rtx, fn, data);
543 if (op != RTVEC_ELT (vec, j))
545 if (newvec == vec)
547 newvec = shallow_copy_rtvec (vec);
548 if (x == newx)
549 newx = shallow_copy_rtx (x);
550 XVEC (newx, i) = newvec;
552 RTVEC_ELT (newvec, j) = op;
555 break;
557 case 'e':
558 if (XEXP (x, i))
560 op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
561 if (op != XEXP (x, i))
563 if (x == newx)
564 newx = shallow_copy_rtx (x);
565 XEXP (newx, i) = op;
568 break;
570 return newx;
573 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
574 resulting RTX. Return a new RTX which is as simplified as possible. */
577 simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
579 return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
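
/* For example, applying simplify_replace_rtx to (plus:SI (reg:SI 100)
   (const_int 3)) with OLD_RTX = (reg:SI 100) and NEW_RTX = (const_int 5)
   substitutes the register and folds the result to (const_int 8).  */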
582 /* Try to simplify a MODE truncation of OP, which has OP_MODE.
583 Only handle cases where the truncated value is inherently an rvalue.
585 RTL provides two ways of truncating a value:
587 1. a lowpart subreg. This form is only a truncation when both
588 the outer and inner modes (here MODE and OP_MODE respectively)
589 are scalar integers, and only then when the subreg is used as
590 an rvalue.
592 It is only valid to form such truncating subregs if the
593 truncation requires no action by the target. The onus for
594 proving this is on the creator of the subreg -- e.g. the
595 caller to simplify_subreg or simplify_gen_subreg -- and typically
596 involves either TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode.
598 2. a TRUNCATE. This form handles both scalar and compound integers.
600 The first form is preferred where valid. However, the TRUNCATE
601 handling in simplify_unary_operation turns the second form into the
602 first form when TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode allow,
603 so it is generally safe to form rvalue truncations using:
605 simplify_gen_unary (TRUNCATE, ...)
607 and leave simplify_unary_operation to work out which representation
608 should be used.
610 Because of the proof requirements on (1), simplify_truncation must
611 also use simplify_gen_unary (TRUNCATE, ...) to truncate parts of OP,
612 regardless of whether the outer truncation came from a SUBREG or a
613 TRUNCATE. For example, if the caller has proven that an SImode
614 truncation of:
616 (and:DI X Y)
618 is a no-op and can be represented as a subreg, it does not follow
619 that SImode truncations of X and Y are also no-ops. On a target
620 like 64-bit MIPS that requires SImode values to be stored in
621 sign-extended form, an SImode truncation of:
623 (and:DI (reg:DI X) (const_int 63))
625 is trivially a no-op because only the lower 6 bits can be set.
626 However, X is still an arbitrary 64-bit number and so we cannot
627 assume that truncating it too is a no-op. */
629 static rtx
630 simplify_truncation (machine_mode mode, rtx op,
631 machine_mode op_mode)
633 unsigned int precision = GET_MODE_UNIT_PRECISION (mode);
634 unsigned int op_precision = GET_MODE_UNIT_PRECISION (op_mode);
635 gcc_assert (precision <= op_precision);
637 /* Optimize truncations of zero and sign extended values. */
638 if (GET_CODE (op) == ZERO_EXTEND
639 || GET_CODE (op) == SIGN_EXTEND)
641 /* There are three possibilities. If MODE is the same as the
642 origmode, we can omit both the extension and the subreg.
643 If MODE is not larger than the origmode, we can apply the
644 truncation without the extension. Finally, if the outermode
645 is larger than the origmode, we can just extend to the appropriate
646 mode. */
647 machine_mode origmode = GET_MODE (XEXP (op, 0));
648 if (mode == origmode)
649 return XEXP (op, 0);
650 else if (precision <= GET_MODE_UNIT_PRECISION (origmode))
651 return simplify_gen_unary (TRUNCATE, mode,
652 XEXP (op, 0), origmode);
653 else
654 return simplify_gen_unary (GET_CODE (op), mode,
655 XEXP (op, 0), origmode);
658 /* If the machine can perform operations in the truncated mode, distribute
659 the truncation, i.e. simplify (truncate:QI (op:SI (x:SI) (y:SI))) into
660 (op:QI (truncate:QI (x:SI)) (truncate:QI (y:SI))). */
661 if (1
662 && (!WORD_REGISTER_OPERATIONS || precision >= BITS_PER_WORD)
663 && (GET_CODE (op) == PLUS
664 || GET_CODE (op) == MINUS
665 || GET_CODE (op) == MULT))
667 rtx op0 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0), op_mode);
668 if (op0)
670 rtx op1 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 1), op_mode);
671 if (op1)
672 return simplify_gen_binary (GET_CODE (op), mode, op0, op1);
  /* Simplify (truncate:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C))
     into (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
679 if ((GET_CODE (op) == LSHIFTRT
680 || GET_CODE (op) == ASHIFTRT)
681 /* Ensure that OP_MODE is at least twice as wide as MODE
682 to avoid the possibility that an outer LSHIFTRT shifts by more
683 than the sign extension's sign_bit_copies and introduces zeros
684 into the high bits of the result. */
685 && 2 * precision <= op_precision
686 && CONST_INT_P (XEXP (op, 1))
687 && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
688 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
689 && UINTVAL (XEXP (op, 1)) < precision)
690 return simplify_gen_binary (ASHIFTRT, mode,
691 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
  /* Likewise, simplify (truncate:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C))
     into (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
696 if ((GET_CODE (op) == LSHIFTRT
697 || GET_CODE (op) == ASHIFTRT)
698 && CONST_INT_P (XEXP (op, 1))
699 && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
700 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
701 && UINTVAL (XEXP (op, 1)) < precision)
702 return simplify_gen_binary (LSHIFTRT, mode,
703 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
  /* Likewise, simplify (truncate:QI (ashift:SI (zero_extend:SI (x:QI)) C))
     into (ashift:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
708 if (GET_CODE (op) == ASHIFT
709 && CONST_INT_P (XEXP (op, 1))
710 && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
711 || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
712 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
713 && UINTVAL (XEXP (op, 1)) < precision)
714 return simplify_gen_binary (ASHIFT, mode,
715 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
717 /* Likewise (truncate:QI (and:SI (lshiftrt:SI (x:SI) C) C2)) into
718 (and:QI (lshiftrt:QI (truncate:QI (x:SI)) C) C2) for suitable C
719 and C2. */
720 if (GET_CODE (op) == AND
721 && (GET_CODE (XEXP (op, 0)) == LSHIFTRT
722 || GET_CODE (XEXP (op, 0)) == ASHIFTRT)
723 && CONST_INT_P (XEXP (XEXP (op, 0), 1))
724 && CONST_INT_P (XEXP (op, 1)))
726 rtx op0 = (XEXP (XEXP (op, 0), 0));
727 rtx shift_op = XEXP (XEXP (op, 0), 1);
728 rtx mask_op = XEXP (op, 1);
729 unsigned HOST_WIDE_INT shift = UINTVAL (shift_op);
730 unsigned HOST_WIDE_INT mask = UINTVAL (mask_op);
732 if (shift < precision
733 /* If doing this transform works for an X with all bits set,
734 it works for any X. */
735 && ((GET_MODE_MASK (mode) >> shift) & mask)
736 == ((GET_MODE_MASK (op_mode) >> shift) & mask)
737 && (op0 = simplify_gen_unary (TRUNCATE, mode, op0, op_mode))
738 && (op0 = simplify_gen_binary (LSHIFTRT, mode, op0, shift_op)))
740 mask_op = GEN_INT (trunc_int_for_mode (mask, mode));
741 return simplify_gen_binary (AND, mode, op0, mask_op);
745 /* Recognize a word extraction from a multi-word subreg. */
746 if ((GET_CODE (op) == LSHIFTRT
747 || GET_CODE (op) == ASHIFTRT)
748 && SCALAR_INT_MODE_P (mode)
749 && SCALAR_INT_MODE_P (op_mode)
750 && precision >= BITS_PER_WORD
751 && 2 * precision <= op_precision
752 && CONST_INT_P (XEXP (op, 1))
753 && (INTVAL (XEXP (op, 1)) & (precision - 1)) == 0
754 && UINTVAL (XEXP (op, 1)) < op_precision)
756 int byte = subreg_lowpart_offset (mode, op_mode);
757 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
758 return simplify_gen_subreg (mode, XEXP (op, 0), op_mode,
759 (WORDS_BIG_ENDIAN
760 ? byte - shifted_bytes
761 : byte + shifted_bytes));
764 /* If we have a TRUNCATE of a right shift of MEM, make a new MEM
765 and try replacing the TRUNCATE and shift with it. Don't do this
766 if the MEM has a mode-dependent address. */
767 if ((GET_CODE (op) == LSHIFTRT
768 || GET_CODE (op) == ASHIFTRT)
769 && SCALAR_INT_MODE_P (op_mode)
770 && MEM_P (XEXP (op, 0))
771 && CONST_INT_P (XEXP (op, 1))
772 && (INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (mode)) == 0
773 && INTVAL (XEXP (op, 1)) > 0
774 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (op_mode)
775 && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0),
776 MEM_ADDR_SPACE (XEXP (op, 0)))
777 && ! MEM_VOLATILE_P (XEXP (op, 0))
778 && (GET_MODE_SIZE (mode) >= UNITS_PER_WORD
779 || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
781 int byte = subreg_lowpart_offset (mode, op_mode);
782 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
783 return adjust_address_nv (XEXP (op, 0), mode,
784 (WORDS_BIG_ENDIAN
785 ? byte - shifted_bytes
786 : byte + shifted_bytes));
789 /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
790 (OP:SI foo:SI) if OP is NEG or ABS. */
791 if ((GET_CODE (op) == ABS
792 || GET_CODE (op) == NEG)
793 && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
794 || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
795 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
796 return simplify_gen_unary (GET_CODE (op), mode,
797 XEXP (XEXP (op, 0), 0), mode);
799 /* (truncate:A (subreg:B (truncate:C X) 0)) is
800 (truncate:A X). */
801 if (GET_CODE (op) == SUBREG
802 && SCALAR_INT_MODE_P (mode)
803 && SCALAR_INT_MODE_P (op_mode)
804 && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (op)))
805 && GET_CODE (SUBREG_REG (op)) == TRUNCATE
806 && subreg_lowpart_p (op))
808 rtx inner = XEXP (SUBREG_REG (op), 0);
809 if (GET_MODE_PRECISION (mode)
810 <= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op))))
811 return simplify_gen_unary (TRUNCATE, mode, inner, GET_MODE (inner));
812 else
813 /* If subreg above is paradoxical and C is narrower
814 than A, return (subreg:A (truncate:C X) 0). */
815 return simplify_gen_subreg (mode, SUBREG_REG (op),
816 GET_MODE (SUBREG_REG (op)), 0);
819 /* (truncate:A (truncate:B X)) is (truncate:A X). */
820 if (GET_CODE (op) == TRUNCATE)
821 return simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0),
822 GET_MODE (XEXP (op, 0)));
824 return NULL_RTX;
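
/* As an example of the above, simplify_truncation turns
   (truncate:QI (lshiftrt:SI (zero_extend:SI (x:QI)) (const_int 2)))
   into (lshiftrt:QI (x:QI) (const_int 2)), and distributes the truncation
   over PLUS, MINUS and MULT, subject to the conditions checked above.  */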
827 /* Try to simplify a unary operation CODE whose output mode is to be
828 MODE with input operand OP whose mode was originally OP_MODE.
829 Return zero if no simplification can be made. */
831 simplify_unary_operation (enum rtx_code code, machine_mode mode,
832 rtx op, machine_mode op_mode)
834 rtx trueop, tem;
836 trueop = avoid_constant_pool_reference (op);
838 tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
839 if (tem)
840 return tem;
842 return simplify_unary_operation_1 (code, mode, op);
845 /* Return true if FLOAT or UNSIGNED_FLOAT operation OP is known
846 to be exact. */
848 static bool
849 exact_int_to_float_conversion_p (const_rtx op)
851 int out_bits = significand_size (GET_MODE_INNER (GET_MODE (op)));
852 machine_mode op0_mode = GET_MODE (XEXP (op, 0));
853 /* Constants shouldn't reach here. */
854 gcc_assert (op0_mode != VOIDmode);
855 int in_prec = GET_MODE_UNIT_PRECISION (op0_mode);
856 int in_bits = in_prec;
857 if (HWI_COMPUTABLE_MODE_P (op0_mode))
859 unsigned HOST_WIDE_INT nonzero = nonzero_bits (XEXP (op, 0), op0_mode);
860 if (GET_CODE (op) == FLOAT)
861 in_bits -= num_sign_bit_copies (XEXP (op, 0), op0_mode);
862 else if (GET_CODE (op) == UNSIGNED_FLOAT)
863 in_bits = wi::min_precision (wi::uhwi (nonzero, in_prec), UNSIGNED);
864 else
865 gcc_unreachable ();
866 in_bits -= wi::ctz (wi::uhwi (nonzero, in_prec));
868 return in_bits <= out_bits;
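
/* For example, exact_int_to_float_conversion_p holds for
   (float:SF (reg:SI x)) when nonzero_bits shows that X has at most 24
   significant bits, since SFmode provides a 24-bit significand.  */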
871 /* Perform some simplifications we can do even if the operands
872 aren't constant. */
873 static rtx
874 simplify_unary_operation_1 (enum rtx_code code, machine_mode mode, rtx op)
876 enum rtx_code reversed;
877 rtx temp;
879 switch (code)
881 case NOT:
882 /* (not (not X)) == X. */
883 if (GET_CODE (op) == NOT)
884 return XEXP (op, 0);
886 /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
887 comparison is all ones. */
888 if (COMPARISON_P (op)
889 && (mode == BImode || STORE_FLAG_VALUE == -1)
890 && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
891 return simplify_gen_relational (reversed, mode, VOIDmode,
892 XEXP (op, 0), XEXP (op, 1));
894 /* (not (plus X -1)) can become (neg X). */
895 if (GET_CODE (op) == PLUS
896 && XEXP (op, 1) == constm1_rtx)
897 return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
899 /* Similarly, (not (neg X)) is (plus X -1). */
900 if (GET_CODE (op) == NEG)
901 return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
902 CONSTM1_RTX (mode));
904 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
905 if (GET_CODE (op) == XOR
906 && CONST_INT_P (XEXP (op, 1))
907 && (temp = simplify_unary_operation (NOT, mode,
908 XEXP (op, 1), mode)) != 0)
909 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
911 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
912 if (GET_CODE (op) == PLUS
913 && CONST_INT_P (XEXP (op, 1))
914 && mode_signbit_p (mode, XEXP (op, 1))
915 && (temp = simplify_unary_operation (NOT, mode,
916 XEXP (op, 1), mode)) != 0)
917 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
920 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
921 operands other than 1, but that is not valid. We could do a
922 similar simplification for (not (lshiftrt C X)) where C is
923 just the sign bit, but this doesn't seem common enough to
924 bother with. */
925 if (GET_CODE (op) == ASHIFT
926 && XEXP (op, 0) == const1_rtx)
928 temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
929 return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
932 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
933 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
934 so we can perform the above simplification. */
935 if (STORE_FLAG_VALUE == -1
936 && GET_CODE (op) == ASHIFTRT
937 && CONST_INT_P (XEXP (op, 1))
938 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
939 return simplify_gen_relational (GE, mode, VOIDmode,
940 XEXP (op, 0), const0_rtx);
943 if (GET_CODE (op) == SUBREG
944 && subreg_lowpart_p (op)
945 && (GET_MODE_SIZE (GET_MODE (op))
946 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
947 && GET_CODE (SUBREG_REG (op)) == ASHIFT
948 && XEXP (SUBREG_REG (op), 0) == const1_rtx)
950 machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
951 rtx x;
953 x = gen_rtx_ROTATE (inner_mode,
954 simplify_gen_unary (NOT, inner_mode, const1_rtx,
955 inner_mode),
956 XEXP (SUBREG_REG (op), 1));
957 temp = rtl_hooks.gen_lowpart_no_emit (mode, x);
958 if (temp)
959 return temp;
962 /* Apply De Morgan's laws to reduce number of patterns for machines
963 with negating logical insns (and-not, nand, etc.). If result has
964 only one NOT, put it first, since that is how the patterns are
965 coded. */
966 if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
968 rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
969 machine_mode op_mode;
971 op_mode = GET_MODE (in1);
972 in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);
974 op_mode = GET_MODE (in2);
975 if (op_mode == VOIDmode)
976 op_mode = mode;
977 in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);
979 if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
980 std::swap (in1, in2);
982 return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
983 mode, in1, in2);
986 /* (not (bswap x)) -> (bswap (not x)). */
987 if (GET_CODE (op) == BSWAP)
989 rtx x = simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
990 return simplify_gen_unary (BSWAP, mode, x, mode);
992 break;
994 case NEG:
995 /* (neg (neg X)) == X. */
996 if (GET_CODE (op) == NEG)
997 return XEXP (op, 0);
999 /* (neg (x ? (neg y) : y)) == !x ? (neg y) : y.
1000 If comparison is not reversible use
1001 x ? y : (neg y). */
1002 if (GET_CODE (op) == IF_THEN_ELSE)
1004 rtx cond = XEXP (op, 0);
1005 rtx true_rtx = XEXP (op, 1);
1006 rtx false_rtx = XEXP (op, 2);
1008 if ((GET_CODE (true_rtx) == NEG
1009 && rtx_equal_p (XEXP (true_rtx, 0), false_rtx))
1010 || (GET_CODE (false_rtx) == NEG
1011 && rtx_equal_p (XEXP (false_rtx, 0), true_rtx)))
1013 if (reversed_comparison_code (cond, NULL_RTX) != UNKNOWN)
1014 temp = reversed_comparison (cond, mode);
1015 else
1017 temp = cond;
1018 std::swap (true_rtx, false_rtx);
1020 return simplify_gen_ternary (IF_THEN_ELSE, mode,
1021 mode, temp, true_rtx, false_rtx);
1025 /* (neg (plus X 1)) can become (not X). */
1026 if (GET_CODE (op) == PLUS
1027 && XEXP (op, 1) == const1_rtx)
1028 return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
1030 /* Similarly, (neg (not X)) is (plus X 1). */
1031 if (GET_CODE (op) == NOT)
1032 return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
1033 CONST1_RTX (mode));
1035 /* (neg (minus X Y)) can become (minus Y X). This transformation
1036 isn't safe for modes with signed zeros, since if X and Y are
1037 both +0, (minus Y X) is the same as (minus X Y). If the
1038 rounding mode is towards +infinity (or -infinity) then the two
1039 expressions will be rounded differently. */
1040 if (GET_CODE (op) == MINUS
1041 && !HONOR_SIGNED_ZEROS (mode)
1042 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1043 return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
1045 if (GET_CODE (op) == PLUS
1046 && !HONOR_SIGNED_ZEROS (mode)
1047 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1049 /* (neg (plus A C)) is simplified to (minus -C A). */
1050 if (CONST_SCALAR_INT_P (XEXP (op, 1))
1051 || CONST_DOUBLE_AS_FLOAT_P (XEXP (op, 1)))
1053 temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
1054 if (temp)
1055 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
1058 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
1059 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
1060 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
1063 /* (neg (mult A B)) becomes (mult A (neg B)).
1064 This works even for floating-point values. */
1065 if (GET_CODE (op) == MULT
1066 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1068 temp = simplify_gen_unary (NEG, mode, XEXP (op, 1), mode);
1069 return simplify_gen_binary (MULT, mode, XEXP (op, 0), temp);
1072 /* NEG commutes with ASHIFT since it is multiplication. Only do
1073 this if we can then eliminate the NEG (e.g., if the operand
1074 is a constant). */
1075 if (GET_CODE (op) == ASHIFT)
1077 temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
1078 if (temp)
1079 return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
1082 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
1083 C is equal to the width of MODE minus 1. */
1084 if (GET_CODE (op) == ASHIFTRT
1085 && CONST_INT_P (XEXP (op, 1))
1086 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
1087 return simplify_gen_binary (LSHIFTRT, mode,
1088 XEXP (op, 0), XEXP (op, 1));
1090 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
1091 C is equal to the width of MODE minus 1. */
1092 if (GET_CODE (op) == LSHIFTRT
1093 && CONST_INT_P (XEXP (op, 1))
1094 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
1095 return simplify_gen_binary (ASHIFTRT, mode,
1096 XEXP (op, 0), XEXP (op, 1));
1098 /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
1099 if (GET_CODE (op) == XOR
1100 && XEXP (op, 1) == const1_rtx
1101 && nonzero_bits (XEXP (op, 0), mode) == 1)
1102 return plus_constant (mode, XEXP (op, 0), -1);
1104 /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1. */
1105 /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1. */
1106 if (GET_CODE (op) == LT
1107 && XEXP (op, 1) == const0_rtx
1108 && SCALAR_INT_MODE_P (GET_MODE (XEXP (op, 0))))
1110 machine_mode inner = GET_MODE (XEXP (op, 0));
1111 int isize = GET_MODE_PRECISION (inner);
1112 if (STORE_FLAG_VALUE == 1)
1114 temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
1115 GEN_INT (isize - 1));
1116 if (mode == inner)
1117 return temp;
1118 if (GET_MODE_PRECISION (mode) > isize)
1119 return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
1120 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
1122 else if (STORE_FLAG_VALUE == -1)
1124 temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
1125 GEN_INT (isize - 1));
1126 if (mode == inner)
1127 return temp;
1128 if (GET_MODE_PRECISION (mode) > isize)
1129 return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
1130 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
1133 break;
1135 case TRUNCATE:
1136 /* Don't optimize (lshiftrt (mult ...)) as it would interfere
1137 with the umulXi3_highpart patterns. */
1138 if (GET_CODE (op) == LSHIFTRT
1139 && GET_CODE (XEXP (op, 0)) == MULT)
1140 break;
1142 if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
1144 if (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op)))
1146 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1147 if (temp)
1148 return temp;
1150 /* We can't handle truncation to a partial integer mode here
1151 because we don't know the real bitsize of the partial
1152 integer mode. */
1153 break;
1156 if (GET_MODE (op) != VOIDmode)
1158 temp = simplify_truncation (mode, op, GET_MODE (op));
1159 if (temp)
1160 return temp;
1163 /* If we know that the value is already truncated, we can
1164 replace the TRUNCATE with a SUBREG. */
1165 if (GET_MODE_NUNITS (mode) == 1
1166 && (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op))
1167 || truncated_to_mode (mode, op)))
1169 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1170 if (temp)
1171 return temp;
1174 /* A truncate of a comparison can be replaced with a subreg if
1175 STORE_FLAG_VALUE permits. This is like the previous test,
1176 but it works even if the comparison is done in a mode larger
1177 than HOST_BITS_PER_WIDE_INT. */
1178 if (HWI_COMPUTABLE_MODE_P (mode)
1179 && COMPARISON_P (op)
1180 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
1182 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1183 if (temp)
1184 return temp;
1187 /* A truncate of a memory is just loading the low part of the memory
1188 if we are not changing the meaning of the address. */
1189 if (GET_CODE (op) == MEM
1190 && !VECTOR_MODE_P (mode)
1191 && !MEM_VOLATILE_P (op)
1192 && !mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op)))
1194 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1195 if (temp)
1196 return temp;
1199 break;
1201 case FLOAT_TRUNCATE:
1202 if (DECIMAL_FLOAT_MODE_P (mode))
1203 break;
1205 /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */
1206 if (GET_CODE (op) == FLOAT_EXTEND
1207 && GET_MODE (XEXP (op, 0)) == mode)
1208 return XEXP (op, 0);
1210 /* (float_truncate:SF (float_truncate:DF foo:XF))
1211 = (float_truncate:SF foo:XF).
1212 This may eliminate double rounding, so it is unsafe.
1214 (float_truncate:SF (float_extend:XF foo:DF))
1215 = (float_truncate:SF foo:DF).
1217 (float_truncate:DF (float_extend:XF foo:SF))
1218 = (float_extend:DF foo:SF). */
1219 if ((GET_CODE (op) == FLOAT_TRUNCATE
1220 && flag_unsafe_math_optimizations)
1221 || GET_CODE (op) == FLOAT_EXTEND)
1222 return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op,
1223 0)))
1224 > GET_MODE_SIZE (mode)
1225 ? FLOAT_TRUNCATE : FLOAT_EXTEND,
1226 mode,
1227 XEXP (op, 0), mode);
1229 /* (float_truncate (float x)) is (float x) */
1230 if ((GET_CODE (op) == FLOAT || GET_CODE (op) == UNSIGNED_FLOAT)
1231 && (flag_unsafe_math_optimizations
1232 || exact_int_to_float_conversion_p (op)))
1233 return simplify_gen_unary (GET_CODE (op), mode,
1234 XEXP (op, 0),
1235 GET_MODE (XEXP (op, 0)));
1237 /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
1238 (OP:SF foo:SF) if OP is NEG or ABS. */
1239 if ((GET_CODE (op) == ABS
1240 || GET_CODE (op) == NEG)
1241 && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
1242 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
1243 return simplify_gen_unary (GET_CODE (op), mode,
1244 XEXP (XEXP (op, 0), 0), mode);
1246 /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
1247 is (float_truncate:SF x). */
1248 if (GET_CODE (op) == SUBREG
1249 && subreg_lowpart_p (op)
1250 && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
1251 return SUBREG_REG (op);
1252 break;
1254 case FLOAT_EXTEND:
1255 if (DECIMAL_FLOAT_MODE_P (mode))
1256 break;
      /* (float_extend (float_extend x)) is (float_extend x).

         (float_extend (float x)) is (float x) assuming that double
         rounding can't happen.  */
1263 if (GET_CODE (op) == FLOAT_EXTEND
1264 || ((GET_CODE (op) == FLOAT || GET_CODE (op) == UNSIGNED_FLOAT)
1265 && exact_int_to_float_conversion_p (op)))
1266 return simplify_gen_unary (GET_CODE (op), mode,
1267 XEXP (op, 0),
1268 GET_MODE (XEXP (op, 0)));
1270 break;
1272 case ABS:
1273 /* (abs (neg <foo>)) -> (abs <foo>) */
1274 if (GET_CODE (op) == NEG)
1275 return simplify_gen_unary (ABS, mode, XEXP (op, 0),
1276 GET_MODE (XEXP (op, 0)));
1278 /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
1279 do nothing. */
1280 if (GET_MODE (op) == VOIDmode)
1281 break;
1283 /* If operand is something known to be positive, ignore the ABS. */
1284 if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
1285 || val_signbit_known_clear_p (GET_MODE (op),
1286 nonzero_bits (op, GET_MODE (op))))
1287 return op;
1289 /* If operand is known to be only -1 or 0, convert ABS to NEG. */
1290 if (num_sign_bit_copies (op, mode) == GET_MODE_PRECISION (mode))
1291 return gen_rtx_NEG (mode, op);
1293 break;
1295 case FFS:
1296 /* (ffs (*_extend <X>)) = (ffs <X>) */
1297 if (GET_CODE (op) == SIGN_EXTEND
1298 || GET_CODE (op) == ZERO_EXTEND)
1299 return simplify_gen_unary (FFS, mode, XEXP (op, 0),
1300 GET_MODE (XEXP (op, 0)));
1301 break;
1303 case POPCOUNT:
1304 switch (GET_CODE (op))
1306 case BSWAP:
1307 case ZERO_EXTEND:
1308 /* (popcount (zero_extend <X>)) = (popcount <X>) */
1309 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1310 GET_MODE (XEXP (op, 0)));
1312 case ROTATE:
1313 case ROTATERT:
1314 /* Rotations don't affect popcount. */
1315 if (!side_effects_p (XEXP (op, 1)))
1316 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1317 GET_MODE (XEXP (op, 0)));
1318 break;
1320 default:
1321 break;
1323 break;
1325 case PARITY:
1326 switch (GET_CODE (op))
1328 case NOT:
1329 case BSWAP:
1330 case ZERO_EXTEND:
1331 case SIGN_EXTEND:
1332 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1333 GET_MODE (XEXP (op, 0)));
1335 case ROTATE:
1336 case ROTATERT:
1337 /* Rotations don't affect parity. */
1338 if (!side_effects_p (XEXP (op, 1)))
1339 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1340 GET_MODE (XEXP (op, 0)));
1341 break;
1343 default:
1344 break;
1346 break;
1348 case BSWAP:
1349 /* (bswap (bswap x)) -> x. */
1350 if (GET_CODE (op) == BSWAP)
1351 return XEXP (op, 0);
1352 break;
1354 case FLOAT:
1355 /* (float (sign_extend <X>)) = (float <X>). */
1356 if (GET_CODE (op) == SIGN_EXTEND)
1357 return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
1358 GET_MODE (XEXP (op, 0)));
1359 break;
1361 case SIGN_EXTEND:
1362 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
1363 becomes just the MINUS if its mode is MODE. This allows
1364 folding switch statements on machines using casesi (such as
1365 the VAX). */
1366 if (GET_CODE (op) == TRUNCATE
1367 && GET_MODE (XEXP (op, 0)) == mode
1368 && GET_CODE (XEXP (op, 0)) == MINUS
1369 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
1370 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
1371 return XEXP (op, 0);
1373 /* Extending a widening multiplication should be canonicalized to
1374 a wider widening multiplication. */
1375 if (GET_CODE (op) == MULT)
1377 rtx lhs = XEXP (op, 0);
1378 rtx rhs = XEXP (op, 1);
1379 enum rtx_code lcode = GET_CODE (lhs);
1380 enum rtx_code rcode = GET_CODE (rhs);
1382 /* Widening multiplies usually extend both operands, but sometimes
1383 they use a shift to extract a portion of a register. */
1384 if ((lcode == SIGN_EXTEND
1385 || (lcode == ASHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1386 && (rcode == SIGN_EXTEND
1387 || (rcode == ASHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1389 machine_mode lmode = GET_MODE (lhs);
1390 machine_mode rmode = GET_MODE (rhs);
1391 int bits;
1393 if (lcode == ASHIFTRT)
1394 /* Number of bits not shifted off the end. */
1395 bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
1396 else /* lcode == SIGN_EXTEND */
1397 /* Size of inner mode. */
1398 bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
1400 if (rcode == ASHIFTRT)
1401 bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
1402 else /* rcode == SIGN_EXTEND */
1403 bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
              /* We can only widen multiplies if the result is mathematically
                 equivalent, i.e. if overflow was impossible.  */
1407 if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
1408 return simplify_gen_binary
1409 (MULT, mode,
1410 simplify_gen_unary (SIGN_EXTEND, mode, lhs, lmode),
1411 simplify_gen_unary (SIGN_EXTEND, mode, rhs, rmode));
1415 /* Check for a sign extension of a subreg of a promoted
1416 variable, where the promotion is sign-extended, and the
1417 target mode is the same as the variable's promotion. */
1418 if (GET_CODE (op) == SUBREG
1419 && SUBREG_PROMOTED_VAR_P (op)
1420 && SUBREG_PROMOTED_SIGNED_P (op)
1421 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
1423 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1424 if (temp)
1425 return temp;
1428 /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
1429 (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1430 if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
1432 gcc_assert (GET_MODE_PRECISION (mode)
1433 > GET_MODE_PRECISION (GET_MODE (op)));
1434 return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
1435 GET_MODE (XEXP (op, 0)));
1438 /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
1439 is (sign_extend:M (subreg:O <X>)) if there is mode with
1440 GET_MODE_BITSIZE (N) - I bits.
1441 (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1442 is similarly (zero_extend:M (subreg:O <X>)). */
1443 if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT)
1444 && GET_CODE (XEXP (op, 0)) == ASHIFT
1445 && CONST_INT_P (XEXP (op, 1))
1446 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1447 && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
1449 machine_mode tmode
1450 = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
1451 - INTVAL (XEXP (op, 1)), MODE_INT, 1);
1452 gcc_assert (GET_MODE_BITSIZE (mode)
1453 > GET_MODE_BITSIZE (GET_MODE (op)));
1454 if (tmode != BLKmode)
1456 rtx inner =
1457 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1458 if (inner)
1459 return simplify_gen_unary (GET_CODE (op) == ASHIFTRT
1460 ? SIGN_EXTEND : ZERO_EXTEND,
1461 mode, inner, tmode);
1465 #if defined(POINTERS_EXTEND_UNSIGNED)
1466 /* As we do not know which address space the pointer is referring to,
1467 we can do this only if the target does not support different pointer
1468 or address modes depending on the address space. */
1469 if (target_default_pointer_address_modes_p ()
1470 && ! POINTERS_EXTEND_UNSIGNED
1471 && mode == Pmode && GET_MODE (op) == ptr_mode
1472 && (CONSTANT_P (op)
1473 || (GET_CODE (op) == SUBREG
1474 && REG_P (SUBREG_REG (op))
1475 && REG_POINTER (SUBREG_REG (op))
1476 && GET_MODE (SUBREG_REG (op)) == Pmode))
1477 && !targetm.have_ptr_extend ())
1478 return convert_memory_address (Pmode, op);
1479 #endif
1480 break;
1482 case ZERO_EXTEND:
1483 /* Check for a zero extension of a subreg of a promoted
1484 variable, where the promotion is zero-extended, and the
1485 target mode is the same as the variable's promotion. */
1486 if (GET_CODE (op) == SUBREG
1487 && SUBREG_PROMOTED_VAR_P (op)
1488 && SUBREG_PROMOTED_UNSIGNED_P (op)
1489 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
1491 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1492 if (temp)
1493 return temp;
1496 /* Extending a widening multiplication should be canonicalized to
1497 a wider widening multiplication. */
1498 if (GET_CODE (op) == MULT)
1500 rtx lhs = XEXP (op, 0);
1501 rtx rhs = XEXP (op, 1);
1502 enum rtx_code lcode = GET_CODE (lhs);
1503 enum rtx_code rcode = GET_CODE (rhs);
1505 /* Widening multiplies usually extend both operands, but sometimes
1506 they use a shift to extract a portion of a register. */
1507 if ((lcode == ZERO_EXTEND
1508 || (lcode == LSHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1509 && (rcode == ZERO_EXTEND
1510 || (rcode == LSHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1512 machine_mode lmode = GET_MODE (lhs);
1513 machine_mode rmode = GET_MODE (rhs);
1514 int bits;
1516 if (lcode == LSHIFTRT)
1517 /* Number of bits not shifted off the end. */
1518 bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
1519 else /* lcode == ZERO_EXTEND */
1520 /* Size of inner mode. */
1521 bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
1523 if (rcode == LSHIFTRT)
1524 bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
1525 else /* rcode == ZERO_EXTEND */
1526 bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
              /* We can only widen multiplies if the result is mathematically
                 equivalent, i.e. if overflow was impossible.  */
1530 if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
1531 return simplify_gen_binary
1532 (MULT, mode,
1533 simplify_gen_unary (ZERO_EXTEND, mode, lhs, lmode),
1534 simplify_gen_unary (ZERO_EXTEND, mode, rhs, rmode));
1538 /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1539 if (GET_CODE (op) == ZERO_EXTEND)
1540 return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0),
1541 GET_MODE (XEXP (op, 0)));
1543 /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1544 is (zero_extend:M (subreg:O <X>)) if there is mode with
1545 GET_MODE_PRECISION (N) - I bits. */
1546 if (GET_CODE (op) == LSHIFTRT
1547 && GET_CODE (XEXP (op, 0)) == ASHIFT
1548 && CONST_INT_P (XEXP (op, 1))
1549 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1550 && GET_MODE_PRECISION (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
1552 machine_mode tmode
1553 = mode_for_size (GET_MODE_PRECISION (GET_MODE (op))
1554 - INTVAL (XEXP (op, 1)), MODE_INT, 1);
1555 if (tmode != BLKmode)
1557 rtx inner =
1558 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1559 if (inner)
1560 return simplify_gen_unary (ZERO_EXTEND, mode, inner, tmode);
1564 /* (zero_extend:M (subreg:N <X:O>)) is <X:O> (for M == O) or
1565 (zero_extend:M <X:O>), if X doesn't have any non-zero bits outside
1566 of mode N. E.g.
1567 (zero_extend:SI (subreg:QI (and:SI (reg:SI) (const_int 63)) 0)) is
1568 (and:SI (reg:SI) (const_int 63)). */
1569 if (GET_CODE (op) == SUBREG
1570 && GET_MODE_PRECISION (GET_MODE (op))
1571 < GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
1572 && GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
1573 <= HOST_BITS_PER_WIDE_INT
1574 && GET_MODE_PRECISION (mode)
1575 >= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
1576 && subreg_lowpart_p (op)
1577 && (nonzero_bits (SUBREG_REG (op), GET_MODE (SUBREG_REG (op)))
1578 & ~GET_MODE_MASK (GET_MODE (op))) == 0)
1580 if (GET_MODE_PRECISION (mode)
1581 == GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op))))
1582 return SUBREG_REG (op);
1583 return simplify_gen_unary (ZERO_EXTEND, mode, SUBREG_REG (op),
1584 GET_MODE (SUBREG_REG (op)));
1587 #if defined(POINTERS_EXTEND_UNSIGNED)
1588 /* As we do not know which address space the pointer is referring to,
1589 we can do this only if the target does not support different pointer
1590 or address modes depending on the address space. */
1591 if (target_default_pointer_address_modes_p ()
1592 && POINTERS_EXTEND_UNSIGNED > 0
1593 && mode == Pmode && GET_MODE (op) == ptr_mode
1594 && (CONSTANT_P (op)
1595 || (GET_CODE (op) == SUBREG
1596 && REG_P (SUBREG_REG (op))
1597 && REG_POINTER (SUBREG_REG (op))
1598 && GET_MODE (SUBREG_REG (op)) == Pmode))
1599 && !targetm.have_ptr_extend ())
1600 return convert_memory_address (Pmode, op);
1601 #endif
1602 break;
1604 default:
1605 break;
1608 return 0;
1611 /* Try to compute the value of a unary operation CODE whose output mode is to
1612 be MODE with input operand OP whose mode was originally OP_MODE.
1613 Return zero if the value cannot be computed. */
1615 simplify_const_unary_operation (enum rtx_code code, machine_mode mode,
1616 rtx op, machine_mode op_mode)
1618 unsigned int width = GET_MODE_PRECISION (mode);
1620 if (code == VEC_DUPLICATE)
1622 gcc_assert (VECTOR_MODE_P (mode));
1623 if (GET_MODE (op) != VOIDmode)
1625 if (!VECTOR_MODE_P (GET_MODE (op)))
1626 gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
1627 else
1628 gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
1629 (GET_MODE (op)));
1631 if (CONST_SCALAR_INT_P (op) || CONST_DOUBLE_AS_FLOAT_P (op)
1632 || GET_CODE (op) == CONST_VECTOR)
1634 int elt_size = GET_MODE_UNIT_SIZE (mode);
1635 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1636 rtvec v = rtvec_alloc (n_elts);
1637 unsigned int i;
1639 if (GET_CODE (op) != CONST_VECTOR)
1640 for (i = 0; i < n_elts; i++)
1641 RTVEC_ELT (v, i) = op;
1642 else
1644 machine_mode inmode = GET_MODE (op);
1645 int in_elt_size = GET_MODE_UNIT_SIZE (inmode);
1646 unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
1648 gcc_assert (in_n_elts < n_elts);
1649 gcc_assert ((n_elts % in_n_elts) == 0);
1650 for (i = 0; i < n_elts; i++)
1651 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
1653 return gen_rtx_CONST_VECTOR (mode, v);
1657 if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
1659 int elt_size = GET_MODE_UNIT_SIZE (mode);
1660 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1661 machine_mode opmode = GET_MODE (op);
1662 int op_elt_size = GET_MODE_UNIT_SIZE (opmode);
1663 unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
1664 rtvec v = rtvec_alloc (n_elts);
1665 unsigned int i;
1667 gcc_assert (op_n_elts == n_elts);
1668 for (i = 0; i < n_elts; i++)
1670 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
1671 CONST_VECTOR_ELT (op, i),
1672 GET_MODE_INNER (opmode));
1673 if (!x)
1674 return 0;
1675 RTVEC_ELT (v, i) = x;
1677 return gen_rtx_CONST_VECTOR (mode, v);
1680 /* The order of these tests is critical so that, for example, we don't
1681 check the wrong mode (input vs. output) for a conversion operation,
1682 such as FIX. At some point, this should be simplified. */
1684 if (code == FLOAT && CONST_SCALAR_INT_P (op))
1686 REAL_VALUE_TYPE d;
1688 if (op_mode == VOIDmode)
        /* A CONST_INT has VOIDmode as its mode.  We assume that all
           the bits of the constant are significant, though this is a
           dangerous assumption, as CONST_INTs are often created and
           used with garbage in the bits outside of the precision of
           the implied mode of the const_int.  */
1695 op_mode = MAX_MODE_INT;
1698 real_from_integer (&d, mode, std::make_pair (op, op_mode), SIGNED);
1699 d = real_value_truncate (mode, d);
1700 return const_double_from_real_value (d, mode);
1702 else if (code == UNSIGNED_FLOAT && CONST_SCALAR_INT_P (op))
1704 REAL_VALUE_TYPE d;
1706 if (op_mode == VOIDmode)
        /* A CONST_INT has VOIDmode as its mode.  We assume that all
           the bits of the constant are significant, though this is a
           dangerous assumption, as CONST_INTs are often created and
           used with garbage in the bits outside of the precision of
           the implied mode of the const_int.  */
1713 op_mode = MAX_MODE_INT;
1716 real_from_integer (&d, mode, std::make_pair (op, op_mode), UNSIGNED);
1717 d = real_value_truncate (mode, d);
1718 return const_double_from_real_value (d, mode);
1721 if (CONST_SCALAR_INT_P (op) && width > 0)
1723 wide_int result;
1724 machine_mode imode = op_mode == VOIDmode ? mode : op_mode;
1725 rtx_mode_t op0 = std::make_pair (op, imode);
1726 int int_value;
1728 #if TARGET_SUPPORTS_WIDE_INT == 0
      /* This assert keeps the simplification from producing a result
         that cannot be represented in a CONST_DOUBLE.  A lot of the
         upstream callers expect that this function never fails to
         simplify something, so if this check were folded into the test
         above, the code would simply die later anyway.  If this assert
         triggers, the port just needs to be converted to support
         wide int.  */
1735 gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT);
1736 #endif
1738 switch (code)
1740 case NOT:
1741 result = wi::bit_not (op0);
1742 break;
1744 case NEG:
1745 result = wi::neg (op0);
1746 break;
1748 case ABS:
1749 result = wi::abs (op0);
1750 break;
1752 case FFS:
1753 result = wi::shwi (wi::ffs (op0), mode);
1754 break;
1756 case CLZ:
1757 if (wi::ne_p (op0, 0))
1758 int_value = wi::clz (op0);
1759 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, int_value))
1760 int_value = GET_MODE_PRECISION (mode);
1761 result = wi::shwi (int_value, mode);
1762 break;
1764 case CLRSB:
1765 result = wi::shwi (wi::clrsb (op0), mode);
1766 break;
1768 case CTZ:
1769 if (wi::ne_p (op0, 0))
1770 int_value = wi::ctz (op0);
1771 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, int_value))
1772 int_value = GET_MODE_PRECISION (mode);
1773 result = wi::shwi (int_value, mode);
1774 break;
1776 case POPCOUNT:
1777 result = wi::shwi (wi::popcount (op0), mode);
1778 break;
1780 case PARITY:
1781 result = wi::shwi (wi::parity (op0), mode);
1782 break;
1784 case BSWAP:
1785 result = wide_int (op0).bswap ();
1786 break;
1788 case TRUNCATE:
1789 case ZERO_EXTEND:
1790 result = wide_int::from (op0, width, UNSIGNED);
1791 break;
1793 case SIGN_EXTEND:
1794 result = wide_int::from (op0, width, SIGNED);
1795 break;
1797 case SQRT:
1798 default:
1799 return 0;
1802 return immed_wide_int_const (result, mode);
1805 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1806 && SCALAR_FLOAT_MODE_P (mode)
1807 && SCALAR_FLOAT_MODE_P (GET_MODE (op)))
1809 REAL_VALUE_TYPE d = *CONST_DOUBLE_REAL_VALUE (op);
1810 switch (code)
1812 case SQRT:
1813 return 0;
1814 case ABS:
1815 d = real_value_abs (&d);
1816 break;
1817 case NEG:
1818 d = real_value_negate (&d);
1819 break;
1820 case FLOAT_TRUNCATE:
1821 d = real_value_truncate (mode, d);
1822 break;
1823 case FLOAT_EXTEND:
1824 /* All this does is change the mode, unless changing
1825 mode class. */
1826 if (GET_MODE_CLASS (mode) != GET_MODE_CLASS (GET_MODE (op)))
1827 real_convert (&d, mode, &d);
1828 break;
1829 case FIX:
1830 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
1831 break;
1832 case NOT:
1834 long tmp[4];
1835 int i;
1837 real_to_target (tmp, &d, GET_MODE (op));
1838 for (i = 0; i < 4; i++)
1839 tmp[i] = ~tmp[i];
1840 real_from_target (&d, tmp, mode);
1841 break;
1843 default:
1844 gcc_unreachable ();
1846 return const_double_from_real_value (d, mode);
1848 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1849 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1850 && GET_MODE_CLASS (mode) == MODE_INT
1851 && width > 0)
1853 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1854 operators are intentionally left unspecified (to ease implementation
1855 by target backends), for consistency, this routine implements the
1856 same semantics for constant folding as used by the middle-end. */
1858 /* This was formerly used only for non-IEEE float.
1859 eggert@twinsun.com says it is safe for IEEE also. */
1860 REAL_VALUE_TYPE t;
1861 const REAL_VALUE_TYPE *x = CONST_DOUBLE_REAL_VALUE (op);
1862 wide_int wmax, wmin;
1863 /* This is part of the ABI of real_to_integer, but we check
1864 things before making this call. */
1865 bool fail;
1867 switch (code)
1869 case FIX:
1870 if (REAL_VALUE_ISNAN (*x))
1871 return const0_rtx;
1873 /* Test against the signed upper bound. */
1874 wmax = wi::max_value (width, SIGNED);
1875 real_from_integer (&t, VOIDmode, wmax, SIGNED);
1876 if (real_less (&t, x))
1877 return immed_wide_int_const (wmax, mode);
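/* For example, on a target where HImode is 16 bits wide, folding
   (fix:HI (const_double:DF 1.0e6)) saturates to 32767 here.  */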
1879 /* Test against the signed lower bound. */
1880 wmin = wi::min_value (width, SIGNED);
1881 real_from_integer (&t, VOIDmode, wmin, SIGNED);
1882 if (real_less (x, &t))
1883 return immed_wide_int_const (wmin, mode);
1885 return immed_wide_int_const (real_to_integer (x, &fail, width),
1886 mode);
1888 case UNSIGNED_FIX:
1889 if (REAL_VALUE_ISNAN (*x) || REAL_VALUE_NEGATIVE (*x))
1890 return const0_rtx;
1892 /* Test against the unsigned upper bound. */
1893 wmax = wi::max_value (width, UNSIGNED);
1894 real_from_integer (&t, VOIDmode, wmax, UNSIGNED);
1895 if (real_less (&t, x))
1896 return immed_wide_int_const (wmax, mode);
1898 return immed_wide_int_const (real_to_integer (x, &fail, width),
1899 mode);
1901 default:
1902 gcc_unreachable ();
1906 return NULL_RTX;
1909 /* Subroutine of simplify_binary_operation to simplify a binary operation
1910 CODE that can commute with byte swapping, with result mode MODE and
1911 operating on OP0 and OP1. CODE is currently one of AND, IOR or XOR.
1912 Return zero if no simplification or canonicalization is possible. */
1914 static rtx
1915 simplify_byte_swapping_operation (enum rtx_code code, machine_mode mode,
1916 rtx op0, rtx op1)
1918 rtx tem;
1920 /* (op (bswap x) C1) -> (bswap (op x C2)) with C2 = (bswap C1). */
1921 if (GET_CODE (op0) == BSWAP && CONST_SCALAR_INT_P (op1))
1923 tem = simplify_gen_binary (code, mode, XEXP (op0, 0),
1924 simplify_gen_unary (BSWAP, mode, op1, mode));
1925 return simplify_gen_unary (BSWAP, mode, tem, mode);
1928 /* (op (bswap x) (bswap y)) -> (bswap (op x y)). */
1929 if (GET_CODE (op0) == BSWAP && GET_CODE (op1) == BSWAP)
1931 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), XEXP (op1, 0));
1932 return simplify_gen_unary (BSWAP, mode, tem, mode);
1935 return NULL_RTX;
1938 /* Subroutine of simplify_binary_operation to simplify a commutative,
1939 associative binary operation CODE with result mode MODE, operating
1940 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
1941 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
1942 canonicalization is possible. */
1944 static rtx
1945 simplify_associative_operation (enum rtx_code code, machine_mode mode,
1946 rtx op0, rtx op1)
1948 rtx tem;
1950 /* Linearize the operator to the left. */
1951 if (GET_CODE (op1) == code)
1953 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)". */
1954 if (GET_CODE (op0) == code)
1956 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
1957 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
1960 /* "a op (b op c)" becomes "(b op c) op a". */
1961 if (! swap_commutative_operands_p (op1, op0))
1962 return simplify_gen_binary (code, mode, op1, op0);
1964 std::swap (op0, op1);
1967 if (GET_CODE (op0) == code)
1969 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
1970 if (swap_commutative_operands_p (XEXP (op0, 1), op1))
1972 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
1973 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1976 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
1977 tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
1978 if (tem != 0)
1979 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
1981 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
1982 tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
1983 if (tem != 0)
1984 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1987 return 0;
1991 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
1992 and OP1. Return 0 if no simplification is possible.
1994 Don't use this for relational operations such as EQ or LT.
1995 Use simplify_relational_operation instead. */
1997 simplify_binary_operation (enum rtx_code code, machine_mode mode,
1998 rtx op0, rtx op1)
2000 rtx trueop0, trueop1;
2001 rtx tem;
2003 /* Relational operations don't work here. We must know the mode
2004 of the operands in order to do the comparison correctly.
2005 Assuming a full word can give incorrect results.
2006 Consider comparing 128 with -128 in QImode. */
2007 gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
2008 gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
2010 /* Make sure the constant is second. */
2011 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
2012 && swap_commutative_operands_p (op0, op1))
2013 std::swap (op0, op1);
2015 trueop0 = avoid_constant_pool_reference (op0);
2016 trueop1 = avoid_constant_pool_reference (op1);
2018 tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
2019 if (tem)
2020 return tem;
2021 tem = simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
2023 if (tem)
2024 return tem;
2026 /* If the above steps did not result in a simplification and op0 or op1
2027 were constant pool references, use the referenced constants directly. */
2028 if (trueop0 != op0 || trueop1 != op1)
2029 return simplify_gen_binary (code, mode, trueop0, trueop1);
2031 return NULL_RTX;
2034 /* Subroutine of simplify_binary_operation. Simplify a binary operation
2035 CODE with result mode MODE, operating on OP0 and OP1. If OP0 and/or
2036 OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
2037 actual constants. */
2039 static rtx
2040 simplify_binary_operation_1 (enum rtx_code code, machine_mode mode,
2041 rtx op0, rtx op1, rtx trueop0, rtx trueop1)
2043 rtx tem, reversed, opleft, opright;
2044 HOST_WIDE_INT val;
2045 unsigned int width = GET_MODE_PRECISION (mode);
2047 /* Even if we can't compute a constant result,
2048 there are some cases worth simplifying. */
2050 switch (code)
2052 case PLUS:
2053 /* Maybe simplify x + 0 to x. The two expressions are equivalent
2054 when x is NaN, infinite, or finite and nonzero. They aren't
2055 when x is -0 and the rounding mode is not towards -infinity,
2056 since (-0) + 0 is then 0. */
2057 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
2058 return op0;
2060 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
2061 transformations are safe even for IEEE. */
2062 if (GET_CODE (op0) == NEG)
2063 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
2064 else if (GET_CODE (op1) == NEG)
2065 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
2067 /* (~a) + 1 -> -a */
2068 if (INTEGRAL_MODE_P (mode)
2069 && GET_CODE (op0) == NOT
2070 && trueop1 == const1_rtx)
2071 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
2073 /* Handle both-operands-constant cases. We can only add
2074 CONST_INTs to constants since the sum of relocatable symbols
2075 can't be handled by most assemblers. Don't add CONST_INT
2076 to CONST_INT since overflow won't be computed properly if wider
2077 than HOST_BITS_PER_WIDE_INT. */
2079 if ((GET_CODE (op0) == CONST
2080 || GET_CODE (op0) == SYMBOL_REF
2081 || GET_CODE (op0) == LABEL_REF)
2082 && CONST_INT_P (op1))
2083 return plus_constant (mode, op0, INTVAL (op1));
2084 else if ((GET_CODE (op1) == CONST
2085 || GET_CODE (op1) == SYMBOL_REF
2086 || GET_CODE (op1) == LABEL_REF)
2087 && CONST_INT_P (op0))
2088 return plus_constant (mode, op1, INTVAL (op0));
2090 /* See if this is something like X * C - X or vice versa or
2091 if the multiplication is written as a shift. If so, we can
2092 distribute and make a new multiply, shift, or maybe just
2093 have X (if C is 2 in the example above). But don't make
2094 something more expensive than we had before. */
2096 if (SCALAR_INT_MODE_P (mode))
2098 rtx lhs = op0, rhs = op1;
2100 wide_int coeff0 = wi::one (GET_MODE_PRECISION (mode));
2101 wide_int coeff1 = wi::one (GET_MODE_PRECISION (mode));
2103 if (GET_CODE (lhs) == NEG)
2105 coeff0 = wi::minus_one (GET_MODE_PRECISION (mode));
2106 lhs = XEXP (lhs, 0);
2108 else if (GET_CODE (lhs) == MULT
2109 && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
2111 coeff0 = std::make_pair (XEXP (lhs, 1), mode);
2112 lhs = XEXP (lhs, 0);
2114 else if (GET_CODE (lhs) == ASHIFT
2115 && CONST_INT_P (XEXP (lhs, 1))
2116 && INTVAL (XEXP (lhs, 1)) >= 0
2117 && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (mode))
2119 coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
2120 GET_MODE_PRECISION (mode));
2121 lhs = XEXP (lhs, 0);
2124 if (GET_CODE (rhs) == NEG)
2126 coeff1 = wi::minus_one (GET_MODE_PRECISION (mode));
2127 rhs = XEXP (rhs, 0);
2129 else if (GET_CODE (rhs) == MULT
2130 && CONST_INT_P (XEXP (rhs, 1)))
2132 coeff1 = std::make_pair (XEXP (rhs, 1), mode);
2133 rhs = XEXP (rhs, 0);
2135 else if (GET_CODE (rhs) == ASHIFT
2136 && CONST_INT_P (XEXP (rhs, 1))
2137 && INTVAL (XEXP (rhs, 1)) >= 0
2138 && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (mode))
2140 coeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
2141 GET_MODE_PRECISION (mode));
2142 rhs = XEXP (rhs, 0);
2145 if (rtx_equal_p (lhs, rhs))
2147 rtx orig = gen_rtx_PLUS (mode, op0, op1);
2148 rtx coeff;
2149 bool speed = optimize_function_for_speed_p (cfun);
2151 coeff = immed_wide_int_const (coeff0 + coeff1, mode);
2153 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2154 return (set_src_cost (tem, mode, speed)
2155 <= set_src_cost (orig, mode, speed) ? tem : 0);
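/* For example, (plus (mult X (const_int 7)) X) can become
   (mult X (const_int 8)), and (plus (ashift X (const_int 2)) X)
   can become (mult X (const_int 5)), provided the new multiply is
   no more expensive than the original expression.  */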
2159 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
2160 if (CONST_SCALAR_INT_P (op1)
2161 && GET_CODE (op0) == XOR
2162 && CONST_SCALAR_INT_P (XEXP (op0, 1))
2163 && mode_signbit_p (mode, op1))
2164 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2165 simplify_gen_binary (XOR, mode, op1,
2166 XEXP (op0, 1)));
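/* For example, in QImode, where the sign bit is 0x80:
   (plus (xor X (const_int 3)) (const_int -128)) becomes
   (xor X (const_int -125)), since 0x03 ^ 0x80 = 0x83 == -125.  */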
2168 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
2169 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2170 && GET_CODE (op0) == MULT
2171 && GET_CODE (XEXP (op0, 0)) == NEG)
2173 rtx in1, in2;
2175 in1 = XEXP (XEXP (op0, 0), 0);
2176 in2 = XEXP (op0, 1);
2177 return simplify_gen_binary (MINUS, mode, op1,
2178 simplify_gen_binary (MULT, mode,
2179 in1, in2));
2182 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
2183 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
2184 is 1. */
2185 if (COMPARISON_P (op0)
2186 && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
2187 || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
2188 && (reversed = reversed_comparison (op0, mode)))
2189 return
2190 simplify_gen_unary (NEG, mode, reversed, mode);
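/* For example, with STORE_FLAG_VALUE == 1,
   (plus (eq A B) (const_int -1)) becomes (neg (ne A B)):
   the comparison yields 1 or 0, so adding -1 yields 0 or -1,
   exactly the negation of the reversed comparison.  */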
2192 /* If one of the operands is a PLUS or a MINUS, see if we can
2193 simplify this by the associative law.
2194 Don't use the associative law for floating point.
2195 The inaccuracy makes it nonassociative,
2196 and subtle programs can break if operations are associated. */
2198 if (INTEGRAL_MODE_P (mode)
2199 && (plus_minus_operand_p (op0)
2200 || plus_minus_operand_p (op1))
2201 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2202 return tem;
2204 /* Reassociate floating point addition only when the user
2205 specifies associative math operations. */
2206 if (FLOAT_MODE_P (mode)
2207 && flag_associative_math)
2209 tem = simplify_associative_operation (code, mode, op0, op1);
2210 if (tem)
2211 return tem;
2213 break;
2215 case COMPARE:
2216 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
2217 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
2218 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
2219 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
2221 rtx xop00 = XEXP (op0, 0);
2222 rtx xop10 = XEXP (op1, 0);
2224 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
2225 return xop00;
2227 if (REG_P (xop00) && REG_P (xop10)
2228 && GET_MODE (xop00) == GET_MODE (xop10)
2229 && REGNO (xop00) == REGNO (xop10)
2230 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
2231 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
2232 return xop00;
2234 break;
2236 case MINUS:
2237 /* We can't assume x-x is 0 even with non-IEEE floating point,
2238 but since it is zero except in very strange circumstances, we
2239 will treat it as zero with -ffinite-math-only. */
2240 if (rtx_equal_p (trueop0, trueop1)
2241 && ! side_effects_p (op0)
2242 && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
2243 return CONST0_RTX (mode);
2245 /* Change subtraction from zero into negation. (0 - x) is the
2246 same as -x when x is NaN, infinite, or finite and nonzero.
2247 But if the mode has signed zeros, and does not round towards
2248 -infinity, then 0 - 0 is 0, not -0. */
2249 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
2250 return simplify_gen_unary (NEG, mode, op1, mode);
2252 /* (-1 - a) is ~a. */
2253 if (trueop0 == constm1_rtx)
2254 return simplify_gen_unary (NOT, mode, op1, mode);
2256 /* Subtracting 0 has no effect unless the mode has signed zeros
2257 and supports rounding towards -infinity. In such a case,
2258 0 - 0 is -0. */
2259 if (!(HONOR_SIGNED_ZEROS (mode)
2260 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
2261 && trueop1 == CONST0_RTX (mode))
2262 return op0;
2264 /* See if this is something like X * C - X or vice versa or
2265 if the multiplication is written as a shift. If so, we can
2266 distribute and make a new multiply, shift, or maybe just
2267 have X (if C is 2 in the example above). But don't make
2268 something more expensive than we had before. */
2270 if (SCALAR_INT_MODE_P (mode))
2272 rtx lhs = op0, rhs = op1;
2274 wide_int coeff0 = wi::one (GET_MODE_PRECISION (mode));
2275 wide_int negcoeff1 = wi::minus_one (GET_MODE_PRECISION (mode));
2277 if (GET_CODE (lhs) == NEG)
2279 coeff0 = wi::minus_one (GET_MODE_PRECISION (mode));
2280 lhs = XEXP (lhs, 0);
2282 else if (GET_CODE (lhs) == MULT
2283 && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
2285 coeff0 = std::make_pair (XEXP (lhs, 1), mode);
2286 lhs = XEXP (lhs, 0);
2288 else if (GET_CODE (lhs) == ASHIFT
2289 && CONST_INT_P (XEXP (lhs, 1))
2290 && INTVAL (XEXP (lhs, 1)) >= 0
2291 && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (mode))
2293 coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
2294 GET_MODE_PRECISION (mode));
2295 lhs = XEXP (lhs, 0);
2298 if (GET_CODE (rhs) == NEG)
2300 negcoeff1 = wi::one (GET_MODE_PRECISION (mode));
2301 rhs = XEXP (rhs, 0);
2303 else if (GET_CODE (rhs) == MULT
2304 && CONST_INT_P (XEXP (rhs, 1)))
2306 negcoeff1 = wi::neg (std::make_pair (XEXP (rhs, 1), mode));
2307 rhs = XEXP (rhs, 0);
2309 else if (GET_CODE (rhs) == ASHIFT
2310 && CONST_INT_P (XEXP (rhs, 1))
2311 && INTVAL (XEXP (rhs, 1)) >= 0
2312 && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (mode))
2314 negcoeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
2315 GET_MODE_PRECISION (mode));
2316 negcoeff1 = -negcoeff1;
2317 rhs = XEXP (rhs, 0);
2320 if (rtx_equal_p (lhs, rhs))
2322 rtx orig = gen_rtx_MINUS (mode, op0, op1);
2323 rtx coeff;
2324 bool speed = optimize_function_for_speed_p (cfun);
2326 coeff = immed_wide_int_const (coeff0 + negcoeff1, mode);
2328 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2329 return (set_src_cost (tem, mode, speed)
2330 <= set_src_cost (orig, mode, speed) ? tem : 0);
2334 /* (a - (-b)) -> (a + b). True even for IEEE. */
2335 if (GET_CODE (op1) == NEG)
2336 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
2338 /* (-x - c) may be simplified as (-c - x). */
2339 if (GET_CODE (op0) == NEG
2340 && (CONST_SCALAR_INT_P (op1) || CONST_DOUBLE_AS_FLOAT_P (op1)))
2342 tem = simplify_unary_operation (NEG, mode, op1, mode);
2343 if (tem)
2344 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
2347 /* Don't let a relocatable value get a negative coeff. */
2348 if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode)
2349 return simplify_gen_binary (PLUS, mode,
2350 op0,
2351 neg_const_int (mode, op1));
2353 /* (x - (x & y)) -> (x & ~y) */
2354 if (INTEGRAL_MODE_P (mode) && GET_CODE (op1) == AND)
2356 if (rtx_equal_p (op0, XEXP (op1, 0)))
2358 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
2359 GET_MODE (XEXP (op1, 1)));
2360 return simplify_gen_binary (AND, mode, op0, tem);
2362 if (rtx_equal_p (op0, XEXP (op1, 1)))
2364 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
2365 GET_MODE (XEXP (op1, 0)));
2366 return simplify_gen_binary (AND, mode, op0, tem);
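/* For example, (minus X (and X (const_int 15))) becomes
   (and X (const_int -16)), clearing the low four bits of X.  */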
2370 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
2371 by reversing the comparison code if valid. */
2372 if (STORE_FLAG_VALUE == 1
2373 && trueop0 == const1_rtx
2374 && COMPARISON_P (op1)
2375 && (reversed = reversed_comparison (op1, mode)))
2376 return reversed;
2378 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
2379 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2380 && GET_CODE (op1) == MULT
2381 && GET_CODE (XEXP (op1, 0)) == NEG)
2383 rtx in1, in2;
2385 in1 = XEXP (XEXP (op1, 0), 0);
2386 in2 = XEXP (op1, 1);
2387 return simplify_gen_binary (PLUS, mode,
2388 simplify_gen_binary (MULT, mode,
2389 in1, in2),
2390 op0);
2393 /* Canonicalize (minus (neg A) (mult B C)) to
2394 (minus (mult (neg B) C) A). */
2395 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2396 && GET_CODE (op1) == MULT
2397 && GET_CODE (op0) == NEG)
2399 rtx in1, in2;
2401 in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
2402 in2 = XEXP (op1, 1);
2403 return simplify_gen_binary (MINUS, mode,
2404 simplify_gen_binary (MULT, mode,
2405 in1, in2),
2406 XEXP (op0, 0));
2409 /* If one of the operands is a PLUS or a MINUS, see if we can
2410 simplify this by the associative law. This will, for example,
2411 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
2412 Don't use the associative law for floating point.
2413 The inaccuracy makes it nonassociative,
2414 and subtle programs can break if operations are associated. */
2416 if (INTEGRAL_MODE_P (mode)
2417 && (plus_minus_operand_p (op0)
2418 || plus_minus_operand_p (op1))
2419 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2420 return tem;
2421 break;
2423 case MULT:
2424 if (trueop1 == constm1_rtx)
2425 return simplify_gen_unary (NEG, mode, op0, mode);
2427 if (GET_CODE (op0) == NEG)
2429 rtx temp = simplify_unary_operation (NEG, mode, op1, mode);
2430 /* If op1 is a MULT as well and simplify_unary_operation
2431 just moved the NEG to the second operand, simplify_gen_binary
2432 below could, through simplify_associative_operation, move
2433 the NEG around again and recurse endlessly. */
2434 if (temp
2435 && GET_CODE (op1) == MULT
2436 && GET_CODE (temp) == MULT
2437 && XEXP (op1, 0) == XEXP (temp, 0)
2438 && GET_CODE (XEXP (temp, 1)) == NEG
2439 && XEXP (op1, 1) == XEXP (XEXP (temp, 1), 0))
2440 temp = NULL_RTX;
2441 if (temp)
2442 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), temp);
2444 if (GET_CODE (op1) == NEG)
2446 rtx temp = simplify_unary_operation (NEG, mode, op0, mode);
2447 /* If op0 is a MULT as well and simplify_unary_operation
2448 just moved the NEG to the second operand, simplify_gen_binary
2449 below could, through simplify_associative_operation, move
2450 the NEG around again and recurse endlessly. */
2451 if (temp
2452 && GET_CODE (op0) == MULT
2453 && GET_CODE (temp) == MULT
2454 && XEXP (op0, 0) == XEXP (temp, 0)
2455 && GET_CODE (XEXP (temp, 1)) == NEG
2456 && XEXP (op0, 1) == XEXP (XEXP (temp, 1), 0))
2457 temp = NULL_RTX;
2458 if (temp)
2459 return simplify_gen_binary (MULT, mode, temp, XEXP (op1, 0));
2462 /* Maybe simplify x * 0 to 0. The reduction is not valid if
2463 x is NaN, since x * 0 is then also NaN. Nor is it valid
2464 when the mode has signed zeros, since multiplying a negative
2465 number by 0 will give -0, not 0. */
2466 if (!HONOR_NANS (mode)
2467 && !HONOR_SIGNED_ZEROS (mode)
2468 && trueop1 == CONST0_RTX (mode)
2469 && ! side_effects_p (op0))
2470 return op1;
2472 /* In IEEE floating point, x*1 is not equivalent to x for
2473 signalling NaNs. */
2474 if (!HONOR_SNANS (mode)
2475 && trueop1 == CONST1_RTX (mode))
2476 return op0;
2478 /* Convert multiply by constant power of two into shift. */
2479 if (CONST_SCALAR_INT_P (trueop1))
2481 val = wi::exact_log2 (std::make_pair (trueop1, mode));
2482 if (val >= 0)
2483 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
2486 /* x*2 is x+x and x*(-1) is -x */
2487 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
2488 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
2489 && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
2490 && GET_MODE (op0) == mode)
2492 const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);
2494 if (real_equal (d1, &dconst2))
2495 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
2497 if (!HONOR_SNANS (mode)
2498 && real_equal (d1, &dconstm1))
2499 return simplify_gen_unary (NEG, mode, op0, mode);
2502 /* Optimize -x * -x as x * x. */
2503 if (FLOAT_MODE_P (mode)
2504 && GET_CODE (op0) == NEG
2505 && GET_CODE (op1) == NEG
2506 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2507 && !side_effects_p (XEXP (op0, 0)))
2508 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2510 /* Likewise, optimize abs(x) * abs(x) as x * x. */
2511 if (SCALAR_FLOAT_MODE_P (mode)
2512 && GET_CODE (op0) == ABS
2513 && GET_CODE (op1) == ABS
2514 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2515 && !side_effects_p (XEXP (op0, 0)))
2516 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2518 /* Reassociate multiplication, but for floating point MULTs
2519 only when the user specifies unsafe math optimizations. */
2520 if (! FLOAT_MODE_P (mode)
2521 || flag_unsafe_math_optimizations)
2523 tem = simplify_associative_operation (code, mode, op0, op1);
2524 if (tem)
2525 return tem;
2527 break;
2529 case IOR:
2530 if (trueop1 == CONST0_RTX (mode))
2531 return op0;
2532 if (INTEGRAL_MODE_P (mode)
2533 && trueop1 == CONSTM1_RTX (mode)
2534 && !side_effects_p (op0))
2535 return op1;
2536 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2537 return op0;
2538 /* A | (~A) -> -1 */
2539 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2540 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2541 && ! side_effects_p (op0)
2542 && SCALAR_INT_MODE_P (mode))
2543 return constm1_rtx;
2545 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
2546 if (CONST_INT_P (op1)
2547 && HWI_COMPUTABLE_MODE_P (mode)
2548 && (nonzero_bits (op0, mode) & ~UINTVAL (op1)) == 0
2549 && !side_effects_p (op0))
2550 return op1;
2552 /* Canonicalize (X & C1) | C2. */
2553 if (GET_CODE (op0) == AND
2554 && CONST_INT_P (trueop1)
2555 && CONST_INT_P (XEXP (op0, 1)))
2557 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
2558 HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
2559 HOST_WIDE_INT c2 = INTVAL (trueop1);
2561 /* If (C1&C2) == C1, then (X&C1)|C2 becomes X. */
2562 if ((c1 & c2) == c1
2563 && !side_effects_p (XEXP (op0, 0)))
2564 return trueop1;
2566 /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
2567 if (((c1|c2) & mask) == mask)
2568 return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);
2570 /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2. */
2571 if (((c1 & ~c2) & mask) != (c1 & mask))
2573 tem = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2574 gen_int_mode (c1 & ~c2, mode));
2575 return simplify_gen_binary (IOR, mode, tem, op1);
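/* For example, in SImode, (ior (and X (const_int 0xff)) (const_int 0x0f))
   becomes (ior (and X (const_int 0xf0)) (const_int 0x0f)); the low
   four bits of the AND mask are already provided by C2.  */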
2579 /* Convert (A & B) | A to A. */
2580 if (GET_CODE (op0) == AND
2581 && (rtx_equal_p (XEXP (op0, 0), op1)
2582 || rtx_equal_p (XEXP (op0, 1), op1))
2583 && ! side_effects_p (XEXP (op0, 0))
2584 && ! side_effects_p (XEXP (op0, 1)))
2585 return op1;
2587 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2588 mode size to (rotate A CX). */
2590 if (GET_CODE (op1) == ASHIFT
2591 || GET_CODE (op1) == SUBREG)
2593 opleft = op1;
2594 opright = op0;
2596 else
2598 opright = op1;
2599 opleft = op0;
2602 if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
2603 && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
2604 && CONST_INT_P (XEXP (opleft, 1))
2605 && CONST_INT_P (XEXP (opright, 1))
2606 && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
2607 == GET_MODE_PRECISION (mode)))
2608 return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
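/* For example, in SImode, (ior (ashift X (const_int 8))
   (lshiftrt X (const_int 24))) becomes (rotate X (const_int 8)),
   since 8 + 24 equals the mode precision.  */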
2610 /* Same, but for ashift that has been "simplified" to a wider mode
2611 by simplify_shift_const. */
2613 if (GET_CODE (opleft) == SUBREG
2614 && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
2615 && GET_CODE (opright) == LSHIFTRT
2616 && GET_CODE (XEXP (opright, 0)) == SUBREG
2617 && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
2618 && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
2619 && (GET_MODE_SIZE (GET_MODE (opleft))
2620 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
2621 && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
2622 SUBREG_REG (XEXP (opright, 0)))
2623 && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
2624 && CONST_INT_P (XEXP (opright, 1))
2625 && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
2626 == GET_MODE_PRECISION (mode)))
2627 return gen_rtx_ROTATE (mode, XEXP (opright, 0),
2628 XEXP (SUBREG_REG (opleft), 1));
2630 /* If we have (ior (and X C1) C2), simplify this by making
2631 C1 as small as possible if C1 actually changes. */
2632 if (CONST_INT_P (op1)
2633 && (HWI_COMPUTABLE_MODE_P (mode)
2634 || INTVAL (op1) > 0)
2635 && GET_CODE (op0) == AND
2636 && CONST_INT_P (XEXP (op0, 1))
2637 && CONST_INT_P (op1)
2638 && (UINTVAL (XEXP (op0, 1)) & UINTVAL (op1)) != 0)
2640 rtx tmp = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2641 gen_int_mode (UINTVAL (XEXP (op0, 1))
2642 & ~UINTVAL (op1),
2643 mode));
2644 return simplify_gen_binary (IOR, mode, tmp, op1);
2647 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2648 a (sign_extend (plus ...)). Then check if OP1 is a CONST_INT and
2649 the PLUS does not affect any of the bits in OP1: then we can do
2650 the IOR as a PLUS and we can associate. This is valid if OP1
2651 can be safely shifted left C bits. */
2652 if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
2653 && GET_CODE (XEXP (op0, 0)) == PLUS
2654 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
2655 && CONST_INT_P (XEXP (op0, 1))
2656 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
2658 int count = INTVAL (XEXP (op0, 1));
2659 HOST_WIDE_INT mask = INTVAL (trueop1) << count;
2661 if (mask >> count == INTVAL (trueop1)
2662 && trunc_int_for_mode (mask, mode) == mask
2663 && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
2664 return simplify_gen_binary (ASHIFTRT, mode,
2665 plus_constant (mode, XEXP (op0, 0),
2666 mask),
2667 XEXP (op0, 1));
2670 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
2671 if (tem)
2672 return tem;
2674 tem = simplify_associative_operation (code, mode, op0, op1);
2675 if (tem)
2676 return tem;
2677 break;
2679 case XOR:
2680 if (trueop1 == CONST0_RTX (mode))
2681 return op0;
2682 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2683 return simplify_gen_unary (NOT, mode, op0, mode);
2684 if (rtx_equal_p (trueop0, trueop1)
2685 && ! side_effects_p (op0)
2686 && GET_MODE_CLASS (mode) != MODE_CC)
2687 return CONST0_RTX (mode);
2689 /* Canonicalize XOR of the most significant bit to PLUS. */
2690 if (CONST_SCALAR_INT_P (op1)
2691 && mode_signbit_p (mode, op1))
2692 return simplify_gen_binary (PLUS, mode, op0, op1);
2693 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2694 if (CONST_SCALAR_INT_P (op1)
2695 && GET_CODE (op0) == PLUS
2696 && CONST_SCALAR_INT_P (XEXP (op0, 1))
2697 && mode_signbit_p (mode, XEXP (op0, 1)))
2698 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2699 simplify_gen_binary (XOR, mode, op1,
2700 XEXP (op0, 1)));
2702 /* If we are XORing two things that have no bits in common,
2703 convert them into an IOR. This helps to detect rotation encoded
2704 using those methods and possibly other simplifications. */
2706 if (HWI_COMPUTABLE_MODE_P (mode)
2707 && (nonzero_bits (op0, mode)
2708 & nonzero_bits (op1, mode)) == 0)
2709 return (simplify_gen_binary (IOR, mode, op0, op1));
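/* For example, (xor (and X (const_int 0xf0)) (and Y (const_int 0x0f)))
   becomes the corresponding IOR, since the nonzero bits of the two
   operands cannot overlap.  */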
2711 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2712 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2713 (NOT y). */
2715 int num_negated = 0;
2717 if (GET_CODE (op0) == NOT)
2718 num_negated++, op0 = XEXP (op0, 0);
2719 if (GET_CODE (op1) == NOT)
2720 num_negated++, op1 = XEXP (op1, 0);
2722 if (num_negated == 2)
2723 return simplify_gen_binary (XOR, mode, op0, op1);
2724 else if (num_negated == 1)
2725 return simplify_gen_unary (NOT, mode,
2726 simplify_gen_binary (XOR, mode, op0, op1),
2727 mode);
2730 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2731 correspond to a machine insn or result in further simplifications
2732 if B is a constant. */
2734 if (GET_CODE (op0) == AND
2735 && rtx_equal_p (XEXP (op0, 1), op1)
2736 && ! side_effects_p (op1))
2737 return simplify_gen_binary (AND, mode,
2738 simplify_gen_unary (NOT, mode,
2739 XEXP (op0, 0), mode),
2740 op1);
2742 else if (GET_CODE (op0) == AND
2743 && rtx_equal_p (XEXP (op0, 0), op1)
2744 && ! side_effects_p (op1))
2745 return simplify_gen_binary (AND, mode,
2746 simplify_gen_unary (NOT, mode,
2747 XEXP (op0, 1), mode),
2748 op1);
2750 /* Given (xor (ior (xor A B) C) D), where B, C and D are
2751 constants, simplify to (xor (ior A C) (B&~C)^D), canceling
2752 out bits inverted twice and not set by C. Similarly, given
2753 (xor (and (xor A B) C) D), simplify without inverting C in
2754 the xor operand: (xor (and A C) (B&C)^D). */
2756 else if ((GET_CODE (op0) == IOR || GET_CODE (op0) == AND)
2757 && GET_CODE (XEXP (op0, 0)) == XOR
2758 && CONST_INT_P (op1)
2759 && CONST_INT_P (XEXP (op0, 1))
2760 && CONST_INT_P (XEXP (XEXP (op0, 0), 1)))
2762 enum rtx_code op = GET_CODE (op0);
2763 rtx a = XEXP (XEXP (op0, 0), 0);
2764 rtx b = XEXP (XEXP (op0, 0), 1);
2765 rtx c = XEXP (op0, 1);
2766 rtx d = op1;
2767 HOST_WIDE_INT bval = INTVAL (b);
2768 HOST_WIDE_INT cval = INTVAL (c);
2769 HOST_WIDE_INT dval = INTVAL (d);
2770 HOST_WIDE_INT xcval;
2772 if (op == IOR)
2773 xcval = ~cval;
2774 else
2775 xcval = cval;
2777 return simplify_gen_binary (XOR, mode,
2778 simplify_gen_binary (op, mode, a, c),
2779 gen_int_mode ((bval & xcval) ^ dval,
2780 mode));
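/* For example, (xor (ior (xor A (const_int 0x0c)) (const_int 0x0a))
   (const_int 0x01)) becomes (xor (ior A (const_int 0x0a))
   (const_int 0x05)), since (0x0c & ~0x0a) ^ 0x01 == 0x05.  */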
2783 /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
2784 we can transform like this:
2785 (A&B)^C == ~(A&B)&C | ~C&(A&B)
2786 == (~A|~B)&C | ~C&(A&B) * DeMorgan's Law
2787 == ~A&C | ~B&C | A&(~C&B) * Distribute and re-order
2788 Attempt a few simplifications when B and C are both constants. */
2789 if (GET_CODE (op0) == AND
2790 && CONST_INT_P (op1)
2791 && CONST_INT_P (XEXP (op0, 1)))
2793 rtx a = XEXP (op0, 0);
2794 rtx b = XEXP (op0, 1);
2795 rtx c = op1;
2796 HOST_WIDE_INT bval = INTVAL (b);
2797 HOST_WIDE_INT cval = INTVAL (c);
2799 /* Instead of computing ~A&C, we compute its negated value,
2800 ~(A|~C). If it yields -1, ~A&C is zero, so we can
2801 optimize for sure. If it does not simplify, we still try
2802 to compute ~A&C below, but since that always allocates
2803 RTL, we don't try that before committing to returning a
2804 simplified expression. */
2805 rtx n_na_c = simplify_binary_operation (IOR, mode, a,
2806 GEN_INT (~cval));
2808 if ((~cval & bval) == 0)
2810 rtx na_c = NULL_RTX;
2811 if (n_na_c)
2812 na_c = simplify_gen_unary (NOT, mode, n_na_c, mode);
2813 else
2815 /* If ~A does not simplify, don't bother: we don't
2816 want to simplify 2 operations into 3, and if na_c
2817 were to simplify with na, n_na_c would have
2818 simplified as well. */
2819 rtx na = simplify_unary_operation (NOT, mode, a, mode);
2820 if (na)
2821 na_c = simplify_gen_binary (AND, mode, na, c);
2824 /* Try to simplify ~A&C | ~B&C. */
2825 if (na_c != NULL_RTX)
2826 return simplify_gen_binary (IOR, mode, na_c,
2827 gen_int_mode (~bval & cval, mode));
2829 else
2831 /* If ~A&C is zero, simplify A&(~C&B) | ~B&C. */
2832 if (n_na_c == CONSTM1_RTX (mode))
2834 rtx a_nc_b = simplify_gen_binary (AND, mode, a,
2835 gen_int_mode (~cval & bval,
2836 mode));
2837 return simplify_gen_binary (IOR, mode, a_nc_b,
2838 gen_int_mode (~bval & cval,
2839 mode));
2844 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2845 comparison if STORE_FLAG_VALUE is 1. */
2846 if (STORE_FLAG_VALUE == 1
2847 && trueop1 == const1_rtx
2848 && COMPARISON_P (op0)
2849 && (reversed = reversed_comparison (op0, mode)))
2850 return reversed;
2852 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2853 is (lt foo (const_int 0)), so we can perform the above
2854 simplification if STORE_FLAG_VALUE is 1. */
2856 if (STORE_FLAG_VALUE == 1
2857 && trueop1 == const1_rtx
2858 && GET_CODE (op0) == LSHIFTRT
2859 && CONST_INT_P (XEXP (op0, 1))
2860 && INTVAL (XEXP (op0, 1)) == GET_MODE_PRECISION (mode) - 1)
2861 return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);
2863 /* (xor (comparison foo bar) (const_int sign-bit))
2864 when STORE_FLAG_VALUE is the sign bit. */
2865 if (val_signbit_p (mode, STORE_FLAG_VALUE)
2866 && trueop1 == const_true_rtx
2867 && COMPARISON_P (op0)
2868 && (reversed = reversed_comparison (op0, mode)))
2869 return reversed;
2871 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
2872 if (tem)
2873 return tem;
2875 tem = simplify_associative_operation (code, mode, op0, op1);
2876 if (tem)
2877 return tem;
2878 break;
2880 case AND:
2881 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2882 return trueop1;
2883 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2884 return op0;
2885 if (HWI_COMPUTABLE_MODE_P (mode))
2887 HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
2888 HOST_WIDE_INT nzop1;
2889 if (CONST_INT_P (trueop1))
2891 HOST_WIDE_INT val1 = INTVAL (trueop1);
2892 /* If we are turning off bits already known off in OP0, we need
2893 not do an AND. */
2894 if ((nzop0 & ~val1) == 0)
2895 return op0;
2897 nzop1 = nonzero_bits (trueop1, mode);
2898 /* If we are clearing all the nonzero bits, the result is zero. */
2899 if ((nzop1 & nzop0) == 0
2900 && !side_effects_p (op0) && !side_effects_p (op1))
2901 return CONST0_RTX (mode);
2903 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
2904 && GET_MODE_CLASS (mode) != MODE_CC)
2905 return op0;
2906 /* A & (~A) -> 0 */
2907 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2908 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2909 && ! side_effects_p (op0)
2910 && GET_MODE_CLASS (mode) != MODE_CC)
2911 return CONST0_RTX (mode);
2913 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
2914 there are no nonzero bits of C outside of X's mode. */
2915 if ((GET_CODE (op0) == SIGN_EXTEND
2916 || GET_CODE (op0) == ZERO_EXTEND)
2917 && CONST_INT_P (trueop1)
2918 && HWI_COMPUTABLE_MODE_P (mode)
2919 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
2920 & UINTVAL (trueop1)) == 0)
2922 machine_mode imode = GET_MODE (XEXP (op0, 0));
2923 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
2924 gen_int_mode (INTVAL (trueop1),
2925 imode));
2926 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
2929 /* Transform (and (truncate X) C) into (truncate (and X C)). This way
2930 we might be able to further simplify the AND with X and potentially
2931 remove the truncation altogether. */
2932 if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
2934 rtx x = XEXP (op0, 0);
2935 machine_mode xmode = GET_MODE (x);
2936 tem = simplify_gen_binary (AND, xmode, x,
2937 gen_int_mode (INTVAL (trueop1), xmode));
2938 return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
2941 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
2942 if (GET_CODE (op0) == IOR
2943 && CONST_INT_P (trueop1)
2944 && CONST_INT_P (XEXP (op0, 1)))
2946 HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
2947 return simplify_gen_binary (IOR, mode,
2948 simplify_gen_binary (AND, mode,
2949 XEXP (op0, 0), op1),
2950 gen_int_mode (tmp, mode));
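/* For example, (and (ior X (const_int 0x0c)) (const_int 0x0a))
   becomes (ior (and X (const_int 0x0a)) (const_int 0x08)),
   since 0x0c & 0x0a == 0x08.  */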
2953 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
2954 insn (and may simplify more). */
2955 if (GET_CODE (op0) == XOR
2956 && rtx_equal_p (XEXP (op0, 0), op1)
2957 && ! side_effects_p (op1))
2958 return simplify_gen_binary (AND, mode,
2959 simplify_gen_unary (NOT, mode,
2960 XEXP (op0, 1), mode),
2961 op1);
2963 if (GET_CODE (op0) == XOR
2964 && rtx_equal_p (XEXP (op0, 1), op1)
2965 && ! side_effects_p (op1))
2966 return simplify_gen_binary (AND, mode,
2967 simplify_gen_unary (NOT, mode,
2968 XEXP (op0, 0), mode),
2969 op1);
2971 /* Similarly for (~(A ^ B)) & A. */
2972 if (GET_CODE (op0) == NOT
2973 && GET_CODE (XEXP (op0, 0)) == XOR
2974 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
2975 && ! side_effects_p (op1))
2976 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
2978 if (GET_CODE (op0) == NOT
2979 && GET_CODE (XEXP (op0, 0)) == XOR
2980 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
2981 && ! side_effects_p (op1))
2982 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
2984 /* Convert (A | B) & A to A. */
2985 if (GET_CODE (op0) == IOR
2986 && (rtx_equal_p (XEXP (op0, 0), op1)
2987 || rtx_equal_p (XEXP (op0, 1), op1))
2988 && ! side_effects_p (XEXP (op0, 0))
2989 && ! side_effects_p (XEXP (op0, 1)))
2990 return op1;
2992 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
2993 ((A & N) + B) & M -> (A + B) & M
2994 Similarly if (N & M) == 0,
2995 ((A | N) + B) & M -> (A + B) & M
2996 and for - instead of + and/or ^ instead of |.
2997 Also, if (N & M) == 0, then
2998 (A +- N) & M -> A & M. */
2999 if (CONST_INT_P (trueop1)
3000 && HWI_COMPUTABLE_MODE_P (mode)
3001 && ~UINTVAL (trueop1)
3002 && (UINTVAL (trueop1) & (UINTVAL (trueop1) + 1)) == 0
3003 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
3005 rtx pmop[2];
3006 int which;
3008 pmop[0] = XEXP (op0, 0);
3009 pmop[1] = XEXP (op0, 1);
3011 if (CONST_INT_P (pmop[1])
3012 && (UINTVAL (pmop[1]) & UINTVAL (trueop1)) == 0)
3013 return simplify_gen_binary (AND, mode, pmop[0], op1);
3015 for (which = 0; which < 2; which++)
3017 tem = pmop[which];
3018 switch (GET_CODE (tem))
3020 case AND:
3021 if (CONST_INT_P (XEXP (tem, 1))
3022 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1))
3023 == UINTVAL (trueop1))
3024 pmop[which] = XEXP (tem, 0);
3025 break;
3026 case IOR:
3027 case XOR:
3028 if (CONST_INT_P (XEXP (tem, 1))
3029 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1)) == 0)
3030 pmop[which] = XEXP (tem, 0);
3031 break;
3032 default:
3033 break;
3037 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
3039 tem = simplify_gen_binary (GET_CODE (op0), mode,
3040 pmop[0], pmop[1]);
3041 return simplify_gen_binary (code, mode, tem, op1);
3045 /* (and X (ior (not X) Y)) -> (and X Y) */
3046 if (GET_CODE (op1) == IOR
3047 && GET_CODE (XEXP (op1, 0)) == NOT
3048 && rtx_equal_p (op0, XEXP (XEXP (op1, 0), 0)))
3049 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));
3051 /* (and (ior (not X) Y) X) -> (and X Y) */
3052 if (GET_CODE (op0) == IOR
3053 && GET_CODE (XEXP (op0, 0)) == NOT
3054 && rtx_equal_p (op1, XEXP (XEXP (op0, 0), 0)))
3055 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));
3057 /* (and X (ior Y (not X))) -> (and X Y) */
3058 if (GET_CODE (op1) == IOR
3059 && GET_CODE (XEXP (op1, 1)) == NOT
3060 && rtx_equal_p (op0, XEXP (XEXP (op1, 1), 0)))
3061 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 0));
3063 /* (and (ior Y (not X)) X) -> (and X Y) */
3064 if (GET_CODE (op0) == IOR
3065 && GET_CODE (XEXP (op0, 1)) == NOT
3066 && rtx_equal_p (op1, XEXP (XEXP (op0, 1), 0)))
3067 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 0));
3069 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
3070 if (tem)
3071 return tem;
3073 tem = simplify_associative_operation (code, mode, op0, op1);
3074 if (tem)
3075 return tem;
3076 break;
3078 case UDIV:
3079 /* 0/x is 0 (or x&0 if x has side-effects). */
3080 if (trueop0 == CONST0_RTX (mode))
3082 if (side_effects_p (op1))
3083 return simplify_gen_binary (AND, mode, op1, trueop0);
3084 return trueop0;
3086 /* x/1 is x. */
3087 if (trueop1 == CONST1_RTX (mode))
3089 tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3090 if (tem)
3091 return tem;
3093 /* Convert divide by power of two into shift. */
3094 if (CONST_INT_P (trueop1)
3095 && (val = exact_log2 (UINTVAL (trueop1))) > 0)
3096 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
3097 break;
3099 case DIV:
3100 /* Handle floating point and integers separately. */
3101 if (SCALAR_FLOAT_MODE_P (mode))
3103 /* Maybe change 0.0 / x to 0.0. This transformation isn't
3104 safe for modes with NaNs, since 0.0 / 0.0 will then be
3105 NaN rather than 0.0. Nor is it safe for modes with signed
3106 zeros, since dividing 0 by a negative number gives -0.0 */
3107 if (trueop0 == CONST0_RTX (mode)
3108 && !HONOR_NANS (mode)
3109 && !HONOR_SIGNED_ZEROS (mode)
3110 && ! side_effects_p (op1))
3111 return op0;
3112 /* x/1.0 is x. */
3113 if (trueop1 == CONST1_RTX (mode)
3114 && !HONOR_SNANS (mode))
3115 return op0;
3117 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
3118 && trueop1 != CONST0_RTX (mode))
3120 const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);
3122 /* x/-1.0 is -x. */
3123 if (real_equal (d1, &dconstm1)
3124 && !HONOR_SNANS (mode))
3125 return simplify_gen_unary (NEG, mode, op0, mode);
3127 /* Change FP division by a constant into multiplication.
3128 Only do this with -freciprocal-math. */
3129 if (flag_reciprocal_math
3130 && !real_equal (d1, &dconst0))
3132 REAL_VALUE_TYPE d;
3133 real_arithmetic (&d, RDIV_EXPR, &dconst1, d1);
3134 tem = const_double_from_real_value (d, mode);
3135 return simplify_gen_binary (MULT, mode, op0, tem);
3139 else if (SCALAR_INT_MODE_P (mode))
3141 /* 0/x is 0 (or x&0 if x has side-effects). */
3142 if (trueop0 == CONST0_RTX (mode)
3143 && !cfun->can_throw_non_call_exceptions)
3145 if (side_effects_p (op1))
3146 return simplify_gen_binary (AND, mode, op1, trueop0);
3147 return trueop0;
3149 /* x/1 is x. */
3150 if (trueop1 == CONST1_RTX (mode))
3152 tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3153 if (tem)
3154 return tem;
3156 /* x/-1 is -x. */
3157 if (trueop1 == constm1_rtx)
3159 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3160 if (x)
3161 return simplify_gen_unary (NEG, mode, x, mode);
3164 break;
3166 case UMOD:
3167 /* 0%x is 0 (or x&0 if x has side-effects). */
3168 if (trueop0 == CONST0_RTX (mode))
3170 if (side_effects_p (op1))
3171 return simplify_gen_binary (AND, mode, op1, trueop0);
3172 return trueop0;
3174 /* x%1 is 0 (or x&0 if x has side-effects). */
3175 if (trueop1 == CONST1_RTX (mode))
3177 if (side_effects_p (op0))
3178 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3179 return CONST0_RTX (mode);
3181 /* Implement modulus by power of two as AND. */
3182 if (CONST_INT_P (trueop1)
3183 && exact_log2 (UINTVAL (trueop1)) > 0)
3184 return simplify_gen_binary (AND, mode, op0,
3185 gen_int_mode (INTVAL (op1) - 1, mode));
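/* For example, (umod X (const_int 16)) becomes
   (and X (const_int 15)).  */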
3186 break;
3188 case MOD:
3189 /* 0%x is 0 (or x&0 if x has side-effects). */
3190 if (trueop0 == CONST0_RTX (mode))
3192 if (side_effects_p (op1))
3193 return simplify_gen_binary (AND, mode, op1, trueop0);
3194 return trueop0;
3196 /* x%1 and x%-1 is 0 (or x&0 if x has side-effects). */
3197 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
3199 if (side_effects_p (op0))
3200 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3201 return CONST0_RTX (mode);
3203 break;
3205 case ROTATERT:
3206 case ROTATE:
3207 /* Canonicalize rotates by constant amount. If op1 is bitsize / 2,
3208 prefer left rotation; if op1 is from bitsize / 2 + 1 to
3209 bitsize - 1, use the other rotate direction with an amount of
3210 1 .. bitsize / 2 - 1 instead. */
3211 #if defined(HAVE_rotate) && defined(HAVE_rotatert)
3212 if (CONST_INT_P (trueop1)
3213 && IN_RANGE (INTVAL (trueop1),
3214 GET_MODE_PRECISION (mode) / 2 + (code == ROTATE),
3215 GET_MODE_PRECISION (mode) - 1))
3216 return simplify_gen_binary (code == ROTATE ? ROTATERT : ROTATE,
3217 mode, op0, GEN_INT (GET_MODE_PRECISION (mode)
3218 - INTVAL (trueop1)));
3219 #endif
3220 /* FALLTHRU */
3221 case ASHIFTRT:
3222 if (trueop1 == CONST0_RTX (mode))
3223 return op0;
3224 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3225 return op0;
3226 /* Rotating ~0 always results in ~0. */
3227 if (CONST_INT_P (trueop0) && width <= HOST_BITS_PER_WIDE_INT
3228 && UINTVAL (trueop0) == GET_MODE_MASK (mode)
3229 && ! side_effects_p (op1))
3230 return op0;
3231 /* Given:
3232 scalar modes M1, M2
3233 scalar constants c1, c2
3234 size (M2) > size (M1)
3235 c1 == size (M2) - size (M1)
3236 optimize:
3237 (ashiftrt:M1 (subreg:M1 (lshiftrt:M2 (reg:M2) (const_int <c1>))
3238 <low_part>)
3239 (const_int <c2>))
3240 to:
3241 (subreg:M1 (ashiftrt:M2 (reg:M2) (const_int <c1 + c2>))
3242 <low_part>). */
3243 if (code == ASHIFTRT
3244 && !VECTOR_MODE_P (mode)
3245 && SUBREG_P (op0)
3246 && CONST_INT_P (op1)
3247 && GET_CODE (SUBREG_REG (op0)) == LSHIFTRT
3248 && !VECTOR_MODE_P (GET_MODE (SUBREG_REG (op0)))
3249 && CONST_INT_P (XEXP (SUBREG_REG (op0), 1))
3250 && (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0)))
3251 > GET_MODE_BITSIZE (mode))
3252 && (INTVAL (XEXP (SUBREG_REG (op0), 1))
3253 == (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0)))
3254 - GET_MODE_BITSIZE (mode)))
3255 && subreg_lowpart_p (op0))
3257 rtx tmp = GEN_INT (INTVAL (XEXP (SUBREG_REG (op0), 1))
3258 + INTVAL (op1));
3259 machine_mode inner_mode = GET_MODE (SUBREG_REG (op0));
3260 tmp = simplify_gen_binary (ASHIFTRT,
3261 GET_MODE (SUBREG_REG (op0)),
3262 XEXP (SUBREG_REG (op0), 0),
3263 tmp);
3264 return lowpart_subreg (mode, tmp, inner_mode);
3266 canonicalize_shift:
3267 if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
3269 val = INTVAL (op1) & (GET_MODE_PRECISION (mode) - 1);
3270 if (val != INTVAL (op1))
3271 return simplify_gen_binary (code, mode, op0, GEN_INT (val));
3273 break;
3275 case ASHIFT:
3276 case SS_ASHIFT:
3277 case US_ASHIFT:
3278 if (trueop1 == CONST0_RTX (mode))
3279 return op0;
3280 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3281 return op0;
3282 goto canonicalize_shift;
3284 case LSHIFTRT:
3285 if (trueop1 == CONST0_RTX (mode))
3286 return op0;
3287 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3288 return op0;
3289 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
3290 if (GET_CODE (op0) == CLZ
3291 && CONST_INT_P (trueop1)
3292 && STORE_FLAG_VALUE == 1
3293 && INTVAL (trueop1) < (HOST_WIDE_INT)width)
3295 machine_mode imode = GET_MODE (XEXP (op0, 0));
3296 unsigned HOST_WIDE_INT zero_val = 0;
3298 if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
3299 && zero_val == GET_MODE_PRECISION (imode)
3300 && INTVAL (trueop1) == exact_log2 (zero_val))
3301 return simplify_gen_relational (EQ, mode, imode,
3302 XEXP (op0, 0), const0_rtx);
3304 goto canonicalize_shift;
3306 case SMIN:
3307 if (width <= HOST_BITS_PER_WIDE_INT
3308 && mode_signbit_p (mode, trueop1)
3309 && ! side_effects_p (op0))
3310 return op1;
3311 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3312 return op0;
3313 tem = simplify_associative_operation (code, mode, op0, op1);
3314 if (tem)
3315 return tem;
3316 break;
3318 case SMAX:
3319 if (width <= HOST_BITS_PER_WIDE_INT
3320 && CONST_INT_P (trueop1)
3321 && (UINTVAL (trueop1) == GET_MODE_MASK (mode) >> 1)
3322 && ! side_effects_p (op0))
3323 return op1;
3324 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3325 return op0;
3326 tem = simplify_associative_operation (code, mode, op0, op1);
3327 if (tem)
3328 return tem;
3329 break;
3331 case UMIN:
3332 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
3333 return op1;
3334 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3335 return op0;
3336 tem = simplify_associative_operation (code, mode, op0, op1);
3337 if (tem)
3338 return tem;
3339 break;
3341 case UMAX:
3342 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
3343 return op1;
3344 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3345 return op0;
3346 tem = simplify_associative_operation (code, mode, op0, op1);
3347 if (tem)
3348 return tem;
3349 break;
3351 case SS_PLUS:
3352 case US_PLUS:
3353 case SS_MINUS:
3354 case US_MINUS:
3355 case SS_MULT:
3356 case US_MULT:
3357 case SS_DIV:
3358 case US_DIV:
3359 /* ??? There are simplifications that can be done. */
3360 return 0;
3362 case VEC_SELECT:
3363 if (!VECTOR_MODE_P (mode))
3365 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3366 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
3367 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3368 gcc_assert (XVECLEN (trueop1, 0) == 1);
3369 gcc_assert (CONST_INT_P (XVECEXP (trueop1, 0, 0)));
3371 if (GET_CODE (trueop0) == CONST_VECTOR)
3372 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
3373 (trueop1, 0, 0)));
3375 /* Extract a scalar element from a nested VEC_SELECT expression
3376 (with optional nested VEC_CONCAT expression). Some targets
3377 (i386) extract a scalar element from a vector using a chain of
3378 nested VEC_SELECT expressions. When the input operand is a memory
3379 operand, this operation can be simplified to a simple scalar
3380 load from an offset memory address. */
3381 if (GET_CODE (trueop0) == VEC_SELECT)
3383 rtx op0 = XEXP (trueop0, 0);
3384 rtx op1 = XEXP (trueop0, 1);
3386 machine_mode opmode = GET_MODE (op0);
3387 int elt_size = GET_MODE_UNIT_SIZE (opmode);
3388 int n_elts = GET_MODE_SIZE (opmode) / elt_size;
3390 int i = INTVAL (XVECEXP (trueop1, 0, 0));
3391 int elem;
3393 rtvec vec;
3394 rtx tmp_op, tmp;
3396 gcc_assert (GET_CODE (op1) == PARALLEL);
3397 gcc_assert (i < n_elts);
3399 /* Select the element pointed to by the nested selector. */
3400 elem = INTVAL (XVECEXP (op1, 0, i));
3402 /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */
3403 if (GET_CODE (op0) == VEC_CONCAT)
3405 rtx op00 = XEXP (op0, 0);
3406 rtx op01 = XEXP (op0, 1);
3408 machine_mode mode00, mode01;
3409 int n_elts00, n_elts01;
3411 mode00 = GET_MODE (op00);
3412 mode01 = GET_MODE (op01);
3414 /* Find out number of elements of each operand. */
3415 if (VECTOR_MODE_P (mode00))
3417 elt_size = GET_MODE_UNIT_SIZE (mode00);
3418 n_elts00 = GET_MODE_SIZE (mode00) / elt_size;
3420 else
3421 n_elts00 = 1;
3423 if (VECTOR_MODE_P (mode01))
3425 elt_size = GET_MODE_UNIT_SIZE (mode01);
3426 n_elts01 = GET_MODE_SIZE (mode01) / elt_size;
3428 else
3429 n_elts01 = 1;
3431 gcc_assert (n_elts == n_elts00 + n_elts01);
3433 /* Select correct operand of VEC_CONCAT
3434 and adjust selector. */
3435 if (elem < n_elts00)
3436 tmp_op = op00;
3437 else
3439 tmp_op = op01;
3440 elem -= n_elts00;
3443 else
3444 tmp_op = op0;
3446 vec = rtvec_alloc (1);
3447 RTVEC_ELT (vec, 0) = GEN_INT (elem);
3449 tmp = gen_rtx_fmt_ee (code, mode,
3450 tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
3451 return tmp;
3453 if (GET_CODE (trueop0) == VEC_DUPLICATE
3454 && GET_MODE (XEXP (trueop0, 0)) == mode)
3455 return XEXP (trueop0, 0);
3457 else
3459 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3460 gcc_assert (GET_MODE_INNER (mode)
3461 == GET_MODE_INNER (GET_MODE (trueop0)));
3462 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3464 if (GET_CODE (trueop0) == CONST_VECTOR)
3466 int elt_size = GET_MODE_UNIT_SIZE (mode);
3467 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3468 rtvec v = rtvec_alloc (n_elts);
3469 unsigned int i;
3471 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
3472 for (i = 0; i < n_elts; i++)
3474 rtx x = XVECEXP (trueop1, 0, i);
3476 gcc_assert (CONST_INT_P (x));
3477 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
3478 INTVAL (x));
3481 return gen_rtx_CONST_VECTOR (mode, v);
3484 /* Recognize the identity. */
3485 if (GET_MODE (trueop0) == mode)
3487 bool maybe_ident = true;
3488 for (int i = 0; i < XVECLEN (trueop1, 0); i++)
3490 rtx j = XVECEXP (trueop1, 0, i);
3491 if (!CONST_INT_P (j) || INTVAL (j) != i)
3493 maybe_ident = false;
3494 break;
3497 if (maybe_ident)
3498 return trueop0;
3501 /* If we build {a,b} then permute it, build the result directly. */
3502 if (XVECLEN (trueop1, 0) == 2
3503 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3504 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3505 && GET_CODE (trueop0) == VEC_CONCAT
3506 && GET_CODE (XEXP (trueop0, 0)) == VEC_CONCAT
3507 && GET_MODE (XEXP (trueop0, 0)) == mode
3508 && GET_CODE (XEXP (trueop0, 1)) == VEC_CONCAT
3509 && GET_MODE (XEXP (trueop0, 1)) == mode)
3511 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3512 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3513 rtx subop0, subop1;
3515 gcc_assert (i0 < 4 && i1 < 4);
3516 subop0 = XEXP (XEXP (trueop0, i0 / 2), i0 % 2);
3517 subop1 = XEXP (XEXP (trueop0, i1 / 2), i1 % 2);
3519 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3522 if (XVECLEN (trueop1, 0) == 2
3523 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3524 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3525 && GET_CODE (trueop0) == VEC_CONCAT
3526 && GET_MODE (trueop0) == mode)
3528 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3529 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3530 rtx subop0, subop1;
3532 gcc_assert (i0 < 2 && i1 < 2);
3533 subop0 = XEXP (trueop0, i0);
3534 subop1 = XEXP (trueop0, i1);
3536 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3539 /* If we select one half of a vec_concat, return that. */
3540 if (GET_CODE (trueop0) == VEC_CONCAT
3541 && CONST_INT_P (XVECEXP (trueop1, 0, 0)))
3543 rtx subop0 = XEXP (trueop0, 0);
3544 rtx subop1 = XEXP (trueop0, 1);
3545 machine_mode mode0 = GET_MODE (subop0);
3546 machine_mode mode1 = GET_MODE (subop1);
3547 int li = GET_MODE_UNIT_SIZE (mode0);
3548 int l0 = GET_MODE_SIZE (mode0) / li;
3549 int l1 = GET_MODE_SIZE (mode1) / li;
3550 int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3551 if (i0 == 0 && !side_effects_p (op1) && mode == mode0)
3553 bool success = true;
3554 for (int i = 1; i < l0; ++i)
3556 rtx j = XVECEXP (trueop1, 0, i);
3557 if (!CONST_INT_P (j) || INTVAL (j) != i)
3559 success = false;
3560 break;
3563 if (success)
3564 return subop0;
3566 if (i0 == l0 && !side_effects_p (op0) && mode == mode1)
3568 bool success = true;
3569 for (int i = 1; i < l1; ++i)
3571 rtx j = XVECEXP (trueop1, 0, i);
3572 if (!CONST_INT_P (j) || INTVAL (j) != i0 + i)
3574 success = false;
3575 break;
3578 if (success)
3579 return subop1;
3584 if (XVECLEN (trueop1, 0) == 1
3585 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3586 && GET_CODE (trueop0) == VEC_CONCAT)
3588 rtx vec = trueop0;
3589 int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
3591 /* Try to find the element in the VEC_CONCAT. */
3592 while (GET_MODE (vec) != mode
3593 && GET_CODE (vec) == VEC_CONCAT)
3595 HOST_WIDE_INT vec_size;
3597 if (CONST_INT_P (XEXP (vec, 0)))
3599 /* vec_concat of two const_ints doesn't make sense with
3600 respect to modes. */
3601 if (CONST_INT_P (XEXP (vec, 1)))
3602 return 0;
3604 vec_size = GET_MODE_SIZE (GET_MODE (trueop0))
3605 - GET_MODE_SIZE (GET_MODE (XEXP (vec, 1)));
3607 else
3608 vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
3610 if (offset < vec_size)
3611 vec = XEXP (vec, 0);
3612 else
3614 offset -= vec_size;
3615 vec = XEXP (vec, 1);
3617 vec = avoid_constant_pool_reference (vec);
3620 if (GET_MODE (vec) == mode)
3621 return vec;
3624 /* If we select elements in a vec_merge that all come from the same
3625 operand, select from that operand directly. */
3626 if (GET_CODE (op0) == VEC_MERGE)
3628 rtx trueop02 = avoid_constant_pool_reference (XEXP (op0, 2));
3629 if (CONST_INT_P (trueop02))
3631 unsigned HOST_WIDE_INT sel = UINTVAL (trueop02);
3632 bool all_operand0 = true;
3633 bool all_operand1 = true;
3634 for (int i = 0; i < XVECLEN (trueop1, 0); i++)
3636 rtx j = XVECEXP (trueop1, 0, i);
3637 if (sel & (1 << UINTVAL (j)))
3638 all_operand1 = false;
3639 else
3640 all_operand0 = false;
3642 if (all_operand0 && !side_effects_p (XEXP (op0, 1)))
3643 return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 0), op1);
3644 if (all_operand1 && !side_effects_p (XEXP (op0, 0)))
3645 return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 1), op1);
3649 /* If we have two nested selects that are inverses of each
3650 other, replace them with the source operand. */
3651 if (GET_CODE (trueop0) == VEC_SELECT
3652 && GET_MODE (XEXP (trueop0, 0)) == mode)
3654 rtx op0_subop1 = XEXP (trueop0, 1);
3655 gcc_assert (GET_CODE (op0_subop1) == PARALLEL);
3656 gcc_assert (XVECLEN (trueop1, 0) == GET_MODE_NUNITS (mode));
3658 /* Apply the outer ordering vector to the inner one. (The inner
3659 ordering vector is expressly permitted to be of a different
3660 length than the outer one.) If the result is { 0, 1, ..., n-1 }
3661 then the two VEC_SELECTs cancel. */
3662 for (int i = 0; i < XVECLEN (trueop1, 0); ++i)
3664 rtx x = XVECEXP (trueop1, 0, i);
3665 if (!CONST_INT_P (x))
3666 return 0;
3667 rtx y = XVECEXP (op0_subop1, 0, INTVAL (x));
3668 if (!CONST_INT_P (y) || i != INTVAL (y))
3669 return 0;
3671 return XEXP (trueop0, 0);
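/* For illustration, with X an arbitrary V4SI value:
   (vec_select:V4SI (vec_select:V4SI X (parallel [3 2 1 0]))
     (parallel [3 2 1 0]))
   composes to the identity permutation { 0, 1, 2, 3 } and collapses
   to X.  */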
3674 return 0;
3675 case VEC_CONCAT:
3677 machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
3678 ? GET_MODE (trueop0)
3679 : GET_MODE_INNER (mode));
3680 machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
3681 ? GET_MODE (trueop1)
3682 : GET_MODE_INNER (mode));
3684 gcc_assert (VECTOR_MODE_P (mode));
3685 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
3686 == GET_MODE_SIZE (mode));
3688 if (VECTOR_MODE_P (op0_mode))
3689 gcc_assert (GET_MODE_INNER (mode)
3690 == GET_MODE_INNER (op0_mode));
3691 else
3692 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
3694 if (VECTOR_MODE_P (op1_mode))
3695 gcc_assert (GET_MODE_INNER (mode)
3696 == GET_MODE_INNER (op1_mode));
3697 else
3698 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
3700 if ((GET_CODE (trueop0) == CONST_VECTOR
3701 || CONST_SCALAR_INT_P (trueop0)
3702 || CONST_DOUBLE_AS_FLOAT_P (trueop0))
3703 && (GET_CODE (trueop1) == CONST_VECTOR
3704 || CONST_SCALAR_INT_P (trueop1)
3705 || CONST_DOUBLE_AS_FLOAT_P (trueop1)))
3707 int elt_size = GET_MODE_UNIT_SIZE (mode);
3708 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3709 rtvec v = rtvec_alloc (n_elts);
3710 unsigned int i;
3711 unsigned in_n_elts = 1;
3713 if (VECTOR_MODE_P (op0_mode))
3714 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
3715 for (i = 0; i < n_elts; i++)
3717 if (i < in_n_elts)
3719 if (!VECTOR_MODE_P (op0_mode))
3720 RTVEC_ELT (v, i) = trueop0;
3721 else
3722 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
3724 else
3726 if (!VECTOR_MODE_P (op1_mode))
3727 RTVEC_ELT (v, i) = trueop1;
3728 else
3729 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
3730 i - in_n_elts);
3734 return gen_rtx_CONST_VECTOR (mode, v);
3737 /* Try to merge two VEC_SELECTs from the same vector into a single one.
3738 Restrict the transformation to avoid generating a VEC_SELECT with a
3739 mode unrelated to its operand. */
3740 if (GET_CODE (trueop0) == VEC_SELECT
3741 && GET_CODE (trueop1) == VEC_SELECT
3742 && rtx_equal_p (XEXP (trueop0, 0), XEXP (trueop1, 0))
3743 && GET_MODE (XEXP (trueop0, 0)) == mode)
3745 rtx par0 = XEXP (trueop0, 1);
3746 rtx par1 = XEXP (trueop1, 1);
3747 int len0 = XVECLEN (par0, 0);
3748 int len1 = XVECLEN (par1, 0);
3749 rtvec vec = rtvec_alloc (len0 + len1);
3750 for (int i = 0; i < len0; i++)
3751 RTVEC_ELT (vec, i) = XVECEXP (par0, 0, i);
3752 for (int i = 0; i < len1; i++)
3753 RTVEC_ELT (vec, len0 + i) = XVECEXP (par1, 0, i);
3754 return simplify_gen_binary (VEC_SELECT, mode, XEXP (trueop0, 0),
3755 gen_rtx_PARALLEL (VOIDmode, vec));
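/* For illustration, with X an arbitrary V4SI value:
   (vec_concat:V4SI (vec_select:V2SI X (parallel [0 1]))
                    (vec_select:V2SI X (parallel [3 2])))
   is merged into (vec_select:V4SI X (parallel [0 1 3 2])).  */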
3758 return 0;
3760 default:
3761 gcc_unreachable ();
3764 return 0;
3768 simplify_const_binary_operation (enum rtx_code code, machine_mode mode,
3769 rtx op0, rtx op1)
3771 unsigned int width = GET_MODE_PRECISION (mode);
3773 if (VECTOR_MODE_P (mode)
3774 && code != VEC_CONCAT
3775 && GET_CODE (op0) == CONST_VECTOR
3776 && GET_CODE (op1) == CONST_VECTOR)
3778 unsigned n_elts = GET_MODE_NUNITS (mode);
3779 machine_mode op0mode = GET_MODE (op0);
3780 unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
3781 machine_mode op1mode = GET_MODE (op1);
3782 unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
3783 rtvec v = rtvec_alloc (n_elts);
3784 unsigned int i;
3786 gcc_assert (op0_n_elts == n_elts);
3787 gcc_assert (op1_n_elts == n_elts);
3788 for (i = 0; i < n_elts; i++)
3790 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
3791 CONST_VECTOR_ELT (op0, i),
3792 CONST_VECTOR_ELT (op1, i));
3793 if (!x)
3794 return 0;
3795 RTVEC_ELT (v, i) = x;
3798 return gen_rtx_CONST_VECTOR (mode, v);
3801 if (VECTOR_MODE_P (mode)
3802 && code == VEC_CONCAT
3803 && (CONST_SCALAR_INT_P (op0)
3804 || GET_CODE (op0) == CONST_FIXED
3805 || CONST_DOUBLE_AS_FLOAT_P (op0))
3806 && (CONST_SCALAR_INT_P (op1)
3807 || CONST_DOUBLE_AS_FLOAT_P (op1)
3808 || GET_CODE (op1) == CONST_FIXED))
3810 unsigned n_elts = GET_MODE_NUNITS (mode);
3811 rtvec v = rtvec_alloc (n_elts);
3813 gcc_assert (n_elts >= 2);
3814 if (n_elts == 2)
3816 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
3817 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
3819 RTVEC_ELT (v, 0) = op0;
3820 RTVEC_ELT (v, 1) = op1;
3822 else
3824 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
3825 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
3826 unsigned i;
3828 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
3829 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
3830 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
3832 for (i = 0; i < op0_n_elts; ++i)
3833 RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
3834 for (i = 0; i < op1_n_elts; ++i)
3835 RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
3838 return gen_rtx_CONST_VECTOR (mode, v);
3841 if (SCALAR_FLOAT_MODE_P (mode)
3842 && CONST_DOUBLE_AS_FLOAT_P (op0)
3843 && CONST_DOUBLE_AS_FLOAT_P (op1)
3844 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
3846 if (code == AND
3847 || code == IOR
3848 || code == XOR)
3850 long tmp0[4];
3851 long tmp1[4];
3852 REAL_VALUE_TYPE r;
3853 int i;
3855 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
3856 GET_MODE (op0));
3857 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
3858 GET_MODE (op1));
3859 for (i = 0; i < 4; i++)
3861 switch (code)
3863 case AND:
3864 tmp0[i] &= tmp1[i];
3865 break;
3866 case IOR:
3867 tmp0[i] |= tmp1[i];
3868 break;
3869 case XOR:
3870 tmp0[i] ^= tmp1[i];
3871 break;
3872 default:
3873 gcc_unreachable ();
3876 real_from_target (&r, tmp0, mode);
3877 return const_double_from_real_value (r, mode);
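/* For illustration: in SFmode, (ior 1.0 -0.0) is folded by OR-ing the
   IEEE bit images 0x3f800000 | 0x80000000, yielding -1.0; this is the
   kind of constant produced by sign-bit manipulation through
   IOR/XOR/AND masks.  */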
3879 else
3881 REAL_VALUE_TYPE f0, f1, value, result;
3882 bool inexact;
3884 real_convert (&f0, mode, CONST_DOUBLE_REAL_VALUE (op0));
3885 real_convert (&f1, mode, CONST_DOUBLE_REAL_VALUE (op1));
3887 if (HONOR_SNANS (mode)
3888 && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
3889 return 0;
3891 if (code == DIV
3892 && real_equal (&f1, &dconst0)
3893 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
3894 return 0;
3896 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3897 && flag_trapping_math
3898 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
3900 int s0 = REAL_VALUE_NEGATIVE (f0);
3901 int s1 = REAL_VALUE_NEGATIVE (f1);
3903 switch (code)
3905 case PLUS:
3906 /* Inf + -Inf = NaN plus exception. */
3907 if (s0 != s1)
3908 return 0;
3909 break;
3910 case MINUS:
3911 /* Inf - Inf = NaN plus exception. */
3912 if (s0 == s1)
3913 return 0;
3914 break;
3915 case DIV:
3916 /* Inf / Inf = NaN plus exception. */
3917 return 0;
3918 default:
3919 break;
3923 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3924 && flag_trapping_math
3925 && ((REAL_VALUE_ISINF (f0) && real_equal (&f1, &dconst0))
3926 || (REAL_VALUE_ISINF (f1)
3927 && real_equal (&f0, &dconst0))))
3928 /* Inf * 0 = NaN plus exception. */
3929 return 0;
3931 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
3932 &f0, &f1);
3933 real_convert (&result, mode, &value);
3935 /* Don't constant fold this floating point operation if
3936 the result has overflowed and flag_trapping_math is set. */
3938 if (flag_trapping_math
3939 && MODE_HAS_INFINITIES (mode)
3940 && REAL_VALUE_ISINF (result)
3941 && !REAL_VALUE_ISINF (f0)
3942 && !REAL_VALUE_ISINF (f1))
3943 /* Overflow plus exception. */
3944 return 0;
3946 /* Don't constant fold this floating point operation if the
3947 result may depend upon the run-time rounding mode and
3948 flag_rounding_math is set, or if GCC's software emulation
3949 is unable to accurately represent the result. */
3951 if ((flag_rounding_math
3952 || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
3953 && (inexact || !real_identical (&result, &value)))
3954 return NULL_RTX;
3956 return const_double_from_real_value (result, mode);
3960 /* We can fold some multi-word operations. */
3961 if ((GET_MODE_CLASS (mode) == MODE_INT
3962 || GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
3963 && CONST_SCALAR_INT_P (op0)
3964 && CONST_SCALAR_INT_P (op1))
3966 wide_int result;
3967 bool overflow;
3968 rtx_mode_t pop0 = std::make_pair (op0, mode);
3969 rtx_mode_t pop1 = std::make_pair (op1, mode);
3971 #if TARGET_SUPPORTS_WIDE_INT == 0
3972 /* This assert keeps the simplification from producing a result
3973 that cannot be represented in a CONST_DOUBLE, but a lot of
3974 upstream callers expect that this function never fails to
3975 simplify something, and so if you added this to the test
3976 above, the code would die later anyway. If this assert
3977 happens, you just need to make the port support wide int. */
3978 gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT);
3979 #endif
3980 switch (code)
3982 case MINUS:
3983 result = wi::sub (pop0, pop1);
3984 break;
3986 case PLUS:
3987 result = wi::add (pop0, pop1);
3988 break;
3990 case MULT:
3991 result = wi::mul (pop0, pop1);
3992 break;
3994 case DIV:
3995 result = wi::div_trunc (pop0, pop1, SIGNED, &overflow);
3996 if (overflow)
3997 return NULL_RTX;
3998 break;
4000 case MOD:
4001 result = wi::mod_trunc (pop0, pop1, SIGNED, &overflow);
4002 if (overflow)
4003 return NULL_RTX;
4004 break;
4006 case UDIV:
4007 result = wi::div_trunc (pop0, pop1, UNSIGNED, &overflow);
4008 if (overflow)
4009 return NULL_RTX;
4010 break;
4012 case UMOD:
4013 result = wi::mod_trunc (pop0, pop1, UNSIGNED, &overflow);
4014 if (overflow)
4015 return NULL_RTX;
4016 break;
4018 case AND:
4019 result = wi::bit_and (pop0, pop1);
4020 break;
4022 case IOR:
4023 result = wi::bit_or (pop0, pop1);
4024 break;
4026 case XOR:
4027 result = wi::bit_xor (pop0, pop1);
4028 break;
4030 case SMIN:
4031 result = wi::smin (pop0, pop1);
4032 break;
4034 case SMAX:
4035 result = wi::smax (pop0, pop1);
4036 break;
4038 case UMIN:
4039 result = wi::umin (pop0, pop1);
4040 break;
4042 case UMAX:
4043 result = wi::umax (pop0, pop1);
4044 break;
4046 case LSHIFTRT:
4047 case ASHIFTRT:
4048 case ASHIFT:
4050 wide_int wop1 = pop1;
4051 if (SHIFT_COUNT_TRUNCATED)
4052 wop1 = wi::umod_trunc (wop1, width);
4053 else if (wi::geu_p (wop1, width))
4054 return NULL_RTX;
4056 switch (code)
4058 case LSHIFTRT:
4059 result = wi::lrshift (pop0, wop1);
4060 break;
4062 case ASHIFTRT:
4063 result = wi::arshift (pop0, wop1);
4064 break;
4066 case ASHIFT:
4067 result = wi::lshift (pop0, wop1);
4068 break;
4070 default:
4071 gcc_unreachable ();
4073 break;
4075 case ROTATE:
4076 case ROTATERT:
4078 if (wi::neg_p (pop1))
4079 return NULL_RTX;
4081 switch (code)
4083 case ROTATE:
4084 result = wi::lrotate (pop0, pop1);
4085 break;
4087 case ROTATERT:
4088 result = wi::rrotate (pop0, pop1);
4089 break;
4091 default:
4092 gcc_unreachable ();
4094 break;
4096 default:
4097 return NULL_RTX;
4099 return immed_wide_int_const (result, mode);
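/* For illustration: (ashift:SI (const_int 1) (const_int 4)) folds to
   (const_int 16) via wi::lshift, and (udiv:SI (const_int -1)
   (const_int 2)) folds to (const_int 0x7fffffff), since the wide-int
   routines interpret the bits with the requested signedness.  */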
4102 return NULL_RTX;
4107 /* Return a positive integer if X should sort after Y. The value
4108 returned is 1 if and only if X and Y are both regs. */
4110 static int
4111 simplify_plus_minus_op_data_cmp (rtx x, rtx y)
4113 int result;
4115 result = (commutative_operand_precedence (y)
4116 - commutative_operand_precedence (x));
4117 if (result)
4118 return result + result;
4120 /* Group together equal REGs to do more simplification. */
4121 if (REG_P (x) && REG_P (y))
4122 return REGNO (x) > REGNO (y);
4124 return 0;
4127 /* Simplify and canonicalize a PLUS or MINUS, at least one of whose
4128 operands may be another PLUS or MINUS.
4130 Rather than test for specific cases, we do this by a brute-force method
4131 and do all possible simplifications until no more changes occur. Then
4132 we rebuild the operation.
4134 May return NULL_RTX when no changes were made. */
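/* A sketch of how this proceeds, with A, B and C arbitrary non-constant
   SImode operands: (minus:SI (plus:SI A B) (plus:SI C A)) is expanded
   into the operand list { +A, +B, -C, -A }; the pairwise loop cancels
   A against -A, and the result is rebuilt as (minus:SI B C).  */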
4136 static rtx
4137 simplify_plus_minus (enum rtx_code code, machine_mode mode, rtx op0,
4138 rtx op1)
4140 struct simplify_plus_minus_op_data
4142 rtx op;
4143 short neg;
4144 } ops[16];
4145 rtx result, tem;
4146 int n_ops = 2;
4147 int changed, n_constants, canonicalized = 0;
4148 int i, j;
4150 memset (ops, 0, sizeof ops);
4152 /* Set up the two operands and then expand them until nothing has been
4153 changed. If we run out of room in our array, give up; this should
4154 almost never happen. */
4156 ops[0].op = op0;
4157 ops[0].neg = 0;
4158 ops[1].op = op1;
4159 ops[1].neg = (code == MINUS);
4163 changed = 0;
4164 n_constants = 0;
4166 for (i = 0; i < n_ops; i++)
4168 rtx this_op = ops[i].op;
4169 int this_neg = ops[i].neg;
4170 enum rtx_code this_code = GET_CODE (this_op);
4172 switch (this_code)
4174 case PLUS:
4175 case MINUS:
4176 if (n_ops == ARRAY_SIZE (ops))
4177 return NULL_RTX;
4179 ops[n_ops].op = XEXP (this_op, 1);
4180 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
4181 n_ops++;
4183 ops[i].op = XEXP (this_op, 0);
4184 changed = 1;
4185 /* If this operand was negated then we will potentially
4186 canonicalize the expression. Similarly if we don't
4187 place the operands adjacent we're re-ordering the
4188 expression and thus might be performing a
4189 canonicalization. Ignore register re-ordering.
4190 ??? It might be better to shuffle the ops array here,
4191 but then (plus (plus (A, B), plus (C, D))) wouldn't
4192 be seen as non-canonical. */
4193 if (this_neg
4194 || (i != n_ops - 2
4195 && !(REG_P (ops[i].op) && REG_P (ops[n_ops - 1].op))))
4196 canonicalized = 1;
4197 break;
4199 case NEG:
4200 ops[i].op = XEXP (this_op, 0);
4201 ops[i].neg = ! this_neg;
4202 changed = 1;
4203 canonicalized = 1;
4204 break;
4206 case CONST:
4207 if (n_ops != ARRAY_SIZE (ops)
4208 && GET_CODE (XEXP (this_op, 0)) == PLUS
4209 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
4210 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
4212 ops[i].op = XEXP (XEXP (this_op, 0), 0);
4213 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
4214 ops[n_ops].neg = this_neg;
4215 n_ops++;
4216 changed = 1;
4217 canonicalized = 1;
4219 break;
4221 case NOT:
4222 /* ~a -> (-a - 1) */
4223 if (n_ops != ARRAY_SIZE (ops))
4225 ops[n_ops].op = CONSTM1_RTX (mode);
4226 ops[n_ops++].neg = this_neg;
4227 ops[i].op = XEXP (this_op, 0);
4228 ops[i].neg = !this_neg;
4229 changed = 1;
4230 canonicalized = 1;
4232 break;
4234 case CONST_INT:
4235 n_constants++;
4236 if (this_neg)
4238 ops[i].op = neg_const_int (mode, this_op);
4239 ops[i].neg = 0;
4240 changed = 1;
4241 canonicalized = 1;
4243 break;
4245 default:
4246 break;
4250 while (changed);
4252 if (n_constants > 1)
4253 canonicalized = 1;
4255 gcc_assert (n_ops >= 2);
4257 /* If we only have two operands, we can avoid the loops. */
4258 if (n_ops == 2)
4260 enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
4261 rtx lhs, rhs;
4263 /* Get the two operands. Be careful with the order, especially for
4264 the cases where code == MINUS. */
4265 if (ops[0].neg && ops[1].neg)
4267 lhs = gen_rtx_NEG (mode, ops[0].op);
4268 rhs = ops[1].op;
4270 else if (ops[0].neg)
4272 lhs = ops[1].op;
4273 rhs = ops[0].op;
4275 else
4277 lhs = ops[0].op;
4278 rhs = ops[1].op;
4281 return simplify_const_binary_operation (code, mode, lhs, rhs);
4284 /* Now simplify each pair of operands until nothing changes. */
4285 while (1)
4287 /* Insertion sort is good enough for a small array. */
4288 for (i = 1; i < n_ops; i++)
4290 struct simplify_plus_minus_op_data save;
4291 int cmp;
4293 j = i - 1;
4294 cmp = simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op);
4295 if (cmp <= 0)
4296 continue;
4297 /* Just swapping registers doesn't count as canonicalization. */
4298 if (cmp != 1)
4299 canonicalized = 1;
4301 save = ops[i];
4303 ops[j + 1] = ops[j];
4304 while (j--
4305 && simplify_plus_minus_op_data_cmp (ops[j].op, save.op) > 0);
4306 ops[j + 1] = save;
4309 changed = 0;
4310 for (i = n_ops - 1; i > 0; i--)
4311 for (j = i - 1; j >= 0; j--)
4313 rtx lhs = ops[j].op, rhs = ops[i].op;
4314 int lneg = ops[j].neg, rneg = ops[i].neg;
4316 if (lhs != 0 && rhs != 0)
4318 enum rtx_code ncode = PLUS;
4320 if (lneg != rneg)
4322 ncode = MINUS;
4323 if (lneg)
4324 std::swap (lhs, rhs);
4326 else if (swap_commutative_operands_p (lhs, rhs))
4327 std::swap (lhs, rhs);
4329 if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
4330 && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
4332 rtx tem_lhs, tem_rhs;
4334 tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
4335 tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
4336 tem = simplify_binary_operation (ncode, mode, tem_lhs,
4337 tem_rhs);
4339 if (tem && !CONSTANT_P (tem))
4340 tem = gen_rtx_CONST (GET_MODE (tem), tem);
4342 else
4343 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
4345 if (tem)
4347 /* Reject "simplifications" that just wrap the two
4348 arguments in a CONST. Failure to do so can result
4349 in infinite recursion with simplify_binary_operation
4350 when it calls us to simplify CONST operations.
4351 Also, if we find such a simplification, don't try
4352 any more combinations with this rhs: We must have
4353 something like symbol+offset, i.e. one of the
4354 trivial CONST expressions we handle later. */
4355 if (GET_CODE (tem) == CONST
4356 && GET_CODE (XEXP (tem, 0)) == ncode
4357 && XEXP (XEXP (tem, 0), 0) == lhs
4358 && XEXP (XEXP (tem, 0), 1) == rhs)
4359 break;
4360 lneg &= rneg;
4361 if (GET_CODE (tem) == NEG)
4362 tem = XEXP (tem, 0), lneg = !lneg;
4363 if (CONST_INT_P (tem) && lneg)
4364 tem = neg_const_int (mode, tem), lneg = 0;
4366 ops[i].op = tem;
4367 ops[i].neg = lneg;
4368 ops[j].op = NULL_RTX;
4369 changed = 1;
4370 canonicalized = 1;
4375 if (!changed)
4376 break;
4378 /* Pack all the operands to the lower-numbered entries. */
4379 for (i = 0, j = 0; j < n_ops; j++)
4380 if (ops[j].op)
4382 ops[i] = ops[j];
4383 i++;
4385 n_ops = i;
4388 /* If nothing changed, fail. */
4389 if (!canonicalized)
4390 return NULL_RTX;
4392 /* Create (minus -C X) instead of (neg (const (plus X C))). */
4393 if (n_ops == 2
4394 && CONST_INT_P (ops[1].op)
4395 && CONSTANT_P (ops[0].op)
4396 && ops[0].neg)
4397 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
4399 /* We suppressed creation of trivial CONST expressions in the
4400 combination loop to avoid recursion. Create one manually now.
4401 The combination loop should have ensured that there is exactly
4402 one CONST_INT, and the sort will have ensured that it is last
4403 in the array and that any other constant will be next-to-last. */
4405 if (n_ops > 1
4406 && CONST_INT_P (ops[n_ops - 1].op)
4407 && CONSTANT_P (ops[n_ops - 2].op))
4409 rtx value = ops[n_ops - 1].op;
4410 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
4411 value = neg_const_int (mode, value);
4412 ops[n_ops - 2].op = plus_constant (mode, ops[n_ops - 2].op,
4413 INTVAL (value));
4414 n_ops--;
4417 /* Put a non-negated operand first, if possible. */
4419 for (i = 0; i < n_ops && ops[i].neg; i++)
4420 continue;
4421 if (i == n_ops)
4422 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
4423 else if (i != 0)
4425 tem = ops[0].op;
4426 ops[0] = ops[i];
4427 ops[i].op = tem;
4428 ops[i].neg = 1;
4431 /* Now make the result by performing the requested operations. */
4432 result = ops[0].op;
4433 for (i = 1; i < n_ops; i++)
4434 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
4435 mode, result, ops[i].op);
4437 return result;
4440 /* Check whether an operand is suitable for calling simplify_plus_minus. */
4441 static bool
4442 plus_minus_operand_p (const_rtx x)
4444 return GET_CODE (x) == PLUS
4445 || GET_CODE (x) == MINUS
4446 || (GET_CODE (x) == CONST
4447 && GET_CODE (XEXP (x, 0)) == PLUS
4448 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
4449 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
4452 /* Like simplify_binary_operation except used for relational operators.
4453 MODE is the mode of the result. If MODE is VOIDmode, the operands
4454 must not also both be VOIDmode.
4456 CMP_MODE specifies the mode in which the comparison is done, so it is
4457 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
4458 the operands or, if both are VOIDmode, the operands are compared in
4459 "infinite precision". */
4461 simplify_relational_operation (enum rtx_code code, machine_mode mode,
4462 machine_mode cmp_mode, rtx op0, rtx op1)
4464 rtx tem, trueop0, trueop1;
4466 if (cmp_mode == VOIDmode)
4467 cmp_mode = GET_MODE (op0);
4468 if (cmp_mode == VOIDmode)
4469 cmp_mode = GET_MODE (op1);
4471 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
4472 if (tem)
4474 if (SCALAR_FLOAT_MODE_P (mode))
4476 if (tem == const0_rtx)
4477 return CONST0_RTX (mode);
4478 #ifdef FLOAT_STORE_FLAG_VALUE
4480 REAL_VALUE_TYPE val;
4481 val = FLOAT_STORE_FLAG_VALUE (mode);
4482 return const_double_from_real_value (val, mode);
4484 #else
4485 return NULL_RTX;
4486 #endif
4488 if (VECTOR_MODE_P (mode))
4490 if (tem == const0_rtx)
4491 return CONST0_RTX (mode);
4492 #ifdef VECTOR_STORE_FLAG_VALUE
4494 int i, units;
4495 rtvec v;
4497 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
4498 if (val == NULL_RTX)
4499 return NULL_RTX;
4500 if (val == const1_rtx)
4501 return CONST1_RTX (mode);
4503 units = GET_MODE_NUNITS (mode);
4504 v = rtvec_alloc (units);
4505 for (i = 0; i < units; i++)
4506 RTVEC_ELT (v, i) = val;
4507 return gen_rtx_raw_CONST_VECTOR (mode, v);
4509 #else
4510 return NULL_RTX;
4511 #endif
4514 return tem;
4517 /* For the following tests, ensure const0_rtx is op1. */
4518 if (swap_commutative_operands_p (op0, op1)
4519 || (op0 == const0_rtx && op1 != const0_rtx))
4520 std::swap (op0, op1), code = swap_condition (code);
4522 /* If op0 is a compare, extract the comparison arguments from it. */
4523 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4524 return simplify_gen_relational (code, mode, VOIDmode,
4525 XEXP (op0, 0), XEXP (op0, 1));
4527 if (GET_MODE_CLASS (cmp_mode) == MODE_CC
4528 || CC0_P (op0))
4529 return NULL_RTX;
4531 trueop0 = avoid_constant_pool_reference (op0);
4532 trueop1 = avoid_constant_pool_reference (op1);
4533 return simplify_relational_operation_1 (code, mode, cmp_mode,
4534 trueop0, trueop1);
4537 /* This part of simplify_relational_operation is only used when CMP_MODE
4538 is not in class MODE_CC (i.e. it is a real comparison).
4540 MODE is the mode of the result, while CMP_MODE specifies the mode
4541 in which the comparison is done, so it is the mode of the operands.
4543 static rtx
4544 simplify_relational_operation_1 (enum rtx_code code, machine_mode mode,
4545 machine_mode cmp_mode, rtx op0, rtx op1)
4547 enum rtx_code op0code = GET_CODE (op0);
4549 if (op1 == const0_rtx && COMPARISON_P (op0))
4551 /* If op0 is a comparison, extract the comparison arguments
4552 from it. */
4553 if (code == NE)
4555 if (GET_MODE (op0) == mode)
4556 return simplify_rtx (op0);
4557 else
4558 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
4559 XEXP (op0, 0), XEXP (op0, 1));
4561 else if (code == EQ)
4563 enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
4564 if (new_code != UNKNOWN)
4565 return simplify_gen_relational (new_code, mode, VOIDmode,
4566 XEXP (op0, 0), XEXP (op0, 1));
4570 /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
4571 (GEU/LTU a -C). Likewise for (LTU/GEU (PLUS a C) a). */
4572 if ((code == LTU || code == GEU)
4573 && GET_CODE (op0) == PLUS
4574 && CONST_INT_P (XEXP (op0, 1))
4575 && (rtx_equal_p (op1, XEXP (op0, 0))
4576 || rtx_equal_p (op1, XEXP (op0, 1)))
4577 /* (LTU/GEU (PLUS a 0) 0) is not the same as (GEU/LTU a 0). */
4578 && XEXP (op0, 1) != const0_rtx)
4580 rtx new_cmp
4581 = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
4582 return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
4583 cmp_mode, XEXP (op0, 0), new_cmp);
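/* For illustration, in QImode: (ltu (plus:QI a (const_int 1))
   (const_int 1)) becomes (geu a (const_int -1)); the sum wraps below 1
   exactly when a is 255, i.e. (unsigned) -1.  */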
4586 /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a). */
4587 if ((code == LTU || code == GEU)
4588 && GET_CODE (op0) == PLUS
4589 && rtx_equal_p (op1, XEXP (op0, 1))
4590 /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b). */
4591 && !rtx_equal_p (op1, XEXP (op0, 0)))
4592 return simplify_gen_relational (code, mode, cmp_mode, op0,
4593 copy_rtx (XEXP (op0, 0)));
4595 if (op1 == const0_rtx)
4597 /* Canonicalize (GTU x 0) as (NE x 0). */
4598 if (code == GTU)
4599 return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
4600 /* Canonicalize (LEU x 0) as (EQ x 0). */
4601 if (code == LEU)
4602 return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
4604 else if (op1 == const1_rtx)
4606 switch (code)
4608 case GE:
4609 /* Canonicalize (GE x 1) as (GT x 0). */
4610 return simplify_gen_relational (GT, mode, cmp_mode,
4611 op0, const0_rtx);
4612 case GEU:
4613 /* Canonicalize (GEU x 1) as (NE x 0). */
4614 return simplify_gen_relational (NE, mode, cmp_mode,
4615 op0, const0_rtx);
4616 case LT:
4617 /* Canonicalize (LT x 1) as (LE x 0). */
4618 return simplify_gen_relational (LE, mode, cmp_mode,
4619 op0, const0_rtx);
4620 case LTU:
4621 /* Canonicalize (LTU x 1) as (EQ x 0). */
4622 return simplify_gen_relational (EQ, mode, cmp_mode,
4623 op0, const0_rtx);
4624 default:
4625 break;
4628 else if (op1 == constm1_rtx)
4630 /* Canonicalize (LE x -1) as (LT x 0). */
4631 if (code == LE)
4632 return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
4633 /* Canonicalize (GT x -1) as (GE x 0). */
4634 if (code == GT)
4635 return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
4638 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
4639 if ((code == EQ || code == NE)
4640 && (op0code == PLUS || op0code == MINUS)
4641 && CONSTANT_P (op1)
4642 && CONSTANT_P (XEXP (op0, 1))
4643 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
4645 rtx x = XEXP (op0, 0);
4646 rtx c = XEXP (op0, 1);
4647 enum rtx_code invcode = op0code == PLUS ? MINUS : PLUS;
4648 rtx tem = simplify_gen_binary (invcode, cmp_mode, op1, c);
4650 /* Detect an infinitely recursive condition, where we oscillate at this
4651 simplification case between:
4652 A + B == C <---> C - B == A,
4653 where A, B, and C are all non-simplifiable constant expressions,
4654 usually SYMBOL_REFs. */
4655 if (GET_CODE (tem) == invcode
4656 && CONSTANT_P (x)
4657 && rtx_equal_p (c, XEXP (tem, 1)))
4658 return NULL_RTX;
4660 return simplify_gen_relational (code, mode, cmp_mode, x, tem);
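/* For illustration: (eq (plus:SI x (const_int 4)) (const_int 10))
   becomes (eq x (const_int 6)).  */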
4663 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0))) is
4664 the same as (zero_extract:SI FOO (const_int 1) BAR). */
4665 if (code == NE
4666 && op1 == const0_rtx
4667 && GET_MODE_CLASS (mode) == MODE_INT
4668 && cmp_mode != VOIDmode
4669 /* ??? Work-around BImode bugs in the ia64 backend. */
4670 && mode != BImode
4671 && cmp_mode != BImode
4672 && nonzero_bits (op0, cmp_mode) == 1
4673 && STORE_FLAG_VALUE == 1)
4674 return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
4675 ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
4676 : lowpart_subreg (mode, op0, cmp_mode);
4678 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
4679 if ((code == EQ || code == NE)
4680 && op1 == const0_rtx
4681 && op0code == XOR)
4682 return simplify_gen_relational (code, mode, cmp_mode,
4683 XEXP (op0, 0), XEXP (op0, 1));
4685 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
4686 if ((code == EQ || code == NE)
4687 && op0code == XOR
4688 && rtx_equal_p (XEXP (op0, 0), op1)
4689 && !side_effects_p (XEXP (op0, 0)))
4690 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 1),
4691 CONST0_RTX (mode));
4693 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
4694 if ((code == EQ || code == NE)
4695 && op0code == XOR
4696 && rtx_equal_p (XEXP (op0, 1), op1)
4697 && !side_effects_p (XEXP (op0, 1)))
4698 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4699 CONST0_RTX (mode));
4701 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
4702 if ((code == EQ || code == NE)
4703 && op0code == XOR
4704 && CONST_SCALAR_INT_P (op1)
4705 && CONST_SCALAR_INT_P (XEXP (op0, 1)))
4706 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4707 simplify_gen_binary (XOR, cmp_mode,
4708 XEXP (op0, 1), op1));
4710 /* (eq/ne (and x y) x) simplifies to (eq/ne (and (not y) x) 0), which
4711 can be implemented with a BICS instruction on some targets, or
4712 constant-folded if y is a constant. */
4713 if ((code == EQ || code == NE)
4714 && op0code == AND
4715 && rtx_equal_p (XEXP (op0, 0), op1)
4716 && !side_effects_p (op1)
4717 && op1 != CONST0_RTX (cmp_mode))
4719 rtx not_y = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 1), cmp_mode);
4720 rtx lhs = simplify_gen_binary (AND, cmp_mode, not_y, XEXP (op0, 0));
4722 return simplify_gen_relational (code, mode, cmp_mode, lhs,
4723 CONST0_RTX (cmp_mode));
4726 /* Likewise for (eq/ne (and x y) y). */
4727 if ((code == EQ || code == NE)
4728 && op0code == AND
4729 && rtx_equal_p (XEXP (op0, 1), op1)
4730 && !side_effects_p (op1)
4731 && op1 != CONST0_RTX (cmp_mode))
4733 rtx not_x = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 0), cmp_mode);
4734 rtx lhs = simplify_gen_binary (AND, cmp_mode, not_x, XEXP (op0, 1));
4736 return simplify_gen_relational (code, mode, cmp_mode, lhs,
4737 CONST0_RTX (cmp_mode));
4740 /* (eq/ne (bswap x) C1) simplifies to (eq/ne x C2) with C2 swapped. */
4741 if ((code == EQ || code == NE)
4742 && GET_CODE (op0) == BSWAP
4743 && CONST_SCALAR_INT_P (op1))
4744 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4745 simplify_gen_unary (BSWAP, cmp_mode,
4746 op1, cmp_mode));
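/* For illustration: (eq (bswap:SI x) (const_int 0x12345678)) becomes
   (eq x (const_int 0x78563412)), byte-swapping the constant instead of
   the variable operand.  */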
4748 /* (eq/ne (bswap x) (bswap y)) simplifies to (eq/ne x y). */
4749 if ((code == EQ || code == NE)
4750 && GET_CODE (op0) == BSWAP
4751 && GET_CODE (op1) == BSWAP)
4752 return simplify_gen_relational (code, mode, cmp_mode,
4753 XEXP (op0, 0), XEXP (op1, 0));
4755 if (op0code == POPCOUNT && op1 == const0_rtx)
4756 switch (code)
4758 case EQ:
4759 case LE:
4760 case LEU:
4761 /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */
4762 return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
4763 XEXP (op0, 0), const0_rtx);
4765 case NE:
4766 case GT:
4767 case GTU:
4768 /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */
4769 return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
4770 XEXP (op0, 0), const0_rtx);
4772 default:
4773 break;
4776 return NULL_RTX;
4779 enum
4781 CMP_EQ = 1,
4782 CMP_LT = 2,
4783 CMP_GT = 4,
4784 CMP_LTU = 8,
4785 CMP_GTU = 16
4789 /* Convert the known results for EQ, LT, GT, LTU, GTU contained in
4790 KNOWN_RESULT to a CONST_INT, based on the requested comparison CODE.
4791 For KNOWN_RESULT to make sense, it should be either CMP_EQ, or the
4792 logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
4793 For floating-point comparisons, assume that the operands were ordered. */
4795 static rtx
4796 comparison_result (enum rtx_code code, int known_results)
4798 switch (code)
4800 case EQ:
4801 case UNEQ:
4802 return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
4803 case NE:
4804 case LTGT:
4805 return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;
4807 case LT:
4808 case UNLT:
4809 return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
4810 case GE:
4811 case UNGE:
4812 return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;
4814 case GT:
4815 case UNGT:
4816 return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
4817 case LE:
4818 case UNLE:
4819 return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;
4821 case LTU:
4822 return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
4823 case GEU:
4824 return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;
4826 case GTU:
4827 return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
4828 case LEU:
4829 return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;
4831 case ORDERED:
4832 return const_true_rtx;
4833 case UNORDERED:
4834 return const0_rtx;
4835 default:
4836 gcc_unreachable ();
4840 /* Check if the given comparison (done in the given MODE) is actually
4841 a tautology or a contradiction. If the mode is VOIDmode, the
4842 comparison is done in "infinite precision". If no simplification
4843 is possible, this function returns zero. Otherwise, it returns
4844 either const_true_rtx or const0_rtx. */
4847 simplify_const_relational_operation (enum rtx_code code,
4848 machine_mode mode,
4849 rtx op0, rtx op1)
4851 rtx tem;
4852 rtx trueop0;
4853 rtx trueop1;
4855 gcc_assert (mode != VOIDmode
4856 || (GET_MODE (op0) == VOIDmode
4857 && GET_MODE (op1) == VOIDmode));
4859 /* If op0 is a compare, extract the comparison arguments from it. */
4860 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4862 op1 = XEXP (op0, 1);
4863 op0 = XEXP (op0, 0);
4865 if (GET_MODE (op0) != VOIDmode)
4866 mode = GET_MODE (op0);
4867 else if (GET_MODE (op1) != VOIDmode)
4868 mode = GET_MODE (op1);
4869 else
4870 return 0;
4873 /* We can't simplify MODE_CC values since we don't know what the
4874 actual comparison is. */
4875 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
4876 return 0;
4878 /* Make sure the constant is second. */
4879 if (swap_commutative_operands_p (op0, op1))
4881 std::swap (op0, op1);
4882 code = swap_condition (code);
4885 trueop0 = avoid_constant_pool_reference (op0);
4886 trueop1 = avoid_constant_pool_reference (op1);
4888 /* For integer comparisons of A and B maybe we can simplify A - B and can
4889 then simplify a comparison of that with zero. If A and B are both either
4890 a register or a CONST_INT, this can't help; testing for these cases will
4891 prevent infinite recursion here and speed things up.
4893 We can only do this for EQ and NE comparisons, as otherwise we may
4894 lose or introduce overflow that we cannot disregard as undefined, since
4895 we do not know the signedness of the operation on either the left or
4896 the right-hand side of the comparison. */
4898 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
4899 && (code == EQ || code == NE)
4900 && ! ((REG_P (op0) || CONST_INT_P (trueop0))
4901 && (REG_P (op1) || CONST_INT_P (trueop1)))
4902 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
4903 /* We cannot do this if tem is a nonzero address. */
4904 && ! nonzero_address_p (tem))
4905 return simplify_const_relational_operation (signed_condition (code),
4906 mode, tem, const0_rtx);
4908 if (! HONOR_NANS (mode) && code == ORDERED)
4909 return const_true_rtx;
4911 if (! HONOR_NANS (mode) && code == UNORDERED)
4912 return const0_rtx;
4914 /* For modes without NaNs, if the two operands are equal, we know the
4915 result except if they have side-effects. Even with NaNs we know
4916 the result of unordered comparisons and, if signaling NaNs are
4917 irrelevant, also the result of LT/GT/LTGT. */
4918 if ((! HONOR_NANS (trueop0)
4919 || code == UNEQ || code == UNLE || code == UNGE
4920 || ((code == LT || code == GT || code == LTGT)
4921 && ! HONOR_SNANS (trueop0)))
4922 && rtx_equal_p (trueop0, trueop1)
4923 && ! side_effects_p (trueop0))
4924 return comparison_result (code, CMP_EQ);
4926 /* If the operands are floating-point constants, see if we can fold
4927 the result. */
4928 if (CONST_DOUBLE_AS_FLOAT_P (trueop0)
4929 && CONST_DOUBLE_AS_FLOAT_P (trueop1)
4930 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
4932 const REAL_VALUE_TYPE *d0 = CONST_DOUBLE_REAL_VALUE (trueop0);
4933 const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);
4935 /* Comparisons are unordered iff at least one of the values is NaN. */
4936 if (REAL_VALUE_ISNAN (*d0) || REAL_VALUE_ISNAN (*d1))
4937 switch (code)
4939 case UNEQ:
4940 case UNLT:
4941 case UNGT:
4942 case UNLE:
4943 case UNGE:
4944 case NE:
4945 case UNORDERED:
4946 return const_true_rtx;
4947 case EQ:
4948 case LT:
4949 case GT:
4950 case LE:
4951 case GE:
4952 case LTGT:
4953 case ORDERED:
4954 return const0_rtx;
4955 default:
4956 return 0;
4959 return comparison_result (code,
4960 (real_equal (d0, d1) ? CMP_EQ :
4961 real_less (d0, d1) ? CMP_LT : CMP_GT));
4964 /* Otherwise, see if the operands are both integers. */
4965 if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
4966 && CONST_SCALAR_INT_P (trueop0) && CONST_SCALAR_INT_P (trueop1))
4968 /* It would be nice if we really had a mode here. However, the
4969 largest int representable on the target is as good as
4970 infinite. */
4971 machine_mode cmode = (mode == VOIDmode) ? MAX_MODE_INT : mode;
4972 rtx_mode_t ptrueop0 = std::make_pair (trueop0, cmode);
4973 rtx_mode_t ptrueop1 = std::make_pair (trueop1, cmode);
4975 if (wi::eq_p (ptrueop0, ptrueop1))
4976 return comparison_result (code, CMP_EQ);
4977 else
4979 int cr = wi::lts_p (ptrueop0, ptrueop1) ? CMP_LT : CMP_GT;
4980 cr |= wi::ltu_p (ptrueop0, ptrueop1) ? CMP_LTU : CMP_GTU;
4981 return comparison_result (code, cr);
4985 /* Optimize comparisons with upper and lower bounds. */
4986 if (HWI_COMPUTABLE_MODE_P (mode)
4987 && CONST_INT_P (trueop1)
4988 && !side_effects_p (trueop0))
4990 int sign;
4991 unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, mode);
4992 HOST_WIDE_INT val = INTVAL (trueop1);
4993 HOST_WIDE_INT mmin, mmax;
4995 if (code == GEU
4996 || code == LEU
4997 || code == GTU
4998 || code == LTU)
4999 sign = 0;
5000 else
5001 sign = 1;
5003 /* Get a reduced range if the sign bit is zero. */
5004 if (nonzero <= (GET_MODE_MASK (mode) >> 1))
5006 mmin = 0;
5007 mmax = nonzero;
5009 else
5011 rtx mmin_rtx, mmax_rtx;
5012 get_mode_bounds (mode, sign, mode, &mmin_rtx, &mmax_rtx);
5014 mmin = INTVAL (mmin_rtx);
5015 mmax = INTVAL (mmax_rtx);
5016 if (sign)
5018 unsigned int sign_copies = num_sign_bit_copies (trueop0, mode);
5020 mmin >>= (sign_copies - 1);
5021 mmax >>= (sign_copies - 1);
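/* For illustration: if trueop0 is a QImode value that
   num_sign_bit_copies shows to be sign-extended from 4 bits, mmin/mmax
   shrink to -8/7, so e.g. (gt x (const_int 7)) folds to const0_rtx and
   (ge x (const_int -8)) folds to const_true_rtx below.  */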
5025 switch (code)
5027 /* x >= y is always true for y <= mmin, always false for y > mmax. */
5028 case GEU:
5029 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
5030 return const_true_rtx;
5031 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
5032 return const0_rtx;
5033 break;
5034 case GE:
5035 if (val <= mmin)
5036 return const_true_rtx;
5037 if (val > mmax)
5038 return const0_rtx;
5039 break;
5041 /* x <= y is always true for y >= mmax, always false for y < mmin. */
5042 case LEU:
5043 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
5044 return const_true_rtx;
5045 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
5046 return const0_rtx;
5047 break;
5048 case LE:
5049 if (val >= mmax)
5050 return const_true_rtx;
5051 if (val < mmin)
5052 return const0_rtx;
5053 break;
5055 case EQ:
5056 /* x == y is always false for y out of range. */
5057 if (val < mmin || val > mmax)
5058 return const0_rtx;
5059 break;
5061 /* x > y is always false for y >= mmax, always true for y < mmin. */
5062 case GTU:
5063 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
5064 return const0_rtx;
5065 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
5066 return const_true_rtx;
5067 break;
5068 case GT:
5069 if (val >= mmax)
5070 return const0_rtx;
5071 if (val < mmin)
5072 return const_true_rtx;
5073 break;
5075 /* x < y is always false for y <= mmin, always true for y > mmax. */
5076 case LTU:
5077 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
5078 return const0_rtx;
5079 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
5080 return const_true_rtx;
5081 break;
5082 case LT:
5083 if (val <= mmin)
5084 return const0_rtx;
5085 if (val > mmax)
5086 return const_true_rtx;
5087 break;
5089 case NE:
5090 /* x != y is always true for y out of range. */
5091 if (val < mmin || val > mmax)
5092 return const_true_rtx;
5093 break;
5095 default:
5096 break;
5100 /* Optimize integer comparisons with zero. */
5101 if (trueop1 == const0_rtx && !side_effects_p (trueop0))
5103 /* Some addresses are known to be nonzero. We don't know
5104 their sign, but equality comparisons are known. */
5105 if (nonzero_address_p (trueop0))
5107 if (code == EQ || code == LEU)
5108 return const0_rtx;
5109 if (code == NE || code == GTU)
5110 return const_true_rtx;
5113 /* See if the first operand is an IOR with a constant. If so, we
5114 may be able to determine the result of this comparison. */
5115 if (GET_CODE (op0) == IOR)
5117 rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
5118 if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
5120 int sign_bitnum = GET_MODE_PRECISION (mode) - 1;
5121 int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
5122 && (UINTVAL (inner_const)
5123 & ((unsigned HOST_WIDE_INT) 1
5124 << sign_bitnum)));
5126 switch (code)
5128 case EQ:
5129 case LEU:
5130 return const0_rtx;
5131 case NE:
5132 case GTU:
5133 return const_true_rtx;
5134 case LT:
5135 case LE:
5136 if (has_sign)
5137 return const_true_rtx;
5138 break;
5139 case GT:
5140 case GE:
5141 if (has_sign)
5142 return const0_rtx;
5143 break;
5144 default:
5145 break;
5151 /* Optimize comparison of ABS with zero. */
5152 if (trueop1 == CONST0_RTX (mode) && !side_effects_p (trueop0)
5153 && (GET_CODE (trueop0) == ABS
5154 || (GET_CODE (trueop0) == FLOAT_EXTEND
5155 && GET_CODE (XEXP (trueop0, 0)) == ABS)))
5157 switch (code)
5159 case LT:
5160 /* Optimize abs(x) < 0.0. */
5161 if (!HONOR_SNANS (mode)
5162 && (!INTEGRAL_MODE_P (mode)
5163 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
5165 if (INTEGRAL_MODE_P (mode)
5166 && (issue_strict_overflow_warning
5167 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
5168 warning (OPT_Wstrict_overflow,
5169 ("assuming signed overflow does not occur when "
5170 "assuming abs (x) < 0 is false"));
5171 return const0_rtx;
5173 break;
5175 case GE:
5176 /* Optimize abs(x) >= 0.0. */
5177 if (!HONOR_NANS (mode)
5178 && (!INTEGRAL_MODE_P (mode)
5179 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
5181 if (INTEGRAL_MODE_P (mode)
5182 && (issue_strict_overflow_warning
5183 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
5184 warning (OPT_Wstrict_overflow,
5185 ("assuming signed overflow does not occur when "
5186 "assuming abs (x) >= 0 is true"));
5187 return const_true_rtx;
5189 break;
5191 case UNGE:
5192 /* Optimize ! (abs(x) < 0.0). */
5193 return const_true_rtx;
5195 default:
5196 break;
5200 return 0;
5203 /* Simplify CODE, an operation with result mode MODE and three operands,
5204 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
5205 a constant. Return 0 if no simplification is possible. */
5208 simplify_ternary_operation (enum rtx_code code, machine_mode mode,
5209 machine_mode op0_mode, rtx op0, rtx op1,
5210 rtx op2)
5212 unsigned int width = GET_MODE_PRECISION (mode);
5213 bool any_change = false;
5214 rtx tem, trueop2;
5216 /* VOIDmode means "infinite" precision. */
5217 if (width == 0)
5218 width = HOST_BITS_PER_WIDE_INT;
5220 switch (code)
5222 case FMA:
5223 /* Simplify negations around the multiplication. */
5224 /* -a * -b + c => a * b + c. */
5225 if (GET_CODE (op0) == NEG)
5227 tem = simplify_unary_operation (NEG, mode, op1, mode);
5228 if (tem)
5229 op1 = tem, op0 = XEXP (op0, 0), any_change = true;
5231 else if (GET_CODE (op1) == NEG)
5233 tem = simplify_unary_operation (NEG, mode, op0, mode);
5234 if (tem)
5235 op0 = tem, op1 = XEXP (op1, 0), any_change = true;
5238 /* Canonicalize the two multiplication operands. */
5239 /* a * -b + c => -b * a + c. */
5240 if (swap_commutative_operands_p (op0, op1))
5241 std::swap (op0, op1), any_change = true;
5243 if (any_change)
5244 return gen_rtx_FMA (mode, op0, op1, op2);
5245 return NULL_RTX;
5247 case SIGN_EXTRACT:
5248 case ZERO_EXTRACT:
5249 if (CONST_INT_P (op0)
5250 && CONST_INT_P (op1)
5251 && CONST_INT_P (op2)
5252 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
5253 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
5255 /* Extracting a bit-field from a constant */
5256 unsigned HOST_WIDE_INT val = UINTVAL (op0);
5257 HOST_WIDE_INT op1val = INTVAL (op1);
5258 HOST_WIDE_INT op2val = INTVAL (op2);
5259 if (BITS_BIG_ENDIAN)
5260 val >>= GET_MODE_PRECISION (op0_mode) - op2val - op1val;
5261 else
5262 val >>= op2val;
5264 if (HOST_BITS_PER_WIDE_INT != op1val)
5266 /* First zero-extend. */
5267 val &= ((unsigned HOST_WIDE_INT) 1 << op1val) - 1;
5268 /* If desired, propagate sign bit. */
5269 if (code == SIGN_EXTRACT
5270 && (val & ((unsigned HOST_WIDE_INT) 1 << (op1val - 1)))
5271 != 0)
5272 val |= ~ (((unsigned HOST_WIDE_INT) 1 << op1val) - 1);
5275 return gen_int_mode (val, mode);
5277 break;
5279 case IF_THEN_ELSE:
5280 if (CONST_INT_P (op0))
5281 return op0 != const0_rtx ? op1 : op2;
5283 /* Convert c ? a : a into "a". */
5284 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
5285 return op1;
5287 /* Convert a != b ? a : b into "a". */
5288 if (GET_CODE (op0) == NE
5289 && ! side_effects_p (op0)
5290 && ! HONOR_NANS (mode)
5291 && ! HONOR_SIGNED_ZEROS (mode)
5292 && ((rtx_equal_p (XEXP (op0, 0), op1)
5293 && rtx_equal_p (XEXP (op0, 1), op2))
5294 || (rtx_equal_p (XEXP (op0, 0), op2)
5295 && rtx_equal_p (XEXP (op0, 1), op1))))
5296 return op1;
5298 /* Convert a == b ? a : b into "b". */
5299 if (GET_CODE (op0) == EQ
5300 && ! side_effects_p (op0)
5301 && ! HONOR_NANS (mode)
5302 && ! HONOR_SIGNED_ZEROS (mode)
5303 && ((rtx_equal_p (XEXP (op0, 0), op1)
5304 && rtx_equal_p (XEXP (op0, 1), op2))
5305 || (rtx_equal_p (XEXP (op0, 0), op2)
5306 && rtx_equal_p (XEXP (op0, 1), op1))))
5307 return op2;
5309 /* Convert (!c) != {0,...,0} ? a : b into
5310 c != {0,...,0} ? b : a for vector modes. */
5311 if (VECTOR_MODE_P (GET_MODE (op1))
5312 && GET_CODE (op0) == NE
5313 && GET_CODE (XEXP (op0, 0)) == NOT
5314 && GET_CODE (XEXP (op0, 1)) == CONST_VECTOR)
5316 rtx cv = XEXP (op0, 1);
5317 int nunits = CONST_VECTOR_NUNITS (cv);
5318 bool ok = true;
5319 for (int i = 0; i < nunits; ++i)
5320 if (CONST_VECTOR_ELT (cv, i) != const0_rtx)
5322 ok = false;
5323 break;
5325 if (ok)
5327 rtx new_op0 = gen_rtx_NE (GET_MODE (op0),
5328 XEXP (XEXP (op0, 0), 0),
5329 XEXP (op0, 1));
5330 rtx retval = gen_rtx_IF_THEN_ELSE (mode, new_op0, op2, op1);
5331 return retval;
5335 if (COMPARISON_P (op0) && ! side_effects_p (op0))
5337 machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
5338 ? GET_MODE (XEXP (op0, 1))
5339 : GET_MODE (XEXP (op0, 0)));
5340 rtx temp;
5342 /* Look for happy constants in op1 and op2. */
5343 if (CONST_INT_P (op1) && CONST_INT_P (op2))
5345 HOST_WIDE_INT t = INTVAL (op1);
5346 HOST_WIDE_INT f = INTVAL (op2);
5348 if (t == STORE_FLAG_VALUE && f == 0)
5349 code = GET_CODE (op0);
5350 else if (t == 0 && f == STORE_FLAG_VALUE)
5352 enum rtx_code tmp;
5353 tmp = reversed_comparison_code (op0, NULL_RTX);
5354 if (tmp == UNKNOWN)
5355 break;
5356 code = tmp;
5358 else
5359 break;
5361 return simplify_gen_relational (code, mode, cmp_mode,
5362 XEXP (op0, 0), XEXP (op0, 1));
5365 if (cmp_mode == VOIDmode)
5366 cmp_mode = op0_mode;
5367 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
5368 cmp_mode, XEXP (op0, 0),
5369 XEXP (op0, 1));
5371 /* See if any simplifications were possible. */
5372 if (temp)
5374 if (CONST_INT_P (temp))
5375 return temp == const0_rtx ? op2 : op1;
5376 else if (temp)
5377 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
5380 break;
5382 case VEC_MERGE:
5383 gcc_assert (GET_MODE (op0) == mode);
5384 gcc_assert (GET_MODE (op1) == mode);
5385 gcc_assert (VECTOR_MODE_P (mode));
5386 trueop2 = avoid_constant_pool_reference (op2);
5387 if (CONST_INT_P (trueop2))
5389 int elt_size = GET_MODE_UNIT_SIZE (mode);
5390 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
5391 unsigned HOST_WIDE_INT sel = UINTVAL (trueop2);
5392 unsigned HOST_WIDE_INT mask;
5393 if (n_elts == HOST_BITS_PER_WIDE_INT)
5394 mask = -1;
5395 else
5396 mask = ((unsigned HOST_WIDE_INT) 1 << n_elts) - 1;
5398 if (!(sel & mask) && !side_effects_p (op0))
5399 return op1;
5400 if ((sel & mask) == mask && !side_effects_p (op1))
5401 return op0;
5403 rtx trueop0 = avoid_constant_pool_reference (op0);
5404 rtx trueop1 = avoid_constant_pool_reference (op1);
5405 if (GET_CODE (trueop0) == CONST_VECTOR
5406 && GET_CODE (trueop1) == CONST_VECTOR)
5408 rtvec v = rtvec_alloc (n_elts);
5409 unsigned int i;
5411 for (i = 0; i < n_elts; i++)
5412 RTVEC_ELT (v, i) = ((sel & ((unsigned HOST_WIDE_INT) 1 << i))
5413 ? CONST_VECTOR_ELT (trueop0, i)
5414 : CONST_VECTOR_ELT (trueop1, i));
5415 return gen_rtx_CONST_VECTOR (mode, v);
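/* For illustration: in V4SI with sel == 5 (binary 0101), elements 0
   and 2 of the result are taken from op0 and elements 1 and 3 from
   op1.  */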
5418 /* Replace (vec_merge (vec_merge a b m) c n) with (vec_merge b c n)
5419 if no element from a appears in the result. */
5420 if (GET_CODE (op0) == VEC_MERGE)
5422 tem = avoid_constant_pool_reference (XEXP (op0, 2));
5423 if (CONST_INT_P (tem))
5425 unsigned HOST_WIDE_INT sel0 = UINTVAL (tem);
5426 if (!(sel & sel0 & mask) && !side_effects_p (XEXP (op0, 0)))
5427 return simplify_gen_ternary (code, mode, mode,
5428 XEXP (op0, 1), op1, op2);
5429 if (!(sel & ~sel0 & mask) && !side_effects_p (XEXP (op0, 1)))
5430 return simplify_gen_ternary (code, mode, mode,
5431 XEXP (op0, 0), op1, op2);
5434 if (GET_CODE (op1) == VEC_MERGE)
5436 tem = avoid_constant_pool_reference (XEXP (op1, 2));
5437 if (CONST_INT_P (tem))
5439 unsigned HOST_WIDE_INT sel1 = UINTVAL (tem);
5440 if (!(~sel & sel1 & mask) && !side_effects_p (XEXP (op1, 0)))
5441 return simplify_gen_ternary (code, mode, mode,
5442 op0, XEXP (op1, 1), op2);
5443 if (!(~sel & ~sel1 & mask) && !side_effects_p (XEXP (op1, 1)))
5444 return simplify_gen_ternary (code, mode, mode,
5445 op0, XEXP (op1, 0), op2);
5449 /* Replace (vec_merge (vec_duplicate (vec_select a parallel (i))) a 1 << i)
5450 with a. */
5451 if (GET_CODE (op0) == VEC_DUPLICATE
5452 && GET_CODE (XEXP (op0, 0)) == VEC_SELECT
5453 && GET_CODE (XEXP (XEXP (op0, 0), 1)) == PARALLEL
5454 && mode_nunits[GET_MODE (XEXP (op0, 0))] == 1)
5456 tem = XVECEXP ((XEXP (XEXP (op0, 0), 1)), 0, 0);
5457 if (CONST_INT_P (tem) && CONST_INT_P (op2))
5459 if (XEXP (XEXP (op0, 0), 0) == op1
5460 && UINTVAL (op2) == HOST_WIDE_INT_1U << UINTVAL (tem))
5461 return op1;
5466 if (rtx_equal_p (op0, op1)
5467 && !side_effects_p (op2) && !side_effects_p (op1))
5468 return op0;
5470 break;
5472 default:
5473 gcc_unreachable ();
5476 return 0;
5479 /* Evaluate a SUBREG of a CONST_INT or CONST_WIDE_INT or CONST_DOUBLE
5480 or CONST_FIXED or CONST_VECTOR, returning another CONST_INT or
5481 CONST_WIDE_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.
5483 Works by unpacking OP into a collection of 8-bit values
5484 represented as a little-endian array of 'unsigned char', selecting by BYTE,
5485 and then repacking them again for OUTERMODE. */
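/* For illustration, assuming a little-endian target: an SImode-to-HImode
   subreg of (const_int 0x12345678) at byte 0 unpacks to the byte array
   { 78, 56, 34, 12 } and repacks the low two bytes, giving
   (const_int 0x5678).  */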
5487 static rtx
5488 simplify_immed_subreg (machine_mode outermode, rtx op,
5489 machine_mode innermode, unsigned int byte)
5491 enum {
5492 value_bit = 8,
5493 value_mask = (1 << value_bit) - 1
5495 unsigned char value[MAX_BITSIZE_MODE_ANY_MODE / value_bit];
5496 int value_start;
5497 int i;
5498 int elem;
5500 int num_elem;
5501 rtx * elems;
5502 int elem_bitsize;
5503 rtx result_s;
5504 rtvec result_v = NULL;
5505 enum mode_class outer_class;
5506 machine_mode outer_submode;
5507 int max_bitsize;
5509 /* Some ports misuse CCmode. */
5510 if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
5511 return op;
5513 /* We have no way to represent a complex constant at the rtl level. */
5514 if (COMPLEX_MODE_P (outermode))
5515 return NULL_RTX;
5517 /* We support any size mode. */
5518 max_bitsize = MAX (GET_MODE_BITSIZE (outermode),
5519 GET_MODE_BITSIZE (innermode));
5521 /* Unpack the value. */
5523 if (GET_CODE (op) == CONST_VECTOR)
5525 num_elem = CONST_VECTOR_NUNITS (op);
5526 elems = &CONST_VECTOR_ELT (op, 0);
5527 elem_bitsize = GET_MODE_UNIT_BITSIZE (innermode);
5529 else
5531 num_elem = 1;
5532 elems = &op;
5533 elem_bitsize = max_bitsize;
5535 /* If this asserts, it is too complicated; reducing value_bit may help. */
5536 gcc_assert (BITS_PER_UNIT % value_bit == 0);
5537 /* I don't know how to handle endianness of sub-units. */
5538 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
5540 for (elem = 0; elem < num_elem; elem++)
5542 unsigned char * vp;
5543 rtx el = elems[elem];
5545 /* Vectors are kept in target memory order. (This is probably
5546 a mistake.) */
5548 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5549 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5550 / BITS_PER_UNIT);
5551 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5552 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5553 unsigned bytele = (subword_byte % UNITS_PER_WORD
5554 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5555 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
5558 switch (GET_CODE (el))
5560 case CONST_INT:
5561 for (i = 0;
5562 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5563 i += value_bit)
5564 *vp++ = INTVAL (el) >> i;
5565 /* CONST_INTs are always logically sign-extended. */
5566 for (; i < elem_bitsize; i += value_bit)
5567 *vp++ = INTVAL (el) < 0 ? -1 : 0;
5568 break;
5570 case CONST_WIDE_INT:
5572 rtx_mode_t val = std::make_pair (el, innermode);
5573 unsigned char extend = wi::sign_mask (val);
5575 for (i = 0; i < elem_bitsize; i += value_bit)
5576 *vp++ = wi::extract_uhwi (val, i, value_bit);
5577 for (; i < elem_bitsize; i += value_bit)
5578 *vp++ = extend;
5580 break;
5582 case CONST_DOUBLE:
5583 if (TARGET_SUPPORTS_WIDE_INT == 0 && GET_MODE (el) == VOIDmode)
5585 unsigned char extend = 0;
5586 /* If this triggers, someone should have generated a
5587 CONST_INT instead. */
5588 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
5590 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5591 *vp++ = CONST_DOUBLE_LOW (el) >> i;
5592 while (i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize)
5594 *vp++
5595 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
5596 i += value_bit;
5599 if (CONST_DOUBLE_HIGH (el) >> (HOST_BITS_PER_WIDE_INT - 1))
5600 extend = -1;
5601 for (; i < elem_bitsize; i += value_bit)
5602 *vp++ = extend;
5604 else
5606 /* This is big enough for anything on the platform. */
5607 long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32];
5608 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
5610 gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
5611 gcc_assert (bitsize <= elem_bitsize);
5612 gcc_assert (bitsize % value_bit == 0);
5614 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
5615 GET_MODE (el));
5617 /* real_to_target produces its result in words affected by
5618 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5619 and use WORDS_BIG_ENDIAN instead; see the documentation
5620 of SUBREG in rtl.texi. */
5621 for (i = 0; i < bitsize; i += value_bit)
5623 int ibase;
5624 if (WORDS_BIG_ENDIAN)
5625 ibase = bitsize - 1 - i;
5626 else
5627 ibase = i;
5628 *vp++ = tmp[ibase / 32] >> i % 32;
5631 /* It shouldn't matter what's done here, so fill it with
5632 zero. */
5633 for (; i < elem_bitsize; i += value_bit)
5634 *vp++ = 0;
5636 break;
5638 case CONST_FIXED:
5639 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
5641 for (i = 0; i < elem_bitsize; i += value_bit)
5642 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5644 else
5646 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5647 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5648 for (; i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize;
5649 i += value_bit)
5650 *vp++ = CONST_FIXED_VALUE_HIGH (el)
5651 >> (i - HOST_BITS_PER_WIDE_INT);
5652 for (; i < elem_bitsize; i += value_bit)
5653 *vp++ = 0;
5655 break;
5657 default:
5658 gcc_unreachable ();
5662 /* Now, pick the right byte to start with. */
5663 /* Renumber BYTE so that the least-significant byte is byte 0. A special
5664 case is paradoxical SUBREGs, which shouldn't be adjusted since they
5665 will already have offset 0. */
5666 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
5668 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
5669 - byte);
5670 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5671 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5672 byte = (subword_byte % UNITS_PER_WORD
5673 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5676 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
5677 so if the adjustment above would have made it negative, it will instead be very large.) */
5678 gcc_assert (byte < GET_MODE_SIZE (innermode));
5680 /* Convert from bytes to chunks of size value_bit. */
5681 value_start = byte * (BITS_PER_UNIT / value_bit);
5683 /* Re-pack the value. */
5684 num_elem = GET_MODE_NUNITS (outermode);
5686 if (VECTOR_MODE_P (outermode))
5688 result_v = rtvec_alloc (num_elem);
5689 elems = &RTVEC_ELT (result_v, 0);
5691 else
5692 elems = &result_s;
5694 outer_submode = GET_MODE_INNER (outermode);
5695 outer_class = GET_MODE_CLASS (outer_submode);
5696 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
5698 gcc_assert (elem_bitsize % value_bit == 0);
5699 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
5701 for (elem = 0; elem < num_elem; elem++)
5703 unsigned char *vp;
5705 /* Vectors are stored in target memory order. (This is probably
5706 a mistake.) */
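/* For example, for a V4SImode result (num_elem == 4, elem_bitsize == 32)
   on a fully big-endian target with UNITS_PER_WORD == 4: for elem == 0,
   byte == 0 but ibyte == 12, so bytele == 12 and the first vector element
   is taken from the most significant end of the value array, matching
   target memory order. */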
5708 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5709 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5710 / BITS_PER_UNIT);
5711 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5712 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5713 unsigned bytele = (subword_byte % UNITS_PER_WORD
5714 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5715 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
5718 switch (outer_class)
5720 case MODE_INT:
5721 case MODE_PARTIAL_INT:
5723 int u;
5724 int base = 0;
5725 int units
5726 = (GET_MODE_BITSIZE (outer_submode) + HOST_BITS_PER_WIDE_INT - 1)
5727 / HOST_BITS_PER_WIDE_INT;
5728 HOST_WIDE_INT tmp[MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_WIDE_INT];
5729 wide_int r;
5731 if (GET_MODE_PRECISION (outer_submode) > MAX_BITSIZE_MODE_ANY_INT)
5732 return NULL_RTX;
5733 for (u = 0; u < units; u++)
5735 unsigned HOST_WIDE_INT buf = 0;
5736 for (i = 0;
5737 i < HOST_BITS_PER_WIDE_INT && base + i < elem_bitsize;
5738 i += value_bit)
5739 buf |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5741 tmp[u] = buf;
5742 base += HOST_BITS_PER_WIDE_INT;
5744 r = wide_int::from_array (tmp, units,
5745 GET_MODE_PRECISION (outer_submode));
5746 #if TARGET_SUPPORTS_WIDE_INT == 0
5747 /* Make sure r will fit into CONST_INT or CONST_DOUBLE. */
5748 if (wi::min_precision (r, SIGNED) > HOST_BITS_PER_DOUBLE_INT)
5749 return NULL_RTX;
5750 #endif
5751 elems[elem] = immed_wide_int_const (r, outer_submode);
5753 break;
5755 case MODE_FLOAT:
5756 case MODE_DECIMAL_FLOAT:
5758 REAL_VALUE_TYPE r;
5759 long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32];
5761 /* real_from_target wants its input in words affected by
5762 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5763 and use WORDS_BIG_ENDIAN instead; see the documentation
5764 of SUBREG in rtl.texi. */
5765 for (i = 0; i < max_bitsize / 32; i++)
5766 tmp[i] = 0;
5767 for (i = 0; i < elem_bitsize; i += value_bit)
5769 int ibase;
5770 if (WORDS_BIG_ENDIAN)
5771 ibase = elem_bitsize - 1 - i;
5772 else
5773 ibase = i;
5774 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
5777 real_from_target (&r, tmp, outer_submode);
5778 elems[elem] = const_double_from_real_value (r, outer_submode);
5780 break;
5782 case MODE_FRACT:
5783 case MODE_UFRACT:
5784 case MODE_ACCUM:
5785 case MODE_UACCUM:
5787 FIXED_VALUE_TYPE f;
5788 f.data.low = 0;
5789 f.data.high = 0;
5790 f.mode = outer_submode;
5792 for (i = 0;
5793 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5794 i += value_bit)
5795 f.data.low |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5796 for (; i < elem_bitsize; i += value_bit)
5797 f.data.high |= ((unsigned HOST_WIDE_INT)(*vp++ & value_mask)
5798 << (i - HOST_BITS_PER_WIDE_INT));
5800 elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
5802 break;
5804 default:
5805 gcc_unreachable ();
5808 if (VECTOR_MODE_P (outermode))
5809 return gen_rtx_CONST_VECTOR (outermode, result_v);
5810 else
5811 return result_s;
5814 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
5815 Return 0 if no simplifications are possible. */
5817 simplify_subreg (machine_mode outermode, rtx op,
5818 machine_mode innermode, unsigned int byte)
5820 /* Little bit of sanity checking. */
5821 gcc_assert (innermode != VOIDmode);
5822 gcc_assert (outermode != VOIDmode);
5823 gcc_assert (innermode != BLKmode);
5824 gcc_assert (outermode != BLKmode);
5826 gcc_assert (GET_MODE (op) == innermode
5827 || GET_MODE (op) == VOIDmode);
5829 if ((byte % GET_MODE_SIZE (outermode)) != 0)
5830 return NULL_RTX;
5832 if (byte >= GET_MODE_SIZE (innermode))
5833 return NULL_RTX;
5835 if (outermode == innermode && !byte)
5836 return op;
5838 if (CONST_SCALAR_INT_P (op)
5839 || CONST_DOUBLE_AS_FLOAT_P (op)
5840 || GET_CODE (op) == CONST_FIXED
5841 || GET_CODE (op) == CONST_VECTOR)
5842 return simplify_immed_subreg (outermode, op, innermode, byte);
5844 /* Changing mode twice with SUBREG => just change it once,
5845 or not at all if changing back to op's starting mode. */
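/* For example, (subreg:SI (subreg:HI (reg:SI x) 0) 0) collapses straight
   back to (reg:SI x), since the outer mode matches the innermost mode and
   both offsets are zero. */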
5846 if (GET_CODE (op) == SUBREG)
5848 machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
5849 int final_offset = byte + SUBREG_BYTE (op);
5850 rtx newx;
5852 if (outermode == innermostmode
5853 && byte == 0 && SUBREG_BYTE (op) == 0)
5854 return SUBREG_REG (op);
5856 /* The SUBREG_BYTE represents the offset, as if the value were stored
5857 in memory. An irritating exception is a paradoxical subreg, where
5858 we define SUBREG_BYTE to be 0; on big-endian machines this value
5859 would otherwise be negative. For a moment, undo this exception. */
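/* For example, when simplifying (subreg:DI (subreg:SI (reg:DI y) 4) 0) on a
   fully big-endian target with UNITS_PER_WORD == 4: the requested subreg is
   paradoxical, so difference is -4 and final_offset drops from 4 to 0,
   after which the recursive call below folds the whole thing back to
   (reg:DI y). */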
5860 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5862 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
5863 if (WORDS_BIG_ENDIAN)
5864 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5865 if (BYTES_BIG_ENDIAN)
5866 final_offset += difference % UNITS_PER_WORD;
5868 if (SUBREG_BYTE (op) == 0
5869 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
5871 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
5872 if (WORDS_BIG_ENDIAN)
5873 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5874 if (BYTES_BIG_ENDIAN)
5875 final_offset += difference % UNITS_PER_WORD;
5878 /* See whether resulting subreg will be paradoxical. */
5879 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
5881 /* In nonparadoxical subregs we can't handle negative offsets. */
5882 if (final_offset < 0)
5883 return NULL_RTX;
5884 /* Bail out in case resulting subreg would be incorrect. */
5885 if (final_offset % GET_MODE_SIZE (outermode)
5886 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
5887 return NULL_RTX;
5889 else
5891 int offset = 0;
5892 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
5894 /* For a paradoxical subreg, see if we are still looking at the lower
5895 part. If so, our SUBREG_BYTE will be 0. */
5896 if (WORDS_BIG_ENDIAN)
5897 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5898 if (BYTES_BIG_ENDIAN)
5899 offset += difference % UNITS_PER_WORD;
5900 if (offset == final_offset)
5901 final_offset = 0;
5902 else
5903 return NULL_RTX;
5906 /* Recurse for further possible simplifications. */
5907 newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
5908 final_offset);
5909 if (newx)
5910 return newx;
5911 if (validate_subreg (outermode, innermostmode,
5912 SUBREG_REG (op), final_offset))
5914 newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
5915 if (SUBREG_PROMOTED_VAR_P (op)
5916 && SUBREG_PROMOTED_SIGN (op) >= 0
5917 && GET_MODE_CLASS (outermode) == MODE_INT
5918 && IN_RANGE (GET_MODE_SIZE (outermode),
5919 GET_MODE_SIZE (innermode),
5920 GET_MODE_SIZE (innermostmode))
5921 && subreg_lowpart_p (newx))
5923 SUBREG_PROMOTED_VAR_P (newx) = 1;
5924 SUBREG_PROMOTED_SET (newx, SUBREG_PROMOTED_GET (op));
5926 return newx;
5928 return NULL_RTX;
5931 /* SUBREG of a hard register => just change the register number
5932 and/or mode. If the hard register is not valid in that mode,
5933 suppress this simplification. If the hard register is the stack,
5934 frame, or argument pointer, leave this as a SUBREG. */
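/* For instance, on a little-endian target where DImode occupies two
   consecutive 4-byte hard registers (say, registers 10 and 11),
   (subreg:SI (reg:DI 10) 4) can become (reg:SI 11) when
   simplify_subreg_regno reports that register 11 is valid in SImode;
   for the stack or frame pointer the SUBREG is kept instead. */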
5936 if (REG_P (op) && HARD_REGISTER_P (op))
5938 unsigned int regno, final_regno;
5940 regno = REGNO (op);
5941 final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
5942 if (HARD_REGISTER_NUM_P (final_regno))
5944 rtx x;
5945 int final_offset = byte;
5947 /* Adjust offset for paradoxical subregs. */
5948 if (byte == 0
5949 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5951 int difference = (GET_MODE_SIZE (innermode)
5952 - GET_MODE_SIZE (outermode));
5953 if (WORDS_BIG_ENDIAN)
5954 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5955 if (BYTES_BIG_ENDIAN)
5956 final_offset += difference % UNITS_PER_WORD;
5959 x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);
5961 /* Propagate the original regno. We don't have any way to specify
5962 an offset inside the original regno, so do so only for the lowpart.
5963 The information is used only by alias analysis, which cannot
5964 grok partial registers anyway. */
5966 if (subreg_lowpart_offset (outermode, innermode) == byte)
5967 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
5968 return x;
5972 /* If we have a SUBREG of a register that we are replacing with a MEM,
5973 make a new MEM and try replacing the SUBREG with it. Don't do this
5974 if the MEM has a mode-dependent address or if we would be widening
5975 it. */
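/* For instance, (subreg:SI (mem:DI addr) 4) can usually be rewritten as an
   SImode MEM at byte offset 4 of the same address via adjust_address_nv,
   provided the address is not mode-dependent and we are not widening the
   reference. */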
5977 if (MEM_P (op)
5978 && ! mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op))
5979 /* Allow splitting of volatile memory references in case we don't
5980 have an instruction to move the whole thing. */
5981 && (! MEM_VOLATILE_P (op)
5982 || ! have_insn_for (SET, innermode))
5983 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
5984 return adjust_address_nv (op, outermode, byte);
5986 /* Handle complex values represented as CONCAT
5987 of real and imaginary part. */
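/* For example, assuming 4-byte SFmode, (subreg:SF (concat:SC x y) 4) selects
   the imaginary half: byte is not less than part_size, so PART is y and
   final_offset is 0, and the recursive call below simply returns y. */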
5988 if (GET_CODE (op) == CONCAT)
5990 unsigned int part_size, final_offset;
5991 rtx part, res;
5993 part_size = GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)));
5994 if (byte < part_size)
5996 part = XEXP (op, 0);
5997 final_offset = byte;
5999 else
6001 part = XEXP (op, 1);
6002 final_offset = byte - part_size;
6005 if (final_offset + GET_MODE_SIZE (outermode) > part_size)
6006 return NULL_RTX;
6008 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
6009 if (res)
6010 return res;
6011 if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
6012 return gen_rtx_SUBREG (outermode, part, final_offset);
6013 return NULL_RTX;
6016 /* A SUBREG resulting from a zero extension may fold to zero if
6017 it extracts bits higher than the ZERO_EXTEND's source bits. */
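/* For example, on a little-endian target (subreg:SI (zero_extend:DI
   (reg:SI x)) 4) only reads bits cleared by the extension: bitpos is 32,
   which is not below the 32-bit precision of the source, so the subreg
   folds to CONST0_RTX (SImode). */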
6018 if (GET_CODE (op) == ZERO_EXTEND && SCALAR_INT_MODE_P (innermode))
6020 unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
6021 if (bitpos >= GET_MODE_PRECISION (GET_MODE (XEXP (op, 0))))
6022 return CONST0_RTX (outermode);
6025 if (SCALAR_INT_MODE_P (outermode)
6026 && SCALAR_INT_MODE_P (innermode)
6027 && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode)
6028 && byte == subreg_lowpart_offset (outermode, innermode))
6030 rtx tem = simplify_truncation (outermode, op, innermode);
6031 if (tem)
6032 return tem;
6035 return NULL_RTX;
6038 /* Make a SUBREG operation or equivalent if it folds. */
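/* Unlike simplify_subreg, this returns a fresh SUBREG when nothing folds:
   e.g. simplify_gen_subreg (HImode, x, SImode, 0) with X a pseudo REG
   yields (subreg:HI x 0) when validate_subreg accepts it, and NULL_RTX
   for SUBREG, CONCAT or VOIDmode operands that did not simplify. */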
6041 simplify_gen_subreg (machine_mode outermode, rtx op,
6042 machine_mode innermode, unsigned int byte)
6044 rtx newx;
6046 newx = simplify_subreg (outermode, op, innermode, byte);
6047 if (newx)
6048 return newx;
6050 if (GET_CODE (op) == SUBREG
6051 || GET_CODE (op) == CONCAT
6052 || GET_MODE (op) == VOIDmode)
6053 return NULL_RTX;
6055 if (validate_subreg (outermode, innermode, op, byte))
6056 return gen_rtx_SUBREG (outermode, op, byte);
6058 return NULL_RTX;
6061 /* Generate a SUBREG to get the least significant part of EXPR (in mode
6062 INNER_MODE) in OUTER_MODE. */
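/* For example, lowpart_subreg (QImode, x, SImode) asks for the least
   significant byte of X; subreg_lowpart_offset (QImode, SImode) is 0 on a
   little-endian target and 3 on a fully big-endian one with 4-byte words,
   and the result is the simplified rtx, a new (subreg:QI ...), or NULL_RTX
   if no valid subreg can be formed. */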
6065 lowpart_subreg (machine_mode outer_mode, rtx expr,
6066 machine_mode inner_mode)
6068 return simplify_gen_subreg (outer_mode, expr, inner_mode,
6069 subreg_lowpart_offset (outer_mode, inner_mode));
6072 /* Simplify X, an rtx expression.
6074 Return the simplified expression or NULL if no simplifications
6075 were possible.
6077 This is the preferred entry point into the simplification routines;
6078 however, we still allow passes to call the more specific routines.
6080 Right now GCC has three (yes, three) major bodies of RTL simplification
6081 code that need to be unified.
6083 1. fold_rtx in cse.c. This code uses various CSE specific
6084 information to aid in RTL simplification.
6086 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
6087 it uses combine specific information to aid in RTL
6088 simplification.
6090 3. The routines in this file.
6093 Long term we want to only have one body of simplification code; to
6094 get to that state I recommend the following steps:
6096 1. Pore over fold_rtx & simplify_rtx and move any simplifications
6097 which do not depend on pass-specific state into these routines.
6099 2. As code is moved by #1, change fold_rtx & simplify_rtx to
6100 use this routine whenever possible.
6102 3. Allow for pass dependent state to be provided to these
6103 routines and add simplifications based on the pass dependent
6104 state. Remove code from cse.c & combine.c that becomes
6105 redundant/dead.
6107 It will take time, but ultimately the compiler will be easier to
6108 maintain and improve. It's totally silly that when we add a
6109 simplification it needs to be added to 4 places (3 for RTL
6110 simplification and 1 for tree simplification). */
6113 simplify_rtx (const_rtx x)
6115 const enum rtx_code code = GET_CODE (x);
6116 const machine_mode mode = GET_MODE (x);
6118 switch (GET_RTX_CLASS (code))
6120 case RTX_UNARY:
6121 return simplify_unary_operation (code, mode,
6122 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
6123 case RTX_COMM_ARITH:
6124 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
6125 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
6127 /* Fall through.... */
6129 case RTX_BIN_ARITH:
6130 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
6132 case RTX_TERNARY:
6133 case RTX_BITFIELD_OPS:
6134 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
6135 XEXP (x, 0), XEXP (x, 1),
6136 XEXP (x, 2));
6138 case RTX_COMPARE:
6139 case RTX_COMM_COMPARE:
6140 return simplify_relational_operation (code, mode,
6141 ((GET_MODE (XEXP (x, 0))
6142 != VOIDmode)
6143 ? GET_MODE (XEXP (x, 0))
6144 : GET_MODE (XEXP (x, 1))),
6145 XEXP (x, 0),
6146 XEXP (x, 1));
6148 case RTX_EXTRA:
6149 if (code == SUBREG)
6150 return simplify_subreg (mode, SUBREG_REG (x),
6151 GET_MODE (SUBREG_REG (x)),
6152 SUBREG_BYTE (x));
6153 break;
6155 case RTX_OBJ:
6156 if (code == LO_SUM)
6158 /* Convert (lo_sum (high FOO) FOO) to FOO. */
6159 if (GET_CODE (XEXP (x, 0)) == HIGH
6160 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
6161 return XEXP (x, 1);
6163 break;
6165 default:
6166 break;
6168 return NULL;