[RTL-ifcvt] PR rtl-optimization/68506: Fix emitting order of insns in IF-THEN-JOIN...
[official-gcc.git] / gcc / simplify-rtx.c
blob 413d61b17590ca465d6af0ed31a2bc4e09bbeab5
1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987-2015 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "backend.h"
25 #include "target.h"
26 #include "rtl.h"
27 #include "tree.h"
28 #include "predict.h"
29 #include "optabs.h"
30 #include "emit-rtl.h"
31 #include "recog.h"
32 #include "diagnostic-core.h"
33 #include "varasm.h"
34 #include "flags.h"
36 /* Simplification and canonicalization of RTL. */
38 /* Much code operates on (low, high) pairs; the low value is an
39 unsigned wide int, the high value a signed wide int. We
40 occasionally need to sign extend from low to high as if low were a
41 signed wide int. */
42 #define HWI_SIGN_EXTEND(low) \
43 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
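/* For example (hypothetical values): a negative LOW yields an all-ones
   high word, while a non-negative LOW yields zero:

     HWI_SIGN_EXTEND (-5)  ==  (HOST_WIDE_INT) -1
     HWI_SIGN_EXTEND (7)   ==  (HOST_WIDE_INT) 0   */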
45 static rtx neg_const_int (machine_mode, const_rtx);
46 static bool plus_minus_operand_p (const_rtx);
47 static rtx simplify_plus_minus (enum rtx_code, machine_mode, rtx, rtx);
48 static rtx simplify_immed_subreg (machine_mode, rtx, machine_mode,
49 unsigned int);
50 static rtx simplify_associative_operation (enum rtx_code, machine_mode,
51 rtx, rtx);
52 static rtx simplify_relational_operation_1 (enum rtx_code, machine_mode,
53 machine_mode, rtx, rtx);
54 static rtx simplify_unary_operation_1 (enum rtx_code, machine_mode, rtx);
55 static rtx simplify_binary_operation_1 (enum rtx_code, machine_mode,
56 rtx, rtx, rtx, rtx);
58 /* Negate a CONST_INT rtx, truncating (because a conversion from a
59 maximally negative number can overflow). */
60 static rtx
61 neg_const_int (machine_mode mode, const_rtx i)
63 return gen_int_mode (-(unsigned HOST_WIDE_INT) INTVAL (i), mode);
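/* For instance (hypothetical SImode constant), negating the most negative
   SImode value wraps back to itself once gen_int_mode truncates the result
   to 32 bits, which is why the negation is done on an unsigned value:

     neg_const_int (SImode, gen_int_mode (-2147483648, SImode))
       -> (const_int -2147483648)   */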
66 /* Test whether the expression X is an immediate constant that represents
67 the most significant bit of machine mode MODE. */
69 bool
70 mode_signbit_p (machine_mode mode, const_rtx x)
72 unsigned HOST_WIDE_INT val;
73 unsigned int width;
75 if (GET_MODE_CLASS (mode) != MODE_INT)
76 return false;
78 width = GET_MODE_PRECISION (mode);
79 if (width == 0)
80 return false;
82 if (width <= HOST_BITS_PER_WIDE_INT
83 && CONST_INT_P (x))
84 val = INTVAL (x);
85 #if TARGET_SUPPORTS_WIDE_INT
86 else if (CONST_WIDE_INT_P (x))
88 unsigned int i;
89 unsigned int elts = CONST_WIDE_INT_NUNITS (x);
90 if (elts != (width + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT)
91 return false;
92 for (i = 0; i < elts - 1; i++)
93 if (CONST_WIDE_INT_ELT (x, i) != 0)
94 return false;
95 val = CONST_WIDE_INT_ELT (x, elts - 1);
96 width %= HOST_BITS_PER_WIDE_INT;
97 if (width == 0)
98 width = HOST_BITS_PER_WIDE_INT;
100 #else
101 else if (width <= HOST_BITS_PER_DOUBLE_INT
102 && CONST_DOUBLE_AS_INT_P (x)
103 && CONST_DOUBLE_LOW (x) == 0)
105 val = CONST_DOUBLE_HIGH (x);
106 width -= HOST_BITS_PER_WIDE_INT;
108 #endif
109 else
110 /* X is not an integer constant. */
111 return false;
113 if (width < HOST_BITS_PER_WIDE_INT)
114 val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
115 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
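/* As a concrete illustration (hypothetical arguments), for SImode only the
   constant with just bit 31 set qualifies:

     mode_signbit_p (SImode, gen_int_mode (0x80000000, SImode))  -> true
     mode_signbit_p (SImode, gen_int_mode (0x40000000, SImode))  -> false  */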
118 /* Test whether VAL is equal to the most significant bit of mode MODE
119 (after masking with the mode mask of MODE). Returns false if the
120 precision of MODE is too large to handle. */
122 bool
123 val_signbit_p (machine_mode mode, unsigned HOST_WIDE_INT val)
125 unsigned int width;
127 if (GET_MODE_CLASS (mode) != MODE_INT)
128 return false;
130 width = GET_MODE_PRECISION (mode);
131 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
132 return false;
134 val &= GET_MODE_MASK (mode);
135 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
138 /* Test whether the most significant bit of mode MODE is set in VAL.
139 Returns false if the precision of MODE is too large to handle. */
140 bool
141 val_signbit_known_set_p (machine_mode mode, unsigned HOST_WIDE_INT val)
143 unsigned int width;
145 if (GET_MODE_CLASS (mode) != MODE_INT)
146 return false;
148 width = GET_MODE_PRECISION (mode);
149 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
150 return false;
152 val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
153 return val != 0;
156 /* Test whether the most significant bit of mode MODE is clear in VAL.
157 Returns false if the precision of MODE is too large to handle. */
158 bool
159 val_signbit_known_clear_p (machine_mode mode, unsigned HOST_WIDE_INT val)
161 unsigned int width;
163 if (GET_MODE_CLASS (mode) != MODE_INT)
164 return false;
166 width = GET_MODE_PRECISION (mode);
167 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
168 return false;
170 val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
171 return val == 0;
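/* By way of example (hypothetical QImode values), with QImode, whose sign
   bit is bit 7:

     val_signbit_p (QImode, 0x80)              -> true
     val_signbit_known_set_p (QImode, 0x90)    -> true   (bit 7 is set)
     val_signbit_known_clear_p (QImode, 0x70)  -> true   (bit 7 is clear)  */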
174 /* Make a binary operation by properly ordering the operands and
175 seeing if the expression folds. */
178 simplify_gen_binary (enum rtx_code code, machine_mode mode, rtx op0,
179 rtx op1)
181 rtx tem;
183 /* If this simplifies, do it. */
184 tem = simplify_binary_operation (code, mode, op0, op1);
185 if (tem)
186 return tem;
188 /* Put complex operands first and constants second if commutative. */
189 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
190 && swap_commutative_operands_p (op0, op1))
191 std::swap (op0, op1);
193 return gen_rtx_fmt_ee (code, mode, op0, op1);
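/* For example (hypothetical operands): constant operands fold immediately,
   while a commutative operation with a non-constant operand is
   canonicalized so the constant comes second:

     simplify_gen_binary (PLUS, SImode, const1_rtx, GEN_INT (3))
       -> (const_int 4)
     simplify_gen_binary (PLUS, SImode, GEN_INT (3), x)   where x is a REG
       -> (plus:SI x (const_int 3))   */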
196 /* If X is a MEM referencing the constant pool, return the real value.
197 Otherwise return X. */
199 avoid_constant_pool_reference (rtx x)
201 rtx c, tmp, addr;
202 machine_mode cmode;
203 HOST_WIDE_INT offset = 0;
205 switch (GET_CODE (x))
207 case MEM:
208 break;
210 case FLOAT_EXTEND:
211 /* Handle float extensions of constant pool references. */
212 tmp = XEXP (x, 0);
213 c = avoid_constant_pool_reference (tmp);
214 if (c != tmp && CONST_DOUBLE_AS_FLOAT_P (c))
215 return const_double_from_real_value (*CONST_DOUBLE_REAL_VALUE (c),
216 GET_MODE (x));
217 return x;
219 default:
220 return x;
223 if (GET_MODE (x) == BLKmode)
224 return x;
226 addr = XEXP (x, 0);
228 /* Call target hook to avoid the effects of -fpic etc.... */
229 addr = targetm.delegitimize_address (addr);
231 /* Split the address into a base and integer offset. */
232 if (GET_CODE (addr) == CONST
233 && GET_CODE (XEXP (addr, 0)) == PLUS
234 && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
236 offset = INTVAL (XEXP (XEXP (addr, 0), 1));
237 addr = XEXP (XEXP (addr, 0), 0);
240 if (GET_CODE (addr) == LO_SUM)
241 addr = XEXP (addr, 1);
243 /* If this is a constant pool reference, we can turn it into its
244 constant and hope that simplifications happen. */
245 if (GET_CODE (addr) == SYMBOL_REF
246 && CONSTANT_POOL_ADDRESS_P (addr))
248 c = get_pool_constant (addr);
249 cmode = get_pool_mode (addr);
251 /* If we're accessing the constant in a different mode than it was
252 originally stored, attempt to fix that up via subreg simplifications.
253 If that fails we have no choice but to return the original memory. */
254 if ((offset != 0 || cmode != GET_MODE (x))
255 && offset >= 0 && offset < GET_MODE_SIZE (cmode))
257 rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
258 if (tem && CONSTANT_P (tem))
259 return tem;
261 else
262 return c;
265 return x;
268 /* Simplify a MEM based on its attributes. This is the default
269 delegitimize_address target hook, and it's recommended that every
270 overrider call it. */
273 delegitimize_mem_from_attrs (rtx x)
275 /* MEMs without MEM_OFFSETs may have been offset, so we can't just
276 use their base addresses as equivalent. */
277 if (MEM_P (x)
278 && MEM_EXPR (x)
279 && MEM_OFFSET_KNOWN_P (x))
281 tree decl = MEM_EXPR (x);
282 machine_mode mode = GET_MODE (x);
283 HOST_WIDE_INT offset = 0;
285 switch (TREE_CODE (decl))
287 default:
288 decl = NULL;
289 break;
291 case VAR_DECL:
292 break;
294 case ARRAY_REF:
295 case ARRAY_RANGE_REF:
296 case COMPONENT_REF:
297 case BIT_FIELD_REF:
298 case REALPART_EXPR:
299 case IMAGPART_EXPR:
300 case VIEW_CONVERT_EXPR:
302 HOST_WIDE_INT bitsize, bitpos;
303 tree toffset;
304 int unsignedp, reversep, volatilep = 0;
306 decl
307 = get_inner_reference (decl, &bitsize, &bitpos, &toffset, &mode,
308 &unsignedp, &reversep, &volatilep, false);
309 if (bitsize != GET_MODE_BITSIZE (mode)
310 || (bitpos % BITS_PER_UNIT)
311 || (toffset && !tree_fits_shwi_p (toffset)))
312 decl = NULL;
313 else
315 offset += bitpos / BITS_PER_UNIT;
316 if (toffset)
317 offset += tree_to_shwi (toffset);
319 break;
323 if (decl
324 && mode == GET_MODE (x)
325 && TREE_CODE (decl) == VAR_DECL
326 && (TREE_STATIC (decl)
327 || DECL_THREAD_LOCAL_P (decl))
328 && DECL_RTL_SET_P (decl)
329 && MEM_P (DECL_RTL (decl)))
331 rtx newx;
333 offset += MEM_OFFSET (x);
335 newx = DECL_RTL (decl);
337 if (MEM_P (newx))
339 rtx n = XEXP (newx, 0), o = XEXP (x, 0);
341 /* Avoid creating a new MEM needlessly if we already had
342 the same address. We do if there's no OFFSET and the
343 old address X is identical to NEWX, or if X is of the
344 form (plus NEWX OFFSET), or the NEWX is of the form
345 (plus Y (const_int Z)) and X is that with the offset
346 added: (plus Y (const_int Z+OFFSET)). */
347 if (!((offset == 0
348 || (GET_CODE (o) == PLUS
349 && GET_CODE (XEXP (o, 1)) == CONST_INT
350 && (offset == INTVAL (XEXP (o, 1))
351 || (GET_CODE (n) == PLUS
352 && GET_CODE (XEXP (n, 1)) == CONST_INT
353 && (INTVAL (XEXP (n, 1)) + offset
354 == INTVAL (XEXP (o, 1)))
355 && (n = XEXP (n, 0))))
356 && (o = XEXP (o, 0))))
357 && rtx_equal_p (o, n)))
358 x = adjust_address_nv (newx, mode, offset);
360 else if (GET_MODE (x) == GET_MODE (newx)
361 && offset == 0)
362 x = newx;
366 return x;
369 /* Make a unary operation by first seeing if it folds and otherwise making
370 the specified operation. */
373 simplify_gen_unary (enum rtx_code code, machine_mode mode, rtx op,
374 machine_mode op_mode)
376 rtx tem;
378 /* If this simplifies, use it. */
379 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
380 return tem;
382 return gen_rtx_fmt_e (code, mode, op);
385 /* Likewise for ternary operations. */
388 simplify_gen_ternary (enum rtx_code code, machine_mode mode,
389 machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
391 rtx tem;
393 /* If this simplifies, use it. */
394 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
395 op0, op1, op2)))
396 return tem;
398 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
401 /* Likewise, for relational operations.
402 CMP_MODE specifies the mode in which the comparison is done. */
405 simplify_gen_relational (enum rtx_code code, machine_mode mode,
406 machine_mode cmp_mode, rtx op0, rtx op1)
408 rtx tem;
410 if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
411 op0, op1)))
412 return tem;
414 return gen_rtx_fmt_ee (code, mode, op0, op1);
417 /* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
418 and simplify the result. If FN is non-NULL, call this callback on each
419 X; if it returns non-NULL, replace X with its return value and simplify the
420 result. */
423 simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
424 rtx (*fn) (rtx, const_rtx, void *), void *data)
426 enum rtx_code code = GET_CODE (x);
427 machine_mode mode = GET_MODE (x);
428 machine_mode op_mode;
429 const char *fmt;
430 rtx op0, op1, op2, newx, op;
431 rtvec vec, newvec;
432 int i, j;
434 if (__builtin_expect (fn != NULL, 0))
436 newx = fn (x, old_rtx, data);
437 if (newx)
438 return newx;
440 else if (rtx_equal_p (x, old_rtx))
441 return copy_rtx ((rtx) data);
443 switch (GET_RTX_CLASS (code))
445 case RTX_UNARY:
446 op0 = XEXP (x, 0);
447 op_mode = GET_MODE (op0);
448 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
449 if (op0 == XEXP (x, 0))
450 return x;
451 return simplify_gen_unary (code, mode, op0, op_mode);
453 case RTX_BIN_ARITH:
454 case RTX_COMM_ARITH:
455 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
456 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
457 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
458 return x;
459 return simplify_gen_binary (code, mode, op0, op1);
461 case RTX_COMPARE:
462 case RTX_COMM_COMPARE:
463 op0 = XEXP (x, 0);
464 op1 = XEXP (x, 1);
465 op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
466 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
467 op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
468 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
469 return x;
470 return simplify_gen_relational (code, mode, op_mode, op0, op1);
472 case RTX_TERNARY:
473 case RTX_BITFIELD_OPS:
474 op0 = XEXP (x, 0);
475 op_mode = GET_MODE (op0);
476 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
477 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
478 op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
479 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
480 return x;
481 if (op_mode == VOIDmode)
482 op_mode = GET_MODE (op0);
483 return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
485 case RTX_EXTRA:
486 if (code == SUBREG)
488 op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
489 if (op0 == SUBREG_REG (x))
490 return x;
491 op0 = simplify_gen_subreg (GET_MODE (x), op0,
492 GET_MODE (SUBREG_REG (x)),
493 SUBREG_BYTE (x));
494 return op0 ? op0 : x;
496 break;
498 case RTX_OBJ:
499 if (code == MEM)
501 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
502 if (op0 == XEXP (x, 0))
503 return x;
504 return replace_equiv_address_nv (x, op0);
506 else if (code == LO_SUM)
508 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
509 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
511 /* (lo_sum (high x) y) -> y where x and y have the same base. */
512 if (GET_CODE (op0) == HIGH)
514 rtx base0, base1, offset0, offset1;
515 split_const (XEXP (op0, 0), &base0, &offset0);
516 split_const (op1, &base1, &offset1);
517 if (rtx_equal_p (base0, base1))
518 return op1;
521 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
522 return x;
523 return gen_rtx_LO_SUM (mode, op0, op1);
525 break;
527 default:
528 break;
531 newx = x;
532 fmt = GET_RTX_FORMAT (code);
533 for (i = 0; fmt[i]; i++)
534 switch (fmt[i])
536 case 'E':
537 vec = XVEC (x, i);
538 newvec = XVEC (newx, i);
539 for (j = 0; j < GET_NUM_ELEM (vec); j++)
541 op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
542 old_rtx, fn, data);
543 if (op != RTVEC_ELT (vec, j))
545 if (newvec == vec)
547 newvec = shallow_copy_rtvec (vec);
548 if (x == newx)
549 newx = shallow_copy_rtx (x);
550 XVEC (newx, i) = newvec;
552 RTVEC_ELT (newvec, j) = op;
555 break;
557 case 'e':
558 if (XEXP (x, i))
560 op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
561 if (op != XEXP (x, i))
563 if (x == newx)
564 newx = shallow_copy_rtx (x);
565 XEXP (newx, i) = op;
568 break;
570 return newx;
573 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
574 resulting RTX. Return a new RTX which is as simplified as possible. */
577 simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
579 return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
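/* A typical use (hypothetical rtxes): substitute a known constant for a
   register and let the result fold,

     simplify_replace_rtx ((plus:SI (reg:SI 100) (const_int 3)),
                           (reg:SI 100), (const_int 4))
       -> (const_int 7)   */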
582 /* Try to simplify a MODE truncation of OP, which has OP_MODE.
583 Only handle cases where the truncated value is inherently an rvalue.
585 RTL provides two ways of truncating a value:
587 1. a lowpart subreg. This form is only a truncation when both
588 the outer and inner modes (here MODE and OP_MODE respectively)
589 are scalar integers, and only then when the subreg is used as
590 an rvalue.
592 It is only valid to form such truncating subregs if the
593 truncation requires no action by the target. The onus for
594 proving this is on the creator of the subreg -- e.g. the
595 caller to simplify_subreg or simplify_gen_subreg -- and typically
596 involves either TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode.
598 2. a TRUNCATE. This form handles both scalar and compound integers.
600 The first form is preferred where valid. However, the TRUNCATE
601 handling in simplify_unary_operation turns the second form into the
602 first form when TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode allow,
603 so it is generally safe to form rvalue truncations using:
605 simplify_gen_unary (TRUNCATE, ...)
607 and leave simplify_unary_operation to work out which representation
608 should be used.
610 Because of the proof requirements on (1), simplify_truncation must
611 also use simplify_gen_unary (TRUNCATE, ...) to truncate parts of OP,
612 regardless of whether the outer truncation came from a SUBREG or a
613 TRUNCATE. For example, if the caller has proven that an SImode
614 truncation of:
616 (and:DI X Y)
618 is a no-op and can be represented as a subreg, it does not follow
619 that SImode truncations of X and Y are also no-ops. On a target
620 like 64-bit MIPS that requires SImode values to be stored in
621 sign-extended form, an SImode truncation of:
623 (and:DI (reg:DI X) (const_int 63))
625 is trivially a no-op because only the lower 6 bits can be set.
626 However, X is still an arbitrary 64-bit number and so we cannot
627 assume that truncating it too is a no-op. */
629 static rtx
630 simplify_truncation (machine_mode mode, rtx op,
631 machine_mode op_mode)
633 unsigned int precision = GET_MODE_UNIT_PRECISION (mode);
634 unsigned int op_precision = GET_MODE_UNIT_PRECISION (op_mode);
635 gcc_assert (precision <= op_precision);
637 /* Optimize truncations of zero and sign extended values. */
638 if (GET_CODE (op) == ZERO_EXTEND
639 || GET_CODE (op) == SIGN_EXTEND)
641 /* There are three possibilities. If MODE is the same as the
642 origmode, we can omit both the extension and the subreg.
643 If MODE is not larger than the origmode, we can apply the
644 truncation without the extension. Finally, if the outermode
645 is larger than the origmode, we can just extend to the appropriate
646 mode. */
647 machine_mode origmode = GET_MODE (XEXP (op, 0));
648 if (mode == origmode)
649 return XEXP (op, 0);
650 else if (precision <= GET_MODE_UNIT_PRECISION (origmode))
651 return simplify_gen_unary (TRUNCATE, mode,
652 XEXP (op, 0), origmode);
653 else
654 return simplify_gen_unary (GET_CODE (op), mode,
655 XEXP (op, 0), origmode);
658 /* If the machine can perform operations in the truncated mode, distribute
659 the truncation, i.e. simplify (truncate:QI (op:SI (x:SI) (y:SI))) into
660 (op:QI (truncate:QI (x:SI)) (truncate:QI (y:SI))). */
661 if (1
662 && (!WORD_REGISTER_OPERATIONS || precision >= BITS_PER_WORD)
663 && (GET_CODE (op) == PLUS
664 || GET_CODE (op) == MINUS
665 || GET_CODE (op) == MULT))
667 rtx op0 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0), op_mode);
668 if (op0)
670 rtx op1 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 1), op_mode);
671 if (op1)
672 return simplify_gen_binary (GET_CODE (op), mode, op0, op1);
676 /* Simplify (truncate:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C)) into
677 (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
678 the outer subreg is effectively a truncation to the original mode. */
679 if ((GET_CODE (op) == LSHIFTRT
680 || GET_CODE (op) == ASHIFTRT)
681 /* Ensure that OP_MODE is at least twice as wide as MODE
682 to avoid the possibility that an outer LSHIFTRT shifts by more
683 than the sign extension's sign_bit_copies and introduces zeros
684 into the high bits of the result. */
685 && 2 * precision <= op_precision
686 && CONST_INT_P (XEXP (op, 1))
687 && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
688 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
689 && UINTVAL (XEXP (op, 1)) < precision)
690 return simplify_gen_binary (ASHIFTRT, mode,
691 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
693 /* Likewise (truncate:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C)) into
694 (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
695 the outer subreg is effectively a truncation to the original mode. */
696 if ((GET_CODE (op) == LSHIFTRT
697 || GET_CODE (op) == ASHIFTRT)
698 && CONST_INT_P (XEXP (op, 1))
699 && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
700 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
701 && UINTVAL (XEXP (op, 1)) < precision)
702 return simplify_gen_binary (LSHIFTRT, mode,
703 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
705 /* Likewise (truncate:QI (ashift:SI (zero_extend:SI (x:QI)) C)) into
706 (ashift:QI (x:QI) C), where C is a suitable small constant and
707 the outer subreg is effectively a truncation to the original mode. */
708 if (GET_CODE (op) == ASHIFT
709 && CONST_INT_P (XEXP (op, 1))
710 && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
711 || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
712 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
713 && UINTVAL (XEXP (op, 1)) < precision)
714 return simplify_gen_binary (ASHIFT, mode,
715 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
717 /* Likewise (truncate:QI (and:SI (lshiftrt:SI (x:SI) C) C2)) into
718 (and:QI (lshiftrt:QI (truncate:QI (x:SI)) C) C2) for suitable C
719 and C2. */
720 if (GET_CODE (op) == AND
721 && (GET_CODE (XEXP (op, 0)) == LSHIFTRT
722 || GET_CODE (XEXP (op, 0)) == ASHIFTRT)
723 && CONST_INT_P (XEXP (XEXP (op, 0), 1))
724 && CONST_INT_P (XEXP (op, 1)))
726 rtx op0 = (XEXP (XEXP (op, 0), 0));
727 rtx shift_op = XEXP (XEXP (op, 0), 1);
728 rtx mask_op = XEXP (op, 1);
729 unsigned HOST_WIDE_INT shift = UINTVAL (shift_op);
730 unsigned HOST_WIDE_INT mask = UINTVAL (mask_op);
732 if (shift < precision
733 /* If doing this transform works for an X with all bits set,
734 it works for any X. */
735 && ((GET_MODE_MASK (mode) >> shift) & mask)
736 == ((GET_MODE_MASK (op_mode) >> shift) & mask)
737 && (op0 = simplify_gen_unary (TRUNCATE, mode, op0, op_mode))
738 && (op0 = simplify_gen_binary (LSHIFTRT, mode, op0, shift_op)))
740 mask_op = GEN_INT (trunc_int_for_mode (mask, mode));
741 return simplify_gen_binary (AND, mode, op0, mask_op);
745 /* Recognize a word extraction from a multi-word subreg. */
746 if ((GET_CODE (op) == LSHIFTRT
747 || GET_CODE (op) == ASHIFTRT)
748 && SCALAR_INT_MODE_P (mode)
749 && SCALAR_INT_MODE_P (op_mode)
750 && precision >= BITS_PER_WORD
751 && 2 * precision <= op_precision
752 && CONST_INT_P (XEXP (op, 1))
753 && (INTVAL (XEXP (op, 1)) & (precision - 1)) == 0
754 && UINTVAL (XEXP (op, 1)) < op_precision)
756 int byte = subreg_lowpart_offset (mode, op_mode);
757 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
758 return simplify_gen_subreg (mode, XEXP (op, 0), op_mode,
759 (WORDS_BIG_ENDIAN
760 ? byte - shifted_bytes
761 : byte + shifted_bytes));
764 /* If we have a TRUNCATE of a right shift of MEM, make a new MEM
765 and try replacing the TRUNCATE and shift with it. Don't do this
766 if the MEM has a mode-dependent address. */
767 if ((GET_CODE (op) == LSHIFTRT
768 || GET_CODE (op) == ASHIFTRT)
769 && SCALAR_INT_MODE_P (op_mode)
770 && MEM_P (XEXP (op, 0))
771 && CONST_INT_P (XEXP (op, 1))
772 && (INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (mode)) == 0
773 && INTVAL (XEXP (op, 1)) > 0
774 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (op_mode)
775 && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0),
776 MEM_ADDR_SPACE (XEXP (op, 0)))
777 && ! MEM_VOLATILE_P (XEXP (op, 0))
778 && (GET_MODE_SIZE (mode) >= UNITS_PER_WORD
779 || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
781 int byte = subreg_lowpart_offset (mode, op_mode);
782 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
783 return adjust_address_nv (XEXP (op, 0), mode,
784 (WORDS_BIG_ENDIAN
785 ? byte - shifted_bytes
786 : byte + shifted_bytes));
789 /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
790 (OP:SI foo:SI) if OP is NEG or ABS. */
791 if ((GET_CODE (op) == ABS
792 || GET_CODE (op) == NEG)
793 && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
794 || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
795 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
796 return simplify_gen_unary (GET_CODE (op), mode,
797 XEXP (XEXP (op, 0), 0), mode);
799 /* (truncate:A (subreg:B (truncate:C X) 0)) is
800 (truncate:A X). */
801 if (GET_CODE (op) == SUBREG
802 && SCALAR_INT_MODE_P (mode)
803 && SCALAR_INT_MODE_P (op_mode)
804 && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (op)))
805 && GET_CODE (SUBREG_REG (op)) == TRUNCATE
806 && subreg_lowpart_p (op))
808 rtx inner = XEXP (SUBREG_REG (op), 0);
809 if (GET_MODE_PRECISION (mode)
810 <= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op))))
811 return simplify_gen_unary (TRUNCATE, mode, inner, GET_MODE (inner));
812 else
813 /* If subreg above is paradoxical and C is narrower
814 than A, return (subreg:A (truncate:C X) 0). */
815 return simplify_gen_subreg (mode, SUBREG_REG (op),
816 GET_MODE (SUBREG_REG (op)), 0);
819 /* (truncate:A (truncate:B X)) is (truncate:A X). */
820 if (GET_CODE (op) == TRUNCATE)
821 return simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0),
822 GET_MODE (XEXP (op, 0)));
824 return NULL_RTX;
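/* As an example of the shift cases above (hypothetical operands):

     (truncate:QI (lshiftrt:SI (zero_extend:SI (reg:QI x)) (const_int 2)))
       -> (lshiftrt:QI (reg:QI x) (const_int 2))

   since the zero_extend guarantees that the bits shifted down into QImode
   come from the original QImode value.  */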
827 /* Try to simplify a unary operation CODE whose output mode is to be
828 MODE with input operand OP whose mode was originally OP_MODE.
829 Return zero if no simplification can be made. */
831 simplify_unary_operation (enum rtx_code code, machine_mode mode,
832 rtx op, machine_mode op_mode)
834 rtx trueop, tem;
836 trueop = avoid_constant_pool_reference (op);
838 tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
839 if (tem)
840 return tem;
842 return simplify_unary_operation_1 (code, mode, op);
845 /* Return true if FLOAT or UNSIGNED_FLOAT operation OP is known
846 to be exact. */
848 static bool
849 exact_int_to_float_conversion_p (const_rtx op)
851 int out_bits = significand_size (GET_MODE_INNER (GET_MODE (op)));
852 machine_mode op0_mode = GET_MODE (XEXP (op, 0));
853 /* Constants shouldn't reach here. */
854 gcc_assert (op0_mode != VOIDmode);
855 int in_prec = GET_MODE_UNIT_PRECISION (op0_mode);
856 int in_bits = in_prec;
857 if (HWI_COMPUTABLE_MODE_P (op0_mode))
859 unsigned HOST_WIDE_INT nonzero = nonzero_bits (XEXP (op, 0), op0_mode);
860 if (GET_CODE (op) == FLOAT)
861 in_bits -= num_sign_bit_copies (XEXP (op, 0), op0_mode);
862 else if (GET_CODE (op) == UNSIGNED_FLOAT)
863 in_bits = wi::min_precision (wi::uhwi (nonzero, in_prec), UNSIGNED);
864 else
865 gcc_unreachable ();
866 in_bits -= wi::ctz (wi::uhwi (nonzero, in_prec));
868 return in_bits <= out_bits;
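/* For instance (hypothetical modes), (float:DF (reg:SI x)) is always exact,
   because DFmode's 53-bit significand can hold any 32-bit integer, whereas
   (float:SF (reg:SI x)) is only known to be exact when nonzero_bits or
   num_sign_bit_copies proves that x needs at most 24 significant bits.  */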
871 /* Perform some simplifications we can do even if the operands
872 aren't constant. */
873 static rtx
874 simplify_unary_operation_1 (enum rtx_code code, machine_mode mode, rtx op)
876 enum rtx_code reversed;
877 rtx temp;
879 switch (code)
881 case NOT:
882 /* (not (not X)) == X. */
883 if (GET_CODE (op) == NOT)
884 return XEXP (op, 0);
886 /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
887 comparison is all ones. */
888 if (COMPARISON_P (op)
889 && (mode == BImode || STORE_FLAG_VALUE == -1)
890 && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
891 return simplify_gen_relational (reversed, mode, VOIDmode,
892 XEXP (op, 0), XEXP (op, 1));
894 /* (not (plus X -1)) can become (neg X). */
895 if (GET_CODE (op) == PLUS
896 && XEXP (op, 1) == constm1_rtx)
897 return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
899 /* Similarly, (not (neg X)) is (plus X -1). */
900 if (GET_CODE (op) == NEG)
901 return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
902 CONSTM1_RTX (mode));
904 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
905 if (GET_CODE (op) == XOR
906 && CONST_INT_P (XEXP (op, 1))
907 && (temp = simplify_unary_operation (NOT, mode,
908 XEXP (op, 1), mode)) != 0)
909 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
911 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
912 if (GET_CODE (op) == PLUS
913 && CONST_INT_P (XEXP (op, 1))
914 && mode_signbit_p (mode, XEXP (op, 1))
915 && (temp = simplify_unary_operation (NOT, mode,
916 XEXP (op, 1), mode)) != 0)
917 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
920 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
921 operands other than 1, but that is not valid. We could do a
922 similar simplification for (not (lshiftrt C X)) where C is
923 just the sign bit, but this doesn't seem common enough to
924 bother with. */
925 if (GET_CODE (op) == ASHIFT
926 && XEXP (op, 0) == const1_rtx)
928 temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
929 return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
932 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
933 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
934 so we can perform the above simplification. */
935 if (STORE_FLAG_VALUE == -1
936 && GET_CODE (op) == ASHIFTRT
937 && CONST_INT_P (XEXP (op, 1))
938 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
939 return simplify_gen_relational (GE, mode, VOIDmode,
940 XEXP (op, 0), const0_rtx);
943 if (GET_CODE (op) == SUBREG
944 && subreg_lowpart_p (op)
945 && (GET_MODE_SIZE (GET_MODE (op))
946 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
947 && GET_CODE (SUBREG_REG (op)) == ASHIFT
948 && XEXP (SUBREG_REG (op), 0) == const1_rtx)
950 machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
951 rtx x;
953 x = gen_rtx_ROTATE (inner_mode,
954 simplify_gen_unary (NOT, inner_mode, const1_rtx,
955 inner_mode),
956 XEXP (SUBREG_REG (op), 1));
957 temp = rtl_hooks.gen_lowpart_no_emit (mode, x);
958 if (temp)
959 return temp;
962 /* Apply De Morgan's laws to reduce number of patterns for machines
963 with negating logical insns (and-not, nand, etc.). If result has
964 only one NOT, put it first, since that is how the patterns are
965 coded. */
966 if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
968 rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
969 machine_mode op_mode;
971 op_mode = GET_MODE (in1);
972 in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);
974 op_mode = GET_MODE (in2);
975 if (op_mode == VOIDmode)
976 op_mode = mode;
977 in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);
979 if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
980 std::swap (in1, in2);
982 return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
983 mode, in1, in2);
986 /* (not (bswap x)) -> (bswap (not x)). */
987 if (GET_CODE (op) == BSWAP)
989 rtx x = simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
990 return simplify_gen_unary (BSWAP, mode, x, mode);
992 break;
994 case NEG:
995 /* (neg (neg X)) == X. */
996 if (GET_CODE (op) == NEG)
997 return XEXP (op, 0);
999 /* (neg (x ? (neg y) : y)) == !x ? (neg y) : y.
1000 If the comparison is not reversible, use
1001 x ? y : (neg y). */
1002 if (GET_CODE (op) == IF_THEN_ELSE)
1004 rtx cond = XEXP (op, 0);
1005 rtx true_rtx = XEXP (op, 1);
1006 rtx false_rtx = XEXP (op, 2);
1008 if ((GET_CODE (true_rtx) == NEG
1009 && rtx_equal_p (XEXP (true_rtx, 0), false_rtx))
1010 || (GET_CODE (false_rtx) == NEG
1011 && rtx_equal_p (XEXP (false_rtx, 0), true_rtx)))
1013 if (reversed_comparison_code (cond, NULL_RTX) != UNKNOWN)
1014 temp = reversed_comparison (cond, mode);
1015 else
1017 temp = cond;
1018 std::swap (true_rtx, false_rtx);
1020 return simplify_gen_ternary (IF_THEN_ELSE, mode,
1021 mode, temp, true_rtx, false_rtx);
1025 /* (neg (plus X 1)) can become (not X). */
1026 if (GET_CODE (op) == PLUS
1027 && XEXP (op, 1) == const1_rtx)
1028 return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
1030 /* Similarly, (neg (not X)) is (plus X 1). */
1031 if (GET_CODE (op) == NOT)
1032 return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
1033 CONST1_RTX (mode));
1035 /* (neg (minus X Y)) can become (minus Y X). This transformation
1036 isn't safe for modes with signed zeros, since if X and Y are
1037 both +0, (minus Y X) is the same as (minus X Y). If the
1038 rounding mode is towards +infinity (or -infinity) then the two
1039 expressions will be rounded differently. */
1040 if (GET_CODE (op) == MINUS
1041 && !HONOR_SIGNED_ZEROS (mode)
1042 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1043 return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
1045 if (GET_CODE (op) == PLUS
1046 && !HONOR_SIGNED_ZEROS (mode)
1047 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1049 /* (neg (plus A C)) is simplified to (minus -C A). */
1050 if (CONST_SCALAR_INT_P (XEXP (op, 1))
1051 || CONST_DOUBLE_AS_FLOAT_P (XEXP (op, 1)))
1053 temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
1054 if (temp)
1055 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
1058 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
1059 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
1060 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
1063 /* (neg (mult A B)) becomes (mult A (neg B)).
1064 This works even for floating-point values. */
1065 if (GET_CODE (op) == MULT
1066 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1068 temp = simplify_gen_unary (NEG, mode, XEXP (op, 1), mode);
1069 return simplify_gen_binary (MULT, mode, XEXP (op, 0), temp);
1072 /* NEG commutes with ASHIFT since it is multiplication. Only do
1073 this if we can then eliminate the NEG (e.g., if the operand
1074 is a constant). */
1075 if (GET_CODE (op) == ASHIFT)
1077 temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
1078 if (temp)
1079 return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
1082 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
1083 C is equal to the width of MODE minus 1. */
1084 if (GET_CODE (op) == ASHIFTRT
1085 && CONST_INT_P (XEXP (op, 1))
1086 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
1087 return simplify_gen_binary (LSHIFTRT, mode,
1088 XEXP (op, 0), XEXP (op, 1));
1090 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
1091 C is equal to the width of MODE minus 1. */
1092 if (GET_CODE (op) == LSHIFTRT
1093 && CONST_INT_P (XEXP (op, 1))
1094 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
1095 return simplify_gen_binary (ASHIFTRT, mode,
1096 XEXP (op, 0), XEXP (op, 1));
1098 /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
1099 if (GET_CODE (op) == XOR
1100 && XEXP (op, 1) == const1_rtx
1101 && nonzero_bits (XEXP (op, 0), mode) == 1)
1102 return plus_constant (mode, XEXP (op, 0), -1);
1104 /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1. */
1105 /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1. */
1106 if (GET_CODE (op) == LT
1107 && XEXP (op, 1) == const0_rtx
1108 && SCALAR_INT_MODE_P (GET_MODE (XEXP (op, 0))))
1110 machine_mode inner = GET_MODE (XEXP (op, 0));
1111 int isize = GET_MODE_PRECISION (inner);
1112 if (STORE_FLAG_VALUE == 1)
1114 temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
1115 GEN_INT (isize - 1));
1116 if (mode == inner)
1117 return temp;
1118 if (GET_MODE_PRECISION (mode) > isize)
1119 return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
1120 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
1122 else if (STORE_FLAG_VALUE == -1)
1124 temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
1125 GEN_INT (isize - 1));
1126 if (mode == inner)
1127 return temp;
1128 if (GET_MODE_PRECISION (mode) > isize)
1129 return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
1130 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
1133 break;
1135 case TRUNCATE:
1136 /* Don't optimize (lshiftrt (mult ...)) as it would interfere
1137 with the umulXi3_highpart patterns. */
1138 if (GET_CODE (op) == LSHIFTRT
1139 && GET_CODE (XEXP (op, 0)) == MULT)
1140 break;
1142 if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
1144 if (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op)))
1146 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1147 if (temp)
1148 return temp;
1150 /* We can't handle truncation to a partial integer mode here
1151 because we don't know the real bitsize of the partial
1152 integer mode. */
1153 break;
1156 if (GET_MODE (op) != VOIDmode)
1158 temp = simplify_truncation (mode, op, GET_MODE (op));
1159 if (temp)
1160 return temp;
1163 /* If we know that the value is already truncated, we can
1164 replace the TRUNCATE with a SUBREG. */
1165 if (GET_MODE_NUNITS (mode) == 1
1166 && (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op))
1167 || truncated_to_mode (mode, op)))
1169 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1170 if (temp)
1171 return temp;
1174 /* A truncate of a comparison can be replaced with a subreg if
1175 STORE_FLAG_VALUE permits. This is like the previous test,
1176 but it works even if the comparison is done in a mode larger
1177 than HOST_BITS_PER_WIDE_INT. */
1178 if (HWI_COMPUTABLE_MODE_P (mode)
1179 && COMPARISON_P (op)
1180 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
1182 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1183 if (temp)
1184 return temp;
1187 /* A truncate of a memory is just loading the low part of the memory
1188 if we are not changing the meaning of the address. */
1189 if (GET_CODE (op) == MEM
1190 && !VECTOR_MODE_P (mode)
1191 && !MEM_VOLATILE_P (op)
1192 && !mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op)))
1194 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1195 if (temp)
1196 return temp;
1199 break;
1201 case FLOAT_TRUNCATE:
1202 if (DECIMAL_FLOAT_MODE_P (mode))
1203 break;
1205 /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */
1206 if (GET_CODE (op) == FLOAT_EXTEND
1207 && GET_MODE (XEXP (op, 0)) == mode)
1208 return XEXP (op, 0);
1210 /* (float_truncate:SF (float_truncate:DF foo:XF))
1211 = (float_truncate:SF foo:XF).
1212 This may eliminate double rounding, so it is unsafe.
1214 (float_truncate:SF (float_extend:XF foo:DF))
1215 = (float_truncate:SF foo:DF).
1217 (float_truncate:DF (float_extend:XF foo:SF))
1218 = (float_extend:DF foo:SF). */
1219 if ((GET_CODE (op) == FLOAT_TRUNCATE
1220 && flag_unsafe_math_optimizations)
1221 || GET_CODE (op) == FLOAT_EXTEND)
1222 return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op,
1223 0)))
1224 > GET_MODE_SIZE (mode)
1225 ? FLOAT_TRUNCATE : FLOAT_EXTEND,
1226 mode,
1227 XEXP (op, 0), mode);
1229 /* (float_truncate (float x)) is (float x) */
1230 if ((GET_CODE (op) == FLOAT || GET_CODE (op) == UNSIGNED_FLOAT)
1231 && (flag_unsafe_math_optimizations
1232 || exact_int_to_float_conversion_p (op)))
1233 return simplify_gen_unary (GET_CODE (op), mode,
1234 XEXP (op, 0),
1235 GET_MODE (XEXP (op, 0)));
1237 /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
1238 (OP:SF foo:SF) if OP is NEG or ABS. */
1239 if ((GET_CODE (op) == ABS
1240 || GET_CODE (op) == NEG)
1241 && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
1242 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
1243 return simplify_gen_unary (GET_CODE (op), mode,
1244 XEXP (XEXP (op, 0), 0), mode);
1246 /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
1247 is (float_truncate:SF x). */
1248 if (GET_CODE (op) == SUBREG
1249 && subreg_lowpart_p (op)
1250 && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
1251 return SUBREG_REG (op);
1252 break;
1254 case FLOAT_EXTEND:
1255 if (DECIMAL_FLOAT_MODE_P (mode))
1256 break;
1258 /* (float_extend (float_extend x)) is (float_extend x)
1260 (float_extend (float x)) is (float x) assuming that double
1261 rounding can't happen. */
1263 if (GET_CODE (op) == FLOAT_EXTEND
1264 || ((GET_CODE (op) == FLOAT || GET_CODE (op) == UNSIGNED_FLOAT)
1265 && exact_int_to_float_conversion_p (op)))
1266 return simplify_gen_unary (GET_CODE (op), mode,
1267 XEXP (op, 0),
1268 GET_MODE (XEXP (op, 0)));
1270 break;
1272 case ABS:
1273 /* (abs (neg <foo>)) -> (abs <foo>) */
1274 if (GET_CODE (op) == NEG)
1275 return simplify_gen_unary (ABS, mode, XEXP (op, 0),
1276 GET_MODE (XEXP (op, 0)));
1278 /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
1279 do nothing. */
1280 if (GET_MODE (op) == VOIDmode)
1281 break;
1283 /* If operand is something known to be positive, ignore the ABS. */
1284 if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
1285 || val_signbit_known_clear_p (GET_MODE (op),
1286 nonzero_bits (op, GET_MODE (op))))
1287 return op;
1289 /* If operand is known to be only -1 or 0, convert ABS to NEG. */
1290 if (num_sign_bit_copies (op, mode) == GET_MODE_PRECISION (mode))
1291 return gen_rtx_NEG (mode, op);
1293 break;
1295 case FFS:
1296 /* (ffs (*_extend <X>)) = (ffs <X>) */
1297 if (GET_CODE (op) == SIGN_EXTEND
1298 || GET_CODE (op) == ZERO_EXTEND)
1299 return simplify_gen_unary (FFS, mode, XEXP (op, 0),
1300 GET_MODE (XEXP (op, 0)));
1301 break;
1303 case POPCOUNT:
1304 switch (GET_CODE (op))
1306 case BSWAP:
1307 case ZERO_EXTEND:
1308 /* (popcount (zero_extend <X>)) = (popcount <X>) */
1309 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1310 GET_MODE (XEXP (op, 0)));
1312 case ROTATE:
1313 case ROTATERT:
1314 /* Rotations don't affect popcount. */
1315 if (!side_effects_p (XEXP (op, 1)))
1316 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1317 GET_MODE (XEXP (op, 0)));
1318 break;
1320 default:
1321 break;
1323 break;
1325 case PARITY:
1326 switch (GET_CODE (op))
1328 case NOT:
1329 case BSWAP:
1330 case ZERO_EXTEND:
1331 case SIGN_EXTEND:
1332 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1333 GET_MODE (XEXP (op, 0)));
1335 case ROTATE:
1336 case ROTATERT:
1337 /* Rotations don't affect parity. */
1338 if (!side_effects_p (XEXP (op, 1)))
1339 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1340 GET_MODE (XEXP (op, 0)));
1341 break;
1343 default:
1344 break;
1346 break;
1348 case BSWAP:
1349 /* (bswap (bswap x)) -> x. */
1350 if (GET_CODE (op) == BSWAP)
1351 return XEXP (op, 0);
1352 break;
1354 case FLOAT:
1355 /* (float (sign_extend <X>)) = (float <X>). */
1356 if (GET_CODE (op) == SIGN_EXTEND)
1357 return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
1358 GET_MODE (XEXP (op, 0)));
1359 break;
1361 case SIGN_EXTEND:
1362 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
1363 becomes just the MINUS if its mode is MODE. This allows
1364 folding switch statements on machines using casesi (such as
1365 the VAX). */
1366 if (GET_CODE (op) == TRUNCATE
1367 && GET_MODE (XEXP (op, 0)) == mode
1368 && GET_CODE (XEXP (op, 0)) == MINUS
1369 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
1370 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
1371 return XEXP (op, 0);
1373 /* Extending a widening multiplication should be canonicalized to
1374 a wider widening multiplication. */
1375 if (GET_CODE (op) == MULT)
1377 rtx lhs = XEXP (op, 0);
1378 rtx rhs = XEXP (op, 1);
1379 enum rtx_code lcode = GET_CODE (lhs);
1380 enum rtx_code rcode = GET_CODE (rhs);
1382 /* Widening multiplies usually extend both operands, but sometimes
1383 they use a shift to extract a portion of a register. */
1384 if ((lcode == SIGN_EXTEND
1385 || (lcode == ASHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1386 && (rcode == SIGN_EXTEND
1387 || (rcode == ASHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1389 machine_mode lmode = GET_MODE (lhs);
1390 machine_mode rmode = GET_MODE (rhs);
1391 int bits;
1393 if (lcode == ASHIFTRT)
1394 /* Number of bits not shifted off the end. */
1395 bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
1396 else /* lcode == SIGN_EXTEND */
1397 /* Size of inner mode. */
1398 bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
1400 if (rcode == ASHIFTRT)
1401 bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
1402 else /* rcode == SIGN_EXTEND */
1403 bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
1405 /* We can only widen multiplies if the result is mathematically
1406 equivalent. I.e. if overflow was impossible. */
1407 if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
1408 return simplify_gen_binary
1409 (MULT, mode,
1410 simplify_gen_unary (SIGN_EXTEND, mode, lhs, lmode),
1411 simplify_gen_unary (SIGN_EXTEND, mode, rhs, rmode));
1415 /* Check for a sign extension of a subreg of a promoted
1416 variable, where the promotion is sign-extended, and the
1417 target mode is the same as the variable's promotion. */
1418 if (GET_CODE (op) == SUBREG
1419 && SUBREG_PROMOTED_VAR_P (op)
1420 && SUBREG_PROMOTED_SIGNED_P (op)
1421 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
1423 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1424 if (temp)
1425 return temp;
1428 /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
1429 (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1430 if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
1432 gcc_assert (GET_MODE_PRECISION (mode)
1433 > GET_MODE_PRECISION (GET_MODE (op)));
1434 return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
1435 GET_MODE (XEXP (op, 0)));
1438 /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
1439 is (sign_extend:M (subreg:O <X>)) if there is mode with
1440 GET_MODE_BITSIZE (N) - I bits.
1441 (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1442 is similarly (zero_extend:M (subreg:O <X>)). */
1443 if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT)
1444 && GET_CODE (XEXP (op, 0)) == ASHIFT
1445 && CONST_INT_P (XEXP (op, 1))
1446 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1447 && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
1449 machine_mode tmode
1450 = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
1451 - INTVAL (XEXP (op, 1)), MODE_INT, 1);
1452 gcc_assert (GET_MODE_BITSIZE (mode)
1453 > GET_MODE_BITSIZE (GET_MODE (op)));
1454 if (tmode != BLKmode)
1456 rtx inner =
1457 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1458 if (inner)
1459 return simplify_gen_unary (GET_CODE (op) == ASHIFTRT
1460 ? SIGN_EXTEND : ZERO_EXTEND,
1461 mode, inner, tmode);
1465 /* (sign_extend:M (lshiftrt:N <X> (const_int I))) is better as
1466 (zero_extend:M (lshiftrt:N <X> (const_int I))) if I is not 0. */
1467 if (GET_CODE (op) == LSHIFTRT
1468 && CONST_INT_P (XEXP (op, 1))
1469 && XEXP (op, 1) != const0_rtx)
1470 return simplify_gen_unary (ZERO_EXTEND, mode, op, GET_MODE (op));
1472 #if defined(POINTERS_EXTEND_UNSIGNED)
1473 /* As we do not know which address space the pointer is referring to,
1474 we can do this only if the target does not support different pointer
1475 or address modes depending on the address space. */
1476 if (target_default_pointer_address_modes_p ()
1477 && ! POINTERS_EXTEND_UNSIGNED
1478 && mode == Pmode && GET_MODE (op) == ptr_mode
1479 && (CONSTANT_P (op)
1480 || (GET_CODE (op) == SUBREG
1481 && REG_P (SUBREG_REG (op))
1482 && REG_POINTER (SUBREG_REG (op))
1483 && GET_MODE (SUBREG_REG (op)) == Pmode))
1484 && !targetm.have_ptr_extend ())
1485 return convert_memory_address (Pmode, op);
1486 #endif
1487 break;
1489 case ZERO_EXTEND:
1490 /* Check for a zero extension of a subreg of a promoted
1491 variable, where the promotion is zero-extended, and the
1492 target mode is the same as the variable's promotion. */
1493 if (GET_CODE (op) == SUBREG
1494 && SUBREG_PROMOTED_VAR_P (op)
1495 && SUBREG_PROMOTED_UNSIGNED_P (op)
1496 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
1498 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1499 if (temp)
1500 return temp;
1503 /* Extending a widening multiplication should be canonicalized to
1504 a wider widening multiplication. */
1505 if (GET_CODE (op) == MULT)
1507 rtx lhs = XEXP (op, 0);
1508 rtx rhs = XEXP (op, 1);
1509 enum rtx_code lcode = GET_CODE (lhs);
1510 enum rtx_code rcode = GET_CODE (rhs);
1512 /* Widening multiplies usually extend both operands, but sometimes
1513 they use a shift to extract a portion of a register. */
1514 if ((lcode == ZERO_EXTEND
1515 || (lcode == LSHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1516 && (rcode == ZERO_EXTEND
1517 || (rcode == LSHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1519 machine_mode lmode = GET_MODE (lhs);
1520 machine_mode rmode = GET_MODE (rhs);
1521 int bits;
1523 if (lcode == LSHIFTRT)
1524 /* Number of bits not shifted off the end. */
1525 bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
1526 else /* lcode == ZERO_EXTEND */
1527 /* Size of inner mode. */
1528 bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
1530 if (rcode == LSHIFTRT)
1531 bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
1532 else /* rcode == ZERO_EXTEND */
1533 bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
1535 /* We can only widen multiplies if the result is mathematically
1536 equivalent. I.e. if overflow was impossible. */
1537 if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
1538 return simplify_gen_binary
1539 (MULT, mode,
1540 simplify_gen_unary (ZERO_EXTEND, mode, lhs, lmode),
1541 simplify_gen_unary (ZERO_EXTEND, mode, rhs, rmode));
1545 /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1546 if (GET_CODE (op) == ZERO_EXTEND)
1547 return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0),
1548 GET_MODE (XEXP (op, 0)));
1550 /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1551 is (zero_extend:M (subreg:O <X>)) if there is mode with
1552 GET_MODE_PRECISION (N) - I bits. */
1553 if (GET_CODE (op) == LSHIFTRT
1554 && GET_CODE (XEXP (op, 0)) == ASHIFT
1555 && CONST_INT_P (XEXP (op, 1))
1556 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1557 && GET_MODE_PRECISION (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
1559 machine_mode tmode
1560 = mode_for_size (GET_MODE_PRECISION (GET_MODE (op))
1561 - INTVAL (XEXP (op, 1)), MODE_INT, 1);
1562 if (tmode != BLKmode)
1564 rtx inner =
1565 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1566 if (inner)
1567 return simplify_gen_unary (ZERO_EXTEND, mode, inner, tmode);
1571 /* (zero_extend:M (subreg:N <X:O>)) is <X:O> (for M == O) or
1572 (zero_extend:M <X:O>), if X doesn't have any non-zero bits outside
1573 of mode N. E.g.
1574 (zero_extend:SI (subreg:QI (and:SI (reg:SI) (const_int 63)) 0)) is
1575 (and:SI (reg:SI) (const_int 63)). */
1576 if (GET_CODE (op) == SUBREG
1577 && GET_MODE_PRECISION (GET_MODE (op))
1578 < GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
1579 && GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
1580 <= HOST_BITS_PER_WIDE_INT
1581 && GET_MODE_PRECISION (mode)
1582 >= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
1583 && subreg_lowpart_p (op)
1584 && (nonzero_bits (SUBREG_REG (op), GET_MODE (SUBREG_REG (op)))
1585 & ~GET_MODE_MASK (GET_MODE (op))) == 0)
1587 if (GET_MODE_PRECISION (mode)
1588 == GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op))))
1589 return SUBREG_REG (op);
1590 return simplify_gen_unary (ZERO_EXTEND, mode, SUBREG_REG (op),
1591 GET_MODE (SUBREG_REG (op)));
1594 #if defined(POINTERS_EXTEND_UNSIGNED)
1595 /* As we do not know which address space the pointer is referring to,
1596 we can do this only if the target does not support different pointer
1597 or address modes depending on the address space. */
1598 if (target_default_pointer_address_modes_p ()
1599 && POINTERS_EXTEND_UNSIGNED > 0
1600 && mode == Pmode && GET_MODE (op) == ptr_mode
1601 && (CONSTANT_P (op)
1602 || (GET_CODE (op) == SUBREG
1603 && REG_P (SUBREG_REG (op))
1604 && REG_POINTER (SUBREG_REG (op))
1605 && GET_MODE (SUBREG_REG (op)) == Pmode))
1606 && !targetm.have_ptr_extend ())
1607 return convert_memory_address (Pmode, op);
1608 #endif
1609 break;
1611 default:
1612 break;
1615 return 0;
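/* Two representative simplifications performed above (hypothetical
   operands, assuming STORE_FLAG_VALUE == 1 for the second):

     (not (plus:SI (reg:SI x) (const_int -1)))
       -> (neg:SI (reg:SI x))
     (neg:SI (lt:SI (reg:SI x) (const_int 0)))
       -> (ashiftrt:SI (reg:SI x) (const_int 31))   */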
1618 /* Try to compute the value of a unary operation CODE whose output mode is to
1619 be MODE with input operand OP whose mode was originally OP_MODE.
1620 Return zero if the value cannot be computed. */
1622 simplify_const_unary_operation (enum rtx_code code, machine_mode mode,
1623 rtx op, machine_mode op_mode)
1625 unsigned int width = GET_MODE_PRECISION (mode);
1627 if (code == VEC_DUPLICATE)
1629 gcc_assert (VECTOR_MODE_P (mode));
1630 if (GET_MODE (op) != VOIDmode)
1632 if (!VECTOR_MODE_P (GET_MODE (op)))
1633 gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
1634 else
1635 gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
1636 (GET_MODE (op)));
1638 if (CONST_SCALAR_INT_P (op) || CONST_DOUBLE_AS_FLOAT_P (op)
1639 || GET_CODE (op) == CONST_VECTOR)
1641 int elt_size = GET_MODE_UNIT_SIZE (mode);
1642 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1643 rtvec v = rtvec_alloc (n_elts);
1644 unsigned int i;
1646 if (GET_CODE (op) != CONST_VECTOR)
1647 for (i = 0; i < n_elts; i++)
1648 RTVEC_ELT (v, i) = op;
1649 else
1651 machine_mode inmode = GET_MODE (op);
1652 int in_elt_size = GET_MODE_UNIT_SIZE (inmode);
1653 unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
1655 gcc_assert (in_n_elts < n_elts);
1656 gcc_assert ((n_elts % in_n_elts) == 0);
1657 for (i = 0; i < n_elts; i++)
1658 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
1660 return gen_rtx_CONST_VECTOR (mode, v);
1664 if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
1666 int elt_size = GET_MODE_UNIT_SIZE (mode);
1667 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1668 machine_mode opmode = GET_MODE (op);
1669 int op_elt_size = GET_MODE_UNIT_SIZE (opmode);
1670 unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
1671 rtvec v = rtvec_alloc (n_elts);
1672 unsigned int i;
1674 gcc_assert (op_n_elts == n_elts);
1675 for (i = 0; i < n_elts; i++)
1677 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
1678 CONST_VECTOR_ELT (op, i),
1679 GET_MODE_INNER (opmode));
1680 if (!x)
1681 return 0;
1682 RTVEC_ELT (v, i) = x;
1684 return gen_rtx_CONST_VECTOR (mode, v);
1687 /* The order of these tests is critical so that, for example, we don't
1688 check the wrong mode (input vs. output) for a conversion operation,
1689 such as FIX. At some point, this should be simplified. */
1691 if (code == FLOAT && CONST_SCALAR_INT_P (op))
1693 REAL_VALUE_TYPE d;
1695 if (op_mode == VOIDmode)
1697 /* CONST_INTs have VOIDmode as their mode. We assume that all
1698 the bits of the constant are significant, though this is
1699 a dangerous assumption, as CONST_INTs are often
1700 created and used with garbage in the bits outside of the
1701 precision of the implied mode of the const_int. */
1702 op_mode = MAX_MODE_INT;
1705 real_from_integer (&d, mode, std::make_pair (op, op_mode), SIGNED);
1706 d = real_value_truncate (mode, d);
1707 return const_double_from_real_value (d, mode);
1709 else if (code == UNSIGNED_FLOAT && CONST_SCALAR_INT_P (op))
1711 REAL_VALUE_TYPE d;
1713 if (op_mode == VOIDmode)
1715 /* CONST_INTs have VOIDmode as their mode. We assume that all
1716 the bits of the constant are significant, though this is
1717 a dangerous assumption, as CONST_INTs are often
1718 created and used with garbage in the bits outside of the
1719 precision of the implied mode of the const_int. */
1720 op_mode = MAX_MODE_INT;
1723 real_from_integer (&d, mode, std::make_pair (op, op_mode), UNSIGNED);
1724 d = real_value_truncate (mode, d);
1725 return const_double_from_real_value (d, mode);
1728 if (CONST_SCALAR_INT_P (op) && width > 0)
1730 wide_int result;
1731 machine_mode imode = op_mode == VOIDmode ? mode : op_mode;
1732 rtx_mode_t op0 = std::make_pair (op, imode);
1733 int int_value;
1735 #if TARGET_SUPPORTS_WIDE_INT == 0
1736 /* This assert keeps the simplification from producing a result
1737 that cannot be represented in a CONST_DOUBLE but a lot of
1738 upstream callers expect that this function never fails to
1739 simplify something, so if you added this to the test
1740 above the code would die later anyway. If this assert
1741 happens, you just need to make the port support wide int. */
1742 gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT);
1743 #endif
1745 switch (code)
1747 case NOT:
1748 result = wi::bit_not (op0);
1749 break;
1751 case NEG:
1752 result = wi::neg (op0);
1753 break;
1755 case ABS:
1756 result = wi::abs (op0);
1757 break;
1759 case FFS:
1760 result = wi::shwi (wi::ffs (op0), mode);
1761 break;
1763 case CLZ:
1764 if (wi::ne_p (op0, 0))
1765 int_value = wi::clz (op0);
1766 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, int_value))
1767 int_value = GET_MODE_PRECISION (mode);
1768 result = wi::shwi (int_value, mode);
1769 break;
1771 case CLRSB:
1772 result = wi::shwi (wi::clrsb (op0), mode);
1773 break;
1775 case CTZ:
1776 if (wi::ne_p (op0, 0))
1777 int_value = wi::ctz (op0);
1778 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, int_value))
1779 int_value = GET_MODE_PRECISION (mode);
1780 result = wi::shwi (int_value, mode);
1781 break;
1783 case POPCOUNT:
1784 result = wi::shwi (wi::popcount (op0), mode);
1785 break;
1787 case PARITY:
1788 result = wi::shwi (wi::parity (op0), mode);
1789 break;
1791 case BSWAP:
1792 result = wide_int (op0).bswap ();
1793 break;
1795 case TRUNCATE:
1796 case ZERO_EXTEND:
1797 result = wide_int::from (op0, width, UNSIGNED);
1798 break;
1800 case SIGN_EXTEND:
1801 result = wide_int::from (op0, width, SIGNED);
1802 break;
1804 case SQRT:
1805 default:
1806 return 0;
1809 return immed_wide_int_const (result, mode);
1812 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1813 && SCALAR_FLOAT_MODE_P (mode)
1814 && SCALAR_FLOAT_MODE_P (GET_MODE (op)))
1816 REAL_VALUE_TYPE d = *CONST_DOUBLE_REAL_VALUE (op);
1817 switch (code)
1819 case SQRT:
1820 return 0;
1821 case ABS:
1822 d = real_value_abs (&d);
1823 break;
1824 case NEG:
1825 d = real_value_negate (&d);
1826 break;
1827 case FLOAT_TRUNCATE:
1828 d = real_value_truncate (mode, d);
1829 break;
1830 case FLOAT_EXTEND:
1831 /* All this does is change the mode, unless changing
1832 mode class. */
1833 if (GET_MODE_CLASS (mode) != GET_MODE_CLASS (GET_MODE (op)))
1834 real_convert (&d, mode, &d);
1835 break;
1836 case FIX:
1837 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
1838 break;
1839 case NOT:
1841 long tmp[4];
1842 int i;
1844 real_to_target (tmp, &d, GET_MODE (op));
1845 for (i = 0; i < 4; i++)
1846 tmp[i] = ~tmp[i];
1847 real_from_target (&d, tmp, mode);
1848 break;
1850 default:
1851 gcc_unreachable ();
1853 return const_double_from_real_value (d, mode);
1855 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1856 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1857 && GET_MODE_CLASS (mode) == MODE_INT
1858 && width > 0)
1860 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1861 operators are intentionally left unspecified (to ease implementation
1862 by target backends), for consistency, this routine implements the
1863 same semantics for constant folding as used by the middle-end. */
1865 /* This was formerly used only for non-IEEE float.
1866 eggert@twinsun.com says it is safe for IEEE also. */
1867 REAL_VALUE_TYPE t;
1868 const REAL_VALUE_TYPE *x = CONST_DOUBLE_REAL_VALUE (op);
1869 wide_int wmax, wmin;
1870 /* This is part of the ABI of real_to_integer, but we check
1871 things before making this call. */
1872 bool fail;
1874 switch (code)
1876 case FIX:
1877 if (REAL_VALUE_ISNAN (*x))
1878 return const0_rtx;
1880 /* Test against the signed upper bound. */
1881 wmax = wi::max_value (width, SIGNED);
1882 real_from_integer (&t, VOIDmode, wmax, SIGNED);
1883 if (real_less (&t, x))
1884 return immed_wide_int_const (wmax, mode);
1886 /* Test against the signed lower bound. */
1887 wmin = wi::min_value (width, SIGNED);
1888 real_from_integer (&t, VOIDmode, wmin, SIGNED);
1889 if (real_less (x, &t))
1890 return immed_wide_int_const (wmin, mode);
1892 return immed_wide_int_const (real_to_integer (x, &fail, width),
1893 mode);
1895 case UNSIGNED_FIX:
1896 if (REAL_VALUE_ISNAN (*x) || REAL_VALUE_NEGATIVE (*x))
1897 return const0_rtx;
1899 /* Test against the unsigned upper bound. */
1900 wmax = wi::max_value (width, UNSIGNED);
1901 real_from_integer (&t, VOIDmode, wmax, UNSIGNED);
1902 if (real_less (&t, x))
1903 return immed_wide_int_const (wmax, mode);
1905 return immed_wide_int_const (real_to_integer (x, &fail, width),
1906 mode);
1908 default:
1909 gcc_unreachable ();
1913 return NULL_RTX;
1916 /* Subroutine of simplify_binary_operation to simplify a binary operation
1917 CODE that can commute with byte swapping, with result mode MODE and
1918 operating on OP0 and OP1. CODE is currently one of AND, IOR or XOR.
1919 Return zero if no simplification or canonicalization is possible. */
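/* For instance (an illustrative case, assuming 32-bit SImode):
   (and (bswap x) (const_int 0xff)) can be rewritten as
   (bswap (and x (const_int 0xff000000))), because byte-swapping the
   constant mask commutes with the bitwise operation.  */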
1921 static rtx
1922 simplify_byte_swapping_operation (enum rtx_code code, machine_mode mode,
1923 rtx op0, rtx op1)
1925 rtx tem;
1927 /* (op (bswap x) C1) -> (bswap (op x C2)) with C2 swapped. */
1928 if (GET_CODE (op0) == BSWAP && CONST_SCALAR_INT_P (op1))
1930 tem = simplify_gen_binary (code, mode, XEXP (op0, 0),
1931 simplify_gen_unary (BSWAP, mode, op1, mode));
1932 return simplify_gen_unary (BSWAP, mode, tem, mode);
1935 /* (op (bswap x) (bswap y)) -> (bswap (op x y)). */
1936 if (GET_CODE (op0) == BSWAP && GET_CODE (op1) == BSWAP)
1938 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), XEXP (op1, 0));
1939 return simplify_gen_unary (BSWAP, mode, tem, mode);
1942 return NULL_RTX;
1945 /* Subroutine of simplify_binary_operation to simplify a commutative,
1946 associative binary operation CODE with result mode MODE, operating
1947 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
1948 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
1949 canonicalization is possible. */
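/* For example (illustrative): (plus (plus a b) (plus c d)) is
   relinearized as (plus (plus (plus a b) c) d), and a constant is
   pushed outward so that (plus (plus a (const_int 4)) b) becomes
   (plus (plus a b) (const_int 4)).  */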
1951 static rtx
1952 simplify_associative_operation (enum rtx_code code, machine_mode mode,
1953 rtx op0, rtx op1)
1955 rtx tem;
1957 /* Linearize the operator to the left. */
1958 if (GET_CODE (op1) == code)
1960 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)". */
1961 if (GET_CODE (op0) == code)
1963 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
1964 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
1967 /* "a op (b op c)" becomes "(b op c) op a". */
1968 if (! swap_commutative_operands_p (op1, op0))
1969 return simplify_gen_binary (code, mode, op1, op0);
1971 std::swap (op0, op1);
1974 if (GET_CODE (op0) == code)
1976 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
1977 if (swap_commutative_operands_p (XEXP (op0, 1), op1))
1979 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
1980 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1983 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
1984 tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
1985 if (tem != 0)
1986 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
1988 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
1989 tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
1990 if (tem != 0)
1991 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1994 return 0;
1998 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
1999 and OP1. Return 0 if no simplification is possible.
2001 Don't use this for relational operations such as EQ or LT.
2002 Use simplify_relational_operation instead. */
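/* A minimal usage sketch (illustrative): simplify_binary_operation
   (PLUS, SImode, x, const0_rtx) folds to x, while a call that cannot
   be simplified returns NULL_RTX and the caller keeps the original
   expression.  */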
2004 simplify_binary_operation (enum rtx_code code, machine_mode mode,
2005 rtx op0, rtx op1)
2007 rtx trueop0, trueop1;
2008 rtx tem;
2010 /* Relational operations don't work here. We must know the mode
2011 of the operands in order to do the comparison correctly.
2012 Assuming a full word can give incorrect results.
2013 Consider comparing 128 with -128 in QImode. */
2014 gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
2015 gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
2017 /* Make sure the constant is second. */
2018 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
2019 && swap_commutative_operands_p (op0, op1))
2020 std::swap (op0, op1);
2022 trueop0 = avoid_constant_pool_reference (op0);
2023 trueop1 = avoid_constant_pool_reference (op1);
2025 tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
2026 if (tem)
2027 return tem;
2028 tem = simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
2030 if (tem)
2031 return tem;
2033 /* If the above steps did not result in a simplification and op0 or op1
2034 were constant pool references, use the referenced constants directly. */
2035 if (trueop0 != op0 || trueop1 != op1)
2036 return simplify_gen_binary (code, mode, trueop0, trueop1);
2038 return NULL_RTX;
2041 /* Subroutine of simplify_binary_operation. Simplify a binary operation
2042 CODE with result mode MODE, operating on OP0 and OP1. If OP0 and/or
2043 OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
2044 actual constants. */
2046 static rtx
2047 simplify_binary_operation_1 (enum rtx_code code, machine_mode mode,
2048 rtx op0, rtx op1, rtx trueop0, rtx trueop1)
2050 rtx tem, reversed, opleft, opright;
2051 HOST_WIDE_INT val;
2052 unsigned int width = GET_MODE_PRECISION (mode);
2054 /* Even if we can't compute a constant result,
2055 there are some cases worth simplifying. */
2057 switch (code)
2059 case PLUS:
2060 /* Maybe simplify x + 0 to x. The two expressions are equivalent
2061 when x is NaN, infinite, or finite and nonzero. They aren't
2062 when x is -0 and the rounding mode is not towards -infinity,
2063 since (-0) + 0 is then 0. */
2064 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
2065 return op0;
2067 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
2068 transformations are safe even for IEEE. */
2069 if (GET_CODE (op0) == NEG)
2070 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
2071 else if (GET_CODE (op1) == NEG)
2072 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
2074 /* (~a) + 1 -> -a */
2075 if (INTEGRAL_MODE_P (mode)
2076 && GET_CODE (op0) == NOT
2077 && trueop1 == const1_rtx)
2078 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
2080 /* Handle both-operands-constant cases. We can only add
2081 CONST_INTs to constants since the sum of relocatable symbols
2082 can't be handled by most assemblers. Don't add CONST_INT
2083 to CONST_INT since overflow won't be computed properly if wider
2084 than HOST_BITS_PER_WIDE_INT. */
2086 if ((GET_CODE (op0) == CONST
2087 || GET_CODE (op0) == SYMBOL_REF
2088 || GET_CODE (op0) == LABEL_REF)
2089 && CONST_INT_P (op1))
2090 return plus_constant (mode, op0, INTVAL (op1));
2091 else if ((GET_CODE (op1) == CONST
2092 || GET_CODE (op1) == SYMBOL_REF
2093 || GET_CODE (op1) == LABEL_REF)
2094 && CONST_INT_P (op0))
2095 return plus_constant (mode, op1, INTVAL (op0));
2097 /* See if this is something like X * C - X or vice versa or
2098 if the multiplication is written as a shift. If so, we can
2099 distribute and make a new multiply, shift, or maybe just
2100 have X (if C is 2 in the example above). But don't make
2101 something more expensive than we had before. */
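/* For instance (illustrative), (plus (mult x (const_int 3)) x)
   becomes (mult x (const_int 4)), provided the rewritten form is
   not more expensive than the original.  */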
2103 if (SCALAR_INT_MODE_P (mode))
2105 rtx lhs = op0, rhs = op1;
2107 wide_int coeff0 = wi::one (GET_MODE_PRECISION (mode));
2108 wide_int coeff1 = wi::one (GET_MODE_PRECISION (mode));
2110 if (GET_CODE (lhs) == NEG)
2112 coeff0 = wi::minus_one (GET_MODE_PRECISION (mode));
2113 lhs = XEXP (lhs, 0);
2115 else if (GET_CODE (lhs) == MULT
2116 && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
2118 coeff0 = std::make_pair (XEXP (lhs, 1), mode);
2119 lhs = XEXP (lhs, 0);
2121 else if (GET_CODE (lhs) == ASHIFT
2122 && CONST_INT_P (XEXP (lhs, 1))
2123 && INTVAL (XEXP (lhs, 1)) >= 0
2124 && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (mode))
2126 coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
2127 GET_MODE_PRECISION (mode));
2128 lhs = XEXP (lhs, 0);
2131 if (GET_CODE (rhs) == NEG)
2133 coeff1 = wi::minus_one (GET_MODE_PRECISION (mode));
2134 rhs = XEXP (rhs, 0);
2136 else if (GET_CODE (rhs) == MULT
2137 && CONST_INT_P (XEXP (rhs, 1)))
2139 coeff1 = std::make_pair (XEXP (rhs, 1), mode);
2140 rhs = XEXP (rhs, 0);
2142 else if (GET_CODE (rhs) == ASHIFT
2143 && CONST_INT_P (XEXP (rhs, 1))
2144 && INTVAL (XEXP (rhs, 1)) >= 0
2145 && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (mode))
2147 coeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
2148 GET_MODE_PRECISION (mode));
2149 rhs = XEXP (rhs, 0);
2152 if (rtx_equal_p (lhs, rhs))
2154 rtx orig = gen_rtx_PLUS (mode, op0, op1);
2155 rtx coeff;
2156 bool speed = optimize_function_for_speed_p (cfun);
2158 coeff = immed_wide_int_const (coeff0 + coeff1, mode);
2160 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2161 return (set_src_cost (tem, mode, speed)
2162 <= set_src_cost (orig, mode, speed) ? tem : 0);
2166 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
2167 if (CONST_SCALAR_INT_P (op1)
2168 && GET_CODE (op0) == XOR
2169 && CONST_SCALAR_INT_P (XEXP (op0, 1))
2170 && mode_signbit_p (mode, op1))
2171 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2172 simplify_gen_binary (XOR, mode, op1,
2173 XEXP (op0, 1)));
2175 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
2176 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2177 && GET_CODE (op0) == MULT
2178 && GET_CODE (XEXP (op0, 0)) == NEG)
2180 rtx in1, in2;
2182 in1 = XEXP (XEXP (op0, 0), 0);
2183 in2 = XEXP (op0, 1);
2184 return simplify_gen_binary (MINUS, mode, op1,
2185 simplify_gen_binary (MULT, mode,
2186 in1, in2));
2189 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
2190 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
2191 is 1. */
2192 if (COMPARISON_P (op0)
2193 && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
2194 || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
2195 && (reversed = reversed_comparison (op0, mode)))
2196 return
2197 simplify_gen_unary (NEG, mode, reversed, mode);
2199 /* If one of the operands is a PLUS or a MINUS, see if we can
2200 simplify this by the associative law.
2201 Don't use the associative law for floating point.
2202 The inaccuracy makes it nonassociative,
2203 and subtle programs can break if operations are associated. */
2205 if (INTEGRAL_MODE_P (mode)
2206 && (plus_minus_operand_p (op0)
2207 || plus_minus_operand_p (op1))
2208 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2209 return tem;
2211 /* Reassociate floating point addition only when the user
2212 specifies associative math operations. */
2213 if (FLOAT_MODE_P (mode)
2214 && flag_associative_math)
2216 tem = simplify_associative_operation (code, mode, op0, op1);
2217 if (tem)
2218 return tem;
2220 break;
2222 case COMPARE:
2223 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
2224 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
2225 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
2226 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
2228 rtx xop00 = XEXP (op0, 0);
2229 rtx xop10 = XEXP (op1, 0);
2231 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
2232 return xop00;
2234 if (REG_P (xop00) && REG_P (xop10)
2235 && GET_MODE (xop00) == GET_MODE (xop10)
2236 && REGNO (xop00) == REGNO (xop10)
2237 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
2238 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
2239 return xop00;
2241 break;
2243 case MINUS:
2244 /* We can't assume x-x is 0 even with non-IEEE floating point,
2245 but since it is zero except in very strange circumstances, we
2246 will treat it as zero with -ffinite-math-only. */
2247 if (rtx_equal_p (trueop0, trueop1)
2248 && ! side_effects_p (op0)
2249 && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
2250 return CONST0_RTX (mode);
2252 /* Change subtraction from zero into negation. (0 - x) is the
2253 same as -x when x is NaN, infinite, or finite and nonzero.
2254 But if the mode has signed zeros, and does not round towards
2255 -infinity, then 0 - 0 is 0, not -0. */
2256 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
2257 return simplify_gen_unary (NEG, mode, op1, mode);
2259 /* (-1 - a) is ~a. */
2260 if (trueop0 == constm1_rtx)
2261 return simplify_gen_unary (NOT, mode, op1, mode);
2263 /* Subtracting 0 has no effect unless the mode has signed zeros
2264 and supports rounding towards -infinity. In such a case,
2265 0 - 0 is -0. */
2266 if (!(HONOR_SIGNED_ZEROS (mode)
2267 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
2268 && trueop1 == CONST0_RTX (mode))
2269 return op0;
2271 /* See if this is something like X * C - X or vice versa or
2272 if the multiplication is written as a shift. If so, we can
2273 distribute and make a new multiply, shift, or maybe just
2274 have X (if C is 2 in the example above). But don't make
2275 something more expensive than we had before. */
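/* For instance (illustrative), (minus (ashift x (const_int 2)) x)
   becomes (mult x (const_int 3)), again only if the rewritten form
   is not more expensive than the original.  */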
2277 if (SCALAR_INT_MODE_P (mode))
2279 rtx lhs = op0, rhs = op1;
2281 wide_int coeff0 = wi::one (GET_MODE_PRECISION (mode));
2282 wide_int negcoeff1 = wi::minus_one (GET_MODE_PRECISION (mode));
2284 if (GET_CODE (lhs) == NEG)
2286 coeff0 = wi::minus_one (GET_MODE_PRECISION (mode));
2287 lhs = XEXP (lhs, 0);
2289 else if (GET_CODE (lhs) == MULT
2290 && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
2292 coeff0 = std::make_pair (XEXP (lhs, 1), mode);
2293 lhs = XEXP (lhs, 0);
2295 else if (GET_CODE (lhs) == ASHIFT
2296 && CONST_INT_P (XEXP (lhs, 1))
2297 && INTVAL (XEXP (lhs, 1)) >= 0
2298 && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (mode))
2300 coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
2301 GET_MODE_PRECISION (mode));
2302 lhs = XEXP (lhs, 0);
2305 if (GET_CODE (rhs) == NEG)
2307 negcoeff1 = wi::one (GET_MODE_PRECISION (mode));
2308 rhs = XEXP (rhs, 0);
2310 else if (GET_CODE (rhs) == MULT
2311 && CONST_INT_P (XEXP (rhs, 1)))
2313 negcoeff1 = wi::neg (std::make_pair (XEXP (rhs, 1), mode));
2314 rhs = XEXP (rhs, 0);
2316 else if (GET_CODE (rhs) == ASHIFT
2317 && CONST_INT_P (XEXP (rhs, 1))
2318 && INTVAL (XEXP (rhs, 1)) >= 0
2319 && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (mode))
2321 negcoeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
2322 GET_MODE_PRECISION (mode));
2323 negcoeff1 = -negcoeff1;
2324 rhs = XEXP (rhs, 0);
2327 if (rtx_equal_p (lhs, rhs))
2329 rtx orig = gen_rtx_MINUS (mode, op0, op1);
2330 rtx coeff;
2331 bool speed = optimize_function_for_speed_p (cfun);
2333 coeff = immed_wide_int_const (coeff0 + negcoeff1, mode);
2335 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2336 return (set_src_cost (tem, mode, speed)
2337 <= set_src_cost (orig, mode, speed) ? tem : 0);
2341 /* (a - (-b)) -> (a + b). True even for IEEE. */
2342 if (GET_CODE (op1) == NEG)
2343 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
2345 /* (-x - c) may be simplified as (-c - x). */
2346 if (GET_CODE (op0) == NEG
2347 && (CONST_SCALAR_INT_P (op1) || CONST_DOUBLE_AS_FLOAT_P (op1)))
2349 tem = simplify_unary_operation (NEG, mode, op1, mode);
2350 if (tem)
2351 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
2354 /* Don't let a relocatable value get a negative coeff. */
2355 if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode)
2356 return simplify_gen_binary (PLUS, mode,
2357 op0,
2358 neg_const_int (mode, op1));
2360 /* (x - (x & y)) -> (x & ~y) */
2361 if (INTEGRAL_MODE_P (mode) && GET_CODE (op1) == AND)
2363 if (rtx_equal_p (op0, XEXP (op1, 0)))
2365 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
2366 GET_MODE (XEXP (op1, 1)));
2367 return simplify_gen_binary (AND, mode, op0, tem);
2369 if (rtx_equal_p (op0, XEXP (op1, 1)))
2371 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
2372 GET_MODE (XEXP (op1, 0)));
2373 return simplify_gen_binary (AND, mode, op0, tem);
2377 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
2378 by reversing the comparison code if valid. */
2379 if (STORE_FLAG_VALUE == 1
2380 && trueop0 == const1_rtx
2381 && COMPARISON_P (op1)
2382 && (reversed = reversed_comparison (op1, mode)))
2383 return reversed;
2385 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
2386 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2387 && GET_CODE (op1) == MULT
2388 && GET_CODE (XEXP (op1, 0)) == NEG)
2390 rtx in1, in2;
2392 in1 = XEXP (XEXP (op1, 0), 0);
2393 in2 = XEXP (op1, 1);
2394 return simplify_gen_binary (PLUS, mode,
2395 simplify_gen_binary (MULT, mode,
2396 in1, in2),
2397 op0);
2400 /* Canonicalize (minus (neg A) (mult B C)) to
2401 (minus (mult (neg B) C) A). */
2402 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2403 && GET_CODE (op1) == MULT
2404 && GET_CODE (op0) == NEG)
2406 rtx in1, in2;
2408 in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
2409 in2 = XEXP (op1, 1);
2410 return simplify_gen_binary (MINUS, mode,
2411 simplify_gen_binary (MULT, mode,
2412 in1, in2),
2413 XEXP (op0, 0));
2416 /* If one of the operands is a PLUS or a MINUS, see if we can
2417 simplify this by the associative law. This will, for example,
2418 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
2419 Don't use the associative law for floating point.
2420 The inaccuracy makes it nonassociative,
2421 and subtle programs can break if operations are associated. */
2423 if (INTEGRAL_MODE_P (mode)
2424 && (plus_minus_operand_p (op0)
2425 || plus_minus_operand_p (op1))
2426 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2427 return tem;
2428 break;
2430 case MULT:
2431 if (trueop1 == constm1_rtx)
2432 return simplify_gen_unary (NEG, mode, op0, mode);
2434 if (GET_CODE (op0) == NEG)
2436 rtx temp = simplify_unary_operation (NEG, mode, op1, mode);
2437 /* If op1 is a MULT as well and simplify_unary_operation
2438 just moved the NEG to the second operand, simplify_gen_binary
2439 below could, through simplify_associative_operation, move
2440 the NEG around again and recurse endlessly. */
2441 if (temp
2442 && GET_CODE (op1) == MULT
2443 && GET_CODE (temp) == MULT
2444 && XEXP (op1, 0) == XEXP (temp, 0)
2445 && GET_CODE (XEXP (temp, 1)) == NEG
2446 && XEXP (op1, 1) == XEXP (XEXP (temp, 1), 0))
2447 temp = NULL_RTX;
2448 if (temp)
2449 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), temp);
2451 if (GET_CODE (op1) == NEG)
2453 rtx temp = simplify_unary_operation (NEG, mode, op0, mode);
2454 /* If op0 is a MULT as well and simplify_unary_operation
2455 just moved the NEG to the second operand, simplify_gen_binary
2456 below could, through simplify_associative_operation, move
2457 the NEG around again and recurse endlessly. */
2458 if (temp
2459 && GET_CODE (op0) == MULT
2460 && GET_CODE (temp) == MULT
2461 && XEXP (op0, 0) == XEXP (temp, 0)
2462 && GET_CODE (XEXP (temp, 1)) == NEG
2463 && XEXP (op0, 1) == XEXP (XEXP (temp, 1), 0))
2464 temp = NULL_RTX;
2465 if (temp)
2466 return simplify_gen_binary (MULT, mode, temp, XEXP (op1, 0));
2469 /* Maybe simplify x * 0 to 0. The reduction is not valid if
2470 x is NaN, since x * 0 is then also NaN. Nor is it valid
2471 when the mode has signed zeros, since multiplying a negative
2472 number by 0 will give -0, not 0. */
2473 if (!HONOR_NANS (mode)
2474 && !HONOR_SIGNED_ZEROS (mode)
2475 && trueop1 == CONST0_RTX (mode)
2476 && ! side_effects_p (op0))
2477 return op1;
2479 /* In IEEE floating point, x*1 is not equivalent to x for
2480 signalling NaNs. */
2481 if (!HONOR_SNANS (mode)
2482 && trueop1 == CONST1_RTX (mode))
2483 return op0;
2485 /* Convert multiply by constant power of two into shift. */
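/* E.g. (mult x (const_int 8)) becomes (ashift x (const_int 3)).  */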
2486 if (CONST_SCALAR_INT_P (trueop1))
2488 val = wi::exact_log2 (std::make_pair (trueop1, mode));
2489 if (val >= 0)
2490 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
2493 /* x*2 is x+x and x*(-1) is -x */
2494 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
2495 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
2496 && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
2497 && GET_MODE (op0) == mode)
2499 const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);
2501 if (real_equal (d1, &dconst2))
2502 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
2504 if (!HONOR_SNANS (mode)
2505 && real_equal (d1, &dconstm1))
2506 return simplify_gen_unary (NEG, mode, op0, mode);
2509 /* Optimize -x * -x as x * x. */
2510 if (FLOAT_MODE_P (mode)
2511 && GET_CODE (op0) == NEG
2512 && GET_CODE (op1) == NEG
2513 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2514 && !side_effects_p (XEXP (op0, 0)))
2515 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2517 /* Likewise, optimize abs(x) * abs(x) as x * x. */
2518 if (SCALAR_FLOAT_MODE_P (mode)
2519 && GET_CODE (op0) == ABS
2520 && GET_CODE (op1) == ABS
2521 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2522 && !side_effects_p (XEXP (op0, 0)))
2523 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2525 /* Reassociate multiplication, but for floating point MULTs
2526 only when the user specifies unsafe math optimizations. */
2527 if (! FLOAT_MODE_P (mode)
2528 || flag_unsafe_math_optimizations)
2530 tem = simplify_associative_operation (code, mode, op0, op1);
2531 if (tem)
2532 return tem;
2534 break;
2536 case IOR:
2537 if (trueop1 == CONST0_RTX (mode))
2538 return op0;
2539 if (INTEGRAL_MODE_P (mode)
2540 && trueop1 == CONSTM1_RTX (mode)
2541 && !side_effects_p (op0))
2542 return op1;
2543 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2544 return op0;
2545 /* A | (~A) -> -1 */
2546 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2547 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2548 && ! side_effects_p (op0)
2549 && SCALAR_INT_MODE_P (mode))
2550 return constm1_rtx;
2552 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
2553 if (CONST_INT_P (op1)
2554 && HWI_COMPUTABLE_MODE_P (mode)
2555 && (nonzero_bits (op0, mode) & ~UINTVAL (op1)) == 0
2556 && !side_effects_p (op0))
2557 return op1;
2559 /* Canonicalize (X & C1) | C2. */
2560 if (GET_CODE (op0) == AND
2561 && CONST_INT_P (trueop1)
2562 && CONST_INT_P (XEXP (op0, 1)))
2564 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
2565 HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
2566 HOST_WIDE_INT c2 = INTVAL (trueop1);
2568 /* If (C1&C2) == C1, then (X&C1)|C2 becomes X. */
2569 if ((c1 & c2) == c1
2570 && !side_effects_p (XEXP (op0, 0)))
2571 return trueop1;
2573 /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
2574 if (((c1|c2) & mask) == mask)
2575 return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);
2577 /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2. */
2578 if (((c1 & ~c2) & mask) != (c1 & mask))
2580 tem = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2581 gen_int_mode (c1 & ~c2, mode));
2582 return simplify_gen_binary (IOR, mode, tem, op1);
2586 /* Convert (A & B) | A to A. */
2587 if (GET_CODE (op0) == AND
2588 && (rtx_equal_p (XEXP (op0, 0), op1)
2589 || rtx_equal_p (XEXP (op0, 1), op1))
2590 && ! side_effects_p (XEXP (op0, 0))
2591 && ! side_effects_p (XEXP (op0, 1)))
2592 return op1;
2594 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2595 mode size to (rotate A CX). */
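/* E.g. in SImode (illustrative), (ior (ashift a (const_int 8))
   (lshiftrt a (const_int 24))) becomes (rotate a (const_int 8)).  */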
2597 if (GET_CODE (op1) == ASHIFT
2598 || GET_CODE (op1) == SUBREG)
2600 opleft = op1;
2601 opright = op0;
2603 else
2605 opright = op1;
2606 opleft = op0;
2609 if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
2610 && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
2611 && CONST_INT_P (XEXP (opleft, 1))
2612 && CONST_INT_P (XEXP (opright, 1))
2613 && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
2614 == GET_MODE_PRECISION (mode)))
2615 return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
2617 /* Same, but for ashift that has been "simplified" to a wider mode
2618 by simplify_shift_const. */
2620 if (GET_CODE (opleft) == SUBREG
2621 && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
2622 && GET_CODE (opright) == LSHIFTRT
2623 && GET_CODE (XEXP (opright, 0)) == SUBREG
2624 && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
2625 && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
2626 && (GET_MODE_SIZE (GET_MODE (opleft))
2627 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
2628 && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
2629 SUBREG_REG (XEXP (opright, 0)))
2630 && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
2631 && CONST_INT_P (XEXP (opright, 1))
2632 && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
2633 == GET_MODE_PRECISION (mode)))
2634 return gen_rtx_ROTATE (mode, XEXP (opright, 0),
2635 XEXP (SUBREG_REG (opleft), 1));
2637 /* If we have (ior (and X C1) C2), simplify this by making
2638 C1 as small as possible if C1 actually changes. */
2639 if (CONST_INT_P (op1)
2640 && (HWI_COMPUTABLE_MODE_P (mode)
2641 || INTVAL (op1) > 0)
2642 && GET_CODE (op0) == AND
2643 && CONST_INT_P (XEXP (op0, 1))
2644 && CONST_INT_P (op1)
2645 && (UINTVAL (XEXP (op0, 1)) & UINTVAL (op1)) != 0)
2647 rtx tmp = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2648 gen_int_mode (UINTVAL (XEXP (op0, 1))
2649 & ~UINTVAL (op1),
2650 mode));
2651 return simplify_gen_binary (IOR, mode, tmp, op1);
2654 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2655 a (sign_extend (plus ...)). If OP1 is a CONST_INT and the
2656 PLUS does not affect any of the bits in OP1, we can do the
2657 IOR as a PLUS and we can associate. This is valid if OP1
2658 can be safely shifted left C bits. */
2659 if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
2660 && GET_CODE (XEXP (op0, 0)) == PLUS
2661 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
2662 && CONST_INT_P (XEXP (op0, 1))
2663 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
2665 int count = INTVAL (XEXP (op0, 1));
2666 HOST_WIDE_INT mask = INTVAL (trueop1) << count;
2668 if (mask >> count == INTVAL (trueop1)
2669 && trunc_int_for_mode (mask, mode) == mask
2670 && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
2671 return simplify_gen_binary (ASHIFTRT, mode,
2672 plus_constant (mode, XEXP (op0, 0),
2673 mask),
2674 XEXP (op0, 1));
2677 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
2678 if (tem)
2679 return tem;
2681 tem = simplify_associative_operation (code, mode, op0, op1);
2682 if (tem)
2683 return tem;
2684 break;
2686 case XOR:
2687 if (trueop1 == CONST0_RTX (mode))
2688 return op0;
2689 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2690 return simplify_gen_unary (NOT, mode, op0, mode);
2691 if (rtx_equal_p (trueop0, trueop1)
2692 && ! side_effects_p (op0)
2693 && GET_MODE_CLASS (mode) != MODE_CC)
2694 return CONST0_RTX (mode);
2696 /* Canonicalize XOR of the most significant bit to PLUS. */
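/* E.g. in QImode (illustrative), (xor x (const_int -128)) becomes
   (plus x (const_int -128)).  */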
2697 if (CONST_SCALAR_INT_P (op1)
2698 && mode_signbit_p (mode, op1))
2699 return simplify_gen_binary (PLUS, mode, op0, op1);
2700 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2701 if (CONST_SCALAR_INT_P (op1)
2702 && GET_CODE (op0) == PLUS
2703 && CONST_SCALAR_INT_P (XEXP (op0, 1))
2704 && mode_signbit_p (mode, XEXP (op0, 1)))
2705 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2706 simplify_gen_binary (XOR, mode, op1,
2707 XEXP (op0, 1)));
2709 /* If we are XORing two things that have no bits in common,
2710 convert them into an IOR. This helps to detect rotation encoded
2711 using those methods and possibly other simplifications. */
2713 if (HWI_COMPUTABLE_MODE_P (mode)
2714 && (nonzero_bits (op0, mode)
2715 & nonzero_bits (op1, mode)) == 0)
2716 return (simplify_gen_binary (IOR, mode, op0, op1));
2718 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2719 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2720 (NOT y). */
2722 int num_negated = 0;
2724 if (GET_CODE (op0) == NOT)
2725 num_negated++, op0 = XEXP (op0, 0);
2726 if (GET_CODE (op1) == NOT)
2727 num_negated++, op1 = XEXP (op1, 0);
2729 if (num_negated == 2)
2730 return simplify_gen_binary (XOR, mode, op0, op1);
2731 else if (num_negated == 1)
2732 return simplify_gen_unary (NOT, mode,
2733 simplify_gen_binary (XOR, mode, op0, op1),
2734 mode);
2737 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2738 correspond to a machine insn or result in further simplifications
2739 if B is a constant. */
2741 if (GET_CODE (op0) == AND
2742 && rtx_equal_p (XEXP (op0, 1), op1)
2743 && ! side_effects_p (op1))
2744 return simplify_gen_binary (AND, mode,
2745 simplify_gen_unary (NOT, mode,
2746 XEXP (op0, 0), mode),
2747 op1);
2749 else if (GET_CODE (op0) == AND
2750 && rtx_equal_p (XEXP (op0, 0), op1)
2751 && ! side_effects_p (op1))
2752 return simplify_gen_binary (AND, mode,
2753 simplify_gen_unary (NOT, mode,
2754 XEXP (op0, 1), mode),
2755 op1);
2757 /* Given (xor (ior (xor A B) C) D), where B, C and D are
2758 constants, simplify to (xor (ior A C) (B&~C)^D), canceling
2759 out bits inverted twice and not set by C. Similarly, given
2760 (xor (and (xor A B) C) D), simplify without inverting C in
2761 the xor operand: (xor (and A C) (B&C)^D). */
2763 else if ((GET_CODE (op0) == IOR || GET_CODE (op0) == AND)
2764 && GET_CODE (XEXP (op0, 0)) == XOR
2765 && CONST_INT_P (op1)
2766 && CONST_INT_P (XEXP (op0, 1))
2767 && CONST_INT_P (XEXP (XEXP (op0, 0), 1)))
2769 enum rtx_code op = GET_CODE (op0);
2770 rtx a = XEXP (XEXP (op0, 0), 0);
2771 rtx b = XEXP (XEXP (op0, 0), 1);
2772 rtx c = XEXP (op0, 1);
2773 rtx d = op1;
2774 HOST_WIDE_INT bval = INTVAL (b);
2775 HOST_WIDE_INT cval = INTVAL (c);
2776 HOST_WIDE_INT dval = INTVAL (d);
2777 HOST_WIDE_INT xcval;
2779 if (op == IOR)
2780 xcval = ~cval;
2781 else
2782 xcval = cval;
2784 return simplify_gen_binary (XOR, mode,
2785 simplify_gen_binary (op, mode, a, c),
2786 gen_int_mode ((bval & xcval) ^ dval,
2787 mode));
2790 /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
2791 we can transform like this:
2792 (A&B)^C == ~(A&B)&C | ~C&(A&B)
2793 == (~A|~B)&C | ~C&(A&B) * DeMorgan's Law
2794 == ~A&C | ~B&C | A&(~C&B) * Distribute and re-order
2795 Attempt a few simplifications when B and C are both constants. */
2796 if (GET_CODE (op0) == AND
2797 && CONST_INT_P (op1)
2798 && CONST_INT_P (XEXP (op0, 1)))
2800 rtx a = XEXP (op0, 0);
2801 rtx b = XEXP (op0, 1);
2802 rtx c = op1;
2803 HOST_WIDE_INT bval = INTVAL (b);
2804 HOST_WIDE_INT cval = INTVAL (c);
2806 /* Instead of computing ~A&C, we compute its negated value,
2807 ~(A|~C). If it yields -1, ~A&C is zero, so we can
2808 optimize for sure. If it does not simplify, we still try
2809 to compute ~A&C below, but since that always allocates
2810 RTL, we don't try that before committing to returning a
2811 simplified expression. */
2812 rtx n_na_c = simplify_binary_operation (IOR, mode, a,
2813 GEN_INT (~cval));
2815 if ((~cval & bval) == 0)
2817 rtx na_c = NULL_RTX;
2818 if (n_na_c)
2819 na_c = simplify_gen_unary (NOT, mode, n_na_c, mode);
2820 else
2822 /* If ~A does not simplify, don't bother: we don't
2823 want to simplify 2 operations into 3, and if na_c
2824 were to simplify with na, n_na_c would have
2825 simplified as well. */
2826 rtx na = simplify_unary_operation (NOT, mode, a, mode);
2827 if (na)
2828 na_c = simplify_gen_binary (AND, mode, na, c);
2831 /* Try to simplify ~A&C | ~B&C. */
2832 if (na_c != NULL_RTX)
2833 return simplify_gen_binary (IOR, mode, na_c,
2834 gen_int_mode (~bval & cval, mode));
2836 else
2838 /* If ~A&C is zero, simplify A&(~C&B) | ~B&C. */
2839 if (n_na_c == CONSTM1_RTX (mode))
2841 rtx a_nc_b = simplify_gen_binary (AND, mode, a,
2842 gen_int_mode (~cval & bval,
2843 mode));
2844 return simplify_gen_binary (IOR, mode, a_nc_b,
2845 gen_int_mode (~bval & cval,
2846 mode));
2851 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2852 comparison if STORE_FLAG_VALUE is 1. */
2853 if (STORE_FLAG_VALUE == 1
2854 && trueop1 == const1_rtx
2855 && COMPARISON_P (op0)
2856 && (reversed = reversed_comparison (op0, mode)))
2857 return reversed;
2859 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2860 is (lt foo (const_int 0)), so we can perform the above
2861 simplification if STORE_FLAG_VALUE is 1. */
2863 if (STORE_FLAG_VALUE == 1
2864 && trueop1 == const1_rtx
2865 && GET_CODE (op0) == LSHIFTRT
2866 && CONST_INT_P (XEXP (op0, 1))
2867 && INTVAL (XEXP (op0, 1)) == GET_MODE_PRECISION (mode) - 1)
2868 return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);
2870 /* (xor (comparison foo bar) (const_int sign-bit))
2871 when STORE_FLAG_VALUE is the sign bit. */
2872 if (val_signbit_p (mode, STORE_FLAG_VALUE)
2873 && trueop1 == const_true_rtx
2874 && COMPARISON_P (op0)
2875 && (reversed = reversed_comparison (op0, mode)))
2876 return reversed;
2878 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
2879 if (tem)
2880 return tem;
2882 tem = simplify_associative_operation (code, mode, op0, op1);
2883 if (tem)
2884 return tem;
2885 break;
2887 case AND:
2888 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2889 return trueop1;
2890 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2891 return op0;
2892 if (HWI_COMPUTABLE_MODE_P (mode))
2894 HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
2895 HOST_WIDE_INT nzop1;
2896 if (CONST_INT_P (trueop1))
2898 HOST_WIDE_INT val1 = INTVAL (trueop1);
2899 /* If we are turning off bits already known off in OP0, we need
2900 not do an AND. */
2901 if ((nzop0 & ~val1) == 0)
2902 return op0;
2904 nzop1 = nonzero_bits (trueop1, mode);
2905 /* If we are clearing all the nonzero bits, the result is zero. */
2906 if ((nzop1 & nzop0) == 0
2907 && !side_effects_p (op0) && !side_effects_p (op1))
2908 return CONST0_RTX (mode);
2910 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
2911 && GET_MODE_CLASS (mode) != MODE_CC)
2912 return op0;
2913 /* A & (~A) -> 0 */
2914 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2915 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2916 && ! side_effects_p (op0)
2917 && GET_MODE_CLASS (mode) != MODE_CC)
2918 return CONST0_RTX (mode);
2920 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
2921 there are no nonzero bits of C outside of X's mode. */
2922 if ((GET_CODE (op0) == SIGN_EXTEND
2923 || GET_CODE (op0) == ZERO_EXTEND)
2924 && CONST_INT_P (trueop1)
2925 && HWI_COMPUTABLE_MODE_P (mode)
2926 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
2927 & UINTVAL (trueop1)) == 0)
2929 machine_mode imode = GET_MODE (XEXP (op0, 0));
2930 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
2931 gen_int_mode (INTVAL (trueop1),
2932 imode));
2933 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
2936 /* Transform (and (truncate X) C) into (truncate (and X C)). This way
2937 we might be able to further simplify the AND with X and potentially
2938 remove the truncation altogether. */
2939 if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
2941 rtx x = XEXP (op0, 0);
2942 machine_mode xmode = GET_MODE (x);
2943 tem = simplify_gen_binary (AND, xmode, x,
2944 gen_int_mode (INTVAL (trueop1), xmode));
2945 return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
2948 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
2949 if (GET_CODE (op0) == IOR
2950 && CONST_INT_P (trueop1)
2951 && CONST_INT_P (XEXP (op0, 1)))
2953 HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
2954 return simplify_gen_binary (IOR, mode,
2955 simplify_gen_binary (AND, mode,
2956 XEXP (op0, 0), op1),
2957 gen_int_mode (tmp, mode));
2960 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
2961 insn (and may simplify more). */
2962 if (GET_CODE (op0) == XOR
2963 && rtx_equal_p (XEXP (op0, 0), op1)
2964 && ! side_effects_p (op1))
2965 return simplify_gen_binary (AND, mode,
2966 simplify_gen_unary (NOT, mode,
2967 XEXP (op0, 1), mode),
2968 op1);
2970 if (GET_CODE (op0) == XOR
2971 && rtx_equal_p (XEXP (op0, 1), op1)
2972 && ! side_effects_p (op1))
2973 return simplify_gen_binary (AND, mode,
2974 simplify_gen_unary (NOT, mode,
2975 XEXP (op0, 0), mode),
2976 op1);
2978 /* Similarly for (~(A ^ B)) & A. */
2979 if (GET_CODE (op0) == NOT
2980 && GET_CODE (XEXP (op0, 0)) == XOR
2981 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
2982 && ! side_effects_p (op1))
2983 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
2985 if (GET_CODE (op0) == NOT
2986 && GET_CODE (XEXP (op0, 0)) == XOR
2987 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
2988 && ! side_effects_p (op1))
2989 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
2991 /* Convert (A | B) & A to A. */
2992 if (GET_CODE (op0) == IOR
2993 && (rtx_equal_p (XEXP (op0, 0), op1)
2994 || rtx_equal_p (XEXP (op0, 1), op1))
2995 && ! side_effects_p (XEXP (op0, 0))
2996 && ! side_effects_p (XEXP (op0, 1)))
2997 return op1;
2999 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
3000 ((A & N) + B) & M -> (A + B) & M
3001 Similarly if (N & M) == 0,
3002 ((A | N) + B) & M -> (A + B) & M
3003 and for - instead of + and/or ^ instead of |.
3004 Also, if (N & M) == 0, then
3005 (A +- N) & M -> A & M. */
3006 if (CONST_INT_P (trueop1)
3007 && HWI_COMPUTABLE_MODE_P (mode)
3008 && ~UINTVAL (trueop1)
3009 && (UINTVAL (trueop1) & (UINTVAL (trueop1) + 1)) == 0
3010 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
3012 rtx pmop[2];
3013 int which;
3015 pmop[0] = XEXP (op0, 0);
3016 pmop[1] = XEXP (op0, 1);
3018 if (CONST_INT_P (pmop[1])
3019 && (UINTVAL (pmop[1]) & UINTVAL (trueop1)) == 0)
3020 return simplify_gen_binary (AND, mode, pmop[0], op1);
3022 for (which = 0; which < 2; which++)
3024 tem = pmop[which];
3025 switch (GET_CODE (tem))
3027 case AND:
3028 if (CONST_INT_P (XEXP (tem, 1))
3029 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1))
3030 == UINTVAL (trueop1))
3031 pmop[which] = XEXP (tem, 0);
3032 break;
3033 case IOR:
3034 case XOR:
3035 if (CONST_INT_P (XEXP (tem, 1))
3036 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1)) == 0)
3037 pmop[which] = XEXP (tem, 0);
3038 break;
3039 default:
3040 break;
3044 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
3046 tem = simplify_gen_binary (GET_CODE (op0), mode,
3047 pmop[0], pmop[1]);
3048 return simplify_gen_binary (code, mode, tem, op1);
3052 /* (and X (ior (not X) Y)) -> (and X Y) */
3053 if (GET_CODE (op1) == IOR
3054 && GET_CODE (XEXP (op1, 0)) == NOT
3055 && rtx_equal_p (op0, XEXP (XEXP (op1, 0), 0)))
3056 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));
3058 /* (and (ior (not X) Y) X) -> (and X Y) */
3059 if (GET_CODE (op0) == IOR
3060 && GET_CODE (XEXP (op0, 0)) == NOT
3061 && rtx_equal_p (op1, XEXP (XEXP (op0, 0), 0)))
3062 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));
3064 /* (and X (ior Y (not X))) -> (and X Y) */
3065 if (GET_CODE (op1) == IOR
3066 && GET_CODE (XEXP (op1, 1)) == NOT
3067 && rtx_equal_p (op0, XEXP (XEXP (op1, 1), 0)))
3068 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 0));
3070 /* (and (ior Y (not X)) X) -> (and X Y) */
3071 if (GET_CODE (op0) == IOR
3072 && GET_CODE (XEXP (op0, 1)) == NOT
3073 && rtx_equal_p (op1, XEXP (XEXP (op0, 1), 0)))
3074 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 0));
3076 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
3077 if (tem)
3078 return tem;
3080 tem = simplify_associative_operation (code, mode, op0, op1);
3081 if (tem)
3082 return tem;
3083 break;
3085 case UDIV:
3086 /* 0/x is 0 (or x&0 if x has side-effects). */
3087 if (trueop0 == CONST0_RTX (mode))
3089 if (side_effects_p (op1))
3090 return simplify_gen_binary (AND, mode, op1, trueop0);
3091 return trueop0;
3093 /* x/1 is x. */
3094 if (trueop1 == CONST1_RTX (mode))
3096 tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3097 if (tem)
3098 return tem;
3100 /* Convert divide by power of two into shift. */
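/* E.g. (udiv x (const_int 8)) becomes (lshiftrt x (const_int 3)).  */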
3101 if (CONST_INT_P (trueop1)
3102 && (val = exact_log2 (UINTVAL (trueop1))) > 0)
3103 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
3104 break;
3106 case DIV:
3107 /* Handle floating point and integers separately. */
3108 if (SCALAR_FLOAT_MODE_P (mode))
3110 /* Maybe change 0.0 / x to 0.0. This transformation isn't
3111 safe for modes with NaNs, since 0.0 / 0.0 will then be
3112 NaN rather than 0.0. Nor is it safe for modes with signed
3113 zeros, since dividing 0 by a negative number gives -0.0 */
3114 if (trueop0 == CONST0_RTX (mode)
3115 && !HONOR_NANS (mode)
3116 && !HONOR_SIGNED_ZEROS (mode)
3117 && ! side_effects_p (op1))
3118 return op0;
3119 /* x/1.0 is x. */
3120 if (trueop1 == CONST1_RTX (mode)
3121 && !HONOR_SNANS (mode))
3122 return op0;
3124 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
3125 && trueop1 != CONST0_RTX (mode))
3127 const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);
3129 /* x/-1.0 is -x. */
3130 if (real_equal (d1, &dconstm1)
3131 && !HONOR_SNANS (mode))
3132 return simplify_gen_unary (NEG, mode, op0, mode);
3134 /* Change FP division by a constant into multiplication.
3135 Only do this with -freciprocal-math. */
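/* E.g. with -freciprocal-math (illustrative), (div x (const_double 4.0))
   becomes (mult x (const_double 0.25)).  */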
3136 if (flag_reciprocal_math
3137 && !real_equal (d1, &dconst0))
3139 REAL_VALUE_TYPE d;
3140 real_arithmetic (&d, RDIV_EXPR, &dconst1, d1);
3141 tem = const_double_from_real_value (d, mode);
3142 return simplify_gen_binary (MULT, mode, op0, tem);
3146 else if (SCALAR_INT_MODE_P (mode))
3148 /* 0/x is 0 (or x&0 if x has side-effects). */
3149 if (trueop0 == CONST0_RTX (mode)
3150 && !cfun->can_throw_non_call_exceptions)
3152 if (side_effects_p (op1))
3153 return simplify_gen_binary (AND, mode, op1, trueop0);
3154 return trueop0;
3156 /* x/1 is x. */
3157 if (trueop1 == CONST1_RTX (mode))
3159 tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3160 if (tem)
3161 return tem;
3163 /* x/-1 is -x. */
3164 if (trueop1 == constm1_rtx)
3166 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3167 if (x)
3168 return simplify_gen_unary (NEG, mode, x, mode);
3171 break;
3173 case UMOD:
3174 /* 0%x is 0 (or x&0 if x has side-effects). */
3175 if (trueop0 == CONST0_RTX (mode))
3177 if (side_effects_p (op1))
3178 return simplify_gen_binary (AND, mode, op1, trueop0);
3179 return trueop0;
3181 /* x%1 is 0 (or x&0 if x has side-effects). */
3182 if (trueop1 == CONST1_RTX (mode))
3184 if (side_effects_p (op0))
3185 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3186 return CONST0_RTX (mode);
3188 /* Implement modulus by power of two as AND. */
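/* E.g. (umod x (const_int 8)) becomes (and x (const_int 7)).  */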
3189 if (CONST_INT_P (trueop1)
3190 && exact_log2 (UINTVAL (trueop1)) > 0)
3191 return simplify_gen_binary (AND, mode, op0,
3192 gen_int_mode (INTVAL (op1) - 1, mode));
3193 break;
3195 case MOD:
3196 /* 0%x is 0 (or x&0 if x has side-effects). */
3197 if (trueop0 == CONST0_RTX (mode))
3199 if (side_effects_p (op1))
3200 return simplify_gen_binary (AND, mode, op1, trueop0);
3201 return trueop0;
3203 /* x%1 and x%-1 are 0 (or x&0 if x has side-effects). */
3204 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
3206 if (side_effects_p (op0))
3207 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3208 return CONST0_RTX (mode);
3210 break;
3212 case ROTATERT:
3213 case ROTATE:
3214 /* Canonicalize rotates by a constant amount. If op1 is bitsize / 2,
3215 prefer left rotation; if op1 is in the range bitsize / 2 + 1 to
3216 bitsize - 1, use the other direction of rotate with an amount of
3217 1 .. bitsize / 2 - 1 instead. */
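/* E.g. in SImode (illustrative), (rotate x (const_int 24)) becomes
   (rotatert x (const_int 8)) when both rotate directions are
   available on the target.  */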
3218 #if defined(HAVE_rotate) && defined(HAVE_rotatert)
3219 if (CONST_INT_P (trueop1)
3220 && IN_RANGE (INTVAL (trueop1),
3221 GET_MODE_PRECISION (mode) / 2 + (code == ROTATE),
3222 GET_MODE_PRECISION (mode) - 1))
3223 return simplify_gen_binary (code == ROTATE ? ROTATERT : ROTATE,
3224 mode, op0, GEN_INT (GET_MODE_PRECISION (mode)
3225 - INTVAL (trueop1)));
3226 #endif
3227 /* FALLTHRU */
3228 case ASHIFTRT:
3229 if (trueop1 == CONST0_RTX (mode))
3230 return op0;
3231 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3232 return op0;
3233 /* Rotating ~0 always results in ~0. */
3234 if (CONST_INT_P (trueop0) && width <= HOST_BITS_PER_WIDE_INT
3235 && UINTVAL (trueop0) == GET_MODE_MASK (mode)
3236 && ! side_effects_p (op1))
3237 return op0;
3238 /* Given:
3239 scalar modes M1, M2
3240 scalar constants c1, c2
3241 size (M2) > size (M1)
3242 c1 == size (M2) - size (M1)
3243 optimize:
3244 (ashiftrt:M1 (subreg:M1 (lshiftrt:M2 (reg:M2) (const_int <c1>))
3245 <low_part>)
3246 (const_int <c2>))
3247 to:
3248 (subreg:M1 (ashiftrt:M2 (reg:M2) (const_int <c1 + c2>))
3249 <low_part>). */
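/* Illustrative instance with M1 = SImode, M2 = DImode, c1 = 32 and
   c2 = 5: the SImode arithmetic shift of the low part of a DImode
   logical shift by 32 becomes the low part of a single DImode
   arithmetic shift by 37.  */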
3250 if (code == ASHIFTRT
3251 && !VECTOR_MODE_P (mode)
3252 && SUBREG_P (op0)
3253 && CONST_INT_P (op1)
3254 && GET_CODE (SUBREG_REG (op0)) == LSHIFTRT
3255 && !VECTOR_MODE_P (GET_MODE (SUBREG_REG (op0)))
3256 && CONST_INT_P (XEXP (SUBREG_REG (op0), 1))
3257 && (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0)))
3258 > GET_MODE_BITSIZE (mode))
3259 && (INTVAL (XEXP (SUBREG_REG (op0), 1))
3260 == (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0)))
3261 - GET_MODE_BITSIZE (mode)))
3262 && subreg_lowpart_p (op0))
3264 rtx tmp = GEN_INT (INTVAL (XEXP (SUBREG_REG (op0), 1))
3265 + INTVAL (op1));
3266 machine_mode inner_mode = GET_MODE (SUBREG_REG (op0));
3267 tmp = simplify_gen_binary (ASHIFTRT,
3268 GET_MODE (SUBREG_REG (op0)),
3269 XEXP (SUBREG_REG (op0), 0),
3270 tmp);
3271 return lowpart_subreg (mode, tmp, inner_mode);
3273 canonicalize_shift:
3274 if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
3276 val = INTVAL (op1) & (GET_MODE_PRECISION (mode) - 1);
3277 if (val != INTVAL (op1))
3278 return simplify_gen_binary (code, mode, op0, GEN_INT (val));
3280 break;
3282 case ASHIFT:
3283 case SS_ASHIFT:
3284 case US_ASHIFT:
3285 if (trueop1 == CONST0_RTX (mode))
3286 return op0;
3287 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3288 return op0;
3289 goto canonicalize_shift;
3291 case LSHIFTRT:
3292 if (trueop1 == CONST0_RTX (mode))
3293 return op0;
3294 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3295 return op0;
3296 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
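/* Illustrative sketch: on a target where STORE_FLAG_VALUE is 1 and
   CLZ of zero is defined as 32 for SImode, (lshiftrt (clz:SI x)
   (const_int 5)) becomes (eq:SI x (const_int 0)).  */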
3297 if (GET_CODE (op0) == CLZ
3298 && CONST_INT_P (trueop1)
3299 && STORE_FLAG_VALUE == 1
3300 && INTVAL (trueop1) < (HOST_WIDE_INT)width)
3302 machine_mode imode = GET_MODE (XEXP (op0, 0));
3303 unsigned HOST_WIDE_INT zero_val = 0;
3305 if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
3306 && zero_val == GET_MODE_PRECISION (imode)
3307 && INTVAL (trueop1) == exact_log2 (zero_val))
3308 return simplify_gen_relational (EQ, mode, imode,
3309 XEXP (op0, 0), const0_rtx);
3311 goto canonicalize_shift;
3313 case SMIN:
3314 if (width <= HOST_BITS_PER_WIDE_INT
3315 && mode_signbit_p (mode, trueop1)
3316 && ! side_effects_p (op0))
3317 return op1;
3318 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3319 return op0;
3320 tem = simplify_associative_operation (code, mode, op0, op1);
3321 if (tem)
3322 return tem;
3323 break;
3325 case SMAX:
3326 if (width <= HOST_BITS_PER_WIDE_INT
3327 && CONST_INT_P (trueop1)
3328 && (UINTVAL (trueop1) == GET_MODE_MASK (mode) >> 1)
3329 && ! side_effects_p (op0))
3330 return op1;
3331 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3332 return op0;
3333 tem = simplify_associative_operation (code, mode, op0, op1);
3334 if (tem)
3335 return tem;
3336 break;
3338 case UMIN:
3339 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
3340 return op1;
3341 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3342 return op0;
3343 tem = simplify_associative_operation (code, mode, op0, op1);
3344 if (tem)
3345 return tem;
3346 break;
3348 case UMAX:
3349 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
3350 return op1;
3351 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3352 return op0;
3353 tem = simplify_associative_operation (code, mode, op0, op1);
3354 if (tem)
3355 return tem;
3356 break;
3358 case SS_PLUS:
3359 case US_PLUS:
3360 case SS_MINUS:
3361 case US_MINUS:
3362 case SS_MULT:
3363 case US_MULT:
3364 case SS_DIV:
3365 case US_DIV:
3366 /* ??? There are simplifications that can be done. */
3367 return 0;
3369 case VEC_SELECT:
3370 if (!VECTOR_MODE_P (mode))
3372 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3373 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
3374 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3375 gcc_assert (XVECLEN (trueop1, 0) == 1);
3376 gcc_assert (CONST_INT_P (XVECEXP (trueop1, 0, 0)));
3378 if (GET_CODE (trueop0) == CONST_VECTOR)
3379 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
3380 (trueop1, 0, 0)));
3382 /* Extract a scalar element from a nested VEC_SELECT expression
3383 (with an optional nested VEC_CONCAT expression). Some targets
3384 (i386) extract a scalar element from a vector using a chain of
3385 nested VEC_SELECT expressions. When the input operand is a memory
3386 operand, this operation can be simplified to a simple scalar
3387 load from an offset memory address. */
3388 if (GET_CODE (trueop0) == VEC_SELECT)
3390 rtx op0 = XEXP (trueop0, 0);
3391 rtx op1 = XEXP (trueop0, 1);
3393 machine_mode opmode = GET_MODE (op0);
3394 int elt_size = GET_MODE_UNIT_SIZE (opmode);
3395 int n_elts = GET_MODE_SIZE (opmode) / elt_size;
3397 int i = INTVAL (XVECEXP (trueop1, 0, 0));
3398 int elem;
3400 rtvec vec;
3401 rtx tmp_op, tmp;
3403 gcc_assert (GET_CODE (op1) == PARALLEL);
3404 gcc_assert (i < n_elts);
3406 /* Select the element pointed to by the nested selector. */
3407 elem = INTVAL (XVECEXP (op1, 0, i));
3409 /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */
3410 if (GET_CODE (op0) == VEC_CONCAT)
3412 rtx op00 = XEXP (op0, 0);
3413 rtx op01 = XEXP (op0, 1);
3415 machine_mode mode00, mode01;
3416 int n_elts00, n_elts01;
3418 mode00 = GET_MODE (op00);
3419 mode01 = GET_MODE (op01);
3421 /* Find out number of elements of each operand. */
3422 if (VECTOR_MODE_P (mode00))
3424 elt_size = GET_MODE_UNIT_SIZE (mode00);
3425 n_elts00 = GET_MODE_SIZE (mode00) / elt_size;
3427 else
3428 n_elts00 = 1;
3430 if (VECTOR_MODE_P (mode01))
3432 elt_size = GET_MODE_UNIT_SIZE (mode01);
3433 n_elts01 = GET_MODE_SIZE (mode01) / elt_size;
3435 else
3436 n_elts01 = 1;
3438 gcc_assert (n_elts == n_elts00 + n_elts01);
3440 /* Select correct operand of VEC_CONCAT
3441 and adjust selector. */
3442 if (elem < n_elts01)
3443 tmp_op = op00;
3444 else
3446 tmp_op = op01;
3447 elem -= n_elts00;
3450 else
3451 tmp_op = op0;
3453 vec = rtvec_alloc (1);
3454 RTVEC_ELT (vec, 0) = GEN_INT (elem);
3456 tmp = gen_rtx_fmt_ee (code, mode,
3457 tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
3458 return tmp;
3460 if (GET_CODE (trueop0) == VEC_DUPLICATE
3461 && GET_MODE (XEXP (trueop0, 0)) == mode)
3462 return XEXP (trueop0, 0);
3464 else
3466 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3467 gcc_assert (GET_MODE_INNER (mode)
3468 == GET_MODE_INNER (GET_MODE (trueop0)));
3469 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3471 if (GET_CODE (trueop0) == CONST_VECTOR)
3473 int elt_size = GET_MODE_UNIT_SIZE (mode);
3474 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3475 rtvec v = rtvec_alloc (n_elts);
3476 unsigned int i;
3478 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
3479 for (i = 0; i < n_elts; i++)
3481 rtx x = XVECEXP (trueop1, 0, i);
3483 gcc_assert (CONST_INT_P (x));
3484 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
3485 INTVAL (x));
3488 return gen_rtx_CONST_VECTOR (mode, v);
3491 /* Recognize the identity. */
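/* E.g. (vec_select:V4SI x (parallel [(const_int 0) (const_int 1)
   (const_int 2) (const_int 3)])) with x in V4SImode is just x.  */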
3492 if (GET_MODE (trueop0) == mode)
3494 bool maybe_ident = true;
3495 for (int i = 0; i < XVECLEN (trueop1, 0); i++)
3497 rtx j = XVECEXP (trueop1, 0, i);
3498 if (!CONST_INT_P (j) || INTVAL (j) != i)
3500 maybe_ident = false;
3501 break;
3504 if (maybe_ident)
3505 return trueop0;
3508 /* If we build {a,b} then permute it, build the result directly. */
3509 if (XVECLEN (trueop1, 0) == 2
3510 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3511 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3512 && GET_CODE (trueop0) == VEC_CONCAT
3513 && GET_CODE (XEXP (trueop0, 0)) == VEC_CONCAT
3514 && GET_MODE (XEXP (trueop0, 0)) == mode
3515 && GET_CODE (XEXP (trueop0, 1)) == VEC_CONCAT
3516 && GET_MODE (XEXP (trueop0, 1)) == mode)
3518 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3519 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3520 rtx subop0, subop1;
3522 gcc_assert (i0 < 4 && i1 < 4);
3523 subop0 = XEXP (XEXP (trueop0, i0 / 2), i0 % 2);
3524 subop1 = XEXP (XEXP (trueop0, i1 / 2), i1 % 2);
3526 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3529 if (XVECLEN (trueop1, 0) == 2
3530 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3531 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3532 && GET_CODE (trueop0) == VEC_CONCAT
3533 && GET_MODE (trueop0) == mode)
3535 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3536 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3537 rtx subop0, subop1;
3539 gcc_assert (i0 < 2 && i1 < 2);
3540 subop0 = XEXP (trueop0, i0);
3541 subop1 = XEXP (trueop0, i1);
3543 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3546 /* If we select one half of a vec_concat, return that. */
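/* E.g. (vec_select:V2SI (vec_concat:V4SI a b) (parallel
   [(const_int 0) (const_int 1)])) with a and b in V2SImode is a,
   and selecting indices 2 and 3 instead yields b.  */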
3547 if (GET_CODE (trueop0) == VEC_CONCAT
3548 && CONST_INT_P (XVECEXP (trueop1, 0, 0)))
3550 rtx subop0 = XEXP (trueop0, 0);
3551 rtx subop1 = XEXP (trueop0, 1);
3552 machine_mode mode0 = GET_MODE (subop0);
3553 machine_mode mode1 = GET_MODE (subop1);
3554 int li = GET_MODE_UNIT_SIZE (mode0);
3555 int l0 = GET_MODE_SIZE (mode0) / li;
3556 int l1 = GET_MODE_SIZE (mode1) / li;
3557 int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3558 if (i0 == 0 && !side_effects_p (op1) && mode == mode0)
3560 bool success = true;
3561 for (int i = 1; i < l0; ++i)
3563 rtx j = XVECEXP (trueop1, 0, i);
3564 if (!CONST_INT_P (j) || INTVAL (j) != i)
3566 success = false;
3567 break;
3570 if (success)
3571 return subop0;
3573 if (i0 == l0 && !side_effects_p (op0) && mode == mode1)
3575 bool success = true;
3576 for (int i = 1; i < l1; ++i)
3578 rtx j = XVECEXP (trueop1, 0, i);
3579 if (!CONST_INT_P (j) || INTVAL (j) != i0 + i)
3581 success = false;
3582 break;
3585 if (success)
3586 return subop1;
3591 if (XVECLEN (trueop1, 0) == 1
3592 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3593 && GET_CODE (trueop0) == VEC_CONCAT)
3595 rtx vec = trueop0;
3596 int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
3598 /* Try to find the element in the VEC_CONCAT. */
3599 while (GET_MODE (vec) != mode
3600 && GET_CODE (vec) == VEC_CONCAT)
3602 HOST_WIDE_INT vec_size;
3604 if (CONST_INT_P (XEXP (vec, 0)))
3606 /* vec_concat of two const_ints doesn't make sense with
3607 respect to modes. */
3608 if (CONST_INT_P (XEXP (vec, 1)))
3609 return 0;
3611 vec_size = GET_MODE_SIZE (GET_MODE (trueop0))
3612 - GET_MODE_SIZE (GET_MODE (XEXP (vec, 1)));
3614 else
3615 vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
3617 if (offset < vec_size)
3618 vec = XEXP (vec, 0);
3619 else
3621 offset -= vec_size;
3622 vec = XEXP (vec, 1);
3624 vec = avoid_constant_pool_reference (vec);
3627 if (GET_MODE (vec) == mode)
3628 return vec;
3631 /* If we select elements in a vec_merge that all come from the same
3632 operand, select from that operand directly. */
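/* For instance, if OP0 is (vec_merge x y 5), the mask 5 (0b0101) takes
   elements 0 and 2 from X, so a select that asks only for elements 0 and 2
   can be applied to X alone. */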
3633 if (GET_CODE (op0) == VEC_MERGE)
3635 rtx trueop02 = avoid_constant_pool_reference (XEXP (op0, 2));
3636 if (CONST_INT_P (trueop02))
3638 unsigned HOST_WIDE_INT sel = UINTVAL (trueop02);
3639 bool all_operand0 = true;
3640 bool all_operand1 = true;
3641 for (int i = 0; i < XVECLEN (trueop1, 0); i++)
3643 rtx j = XVECEXP (trueop1, 0, i);
3644 if (sel & (1 << UINTVAL (j)))
3645 all_operand1 = false;
3646 else
3647 all_operand0 = false;
3649 if (all_operand0 && !side_effects_p (XEXP (op0, 1)))
3650 return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 0), op1);
3651 if (all_operand1 && !side_effects_p (XEXP (op0, 0)))
3652 return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 1), op1);
3656 /* If we have two nested selects that are inverses of each
3657 other, replace them with the source operand. */
3658 if (GET_CODE (trueop0) == VEC_SELECT
3659 && GET_MODE (XEXP (trueop0, 0)) == mode)
3661 rtx op0_subop1 = XEXP (trueop0, 1);
3662 gcc_assert (GET_CODE (op0_subop1) == PARALLEL);
3663 gcc_assert (XVECLEN (trueop1, 0) == GET_MODE_NUNITS (mode));
3665 /* Apply the outer ordering vector to the inner one. (The inner
3666 ordering vector is expressly permitted to be of a different
3667 length than the outer one.) If the result is { 0, 1, ..., n-1 }
3668 then the two VEC_SELECTs cancel. */
3669 for (int i = 0; i < XVECLEN (trueop1, 0); ++i)
3671 rtx x = XVECEXP (trueop1, 0, i);
3672 if (!CONST_INT_P (x))
3673 return 0;
3674 rtx y = XVECEXP (op0_subop1, 0, INTVAL (x));
3675 if (!CONST_INT_P (y) || i != INTVAL (y))
3676 return 0;
3678 return XEXP (trueop0, 0);
3681 return 0;
3682 case VEC_CONCAT:
3684 machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
3685 ? GET_MODE (trueop0)
3686 : GET_MODE_INNER (mode));
3687 machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
3688 ? GET_MODE (trueop1)
3689 : GET_MODE_INNER (mode));
3691 gcc_assert (VECTOR_MODE_P (mode));
3692 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
3693 == GET_MODE_SIZE (mode));
3695 if (VECTOR_MODE_P (op0_mode))
3696 gcc_assert (GET_MODE_INNER (mode)
3697 == GET_MODE_INNER (op0_mode));
3698 else
3699 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
3701 if (VECTOR_MODE_P (op1_mode))
3702 gcc_assert (GET_MODE_INNER (mode)
3703 == GET_MODE_INNER (op1_mode));
3704 else
3705 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
3707 if ((GET_CODE (trueop0) == CONST_VECTOR
3708 || CONST_SCALAR_INT_P (trueop0)
3709 || CONST_DOUBLE_AS_FLOAT_P (trueop0))
3710 && (GET_CODE (trueop1) == CONST_VECTOR
3711 || CONST_SCALAR_INT_P (trueop1)
3712 || CONST_DOUBLE_AS_FLOAT_P (trueop1)))
3714 int elt_size = GET_MODE_UNIT_SIZE (mode);
3715 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3716 rtvec v = rtvec_alloc (n_elts);
3717 unsigned int i;
3718 unsigned in_n_elts = 1;
3720 if (VECTOR_MODE_P (op0_mode))
3721 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
3722 for (i = 0; i < n_elts; i++)
3724 if (i < in_n_elts)
3726 if (!VECTOR_MODE_P (op0_mode))
3727 RTVEC_ELT (v, i) = trueop0;
3728 else
3729 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
3731 else
3733 if (!VECTOR_MODE_P (op1_mode))
3734 RTVEC_ELT (v, i) = trueop1;
3735 else
3736 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
3737 i - in_n_elts);
3741 return gen_rtx_CONST_VECTOR (mode, v);
3744 /* Try to merge two VEC_SELECTs from the same vector into a single one.
3745 Restrict the transformation to avoid generating a VEC_SELECT with a
3746 mode unrelated to its operand. */
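/* For instance, (vec_concat (vec_select x (parallel [1]))
   (vec_select x (parallel [0]))) becomes (vec_select x (parallel [1 0]))
   when X already has the result's vector mode. */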
3747 if (GET_CODE (trueop0) == VEC_SELECT
3748 && GET_CODE (trueop1) == VEC_SELECT
3749 && rtx_equal_p (XEXP (trueop0, 0), XEXP (trueop1, 0))
3750 && GET_MODE (XEXP (trueop0, 0)) == mode)
3752 rtx par0 = XEXP (trueop0, 1);
3753 rtx par1 = XEXP (trueop1, 1);
3754 int len0 = XVECLEN (par0, 0);
3755 int len1 = XVECLEN (par1, 0);
3756 rtvec vec = rtvec_alloc (len0 + len1);
3757 for (int i = 0; i < len0; i++)
3758 RTVEC_ELT (vec, i) = XVECEXP (par0, 0, i);
3759 for (int i = 0; i < len1; i++)
3760 RTVEC_ELT (vec, len0 + i) = XVECEXP (par1, 0, i);
3761 return simplify_gen_binary (VEC_SELECT, mode, XEXP (trueop0, 0),
3762 gen_rtx_PARALLEL (VOIDmode, vec));
3765 return 0;
3767 default:
3768 gcc_unreachable ();
3771 return 0;
rtx
3775 simplify_const_binary_operation (enum rtx_code code, machine_mode mode,
3776 rtx op0, rtx op1)
3778 unsigned int width = GET_MODE_PRECISION (mode);
3780 if (VECTOR_MODE_P (mode)
3781 && code != VEC_CONCAT
3782 && GET_CODE (op0) == CONST_VECTOR
3783 && GET_CODE (op1) == CONST_VECTOR)
3785 unsigned n_elts = GET_MODE_NUNITS (mode);
3786 machine_mode op0mode = GET_MODE (op0);
3787 unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
3788 machine_mode op1mode = GET_MODE (op1);
3789 unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
3790 rtvec v = rtvec_alloc (n_elts);
3791 unsigned int i;
3793 gcc_assert (op0_n_elts == n_elts);
3794 gcc_assert (op1_n_elts == n_elts);
3795 for (i = 0; i < n_elts; i++)
3797 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
3798 CONST_VECTOR_ELT (op0, i),
3799 CONST_VECTOR_ELT (op1, i));
3800 if (!x)
3801 return 0;
3802 RTVEC_ELT (v, i) = x;
3805 return gen_rtx_CONST_VECTOR (mode, v);
3808 if (VECTOR_MODE_P (mode)
3809 && code == VEC_CONCAT
3810 && (CONST_SCALAR_INT_P (op0)
3811 || GET_CODE (op0) == CONST_FIXED
3812 || CONST_DOUBLE_AS_FLOAT_P (op0))
3813 && (CONST_SCALAR_INT_P (op1)
3814 || CONST_DOUBLE_AS_FLOAT_P (op1)
3815 || GET_CODE (op1) == CONST_FIXED))
3817 unsigned n_elts = GET_MODE_NUNITS (mode);
3818 rtvec v = rtvec_alloc (n_elts);
3820 gcc_assert (n_elts >= 2);
3821 if (n_elts == 2)
3823 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
3824 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
3826 RTVEC_ELT (v, 0) = op0;
3827 RTVEC_ELT (v, 1) = op1;
3829 else
3831 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
3832 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
3833 unsigned i;
3835 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
3836 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
3837 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
3839 for (i = 0; i < op0_n_elts; ++i)
3840 RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
3841 for (i = 0; i < op1_n_elts; ++i)
3842 RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
3845 return gen_rtx_CONST_VECTOR (mode, v);
3848 if (SCALAR_FLOAT_MODE_P (mode)
3849 && CONST_DOUBLE_AS_FLOAT_P (op0)
3850 && CONST_DOUBLE_AS_FLOAT_P (op1)
3851 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
3853 if (code == AND
3854 || code == IOR
3855 || code == XOR)
3857 long tmp0[4];
3858 long tmp1[4];
3859 REAL_VALUE_TYPE r;
3860 int i;
3862 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
3863 GET_MODE (op0));
3864 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
3865 GET_MODE (op1));
3866 for (i = 0; i < 4; i++)
3868 switch (code)
3870 case AND:
3871 tmp0[i] &= tmp1[i];
3872 break;
3873 case IOR:
3874 tmp0[i] |= tmp1[i];
3875 break;
3876 case XOR:
3877 tmp0[i] ^= tmp1[i];
3878 break;
3879 default:
3880 gcc_unreachable ();
3883 real_from_target (&r, tmp0, mode);
3884 return const_double_from_real_value (r, mode);
3886 else
3888 REAL_VALUE_TYPE f0, f1, value, result;
3889 bool inexact;
3891 real_convert (&f0, mode, CONST_DOUBLE_REAL_VALUE (op0));
3892 real_convert (&f1, mode, CONST_DOUBLE_REAL_VALUE (op1));
3894 if (HONOR_SNANS (mode)
3895 && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
3896 return 0;
3898 if (code == DIV
3899 && real_equal (&f1, &dconst0)
3900 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
3901 return 0;
3903 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3904 && flag_trapping_math
3905 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
3907 int s0 = REAL_VALUE_NEGATIVE (f0);
3908 int s1 = REAL_VALUE_NEGATIVE (f1);
3910 switch (code)
3912 case PLUS:
3913 /* Inf + -Inf = NaN plus exception. */
3914 if (s0 != s1)
3915 return 0;
3916 break;
3917 case MINUS:
3918 /* Inf - Inf = NaN plus exception. */
3919 if (s0 == s1)
3920 return 0;
3921 break;
3922 case DIV:
3923 /* Inf / Inf = NaN plus exception. */
3924 return 0;
3925 default:
3926 break;
3930 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3931 && flag_trapping_math
3932 && ((REAL_VALUE_ISINF (f0) && real_equal (&f1, &dconst0))
3933 || (REAL_VALUE_ISINF (f1)
3934 && real_equal (&f0, &dconst0))))
3935 /* Inf * 0 = NaN plus exception. */
3936 return 0;
3938 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
3939 &f0, &f1);
3940 real_convert (&result, mode, &value);
3942 /* Don't constant fold this floating point operation if
3943 the result has overflowed and flag_trapping_math. */
3945 if (flag_trapping_math
3946 && MODE_HAS_INFINITIES (mode)
3947 && REAL_VALUE_ISINF (result)
3948 && !REAL_VALUE_ISINF (f0)
3949 && !REAL_VALUE_ISINF (f1))
3950 /* Overflow plus exception. */
3951 return 0;
3953 /* Don't constant fold this floating point operation if the
3954 result may depend upon the run-time rounding mode and
3955 flag_rounding_math is set, or if GCC's software emulation
3956 is unable to accurately represent the result. */
3958 if ((flag_rounding_math
3959 || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
3960 && (inexact || !real_identical (&result, &value)))
3961 return NULL_RTX;
3963 return const_double_from_real_value (result, mode);
3967 /* We can fold some multi-word operations. */
3968 if ((GET_MODE_CLASS (mode) == MODE_INT
3969 || GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
3970 && CONST_SCALAR_INT_P (op0)
3971 && CONST_SCALAR_INT_P (op1))
3973 wide_int result;
3974 bool overflow;
3975 rtx_mode_t pop0 = std::make_pair (op0, mode);
3976 rtx_mode_t pop1 = std::make_pair (op1, mode);
3978 #if TARGET_SUPPORTS_WIDE_INT == 0
3979 /* This assert keeps the simplification from producing a result
3980 that cannot be represented in a CONST_DOUBLE but a lot of
3981 upstream callers expect that this function never fails to
3982 simplify something, and so if you added this to the test
3983 above, the code would die later anyway. If this assert
3984 happens, you just need to make the port support wide int. */
3985 gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT);
3986 #endif
3987 switch (code)
3989 case MINUS:
3990 result = wi::sub (pop0, pop1);
3991 break;
3993 case PLUS:
3994 result = wi::add (pop0, pop1);
3995 break;
3997 case MULT:
3998 result = wi::mul (pop0, pop1);
3999 break;
4001 case DIV:
4002 result = wi::div_trunc (pop0, pop1, SIGNED, &overflow);
4003 if (overflow)
4004 return NULL_RTX;
4005 break;
4007 case MOD:
4008 result = wi::mod_trunc (pop0, pop1, SIGNED, &overflow);
4009 if (overflow)
4010 return NULL_RTX;
4011 break;
4013 case UDIV:
4014 result = wi::div_trunc (pop0, pop1, UNSIGNED, &overflow);
4015 if (overflow)
4016 return NULL_RTX;
4017 break;
4019 case UMOD:
4020 result = wi::mod_trunc (pop0, pop1, UNSIGNED, &overflow);
4021 if (overflow)
4022 return NULL_RTX;
4023 break;
4025 case AND:
4026 result = wi::bit_and (pop0, pop1);
4027 break;
4029 case IOR:
4030 result = wi::bit_or (pop0, pop1);
4031 break;
4033 case XOR:
4034 result = wi::bit_xor (pop0, pop1);
4035 break;
4037 case SMIN:
4038 result = wi::smin (pop0, pop1);
4039 break;
4041 case SMAX:
4042 result = wi::smax (pop0, pop1);
4043 break;
4045 case UMIN:
4046 result = wi::umin (pop0, pop1);
4047 break;
4049 case UMAX:
4050 result = wi::umax (pop0, pop1);
4051 break;
4053 case LSHIFTRT:
4054 case ASHIFTRT:
4055 case ASHIFT:
4057 wide_int wop1 = pop1;
4058 if (SHIFT_COUNT_TRUNCATED)
4059 wop1 = wi::umod_trunc (wop1, width);
4060 else if (wi::geu_p (wop1, width))
4061 return NULL_RTX;
4063 switch (code)
4065 case LSHIFTRT:
4066 result = wi::lrshift (pop0, wop1);
4067 break;
4069 case ASHIFTRT:
4070 result = wi::arshift (pop0, wop1);
4071 break;
4073 case ASHIFT:
4074 result = wi::lshift (pop0, wop1);
4075 break;
4077 default:
4078 gcc_unreachable ();
4080 break;
4082 case ROTATE:
4083 case ROTATERT:
4085 if (wi::neg_p (pop1))
4086 return NULL_RTX;
4088 switch (code)
4090 case ROTATE:
4091 result = wi::lrotate (pop0, pop1);
4092 break;
4094 case ROTATERT:
4095 result = wi::rrotate (pop0, pop1);
4096 break;
4098 default:
4099 gcc_unreachable ();
4101 break;
4103 default:
4104 return NULL_RTX;
4106 return immed_wide_int_const (result, mode);
4109 return NULL_RTX;
4114 /* Return a positive integer if X should sort after Y. The value
4115 returned is 1 if and only if X and Y are both regs. */
4117 static int
4118 simplify_plus_minus_op_data_cmp (rtx x, rtx y)
4120 int result;
4122 result = (commutative_operand_precedence (y)
4123 - commutative_operand_precedence (x));
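/* The difference is doubled below so that a nonzero precedence result can
   never be 1; that value is reserved for the REG-vs-REG case, letting the
   caller tell plain register reordering apart from a real canonicalization. */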
4124 if (result)
4125 return result + result;
4127 /* Group together equal REGs to do more simplification. */
4128 if (REG_P (x) && REG_P (y))
4129 return REGNO (x) > REGNO (y);
4131 return 0;
4134 /* Simplify and canonicalize a PLUS or MINUS, at least one of whose
4135 operands may be another PLUS or MINUS.
4137 Rather than test for specific cases, we do this by a brute-force method
4138 and do all possible simplifications until no more changes occur. Then
4139 we rebuild the operation.
4141 May return NULL_RTX when no changes were made. */
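/* For example, (minus (plus a b) (plus a c)) is expanded into the operand
   list { +a, +b, -a, -c }; the matching +a/-a pair folds to zero and the
   surviving operands are rebuilt as (minus b c). */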
4143 static rtx
4144 simplify_plus_minus (enum rtx_code code, machine_mode mode, rtx op0,
4145 rtx op1)
4147 struct simplify_plus_minus_op_data
4149 rtx op;
4150 short neg;
4151 } ops[16];
4152 rtx result, tem;
4153 int n_ops = 2;
4154 int changed, n_constants, canonicalized = 0;
4155 int i, j;
4157 memset (ops, 0, sizeof ops);
4159 /* Set up the two operands and then expand them until nothing has been
4160 changed. If we run out of room in our array, give up; this should
4161 almost never happen. */
4163 ops[0].op = op0;
4164 ops[0].neg = 0;
4165 ops[1].op = op1;
4166 ops[1].neg = (code == MINUS);
4170 changed = 0;
4171 n_constants = 0;
4173 for (i = 0; i < n_ops; i++)
4175 rtx this_op = ops[i].op;
4176 int this_neg = ops[i].neg;
4177 enum rtx_code this_code = GET_CODE (this_op);
4179 switch (this_code)
4181 case PLUS:
4182 case MINUS:
4183 if (n_ops == ARRAY_SIZE (ops))
4184 return NULL_RTX;
4186 ops[n_ops].op = XEXP (this_op, 1);
4187 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
4188 n_ops++;
4190 ops[i].op = XEXP (this_op, 0);
4191 changed = 1;
4192 /* If this operand was negated then we will potentially
4193 canonicalize the expression. Similarly if we don't
4194 place the operands adjacent we're re-ordering the
4195 expression and thus might be performing a
4196 canonicalization. Ignore register re-ordering.
4197 ??? It might be better to shuffle the ops array here,
4198 but then (plus (plus A B) (plus C D)) wouldn't
4199 be seen as non-canonical. */
4200 if (this_neg
4201 || (i != n_ops - 2
4202 && !(REG_P (ops[i].op) && REG_P (ops[n_ops - 1].op))))
4203 canonicalized = 1;
4204 break;
4206 case NEG:
4207 ops[i].op = XEXP (this_op, 0);
4208 ops[i].neg = ! this_neg;
4209 changed = 1;
4210 canonicalized = 1;
4211 break;
4213 case CONST:
4214 if (n_ops != ARRAY_SIZE (ops)
4215 && GET_CODE (XEXP (this_op, 0)) == PLUS
4216 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
4217 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
4219 ops[i].op = XEXP (XEXP (this_op, 0), 0);
4220 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
4221 ops[n_ops].neg = this_neg;
4222 n_ops++;
4223 changed = 1;
4224 canonicalized = 1;
4226 break;
4228 case NOT:
4229 /* ~a -> (-a - 1) */
4230 if (n_ops != ARRAY_SIZE (ops))
4232 ops[n_ops].op = CONSTM1_RTX (mode);
4233 ops[n_ops++].neg = this_neg;
4234 ops[i].op = XEXP (this_op, 0);
4235 ops[i].neg = !this_neg;
4236 changed = 1;
4237 canonicalized = 1;
4239 break;
4241 case CONST_INT:
4242 n_constants++;
4243 if (this_neg)
4245 ops[i].op = neg_const_int (mode, this_op);
4246 ops[i].neg = 0;
4247 changed = 1;
4248 canonicalized = 1;
4250 break;
4252 default:
4253 break;
4257 while (changed);
4259 if (n_constants > 1)
4260 canonicalized = 1;
4262 gcc_assert (n_ops >= 2);
4264 /* If we only have two operands, we can avoid the loops. */
4265 if (n_ops == 2)
4267 enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
4268 rtx lhs, rhs;
4270 /* Get the two operands. Be careful with the order, especially for
4271 the cases where code == MINUS. */
4272 if (ops[0].neg && ops[1].neg)
4274 lhs = gen_rtx_NEG (mode, ops[0].op);
4275 rhs = ops[1].op;
4277 else if (ops[0].neg)
4279 lhs = ops[1].op;
4280 rhs = ops[0].op;
4282 else
4284 lhs = ops[0].op;
4285 rhs = ops[1].op;
4288 return simplify_const_binary_operation (code, mode, lhs, rhs);
4291 /* Now simplify each pair of operands until nothing changes. */
4292 while (1)
4294 /* Insertion sort is good enough for a small array. */
4295 for (i = 1; i < n_ops; i++)
4297 struct simplify_plus_minus_op_data save;
4298 int cmp;
4300 j = i - 1;
4301 cmp = simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op);
4302 if (cmp <= 0)
4303 continue;
4304 /* Just swapping registers doesn't count as canonicalization. */
4305 if (cmp != 1)
4306 canonicalized = 1;
4308 save = ops[i];
4310 ops[j + 1] = ops[j];
4311 while (j--
4312 && simplify_plus_minus_op_data_cmp (ops[j].op, save.op) > 0);
4313 ops[j + 1] = save;
4316 changed = 0;
4317 for (i = n_ops - 1; i > 0; i--)
4318 for (j = i - 1; j >= 0; j--)
4320 rtx lhs = ops[j].op, rhs = ops[i].op;
4321 int lneg = ops[j].neg, rneg = ops[i].neg;
4323 if (lhs != 0 && rhs != 0)
4325 enum rtx_code ncode = PLUS;
4327 if (lneg != rneg)
4329 ncode = MINUS;
4330 if (lneg)
4331 std::swap (lhs, rhs);
4333 else if (swap_commutative_operands_p (lhs, rhs))
4334 std::swap (lhs, rhs);
4336 if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
4337 && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
4339 rtx tem_lhs, tem_rhs;
4341 tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
4342 tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
4343 tem = simplify_binary_operation (ncode, mode, tem_lhs,
4344 tem_rhs);
4346 if (tem && !CONSTANT_P (tem))
4347 tem = gen_rtx_CONST (GET_MODE (tem), tem);
4349 else
4350 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
4352 if (tem)
4354 /* Reject "simplifications" that just wrap the two
4355 arguments in a CONST. Failure to do so can result
4356 in infinite recursion with simplify_binary_operation
4357 when it calls us to simplify CONST operations.
4358 Also, if we find such a simplification, don't try
4359 any more combinations with this rhs: We must have
4360 something like symbol+offset, i.e. one of the
4361 trivial CONST expressions we handle later. */
4362 if (GET_CODE (tem) == CONST
4363 && GET_CODE (XEXP (tem, 0)) == ncode
4364 && XEXP (XEXP (tem, 0), 0) == lhs
4365 && XEXP (XEXP (tem, 0), 1) == rhs)
4366 break;
4367 lneg &= rneg;
4368 if (GET_CODE (tem) == NEG)
4369 tem = XEXP (tem, 0), lneg = !lneg;
4370 if (CONST_INT_P (tem) && lneg)
4371 tem = neg_const_int (mode, tem), lneg = 0;
4373 ops[i].op = tem;
4374 ops[i].neg = lneg;
4375 ops[j].op = NULL_RTX;
4376 changed = 1;
4377 canonicalized = 1;
4382 if (!changed)
4383 break;
4385 /* Pack all the operands to the lower-numbered entries. */
4386 for (i = 0, j = 0; j < n_ops; j++)
4387 if (ops[j].op)
4389 ops[i] = ops[j];
4390 i++;
4392 n_ops = i;
4395 /* If nothing changed, fail. */
4396 if (!canonicalized)
4397 return NULL_RTX;
4399 /* Create (minus -C X) instead of (neg (const (plus X C))). */
4400 if (n_ops == 2
4401 && CONST_INT_P (ops[1].op)
4402 && CONSTANT_P (ops[0].op)
4403 && ops[0].neg)
4404 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
4406 /* We suppressed creation of trivial CONST expressions in the
4407 combination loop to avoid recursion. Create one manually now.
4408 The combination loop should have ensured that there is exactly
4409 one CONST_INT, and the sort will have ensured that it is last
4410 in the array and that any other constant will be next-to-last. */
4412 if (n_ops > 1
4413 && CONST_INT_P (ops[n_ops - 1].op)
4414 && CONSTANT_P (ops[n_ops - 2].op))
4416 rtx value = ops[n_ops - 1].op;
4417 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
4418 value = neg_const_int (mode, value);
4419 ops[n_ops - 2].op = plus_constant (mode, ops[n_ops - 2].op,
4420 INTVAL (value));
4421 n_ops--;
4424 /* Put a non-negated operand first, if possible. */
4426 for (i = 0; i < n_ops && ops[i].neg; i++)
4427 continue;
4428 if (i == n_ops)
4429 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
4430 else if (i != 0)
4432 tem = ops[0].op;
4433 ops[0] = ops[i];
4434 ops[i].op = tem;
4435 ops[i].neg = 1;
4438 /* Now make the result by performing the requested operations. */
4439 result = ops[0].op;
4440 for (i = 1; i < n_ops; i++)
4441 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
4442 mode, result, ops[i].op);
4444 return result;
4447 /* Check whether an operand is suitable for calling simplify_plus_minus. */
4448 static bool
4449 plus_minus_operand_p (const_rtx x)
4451 return GET_CODE (x) == PLUS
4452 || GET_CODE (x) == MINUS
4453 || (GET_CODE (x) == CONST
4454 && GET_CODE (XEXP (x, 0)) == PLUS
4455 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
4456 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
4459 /* Like simplify_binary_operation except used for relational operators.
4460 MODE is the mode of the result. If MODE is VOIDmode, the operands must
4461 not both be VOIDmode as well.
4463 CMP_MODE specifies the mode in which the comparison is done, so it is
4464 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
4465 the operands or, if both are VOIDmode, the operands are compared in
4466 "infinite precision". */
rtx
4468 simplify_relational_operation (enum rtx_code code, machine_mode mode,
4469 machine_mode cmp_mode, rtx op0, rtx op1)
4471 rtx tem, trueop0, trueop1;
4473 if (cmp_mode == VOIDmode)
4474 cmp_mode = GET_MODE (op0);
4475 if (cmp_mode == VOIDmode)
4476 cmp_mode = GET_MODE (op1);
4478 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
4479 if (tem)
4481 if (SCALAR_FLOAT_MODE_P (mode))
4483 if (tem == const0_rtx)
4484 return CONST0_RTX (mode);
4485 #ifdef FLOAT_STORE_FLAG_VALUE
4487 REAL_VALUE_TYPE val;
4488 val = FLOAT_STORE_FLAG_VALUE (mode);
4489 return const_double_from_real_value (val, mode);
4491 #else
4492 return NULL_RTX;
4493 #endif
4495 if (VECTOR_MODE_P (mode))
4497 if (tem == const0_rtx)
4498 return CONST0_RTX (mode);
4499 #ifdef VECTOR_STORE_FLAG_VALUE
4501 int i, units;
4502 rtvec v;
4504 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
4505 if (val == NULL_RTX)
4506 return NULL_RTX;
4507 if (val == const1_rtx)
4508 return CONST1_RTX (mode);
4510 units = GET_MODE_NUNITS (mode);
4511 v = rtvec_alloc (units);
4512 for (i = 0; i < units; i++)
4513 RTVEC_ELT (v, i) = val;
4514 return gen_rtx_raw_CONST_VECTOR (mode, v);
4516 #else
4517 return NULL_RTX;
4518 #endif
4521 return tem;
4524 /* For the following tests, ensure const0_rtx is op1. */
4525 if (swap_commutative_operands_p (op0, op1)
4526 || (op0 == const0_rtx && op1 != const0_rtx))
4527 std::swap (op0, op1), code = swap_condition (code);
4529 /* If op0 is a compare, extract the comparison arguments from it. */
4530 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4531 return simplify_gen_relational (code, mode, VOIDmode,
4532 XEXP (op0, 0), XEXP (op0, 1));
4534 if (GET_MODE_CLASS (cmp_mode) == MODE_CC
4535 || CC0_P (op0))
4536 return NULL_RTX;
4538 trueop0 = avoid_constant_pool_reference (op0);
4539 trueop1 = avoid_constant_pool_reference (op1);
4540 return simplify_relational_operation_1 (code, mode, cmp_mode,
4541 trueop0, trueop1);
4544 /* This part of simplify_relational_operation is only used when CMP_MODE
4545 is not in class MODE_CC (i.e. it is a real comparison).
4547 MODE is the mode of the result, while CMP_MODE specifies the mode
4548 in which the comparison is done, so it is the mode of the operands. */
4550 static rtx
4551 simplify_relational_operation_1 (enum rtx_code code, machine_mode mode,
4552 machine_mode cmp_mode, rtx op0, rtx op1)
4554 enum rtx_code op0code = GET_CODE (op0);
4556 if (op1 == const0_rtx && COMPARISON_P (op0))
4558 /* If op0 is a comparison, extract the comparison arguments
4559 from it. */
4560 if (code == NE)
4562 if (GET_MODE (op0) == mode)
4563 return simplify_rtx (op0);
4564 else
4565 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
4566 XEXP (op0, 0), XEXP (op0, 1));
4568 else if (code == EQ)
4570 enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
4571 if (new_code != UNKNOWN)
4572 return simplify_gen_relational (new_code, mode, VOIDmode,
4573 XEXP (op0, 0), XEXP (op0, 1));
4577 /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
4578 (GEU/LTU a -C). Likewise for (LTU/GEU (PLUS a C) a). */
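/* For instance, (ltu (plus a 4) 4) is the usual unsigned overflow test for
   A + 4 and becomes (geu a -4), i.e. the wrap-around check A >= -4 in
   unsigned arithmetic. */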
4579 if ((code == LTU || code == GEU)
4580 && GET_CODE (op0) == PLUS
4581 && CONST_INT_P (XEXP (op0, 1))
4582 && (rtx_equal_p (op1, XEXP (op0, 0))
4583 || rtx_equal_p (op1, XEXP (op0, 1)))
4584 /* (LTU/GEU (PLUS a 0) 0) is not the same as (GEU/LTU a 0). */
4585 && XEXP (op0, 1) != const0_rtx)
4587 rtx new_cmp
4588 = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
4589 return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
4590 cmp_mode, XEXP (op0, 0), new_cmp);
4593 /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a). */
4594 if ((code == LTU || code == GEU)
4595 && GET_CODE (op0) == PLUS
4596 && rtx_equal_p (op1, XEXP (op0, 1))
4597 /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b). */
4598 && !rtx_equal_p (op1, XEXP (op0, 0)))
4599 return simplify_gen_relational (code, mode, cmp_mode, op0,
4600 copy_rtx (XEXP (op0, 0)));
4602 if (op1 == const0_rtx)
4604 /* Canonicalize (GTU x 0) as (NE x 0). */
4605 if (code == GTU)
4606 return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
4607 /* Canonicalize (LEU x 0) as (EQ x 0). */
4608 if (code == LEU)
4609 return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
4611 else if (op1 == const1_rtx)
4613 switch (code)
4615 case GE:
4616 /* Canonicalize (GE x 1) as (GT x 0). */
4617 return simplify_gen_relational (GT, mode, cmp_mode,
4618 op0, const0_rtx);
4619 case GEU:
4620 /* Canonicalize (GEU x 1) as (NE x 0). */
4621 return simplify_gen_relational (NE, mode, cmp_mode,
4622 op0, const0_rtx);
4623 case LT:
4624 /* Canonicalize (LT x 1) as (LE x 0). */
4625 return simplify_gen_relational (LE, mode, cmp_mode,
4626 op0, const0_rtx);
4627 case LTU:
4628 /* Canonicalize (LTU x 1) as (EQ x 0). */
4629 return simplify_gen_relational (EQ, mode, cmp_mode,
4630 op0, const0_rtx);
4631 default:
4632 break;
4635 else if (op1 == constm1_rtx)
4637 /* Canonicalize (LE x -1) as (LT x 0). */
4638 if (code == LE)
4639 return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
4640 /* Canonicalize (GT x -1) as (GE x 0). */
4641 if (code == GT)
4642 return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
4645 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
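/* For instance, (eq (plus x 3) 7) becomes (eq x 4). */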
4646 if ((code == EQ || code == NE)
4647 && (op0code == PLUS || op0code == MINUS)
4648 && CONSTANT_P (op1)
4649 && CONSTANT_P (XEXP (op0, 1))
4650 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
4652 rtx x = XEXP (op0, 0);
4653 rtx c = XEXP (op0, 1);
4654 enum rtx_code invcode = op0code == PLUS ? MINUS : PLUS;
4655 rtx tem = simplify_gen_binary (invcode, cmp_mode, op1, c);
4657 /* Detect an infinite recursion condition, where this simplification
4658 would make us oscillate between:
4659 A + B == C <---> C - B == A,
4660 where A, B, and C are all constants with non-simplifiable expressions,
4661 usually SYMBOL_REFs. */
4662 if (GET_CODE (tem) == invcode
4663 && CONSTANT_P (x)
4664 && rtx_equal_p (c, XEXP (tem, 1)))
4665 return NULL_RTX;
4667 return simplify_gen_relational (code, mode, cmp_mode, x, tem);
4670 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0)) is
4671 the same as (zero_extract:SI FOO (const_int 1) BAR). */
4672 if (code == NE
4673 && op1 == const0_rtx
4674 && GET_MODE_CLASS (mode) == MODE_INT
4675 && cmp_mode != VOIDmode
4676 /* ??? Work-around BImode bugs in the ia64 backend. */
4677 && mode != BImode
4678 && cmp_mode != BImode
4679 && nonzero_bits (op0, cmp_mode) == 1
4680 && STORE_FLAG_VALUE == 1)
4681 return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
4682 ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
4683 : lowpart_subreg (mode, op0, cmp_mode);
4685 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
4686 if ((code == EQ || code == NE)
4687 && op1 == const0_rtx
4688 && op0code == XOR)
4689 return simplify_gen_relational (code, mode, cmp_mode,
4690 XEXP (op0, 0), XEXP (op0, 1));
4692 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
4693 if ((code == EQ || code == NE)
4694 && op0code == XOR
4695 && rtx_equal_p (XEXP (op0, 0), op1)
4696 && !side_effects_p (XEXP (op0, 0)))
4697 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 1),
4698 CONST0_RTX (mode));
4700 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
4701 if ((code == EQ || code == NE)
4702 && op0code == XOR
4703 && rtx_equal_p (XEXP (op0, 1), op1)
4704 && !side_effects_p (XEXP (op0, 1)))
4705 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4706 CONST0_RTX (mode));
4708 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
4709 if ((code == EQ || code == NE)
4710 && op0code == XOR
4711 && CONST_SCALAR_INT_P (op1)
4712 && CONST_SCALAR_INT_P (XEXP (op0, 1)))
4713 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4714 simplify_gen_binary (XOR, cmp_mode,
4715 XEXP (op0, 1), op1));
4717 /* (eq/ne (and x y) x) simplifies to (eq/ne (and (not y) x) 0), which
4718 can be implemented with a BICS instruction on some targets, or
4719 constant-folded if y is a constant. */
4720 if ((code == EQ || code == NE)
4721 && op0code == AND
4722 && rtx_equal_p (XEXP (op0, 0), op1)
4723 && !side_effects_p (op1)
4724 && op1 != CONST0_RTX (cmp_mode))
4726 rtx not_y = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 1), cmp_mode);
4727 rtx lhs = simplify_gen_binary (AND, cmp_mode, not_y, XEXP (op0, 0));
4729 return simplify_gen_relational (code, mode, cmp_mode, lhs,
4730 CONST0_RTX (cmp_mode));
4733 /* Likewise for (eq/ne (and x y) y). */
4734 if ((code == EQ || code == NE)
4735 && op0code == AND
4736 && rtx_equal_p (XEXP (op0, 1), op1)
4737 && !side_effects_p (op1)
4738 && op1 != CONST0_RTX (cmp_mode))
4740 rtx not_x = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 0), cmp_mode);
4741 rtx lhs = simplify_gen_binary (AND, cmp_mode, not_x, XEXP (op0, 1));
4743 return simplify_gen_relational (code, mode, cmp_mode, lhs,
4744 CONST0_RTX (cmp_mode));
4747 /* (eq/ne (bswap x) C1) simplifies to (eq/ne x C2) with C2 swapped. */
4748 if ((code == EQ || code == NE)
4749 && GET_CODE (op0) == BSWAP
4750 && CONST_SCALAR_INT_P (op1))
4751 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4752 simplify_gen_unary (BSWAP, cmp_mode,
4753 op1, cmp_mode));
4755 /* (eq/ne (bswap x) (bswap y)) simplifies to (eq/ne x y). */
4756 if ((code == EQ || code == NE)
4757 && GET_CODE (op0) == BSWAP
4758 && GET_CODE (op1) == BSWAP)
4759 return simplify_gen_relational (code, mode, cmp_mode,
4760 XEXP (op0, 0), XEXP (op1, 0));
4762 if (op0code == POPCOUNT && op1 == const0_rtx)
4763 switch (code)
4765 case EQ:
4766 case LE:
4767 case LEU:
4768 /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */
4769 return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
4770 XEXP (op0, 0), const0_rtx);
4772 case NE:
4773 case GT:
4774 case GTU:
4775 /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */
4776 return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
4777 XEXP (op0, 0), const0_rtx);
4779 default:
4780 break;
4783 return NULL_RTX;
4786 enum
4788 CMP_EQ = 1,
4789 CMP_LT = 2,
4790 CMP_GT = 4,
4791 CMP_LTU = 8,
4792 CMP_GTU = 16
4796 /* Convert the known results for EQ, LT, GT, LTU, GTU contained in
4797 KNOWN_RESULT to a CONST_INT, based on the requested comparison CODE
4798 For KNOWN_RESULT to make sense it should be either CMP_EQ, or the
4799 logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
4800 For floating-point comparisons, assume that the operands were ordered. */
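/* For instance, a value of (CMP_LT | CMP_LTU) means the first operand is
   known to be below the second both signed and unsigned, so LE and LEU
   yield const_true_rtx while GT, GTU and EQ yield const0_rtx. */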
4802 static rtx
4803 comparison_result (enum rtx_code code, int known_results)
4805 switch (code)
4807 case EQ:
4808 case UNEQ:
4809 return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
4810 case NE:
4811 case LTGT:
4812 return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;
4814 case LT:
4815 case UNLT:
4816 return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
4817 case GE:
4818 case UNGE:
4819 return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;
4821 case GT:
4822 case UNGT:
4823 return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
4824 case LE:
4825 case UNLE:
4826 return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;
4828 case LTU:
4829 return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
4830 case GEU:
4831 return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;
4833 case GTU:
4834 return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
4835 case LEU:
4836 return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;
4838 case ORDERED:
4839 return const_true_rtx;
4840 case UNORDERED:
4841 return const0_rtx;
4842 default:
4843 gcc_unreachable ();
4847 /* Check if the given comparison (done in the given MODE) is actually
4848 a tautology or a contradiction. If the mode is VOIDmode, the
4849 comparison is done in "infinite precision". If no simplification
4850 is possible, this function returns zero. Otherwise, it returns
4851 either const_true_rtx or const0_rtx. */
rtx
4854 simplify_const_relational_operation (enum rtx_code code,
4855 machine_mode mode,
4856 rtx op0, rtx op1)
4858 rtx tem;
4859 rtx trueop0;
4860 rtx trueop1;
4862 gcc_assert (mode != VOIDmode
4863 || (GET_MODE (op0) == VOIDmode
4864 && GET_MODE (op1) == VOIDmode));
4866 /* If op0 is a compare, extract the comparison arguments from it. */
4867 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4869 op1 = XEXP (op0, 1);
4870 op0 = XEXP (op0, 0);
4872 if (GET_MODE (op0) != VOIDmode)
4873 mode = GET_MODE (op0);
4874 else if (GET_MODE (op1) != VOIDmode)
4875 mode = GET_MODE (op1);
4876 else
4877 return 0;
4880 /* We can't simplify MODE_CC values since we don't know what the
4881 actual comparison is. */
4882 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
4883 return 0;
4885 /* Make sure the constant is second. */
4886 if (swap_commutative_operands_p (op0, op1))
4888 std::swap (op0, op1);
4889 code = swap_condition (code);
4892 trueop0 = avoid_constant_pool_reference (op0);
4893 trueop1 = avoid_constant_pool_reference (op1);
4895 /* For integer comparisons of A and B maybe we can simplify A - B and can
4896 then simplify a comparison of that with zero. If A and B are both either
4897 a register or a CONST_INT, this can't help; testing for these cases will
4898 prevent infinite recursion here and speed things up.
4900 We can only do this for EQ and NE comparisons as otherwise we may
4901 lose or introduce overflow which we cannot disregard as undefined,
4902 since we do not know the signedness of the operation on either the
4903 left or the right hand side of the comparison. */
4905 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
4906 && (code == EQ || code == NE)
4907 && ! ((REG_P (op0) || CONST_INT_P (trueop0))
4908 && (REG_P (op1) || CONST_INT_P (trueop1)))
4909 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
4910 /* We cannot do this if tem is a nonzero address. */
4911 && ! nonzero_address_p (tem))
4912 return simplify_const_relational_operation (signed_condition (code),
4913 mode, tem, const0_rtx);
4915 if (! HONOR_NANS (mode) && code == ORDERED)
4916 return const_true_rtx;
4918 if (! HONOR_NANS (mode) && code == UNORDERED)
4919 return const0_rtx;
4921 /* For modes without NaNs, if the two operands are equal, we know the
4922 result except if they have side-effects. Even with NaNs we know
4923 the result of unordered comparisons and, if signaling NaNs are
4924 irrelevant, also the result of LT/GT/LTGT. */
4925 if ((! HONOR_NANS (trueop0)
4926 || code == UNEQ || code == UNLE || code == UNGE
4927 || ((code == LT || code == GT || code == LTGT)
4928 && ! HONOR_SNANS (trueop0)))
4929 && rtx_equal_p (trueop0, trueop1)
4930 && ! side_effects_p (trueop0))
4931 return comparison_result (code, CMP_EQ);
4933 /* If the operands are floating-point constants, see if we can fold
4934 the result. */
4935 if (CONST_DOUBLE_AS_FLOAT_P (trueop0)
4936 && CONST_DOUBLE_AS_FLOAT_P (trueop1)
4937 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
4939 const REAL_VALUE_TYPE *d0 = CONST_DOUBLE_REAL_VALUE (trueop0);
4940 const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);
4942 /* Comparisons are unordered iff at least one of the values is NaN. */
4943 if (REAL_VALUE_ISNAN (*d0) || REAL_VALUE_ISNAN (*d1))
4944 switch (code)
4946 case UNEQ:
4947 case UNLT:
4948 case UNGT:
4949 case UNLE:
4950 case UNGE:
4951 case NE:
4952 case UNORDERED:
4953 return const_true_rtx;
4954 case EQ:
4955 case LT:
4956 case GT:
4957 case LE:
4958 case GE:
4959 case LTGT:
4960 case ORDERED:
4961 return const0_rtx;
4962 default:
4963 return 0;
4966 return comparison_result (code,
4967 (real_equal (d0, d1) ? CMP_EQ :
4968 real_less (d0, d1) ? CMP_LT : CMP_GT));
4971 /* Otherwise, see if the operands are both integers. */
4972 if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
4973 && CONST_SCALAR_INT_P (trueop0) && CONST_SCALAR_INT_P (trueop1))
4975 /* It would be nice if we really had a mode here. However, the
4976 largest int representable on the target is as good as
4977 infinite. */
4978 machine_mode cmode = (mode == VOIDmode) ? MAX_MODE_INT : mode;
4979 rtx_mode_t ptrueop0 = std::make_pair (trueop0, cmode);
4980 rtx_mode_t ptrueop1 = std::make_pair (trueop1, cmode);
4982 if (wi::eq_p (ptrueop0, ptrueop1))
4983 return comparison_result (code, CMP_EQ);
4984 else
4986 int cr = wi::lts_p (ptrueop0, ptrueop1) ? CMP_LT : CMP_GT;
4987 cr |= wi::ltu_p (ptrueop0, ptrueop1) ? CMP_LTU : CMP_GTU;
4988 return comparison_result (code, cr);
4992 /* Optimize comparisons with upper and lower bounds. */
4993 if (HWI_COMPUTABLE_MODE_P (mode)
4994 && CONST_INT_P (trueop1)
4995 && !side_effects_p (trueop0))
4997 int sign;
4998 unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, mode);
4999 HOST_WIDE_INT val = INTVAL (trueop1);
5000 HOST_WIDE_INT mmin, mmax;
5002 if (code == GEU
5003 || code == LEU
5004 || code == GTU
5005 || code == LTU)
5006 sign = 0;
5007 else
5008 sign = 1;
5010 /* Get a reduced range if the sign bit is zero. */
5011 if (nonzero <= (GET_MODE_MASK (mode) >> 1))
5013 mmin = 0;
5014 mmax = nonzero;
5016 else
5018 rtx mmin_rtx, mmax_rtx;
5019 get_mode_bounds (mode, sign, mode, &mmin_rtx, &mmax_rtx);
5021 mmin = INTVAL (mmin_rtx);
5022 mmax = INTVAL (mmax_rtx);
5023 if (sign)
5025 unsigned int sign_copies = num_sign_bit_copies (trueop0, mode);
5027 mmin >>= (sign_copies - 1);
5028 mmax >>= (sign_copies - 1);
5032 switch (code)
5034 /* x >= y is always true for y <= mmin, always false for y > mmax. */
5035 case GEU:
5036 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
5037 return const_true_rtx;
5038 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
5039 return const0_rtx;
5040 break;
5041 case GE:
5042 if (val <= mmin)
5043 return const_true_rtx;
5044 if (val > mmax)
5045 return const0_rtx;
5046 break;
5048 /* x <= y is always true for y >= mmax, always false for y < mmin. */
5049 case LEU:
5050 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
5051 return const_true_rtx;
5052 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
5053 return const0_rtx;
5054 break;
5055 case LE:
5056 if (val >= mmax)
5057 return const_true_rtx;
5058 if (val < mmin)
5059 return const0_rtx;
5060 break;
5062 case EQ:
5063 /* x == y is always false for y out of range. */
5064 if (val < mmin || val > mmax)
5065 return const0_rtx;
5066 break;
5068 /* x > y is always false for y >= mmax, always true for y < mmin. */
5069 case GTU:
5070 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
5071 return const0_rtx;
5072 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
5073 return const_true_rtx;
5074 break;
5075 case GT:
5076 if (val >= mmax)
5077 return const0_rtx;
5078 if (val < mmin)
5079 return const_true_rtx;
5080 break;
5082 /* x < y is always false for y <= mmin, always true for y > mmax. */
5083 case LTU:
5084 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
5085 return const0_rtx;
5086 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
5087 return const_true_rtx;
5088 break;
5089 case LT:
5090 if (val <= mmin)
5091 return const0_rtx;
5092 if (val > mmax)
5093 return const_true_rtx;
5094 break;
5096 case NE:
5097 /* x != y is always true for y out of range. */
5098 if (val < mmin || val > mmax)
5099 return const_true_rtx;
5100 break;
5102 default:
5103 break;
5107 /* Optimize integer comparisons with zero. */
5108 if (trueop1 == const0_rtx && !side_effects_p (trueop0))
5110 /* Some addresses are known to be nonzero. We don't know
5111 their sign, but equality comparisons are known. */
5112 if (nonzero_address_p (trueop0))
5114 if (code == EQ || code == LEU)
5115 return const0_rtx;
5116 if (code == NE || code == GTU)
5117 return const_true_rtx;
5120 /* See if the first operand is an IOR with a constant. If so, we
5121 may be able to determine the result of this comparison. */
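/* For instance, (ior x 4) always has bit 2 set, so (eq (ior x 4) 0) is
   always false and (ne (ior x 4) 0) always true, whatever X is. */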
5122 if (GET_CODE (op0) == IOR)
5124 rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
5125 if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
5127 int sign_bitnum = GET_MODE_PRECISION (mode) - 1;
5128 int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
5129 && (UINTVAL (inner_const)
5130 & ((unsigned HOST_WIDE_INT) 1
5131 << sign_bitnum)));
5133 switch (code)
5135 case EQ:
5136 case LEU:
5137 return const0_rtx;
5138 case NE:
5139 case GTU:
5140 return const_true_rtx;
5141 case LT:
5142 case LE:
5143 if (has_sign)
5144 return const_true_rtx;
5145 break;
5146 case GT:
5147 case GE:
5148 if (has_sign)
5149 return const0_rtx;
5150 break;
5151 default:
5152 break;
5158 /* Optimize comparison of ABS with zero. */
5159 if (trueop1 == CONST0_RTX (mode) && !side_effects_p (trueop0)
5160 && (GET_CODE (trueop0) == ABS
5161 || (GET_CODE (trueop0) == FLOAT_EXTEND
5162 && GET_CODE (XEXP (trueop0, 0)) == ABS)))
5164 switch (code)
5166 case LT:
5167 /* Optimize abs(x) < 0.0. */
5168 if (!HONOR_SNANS (mode)
5169 && (!INTEGRAL_MODE_P (mode)
5170 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
5172 if (INTEGRAL_MODE_P (mode)
5173 && (issue_strict_overflow_warning
5174 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
5175 warning (OPT_Wstrict_overflow,
5176 ("assuming signed overflow does not occur when "
5177 "assuming abs (x) < 0 is false"));
5178 return const0_rtx;
5180 break;
5182 case GE:
5183 /* Optimize abs(x) >= 0.0. */
5184 if (!HONOR_NANS (mode)
5185 && (!INTEGRAL_MODE_P (mode)
5186 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
5188 if (INTEGRAL_MODE_P (mode)
5189 && (issue_strict_overflow_warning
5190 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
5191 warning (OPT_Wstrict_overflow,
5192 ("assuming signed overflow does not occur when "
5193 "assuming abs (x) >= 0 is true"));
5194 return const_true_rtx;
5196 break;
5198 case UNGE:
5199 /* Optimize ! (abs(x) < 0.0). */
5200 return const_true_rtx;
5202 default:
5203 break;
5207 return 0;
5210 /* Simplify CODE, an operation with result mode MODE and three operands,
5211 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
5212 a constant. Return 0 if no simplification is possible. */
rtx
5215 simplify_ternary_operation (enum rtx_code code, machine_mode mode,
5216 machine_mode op0_mode, rtx op0, rtx op1,
5217 rtx op2)
5219 unsigned int width = GET_MODE_PRECISION (mode);
5220 bool any_change = false;
5221 rtx tem, trueop2;
5223 /* VOIDmode means "infinite" precision. */
5224 if (width == 0)
5225 width = HOST_BITS_PER_WIDE_INT;
5227 switch (code)
5229 case FMA:
5230 /* Simplify negations around the multiplication. */
5231 /* -a * -b + c => a * b + c. */
5232 if (GET_CODE (op0) == NEG)
5234 tem = simplify_unary_operation (NEG, mode, op1, mode);
5235 if (tem)
5236 op1 = tem, op0 = XEXP (op0, 0), any_change = true;
5238 else if (GET_CODE (op1) == NEG)
5240 tem = simplify_unary_operation (NEG, mode, op0, mode);
5241 if (tem)
5242 op0 = tem, op1 = XEXP (op1, 0), any_change = true;
5245 /* Canonicalize the two multiplication operands. */
5246 /* a * -b + c => -b * a + c. */
5247 if (swap_commutative_operands_p (op0, op1))
5248 std::swap (op0, op1), any_change = true;
5250 if (any_change)
5251 return gen_rtx_FMA (mode, op0, op1, op2);
5252 return NULL_RTX;
5254 case SIGN_EXTRACT:
5255 case ZERO_EXTRACT:
5256 if (CONST_INT_P (op0)
5257 && CONST_INT_P (op1)
5258 && CONST_INT_P (op2)
5259 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
5260 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
5262 /* Extracting a bit-field from a constant */
5263 unsigned HOST_WIDE_INT val = UINTVAL (op0);
5264 HOST_WIDE_INT op1val = INTVAL (op1);
5265 HOST_WIDE_INT op2val = INTVAL (op2);
5266 if (BITS_BIG_ENDIAN)
5267 val >>= GET_MODE_PRECISION (op0_mode) - op2val - op1val;
5268 else
5269 val >>= op2val;
5271 if (HOST_BITS_PER_WIDE_INT != op1val)
5273 /* First zero-extend. */
5274 val &= ((unsigned HOST_WIDE_INT) 1 << op1val) - 1;
5275 /* If desired, propagate sign bit. */
5276 if (code == SIGN_EXTRACT
5277 && (val & ((unsigned HOST_WIDE_INT) 1 << (op1val - 1)))
5278 != 0)
5279 val |= ~ (((unsigned HOST_WIDE_INT) 1 << op1val) - 1);
5282 return gen_int_mode (val, mode);
5284 break;
5286 case IF_THEN_ELSE:
5287 if (CONST_INT_P (op0))
5288 return op0 != const0_rtx ? op1 : op2;
5290 /* Convert c ? a : a into "a". */
5291 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
5292 return op1;
5294 /* Convert a != b ? a : b into "a". */
5295 if (GET_CODE (op0) == NE
5296 && ! side_effects_p (op0)
5297 && ! HONOR_NANS (mode)
5298 && ! HONOR_SIGNED_ZEROS (mode)
5299 && ((rtx_equal_p (XEXP (op0, 0), op1)
5300 && rtx_equal_p (XEXP (op0, 1), op2))
5301 || (rtx_equal_p (XEXP (op0, 0), op2)
5302 && rtx_equal_p (XEXP (op0, 1), op1))))
5303 return op1;
5305 /* Convert a == b ? a : b into "b". */
5306 if (GET_CODE (op0) == EQ
5307 && ! side_effects_p (op0)
5308 && ! HONOR_NANS (mode)
5309 && ! HONOR_SIGNED_ZEROS (mode)
5310 && ((rtx_equal_p (XEXP (op0, 0), op1)
5311 && rtx_equal_p (XEXP (op0, 1), op2))
5312 || (rtx_equal_p (XEXP (op0, 0), op2)
5313 && rtx_equal_p (XEXP (op0, 1), op1))))
5314 return op2;
5316 /* Convert (!c) != {0,...,0} ? a : b into
5317 c != {0,...,0} ? b : a for vector modes. */
5318 if (VECTOR_MODE_P (GET_MODE (op1))
5319 && GET_CODE (op0) == NE
5320 && GET_CODE (XEXP (op0, 0)) == NOT
5321 && GET_CODE (XEXP (op0, 1)) == CONST_VECTOR)
5323 rtx cv = XEXP (op0, 1);
5324 int nunits = CONST_VECTOR_NUNITS (cv);
5325 bool ok = true;
5326 for (int i = 0; i < nunits; ++i)
5327 if (CONST_VECTOR_ELT (cv, i) != const0_rtx)
5329 ok = false;
5330 break;
5332 if (ok)
5334 rtx new_op0 = gen_rtx_NE (GET_MODE (op0),
5335 XEXP (XEXP (op0, 0), 0),
5336 XEXP (op0, 1));
5337 rtx retval = gen_rtx_IF_THEN_ELSE (mode, new_op0, op2, op1);
5338 return retval;
5342 if (COMPARISON_P (op0) && ! side_effects_p (op0))
5344 machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
5345 ? GET_MODE (XEXP (op0, 1))
5346 : GET_MODE (XEXP (op0, 0)));
5347 rtx temp;
5349 /* Look for happy constants in op1 and op2. */
5350 if (CONST_INT_P (op1) && CONST_INT_P (op2))
5352 HOST_WIDE_INT t = INTVAL (op1);
5353 HOST_WIDE_INT f = INTVAL (op2);
5355 if (t == STORE_FLAG_VALUE && f == 0)
5356 code = GET_CODE (op0);
5357 else if (t == 0 && f == STORE_FLAG_VALUE)
5359 enum rtx_code tmp;
5360 tmp = reversed_comparison_code (op0, NULL_RTX);
5361 if (tmp == UNKNOWN)
5362 break;
5363 code = tmp;
5365 else
5366 break;
5368 return simplify_gen_relational (code, mode, cmp_mode,
5369 XEXP (op0, 0), XEXP (op0, 1));
5372 if (cmp_mode == VOIDmode)
5373 cmp_mode = op0_mode;
5374 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
5375 cmp_mode, XEXP (op0, 0),
5376 XEXP (op0, 1));
5378 /* See if any simplifications were possible. */
5379 if (temp)
5381 if (CONST_INT_P (temp))
5382 return temp == const0_rtx ? op2 : op1;
5383 else if (temp)
5384 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
5387 break;
5389 case VEC_MERGE:
5390 gcc_assert (GET_MODE (op0) == mode);
5391 gcc_assert (GET_MODE (op1) == mode);
5392 gcc_assert (VECTOR_MODE_P (mode));
5393 trueop2 = avoid_constant_pool_reference (op2);
5394 if (CONST_INT_P (trueop2))
5396 int elt_size = GET_MODE_UNIT_SIZE (mode);
5397 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
5398 unsigned HOST_WIDE_INT sel = UINTVAL (trueop2);
5399 unsigned HOST_WIDE_INT mask;
5400 if (n_elts == HOST_BITS_PER_WIDE_INT)
5401 mask = -1;
5402 else
5403 mask = ((unsigned HOST_WIDE_INT) 1 << n_elts) - 1;
5405 if (!(sel & mask) && !side_effects_p (op0))
5406 return op1;
5407 if ((sel & mask) == mask && !side_effects_p (op1))
5408 return op0;
5410 rtx trueop0 = avoid_constant_pool_reference (op0);
5411 rtx trueop1 = avoid_constant_pool_reference (op1);
5412 if (GET_CODE (trueop0) == CONST_VECTOR
5413 && GET_CODE (trueop1) == CONST_VECTOR)
5415 rtvec v = rtvec_alloc (n_elts);
5416 unsigned int i;
5418 for (i = 0; i < n_elts; i++)
5419 RTVEC_ELT (v, i) = ((sel & ((unsigned HOST_WIDE_INT) 1 << i))
5420 ? CONST_VECTOR_ELT (trueop0, i)
5421 : CONST_VECTOR_ELT (trueop1, i));
5422 return gen_rtx_CONST_VECTOR (mode, v);
5425 /* Replace (vec_merge (vec_merge a b m) c n) with (vec_merge b c n)
5426 if no element from a appears in the result. */
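/* For instance, with four elements, M == 0b0011 and N == 0b1100 never let
   an element of A through (N & M == 0), so the result is (vec_merge b c n). */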
5427 if (GET_CODE (op0) == VEC_MERGE)
5429 tem = avoid_constant_pool_reference (XEXP (op0, 2));
5430 if (CONST_INT_P (tem))
5432 unsigned HOST_WIDE_INT sel0 = UINTVAL (tem);
5433 if (!(sel & sel0 & mask) && !side_effects_p (XEXP (op0, 0)))
5434 return simplify_gen_ternary (code, mode, mode,
5435 XEXP (op0, 1), op1, op2);
5436 if (!(sel & ~sel0 & mask) && !side_effects_p (XEXP (op0, 1)))
5437 return simplify_gen_ternary (code, mode, mode,
5438 XEXP (op0, 0), op1, op2);
5441 if (GET_CODE (op1) == VEC_MERGE)
5443 tem = avoid_constant_pool_reference (XEXP (op1, 2));
5444 if (CONST_INT_P (tem))
5446 unsigned HOST_WIDE_INT sel1 = UINTVAL (tem);
5447 if (!(~sel & sel1 & mask) && !side_effects_p (XEXP (op1, 0)))
5448 return simplify_gen_ternary (code, mode, mode,
5449 op0, XEXP (op1, 1), op2);
5450 if (!(~sel & ~sel1 & mask) && !side_effects_p (XEXP (op1, 1)))
5451 return simplify_gen_ternary (code, mode, mode,
5452 op0, XEXP (op1, 0), op2);
5456 /* Replace (vec_merge (vec_duplicate (vec_select a parallel (i))) a 1 << i)
5457 with a. */
5458 if (GET_CODE (op0) == VEC_DUPLICATE
5459 && GET_CODE (XEXP (op0, 0)) == VEC_SELECT
5460 && GET_CODE (XEXP (XEXP (op0, 0), 1)) == PARALLEL
5461 && mode_nunits[GET_MODE (XEXP (op0, 0))] == 1)
5463 tem = XVECEXP ((XEXP (XEXP (op0, 0), 1)), 0, 0);
5464 if (CONST_INT_P (tem) && CONST_INT_P (op2))
5466 if (XEXP (XEXP (op0, 0), 0) == op1
5467 && UINTVAL (op2) == HOST_WIDE_INT_1U << UINTVAL (tem))
5468 return op1;
5473 if (rtx_equal_p (op0, op1)
5474 && !side_effects_p (op2) && !side_effects_p (op1))
5475 return op0;
5477 break;
5479 default:
5480 gcc_unreachable ();
5483 return 0;
5486 /* Evaluate a SUBREG of a CONST_INT or CONST_WIDE_INT or CONST_DOUBLE
5487 or CONST_FIXED or CONST_VECTOR, returning another CONST_INT or
5488 CONST_WIDE_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.
5490 Works by unpacking OP into a collection of 8-bit values
5491 represented as a little-endian array of 'unsigned char', selecting by BYTE,
5492 and then repacking them again for OUTERMODE. */
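/* For instance, a QImode subreg at byte 0 of the HImode constant 0x1234
   repacks to (const_int 0x34) on a little-endian target and to
   (const_int 0x12) on a big-endian one. */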
5494 static rtx
5495 simplify_immed_subreg (machine_mode outermode, rtx op,
5496 machine_mode innermode, unsigned int byte)
5498 enum {
5499 value_bit = 8,
5500 value_mask = (1 << value_bit) - 1
5502 unsigned char value[MAX_BITSIZE_MODE_ANY_MODE / value_bit];
5503 int value_start;
5504 int i;
5505 int elem;
5507 int num_elem;
5508 rtx * elems;
5509 int elem_bitsize;
5510 rtx result_s;
5511 rtvec result_v = NULL;
5512 enum mode_class outer_class;
5513 machine_mode outer_submode;
5514 int max_bitsize;
5516 /* Some ports misuse CCmode. */
5517 if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
5518 return op;
5520 /* We have no way to represent a complex constant at the rtl level. */
5521 if (COMPLEX_MODE_P (outermode))
5522 return NULL_RTX;
5524 /* We support any size mode. */
5525 max_bitsize = MAX (GET_MODE_BITSIZE (outermode),
5526 GET_MODE_BITSIZE (innermode));
5528 /* Unpack the value. */
5530 if (GET_CODE (op) == CONST_VECTOR)
5532 num_elem = CONST_VECTOR_NUNITS (op);
5533 elems = &CONST_VECTOR_ELT (op, 0);
5534 elem_bitsize = GET_MODE_UNIT_BITSIZE (innermode);
5536 else
5538 num_elem = 1;
5539 elems = &op;
5540 elem_bitsize = max_bitsize;
5542 /* If this asserts, it is too complicated; reducing value_bit may help. */
5543 gcc_assert (BITS_PER_UNIT % value_bit == 0);
5544 /* I don't know how to handle endianness of sub-units. */
5545 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
5547 for (elem = 0; elem < num_elem; elem++)
5549 unsigned char * vp;
5550 rtx el = elems[elem];
5552 /* Vectors are kept in target memory order. (This is probably
5553 a mistake.) */
5555 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5556 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5557 / BITS_PER_UNIT);
5558 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5559 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5560 unsigned bytele = (subword_byte % UNITS_PER_WORD
5561 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5562 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
5565 switch (GET_CODE (el))
5567 case CONST_INT:
5568 for (i = 0;
5569 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5570 i += value_bit)
5571 *vp++ = INTVAL (el) >> i;
5572 /* CONST_INTs are always logically sign-extended. */
5573 for (; i < elem_bitsize; i += value_bit)
5574 *vp++ = INTVAL (el) < 0 ? -1 : 0;
5575 break;
5577 case CONST_WIDE_INT:
5579 rtx_mode_t val = std::make_pair (el, innermode);
5580 unsigned char extend = wi::sign_mask (val);
5582 for (i = 0; i < elem_bitsize; i += value_bit)
5583 *vp++ = wi::extract_uhwi (val, i, value_bit);
5584 for (; i < elem_bitsize; i += value_bit)
5585 *vp++ = extend;
5587 break;
5589 case CONST_DOUBLE:
5590 if (TARGET_SUPPORTS_WIDE_INT == 0 && GET_MODE (el) == VOIDmode)
5592 unsigned char extend = 0;
5593 /* If this triggers, someone should have generated a
5594 CONST_INT instead. */
5595 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
5597 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5598 *vp++ = CONST_DOUBLE_LOW (el) >> i;
5599 while (i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize)
5601 *vp++
5602 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
5603 i += value_bit;
5606 if (CONST_DOUBLE_HIGH (el) >> (HOST_BITS_PER_WIDE_INT - 1))
5607 extend = -1;
5608 for (; i < elem_bitsize; i += value_bit)
5609 *vp++ = extend;
5611 else
5613 /* This is big enough for anything on the platform. */
5614 long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32];
5615 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
5617 gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
5618 gcc_assert (bitsize <= elem_bitsize);
5619 gcc_assert (bitsize % value_bit == 0);
5621 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
5622 GET_MODE (el));
5624 /* real_to_target produces its result in words affected by
5625 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5626 and use WORDS_BIG_ENDIAN instead; see the documentation
5627 of SUBREG in rtl.texi. */
5628 for (i = 0; i < bitsize; i += value_bit)
5630 int ibase;
5631 if (WORDS_BIG_ENDIAN)
5632 ibase = bitsize - 1 - i;
5633 else
5634 ibase = i;
5635 *vp++ = tmp[ibase / 32] >> i % 32;
5638 /* It shouldn't matter what's done here, so fill it with
5639 zero. */
5640 for (; i < elem_bitsize; i += value_bit)
5641 *vp++ = 0;
5643 break;
5645 case CONST_FIXED:
5646 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
5648 for (i = 0; i < elem_bitsize; i += value_bit)
5649 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5651 else
5653 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5654 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5655 for (; i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize;
5656 i += value_bit)
5657 *vp++ = CONST_FIXED_VALUE_HIGH (el)
5658 >> (i - HOST_BITS_PER_WIDE_INT);
5659 for (; i < elem_bitsize; i += value_bit)
5660 *vp++ = 0;
5662 break;
5664 default:
5665 gcc_unreachable ();
5669 /* Now, pick the right byte to start with. */
5670 /* Renumber BYTE so that the least-significant byte is byte 0. A special
5671 case is paradoxical SUBREGs, which shouldn't be adjusted since they
5672 will already have offset 0. */
5673 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
5675 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
5676 - byte);
5677 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5678 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5679 byte = (subword_byte % UNITS_PER_WORD
5680 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5683 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
5684 so if it's become negative it will instead be very large.) */
5685 gcc_assert (byte < GET_MODE_SIZE (innermode));
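  /* Worked example (illustrative only): extracting the SImode piece at
     BYTE == 0 from a DImode value on a fully big-endian target with
     4-byte words gives IBYTE == 8 - 4 - 0 == 4, and BYTE is renumbered
     to 4 % 4 + (4 / 4) * 4 == 4: memory offset 0 of a big-endian DImode
     value is its most significant half, i.e. little-endian bytes 4-7.  */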
5687 /* Convert from bytes to chunks of size value_bit. */
5688 value_start = byte * (BITS_PER_UNIT / value_bit);
5690 /* Re-pack the value. */
5691 num_elem = GET_MODE_NUNITS (outermode);
5693 if (VECTOR_MODE_P (outermode))
5695 result_v = rtvec_alloc (num_elem);
5696 elems = &RTVEC_ELT (result_v, 0);
5698 else
5699 elems = &result_s;
5701 outer_submode = GET_MODE_INNER (outermode);
5702 outer_class = GET_MODE_CLASS (outer_submode);
5703 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
5705 gcc_assert (elem_bitsize % value_bit == 0);
5706 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
5708 for (elem = 0; elem < num_elem; elem++)
5710 unsigned char *vp;
5712 /* Vectors are stored in target memory order. (This is probably
5713 a mistake.) */
5715 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5716 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5717 / BITS_PER_UNIT);
5718 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5719 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5720 unsigned bytele = (subword_byte % UNITS_PER_WORD
5721 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5722 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
5725 switch (outer_class)
5727 case MODE_INT:
5728 case MODE_PARTIAL_INT:
5730 int u;
5731 int base = 0;
5732 int units
5733 = (GET_MODE_BITSIZE (outer_submode) + HOST_BITS_PER_WIDE_INT - 1)
5734 / HOST_BITS_PER_WIDE_INT;
5735 HOST_WIDE_INT tmp[MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_WIDE_INT];
5736 wide_int r;
5738 if (GET_MODE_PRECISION (outer_submode) > MAX_BITSIZE_MODE_ANY_INT)
5739 return NULL_RTX;
5740 for (u = 0; u < units; u++)
5742 unsigned HOST_WIDE_INT buf = 0;
5743 for (i = 0;
5744 i < HOST_BITS_PER_WIDE_INT && base + i < elem_bitsize;
5745 i += value_bit)
5746 buf |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5748 tmp[u] = buf;
5749 base += HOST_BITS_PER_WIDE_INT;
5751 r = wide_int::from_array (tmp, units,
5752 GET_MODE_PRECISION (outer_submode));
5753 #if TARGET_SUPPORTS_WIDE_INT == 0
5754 /* Make sure r will fit into CONST_INT or CONST_DOUBLE. */
5755 if (wi::min_precision (r, SIGNED) > HOST_BITS_PER_DOUBLE_INT)
5756 return NULL_RTX;
5757 #endif
5758 elems[elem] = immed_wide_int_const (r, outer_submode);
5760 break;
5762 case MODE_FLOAT:
5763 case MODE_DECIMAL_FLOAT:
5765 REAL_VALUE_TYPE r;
5766 long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32];
5768 /* real_from_target wants its input in words affected by
5769 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5770 and use WORDS_BIG_ENDIAN instead; see the documentation
5771 of SUBREG in rtl.texi. */
5772 for (i = 0; i < max_bitsize / 32; i++)
5773 tmp[i] = 0;
5774 for (i = 0; i < elem_bitsize; i += value_bit)
5776 int ibase;
5777 if (WORDS_BIG_ENDIAN)
5778 ibase = elem_bitsize - 1 - i;
5779 else
5780 ibase = i;
5781 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
5784 real_from_target (&r, tmp, outer_submode);
5785 elems[elem] = const_double_from_real_value (r, outer_submode);
5787 break;
5789 case MODE_FRACT:
5790 case MODE_UFRACT:
5791 case MODE_ACCUM:
5792 case MODE_UACCUM:
5794 FIXED_VALUE_TYPE f;
5795 f.data.low = 0;
5796 f.data.high = 0;
5797 f.mode = outer_submode;
5799 for (i = 0;
5800 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5801 i += value_bit)
5802 f.data.low |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5803 for (; i < elem_bitsize; i += value_bit)
5804 f.data.high |= ((unsigned HOST_WIDE_INT)(*vp++ & value_mask)
5805 << (i - HOST_BITS_PER_WIDE_INT));
5807 elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
5809 break;
5811 default:
5812 gcc_unreachable ();
5815 if (VECTOR_MODE_P (outermode))
5816 return gen_rtx_CONST_VECTOR (outermode, result_v);
5817 else
5818 return result_s;
5821 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
5822 Return 0 if no simplifications are possible. */
5823 rtx
5824 simplify_subreg (machine_mode outermode, rtx op,
5825 machine_mode innermode, unsigned int byte)
5827 /* Little bit of sanity checking. */
5828 gcc_assert (innermode != VOIDmode);
5829 gcc_assert (outermode != VOIDmode);
5830 gcc_assert (innermode != BLKmode);
5831 gcc_assert (outermode != BLKmode);
5833 gcc_assert (GET_MODE (op) == innermode
5834 || GET_MODE (op) == VOIDmode);
5836 if ((byte % GET_MODE_SIZE (outermode)) != 0)
5837 return NULL_RTX;
5839 if (byte >= GET_MODE_SIZE (innermode))
5840 return NULL_RTX;
5842 if (outermode == innermode && !byte)
5843 return op;
5845 if (CONST_SCALAR_INT_P (op)
5846 || CONST_DOUBLE_AS_FLOAT_P (op)
5847 || GET_CODE (op) == CONST_FIXED
5848 || GET_CODE (op) == CONST_VECTOR)
5849 return simplify_immed_subreg (outermode, op, innermode, byte);
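  /* For instance (purely illustrative), simplify_subreg (QImode, x,
     HImode, 0) with X == (const_int 0x1234) goes through
     simplify_immed_subreg and yields (const_int 0x34) on a
     little-endian target but (const_int 0x12) on a big-endian one,
     because BYTE counts from the start of the value in memory order.  */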
5851 /* Changing mode twice with SUBREG => just change it once,
5852 or not at all if changing back to the starting mode. */
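  /* Illustrative examples: on a little-endian target,
     (subreg:QI (subreg:HI (reg:SI x) 0) 0) is rewritten below as the
     single (subreg:QI (reg:SI x) 0), while
     (subreg:SI (subreg:HI (reg:SI x) 0) 0) collapses back to (reg:SI x).  */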
5853 if (GET_CODE (op) == SUBREG)
5855 machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
5856 int final_offset = byte + SUBREG_BYTE (op);
5857 rtx newx;
5859 if (outermode == innermostmode
5860 && byte == 0 && SUBREG_BYTE (op) == 0)
5861 return SUBREG_REG (op);
5863 /* The SUBREG_BYTE represents the offset, as if the value were stored
5864 in memory. An irritating exception is a paradoxical subreg, where
5865 we define SUBREG_BYTE to be 0; on big-endian machines, this
5866 value should really be negative. For a moment, undo this exception. */
5867 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5869 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
5870 if (WORDS_BIG_ENDIAN)
5871 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5872 if (BYTES_BIG_ENDIAN)
5873 final_offset += difference % UNITS_PER_WORD;
5875 if (SUBREG_BYTE (op) == 0
5876 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
5878 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
5879 if (WORDS_BIG_ENDIAN)
5880 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5881 if (BYTES_BIG_ENDIAN)
5882 final_offset += difference % UNITS_PER_WORD;
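  /* Sketch of the effect (illustrative, assuming a fully big-endian
     target with 4-byte words): a paradoxical (subreg:DI ... 0) of an
     SImode inner value really starts one word before that value, so
     DIFFERENCE is -4 and FINAL_OFFSET is moved back by 4 bytes,
     restoring the negative offset that SUBREG_BYTE == 0 hides.  */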
5885 /* See whether resulting subreg will be paradoxical. */
5886 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
5888 /* In nonparadoxical subregs we can't handle negative offsets. */
5889 if (final_offset < 0)
5890 return NULL_RTX;
5891 /* Bail out in case resulting subreg would be incorrect. */
5892 if (final_offset % GET_MODE_SIZE (outermode)
5893 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
5894 return NULL_RTX;
5896 else
5898 int offset = 0;
5899 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
5901 /* In a paradoxical subreg, see if we are still looking at the lower
5902 part. If so, our SUBREG_BYTE will be 0. */
5903 if (WORDS_BIG_ENDIAN)
5904 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5905 if (BYTES_BIG_ENDIAN)
5906 offset += difference % UNITS_PER_WORD;
5907 if (offset == final_offset)
5908 final_offset = 0;
5909 else
5910 return NULL_RTX;
5913 /* Recurse for further possible simplifications. */
5914 newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
5915 final_offset);
5916 if (newx)
5917 return newx;
5918 if (validate_subreg (outermode, innermostmode,
5919 SUBREG_REG (op), final_offset))
5921 newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
5922 if (SUBREG_PROMOTED_VAR_P (op)
5923 && SUBREG_PROMOTED_SIGN (op) >= 0
5924 && GET_MODE_CLASS (outermode) == MODE_INT
5925 && IN_RANGE (GET_MODE_SIZE (outermode),
5926 GET_MODE_SIZE (innermode),
5927 GET_MODE_SIZE (innermostmode))
5928 && subreg_lowpart_p (newx))
5930 SUBREG_PROMOTED_VAR_P (newx) = 1;
5931 SUBREG_PROMOTED_SET (newx, SUBREG_PROMOTED_GET (op));
5933 return newx;
5935 return NULL_RTX;
5938 /* SUBREG of a hard register => just change the register number
5939 and/or mode. If the hard register is not valid in that mode,
5940 suppress this simplification. If the hard register is the stack,
5941 frame, or argument pointer, leave this as a SUBREG. */
5943 if (REG_P (op) && HARD_REGISTER_P (op))
5945 unsigned int regno, final_regno;
5947 regno = REGNO (op);
5948 final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
5949 if (HARD_REGISTER_NUM_P (final_regno))
5951 rtx x;
5952 int final_offset = byte;
5954 /* Adjust offset for paradoxical subregs. */
5955 if (byte == 0
5956 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5958 int difference = (GET_MODE_SIZE (innermode)
5959 - GET_MODE_SIZE (outermode));
5960 if (WORDS_BIG_ENDIAN)
5961 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5962 if (BYTES_BIG_ENDIAN)
5963 final_offset += difference % UNITS_PER_WORD;
5966 x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);
5968 /* Propagate the original regno. We don't have any way to specify
5969 an offset inside the original regno, so do this only for the lowpart.
5970 The information is used only by alias analysis, which cannot
5971 grok partial registers anyway. */
5973 if (subreg_lowpart_offset (outermode, innermode) == byte)
5974 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
5975 return x;
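  /* Illustrative sketch, assuming a hypothetical target with 4-byte
     hard registers, little-endian register ordering, and (reg:DI 10)
     living in registers 10 and 11: (subreg:SI (reg:DI 10) 4) maps via
     simplify_subreg_regno to register 11, so the result is an SImode
     REG for hard register 11 with the byte offset recorded by
     gen_rtx_REG_offset.  */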
5979 /* If we have a SUBREG of a register that we are replacing and we are
5980 replacing it with a MEM, make a new MEM and try replacing the
5981 SUBREG with it. Don't do this if the MEM has a mode-dependent address
5982 or if we would be widening it. */
5984 if (MEM_P (op)
5985 && ! mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op))
5986 /* Allow splitting of volatile memory references in case we don't
5987 have an instruction to move the whole thing. */
5988 && (! MEM_VOLATILE_P (op)
5989 || ! have_insn_for (SET, innermode))
5990 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
5991 return adjust_address_nv (op, outermode, byte);
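  /* A purely illustrative instance: (subreg:HI (mem:SI ADDR) 2) becomes
     a new HImode MEM at ADDR plus 2 bytes, since adjust_address_nv is
     handed the SUBREG_BYTE as the address offset.  */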
5993 /* Handle complex values represented as CONCAT
5994 of real and imaginary part. */
5995 if (GET_CODE (op) == CONCAT)
5997 unsigned int part_size, final_offset;
5998 rtx part, res;
6000 part_size = GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)));
6001 if (byte < part_size)
6003 part = XEXP (op, 0);
6004 final_offset = byte;
6006 else
6008 part = XEXP (op, 1);
6009 final_offset = byte - part_size;
6012 if (final_offset + GET_MODE_SIZE (outermode) > part_size)
6013 return NULL_RTX;
6015 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
6016 if (res)
6017 return res;
6018 if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
6019 return gen_rtx_SUBREG (outermode, part, final_offset);
6020 return NULL_RTX;
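  /* Worked example (illustrative): in (subreg:SF (concat:SC X Y) 4),
     PART_SIZE is 4, so BYTE selects the second operand, FINAL_OFFSET
     becomes 0, and the recursive call returns Y, the imaginary part of
     the complex value.  */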
6023 /* A SUBREG resulting from a zero extension may fold to zero if
6024 it extracts higher bits than the ZERO_EXTEND's source provides. */
6025 if (GET_CODE (op) == ZERO_EXTEND && SCALAR_INT_MODE_P (innermode))
6027 unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
6028 if (bitpos >= GET_MODE_PRECISION (GET_MODE (XEXP (op, 0))))
6029 return CONST0_RTX (outermode);
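  /* For instance (illustrative, little-endian): in
     (subreg:SI (zero_extend:DI (reg:SI x)) 4) the subreg reads only
     bits 32-63, which the zero_extend guarantees are zero, so the whole
     expression folds to (const_int 0).  */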
6032 if (SCALAR_INT_MODE_P (outermode)
6033 && SCALAR_INT_MODE_P (innermode)
6034 && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode)
6035 && byte == subreg_lowpart_offset (outermode, innermode))
6037 rtx tem = simplify_truncation (outermode, op, innermode);
6038 if (tem)
6039 return tem;
6042 return NULL_RTX;
6045 /* Make a SUBREG operation or equivalent if it folds. */
6047 rtx
6048 simplify_gen_subreg (machine_mode outermode, rtx op,
6049 machine_mode innermode, unsigned int byte)
6051 rtx newx;
6053 newx = simplify_subreg (outermode, op, innermode, byte);
6054 if (newx)
6055 return newx;
6057 if (GET_CODE (op) == SUBREG
6058 || GET_CODE (op) == CONCAT
6059 || GET_MODE (op) == VOIDmode)
6060 return NULL_RTX;
6062 if (validate_subreg (outermode, innermode, op, byte))
6063 return gen_rtx_SUBREG (outermode, op, byte);
6065 return NULL_RTX;
6068 /* Generates a subreg that extracts the least significant part of EXPR
6069 (in mode INNER_MODE) as OUTER_MODE. */
6071 rtx
6072 lowpart_subreg (machine_mode outer_mode, rtx expr,
6073 machine_mode inner_mode)
6075 return simplify_gen_subreg (outer_mode, expr, inner_mode,
6076 subreg_lowpart_offset (outer_mode, inner_mode));
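  /* Usage sketch (illustrative): lowpart_subreg (QImode, x, SImode) is
     simplify_gen_subreg (QImode, x, SImode, 0) on a little-endian
     target, while a big-endian target with 4-byte words would use byte
     offset 3, since subreg_lowpart_offset picks out the least
     significant part.  */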
6079 /* Simplify X, an rtx expression.
6081 Return the simplified expression or NULL if no simplifications
6082 were possible.
6084 This is the preferred entry point into the simplification routines;
6085 however, we still allow passes to call the more specific routines.
6087 Right now GCC has three (yes, three) major bodies of RTL simplification
6088 code that need to be unified.
6090 1. fold_rtx in cse.c. This code uses various CSE specific
6091 information to aid in RTL simplification.
6093 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
6094 it uses combine specific information to aid in RTL
6095 simplification.
6097 3. The routines in this file.
6100 Long term we want to only have one body of simplification code; to
6101 get to that state I recommend the following steps:
6103 1. Pore over fold_rtx & simplify_rtx and move any simplifications
6104 which do not depend on pass-specific state into these routines.
6106 2. As code is moved by #1, change fold_rtx & simplify_rtx to
6107 use this routine whenever possible.
6109 3. Allow for pass dependent state to be provided to these
6110 routines and add simplifications based on the pass dependent
6111 state. Remove code from cse.c & combine.c that becomes
6112 redundant/dead.
6114 It will take time, but ultimately the compiler will be easier to
6115 maintain and improve. It's totally silly that when we add a
6116 simplification it needs to be added to 4 places (3 for RTL
6117 simplification and 1 for tree simplification). */
6119 rtx
6120 simplify_rtx (const_rtx x)
6122 const enum rtx_code code = GET_CODE (x);
6123 const machine_mode mode = GET_MODE (x);
6125 switch (GET_RTX_CLASS (code))
6127 case RTX_UNARY:
6128 return simplify_unary_operation (code, mode,
6129 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
6130 case RTX_COMM_ARITH:
6131 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
6132 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
6134 /* Fall through.... */
6136 case RTX_BIN_ARITH:
6137 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
6139 case RTX_TERNARY:
6140 case RTX_BITFIELD_OPS:
6141 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
6142 XEXP (x, 0), XEXP (x, 1),
6143 XEXP (x, 2));
6145 case RTX_COMPARE:
6146 case RTX_COMM_COMPARE:
6147 return simplify_relational_operation (code, mode,
6148 ((GET_MODE (XEXP (x, 0))
6149 != VOIDmode)
6150 ? GET_MODE (XEXP (x, 0))
6151 : GET_MODE (XEXP (x, 1))),
6152 XEXP (x, 0),
6153 XEXP (x, 1));
6155 case RTX_EXTRA:
6156 if (code == SUBREG)
6157 return simplify_subreg (mode, SUBREG_REG (x),
6158 GET_MODE (SUBREG_REG (x)),
6159 SUBREG_BYTE (x));
6160 break;
6162 case RTX_OBJ:
6163 if (code == LO_SUM)
6165 /* Convert (lo_sum (high FOO) FOO) to FOO. */
6166 if (GET_CODE (XEXP (x, 0)) == HIGH
6167 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
6168 return XEXP (x, 1);
6170 break;
6172 default:
6173 break;
6175 return NULL;