/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987-2017 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "predict.h"
#include "memmodel.h"
#include "optabs.h"
#include "emit-rtl.h"
#include "recog.h"
#include "diagnostic-core.h"
#include "varasm.h"
#include "flags.h"
#include "selftest.h"
#include "selftest-rtl.h"

/* Simplification and canonicalization of RTL.  */

/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
#define HWI_SIGN_EXTEND(low) \
 ((((HOST_WIDE_INT) low) < 0) ? HOST_WIDE_INT_M1 : HOST_WIDE_INT_0)
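/* For example, a low half with the host sign bit set yields a high half
   of HOST_WIDE_INT_M1 (all ones), while any other low half yields
   HOST_WIDE_INT_0, exactly as if LOW were a signed wide int.  */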
static rtx neg_const_int (machine_mode, const_rtx);
static bool plus_minus_operand_p (const_rtx);
static rtx simplify_plus_minus (enum rtx_code, machine_mode, rtx, rtx);
static rtx simplify_associative_operation (enum rtx_code, machine_mode,
                                           rtx, rtx);
static rtx simplify_relational_operation_1 (enum rtx_code, machine_mode,
                                            machine_mode, rtx, rtx);
static rtx simplify_unary_operation_1 (enum rtx_code, machine_mode, rtx);
static rtx simplify_binary_operation_1 (enum rtx_code, machine_mode,
                                        rtx, rtx, rtx, rtx);

/* Negate a CONST_INT rtx.  */
static rtx
neg_const_int (machine_mode mode, const_rtx i)
{
  unsigned HOST_WIDE_INT val = -UINTVAL (i);

  if (!HWI_COMPUTABLE_MODE_P (mode)
      && val == UINTVAL (i))
    return simplify_const_unary_operation (NEG, mode, CONST_CAST_RTX (i),
                                           mode);
  return gen_int_mode (val, mode);
}
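/* A note on the guard above: when MODE is wider than a HOST_WIDE_INT,
   the only values for which -UINTVAL (i) == UINTVAL (i) are zero and the
   value with just the host sign bit set, and the latter cannot be negated
   correctly within a single HOST_WIDE_INT, so the constant-folding path
   is taken instead.  */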
/* Test whether expression X is an immediate constant that represents
   the most significant bit of machine mode MODE.  */

bool
mode_signbit_p (machine_mode mode, const_rtx x)
{
  unsigned HOST_WIDE_INT val;
  unsigned int width;
  scalar_int_mode int_mode;

  if (!is_int_mode (mode, &int_mode))
    return false;

  width = GET_MODE_PRECISION (int_mode);
  if (width == 0)
    return false;

  if (width <= HOST_BITS_PER_WIDE_INT
      && CONST_INT_P (x))
    val = INTVAL (x);
#if TARGET_SUPPORTS_WIDE_INT
  else if (CONST_WIDE_INT_P (x))
    {
      unsigned int i;
      unsigned int elts = CONST_WIDE_INT_NUNITS (x);
      if (elts != (width + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT)
        return false;
      for (i = 0; i < elts - 1; i++)
        if (CONST_WIDE_INT_ELT (x, i) != 0)
          return false;
      val = CONST_WIDE_INT_ELT (x, elts - 1);
      width %= HOST_BITS_PER_WIDE_INT;
      if (width == 0)
        width = HOST_BITS_PER_WIDE_INT;
    }
#else
  else if (width <= HOST_BITS_PER_DOUBLE_INT
           && CONST_DOUBLE_AS_INT_P (x)
           && CONST_DOUBLE_LOW (x) == 0)
    {
      val = CONST_DOUBLE_HIGH (x);
      width -= HOST_BITS_PER_WIDE_INT;
    }
#endif
  else
    /* X is not an integer constant.  */
    return false;

  if (width < HOST_BITS_PER_WIDE_INT)
    val &= (HOST_WIDE_INT_1U << width) - 1;
  return val == (HOST_WIDE_INT_1U << (width - 1));
}
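/* For example, in SImode this returns true exactly when the constant's
   value, masked to 32 bits, is 0x80000000 -- i.e. only the sign bit of
   the mode is set.  */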
/* Test whether VAL is equal to the most significant bit of mode MODE
   (after masking with the mode mask of MODE).  Returns false if the
   precision of MODE is too large to handle.  */

bool
val_signbit_p (machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;
  scalar_int_mode int_mode;

  if (!is_int_mode (mode, &int_mode))
    return false;

  width = GET_MODE_PRECISION (int_mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= GET_MODE_MASK (int_mode);
  return val == (HOST_WIDE_INT_1U << (width - 1));
}

/* Test whether the most significant bit of mode MODE is set in VAL.
   Returns false if the precision of MODE is too large to handle.  */
bool
val_signbit_known_set_p (machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;

  scalar_int_mode int_mode;
  if (!is_int_mode (mode, &int_mode))
    return false;

  width = GET_MODE_PRECISION (int_mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= HOST_WIDE_INT_1U << (width - 1);
  return val != 0;
}

/* Test whether the most significant bit of mode MODE is clear in VAL.
   Returns false if the precision of MODE is too large to handle.  */
bool
val_signbit_known_clear_p (machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;

  scalar_int_mode int_mode;
  if (!is_int_mode (mode, &int_mode))
    return false;

  width = GET_MODE_PRECISION (int_mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= HOST_WIDE_INT_1U << (width - 1);
  return val == 0;
}
/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */

rtx
simplify_gen_binary (enum rtx_code code, machine_mode mode, rtx op0,
                     rtx op1)
{
  rtx tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);
  if (tem)
    return tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    std::swap (op0, op1);

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
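/* For example, (plus (const_int 4) (reg)) comes out of here in the
   canonical order (plus (reg) (const_int 4)), and constant operand
   pairs are folded outright by simplify_binary_operation.  */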
/* If X is a MEM referencing the constant pool, return the real value.
   Otherwise return X.  */
rtx
avoid_constant_pool_reference (rtx x)
{
  rtx c, tmp, addr;
  machine_mode cmode;
  HOST_WIDE_INT offset = 0;

  switch (GET_CODE (x))
    {
    case MEM:
      break;

    case FLOAT_EXTEND:
      /* Handle float extensions of constant pool references.  */
      tmp = XEXP (x, 0);
      c = avoid_constant_pool_reference (tmp);
      if (c != tmp && CONST_DOUBLE_AS_FLOAT_P (c))
        return const_double_from_real_value (*CONST_DOUBLE_REAL_VALUE (c),
                                             GET_MODE (x));
      return x;

    default:
      return x;
    }

  if (GET_MODE (x) == BLKmode)
    return x;

  addr = XEXP (x, 0);

  /* Call target hook to avoid the effects of -fpic etc....  */
  addr = targetm.delegitimize_address (addr);

  /* Split the address into a base and integer offset.  */
  if (GET_CODE (addr) == CONST
      && GET_CODE (XEXP (addr, 0)) == PLUS
      && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
    {
      offset = INTVAL (XEXP (XEXP (addr, 0), 1));
      addr = XEXP (XEXP (addr, 0), 0);
    }

  if (GET_CODE (addr) == LO_SUM)
    addr = XEXP (addr, 1);

  /* If this is a constant pool reference, we can turn it into its
     constant and hope that simplifications happen.  */
  if (GET_CODE (addr) == SYMBOL_REF
      && CONSTANT_POOL_ADDRESS_P (addr))
    {
      c = get_pool_constant (addr);
      cmode = get_pool_mode (addr);

      /* If we're accessing the constant in a different mode than it was
         originally stored, attempt to fix that up via subreg simplifications.
         If that fails we have no choice but to return the original memory.  */
      if (offset == 0 && cmode == GET_MODE (x))
        return c;
      else if (offset >= 0 && offset < GET_MODE_SIZE (cmode))
        {
          rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
          if (tem && CONSTANT_P (tem))
            return tem;
        }
    }

  return x;
}
/* Simplify a MEM based on its attributes.  This is the default
   delegitimize_address target hook, and it's recommended that every
   overrider call it.  */

rtx
delegitimize_mem_from_attrs (rtx x)
{
  /* MEMs without MEM_OFFSETs may have been offset, so we can't just
     use their base addresses as equivalent.  */
  if (MEM_P (x)
      && MEM_EXPR (x)
      && MEM_OFFSET_KNOWN_P (x))
    {
      tree decl = MEM_EXPR (x);
      machine_mode mode = GET_MODE (x);
      HOST_WIDE_INT offset = 0;

      switch (TREE_CODE (decl))
        {
        default:
          decl = NULL;
          break;

        case VAR_DECL:
          break;

        case ARRAY_REF:
        case ARRAY_RANGE_REF:
        case COMPONENT_REF:
        case BIT_FIELD_REF:
        case REALPART_EXPR:
        case IMAGPART_EXPR:
        case VIEW_CONVERT_EXPR:
          {
            HOST_WIDE_INT bitsize, bitpos;
            tree toffset;
            int unsignedp, reversep, volatilep = 0;

            decl
              = get_inner_reference (decl, &bitsize, &bitpos, &toffset, &mode,
                                     &unsignedp, &reversep, &volatilep);
            if (bitsize != GET_MODE_BITSIZE (mode)
                || (bitpos % BITS_PER_UNIT)
                || (toffset && !tree_fits_shwi_p (toffset)))
              decl = NULL;
            else
              {
                offset += bitpos / BITS_PER_UNIT;
                if (toffset)
                  offset += tree_to_shwi (toffset);
              }
            break;
          }
        }

      if (decl
          && mode == GET_MODE (x)
          && VAR_P (decl)
          && (TREE_STATIC (decl)
              || DECL_THREAD_LOCAL_P (decl))
          && DECL_RTL_SET_P (decl)
          && MEM_P (DECL_RTL (decl)))
        {
          rtx newx;

          offset += MEM_OFFSET (x);

          newx = DECL_RTL (decl);

          if (MEM_P (newx))
            {
              rtx n = XEXP (newx, 0), o = XEXP (x, 0);

              /* Avoid creating a new MEM needlessly if we already had
                 the same address.  We do if there's no OFFSET and the
                 old address X is identical to NEWX, or if X is of the
                 form (plus NEWX OFFSET), or the NEWX is of the form
                 (plus Y (const_int Z)) and X is that with the offset
                 added: (plus Y (const_int Z+OFFSET)).  */
              if (!((offset == 0
                     || (GET_CODE (o) == PLUS
                         && GET_CODE (XEXP (o, 1)) == CONST_INT
                         && (offset == INTVAL (XEXP (o, 1))
                             || (GET_CODE (n) == PLUS
                                 && GET_CODE (XEXP (n, 1)) == CONST_INT
                                 && (INTVAL (XEXP (n, 1)) + offset
                                     == INTVAL (XEXP (o, 1)))
                                 && (n = XEXP (n, 0))))
                         && (o = XEXP (o, 0))))
                    && rtx_equal_p (o, n)))
                x = adjust_address_nv (newx, mode, offset);
            }
          else if (GET_MODE (x) == GET_MODE (newx)
                   && offset == 0)
            x = newx;
        }
    }

  return x;
}
/* Make a unary operation by first seeing if it folds and otherwise making
   the specified operation.  */

rtx
simplify_gen_unary (enum rtx_code code, machine_mode mode, rtx op,
                    machine_mode op_mode)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
    return tem;

  return gen_rtx_fmt_e (code, mode, op);
}

/* Likewise for ternary operations.  */

rtx
simplify_gen_ternary (enum rtx_code code, machine_mode mode,
                      machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
                                              op0, op1, op2)))
    return tem;

  return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
}

/* Likewise, for relational operations.
   CMP_MODE specifies the mode the comparison is done in.  */

rtx
simplify_gen_relational (enum rtx_code code, machine_mode mode,
                         machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem;

  if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
                                                 op0, op1)))
    return tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
/* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
   and simplify the result.  If FN is non-NULL, call this callback on each
   X; if it returns non-NULL, replace X with its return value and simplify
   the result.  */

rtx
simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
                         rtx (*fn) (rtx, const_rtx, void *), void *data)
{
  enum rtx_code code = GET_CODE (x);
  machine_mode mode = GET_MODE (x);
  machine_mode op_mode;
  const char *fmt;
  rtx op0, op1, op2, newx, op;
  rtvec vec, newvec;
  int i, j;

  if (__builtin_expect (fn != NULL, 0))
    {
      newx = fn (x, old_rtx, data);
      if (newx)
        return newx;
    }
  else if (rtx_equal_p (x, old_rtx))
    return copy_rtx ((rtx) data);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      if (op0 == XEXP (x, 0))
        return x;
      return simplify_gen_unary (code, mode, op0, op_mode);

    case RTX_BIN_ARITH:
    case RTX_COMM_ARITH:
      op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
        return x;
      return simplify_gen_binary (code, mode, op0, op1);

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      op0 = XEXP (x, 0);
      op1 = XEXP (x, 1);
      op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
        return x;
      return simplify_gen_relational (code, mode, op_mode, op0, op1);

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
      op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
        return x;
      if (op_mode == VOIDmode)
        op_mode = GET_MODE (op0);
      return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);

    case RTX_EXTRA:
      if (code == SUBREG)
        {
          op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
          if (op0 == SUBREG_REG (x))
            return x;
          op0 = simplify_gen_subreg (GET_MODE (x), op0,
                                     GET_MODE (SUBREG_REG (x)),
                                     SUBREG_BYTE (x));
          return op0 ? op0 : x;
        }
      break;

    case RTX_OBJ:
      if (code == MEM)
        {
          op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
          if (op0 == XEXP (x, 0))
            return x;
          return replace_equiv_address_nv (x, op0);
        }
      else if (code == LO_SUM)
        {
          op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
          op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);

          /* (lo_sum (high x) y) -> y where x and y have the same base.  */
          if (GET_CODE (op0) == HIGH)
            {
              rtx base0, base1, offset0, offset1;
              split_const (XEXP (op0, 0), &base0, &offset0);
              split_const (op1, &base1, &offset1);
              if (rtx_equal_p (base0, base1))
                return op1;
            }

          if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
            return x;
          return gen_rtx_LO_SUM (mode, op0, op1);
        }
      break;

    default:
      break;
    }

  newx = x;
  fmt = GET_RTX_FORMAT (code);
  for (i = 0; fmt[i]; i++)
    switch (fmt[i])
      {
      case 'E':
        vec = XVEC (x, i);
        newvec = XVEC (newx, i);
        for (j = 0; j < GET_NUM_ELEM (vec); j++)
          {
            op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
                                          old_rtx, fn, data);
            if (op != RTVEC_ELT (vec, j))
              {
                if (newvec == vec)
                  {
                    newvec = shallow_copy_rtvec (vec);
                    if (x == newx)
                      newx = shallow_copy_rtx (x);
                    XVEC (newx, i) = newvec;
                  }
                RTVEC_ELT (newvec, j) = op;
              }
          }
        break;

      case 'e':
        if (XEXP (x, i))
          {
            op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
            if (op != XEXP (x, i))
              {
                if (x == newx)
                  newx = shallow_copy_rtx (x);
                XEXP (newx, i) = op;
              }
          }
        break;
      }
  return newx;
}
/* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
   resulting RTX.  Return a new RTX which is as simplified as possible.  */

rtx
simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
{
  return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
}
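/* For example, calling simplify_replace_rtx on
   (plus:SI (reg:SI R) (const_int 1)) with OLD_RTX = (reg:SI R) and
   NEW_RTX = (const_int 2) yields (const_int 3), because the rebuilt
   PLUS is fed back through simplify_gen_binary.  */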
/* Try to simplify a MODE truncation of OP, which has OP_MODE.
   Only handle cases where the truncated value is inherently an rvalue.

   RTL provides two ways of truncating a value:

   1. a lowpart subreg.  This form is only a truncation when both
      the outer and inner modes (here MODE and OP_MODE respectively)
      are scalar integers, and only then when the subreg is used as
      an rvalue.

      It is only valid to form such truncating subregs if the
      truncation requires no action by the target.  The onus for
      proving this is on the creator of the subreg -- e.g. the
      caller to simplify_subreg or simplify_gen_subreg -- and typically
      involves either TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode.

   2. a TRUNCATE.  This form handles both scalar and compound integers.

   The first form is preferred where valid.  However, the TRUNCATE
   handling in simplify_unary_operation turns the second form into the
   first form when TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode allow,
   so it is generally safe to form rvalue truncations using:

      simplify_gen_unary (TRUNCATE, ...)

   and leave simplify_unary_operation to work out which representation
   should be used.

   Because of the proof requirements on (1), simplify_truncation must
   also use simplify_gen_unary (TRUNCATE, ...) to truncate parts of OP,
   regardless of whether the outer truncation came from a SUBREG or a
   TRUNCATE.  For example, if the caller has proven that an SImode
   truncation of:

      (and:DI X Y)

   is a no-op and can be represented as a subreg, it does not follow
   that SImode truncations of X and Y are also no-ops.  On a target
   like 64-bit MIPS that requires SImode values to be stored in
   sign-extended form, an SImode truncation of:

      (and:DI (reg:DI X) (const_int 63))

   is trivially a no-op because only the lower 6 bits can be set.
   However, X is still an arbitrary 64-bit number and so we cannot
   assume that truncating it too is a no-op.  */
static rtx
simplify_truncation (machine_mode mode, rtx op,
                     machine_mode op_mode)
{
  unsigned int precision = GET_MODE_UNIT_PRECISION (mode);
  unsigned int op_precision = GET_MODE_UNIT_PRECISION (op_mode);
  scalar_int_mode int_mode, int_op_mode, subreg_mode;

  gcc_assert (precision <= op_precision);

  /* Optimize truncations of zero and sign extended values.  */
  if (GET_CODE (op) == ZERO_EXTEND
      || GET_CODE (op) == SIGN_EXTEND)
    {
      /* There are three possibilities.  If MODE is the same as the
         origmode, we can omit both the extension and the subreg.
         If MODE is not larger than the origmode, we can apply the
         truncation without the extension.  Finally, if the outermode
         is larger than the origmode, we can just extend to the appropriate
         mode.  */
      machine_mode origmode = GET_MODE (XEXP (op, 0));
      if (mode == origmode)
        return XEXP (op, 0);
      else if (precision <= GET_MODE_UNIT_PRECISION (origmode))
        return simplify_gen_unary (TRUNCATE, mode,
                                   XEXP (op, 0), origmode);
      else
        return simplify_gen_unary (GET_CODE (op), mode,
                                   XEXP (op, 0), origmode);
    }

  /* If the machine can perform operations in the truncated mode, distribute
     the truncation, i.e. simplify (truncate:QI (op:SI (x:SI) (y:SI))) into
     (op:QI (truncate:QI (x:SI)) (truncate:QI (y:SI))).  */
  if (1
      && (!WORD_REGISTER_OPERATIONS || precision >= BITS_PER_WORD)
      && (GET_CODE (op) == PLUS
          || GET_CODE (op) == MINUS
          || GET_CODE (op) == MULT))
    {
      rtx op0 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0), op_mode);
      if (op0)
        {
          rtx op1 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 1), op_mode);
          if (op1)
            return simplify_gen_binary (GET_CODE (op), mode, op0, op1);
        }
    }

  /* Simplify (truncate:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C)) into
     (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      /* Ensure that OP_MODE is at least twice as wide as MODE
         to avoid the possibility that an outer LSHIFTRT shifts by more
         than the sign extension's sign_bit_copies and introduces zeros
         into the high bits of the result.  */
      && 2 * precision <= op_precision
      && CONST_INT_P (XEXP (op, 1))
      && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
      && UINTVAL (XEXP (op, 1)) < precision)
    return simplify_gen_binary (ASHIFTRT, mode,
                                XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Likewise (truncate:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C)) into
     (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && CONST_INT_P (XEXP (op, 1))
      && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
      && UINTVAL (XEXP (op, 1)) < precision)
    return simplify_gen_binary (LSHIFTRT, mode,
                                XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Likewise (truncate:QI (ashift:SI (zero_extend:SI (x:QI)) C)) into
     (ashift:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if (GET_CODE (op) == ASHIFT
      && CONST_INT_P (XEXP (op, 1))
      && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
          || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
      && UINTVAL (XEXP (op, 1)) < precision)
    return simplify_gen_binary (ASHIFT, mode,
                                XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Likewise (truncate:QI (and:SI (lshiftrt:SI (x:SI) C) C2)) into
     (and:QI (lshiftrt:QI (truncate:QI (x:SI)) C) C2) for suitable C
     and C2.  */
  if (GET_CODE (op) == AND
      && (GET_CODE (XEXP (op, 0)) == LSHIFTRT
          || GET_CODE (XEXP (op, 0)) == ASHIFTRT)
      && CONST_INT_P (XEXP (XEXP (op, 0), 1))
      && CONST_INT_P (XEXP (op, 1)))
    {
      rtx op0 = (XEXP (XEXP (op, 0), 0));
      rtx shift_op = XEXP (XEXP (op, 0), 1);
      rtx mask_op = XEXP (op, 1);
      unsigned HOST_WIDE_INT shift = UINTVAL (shift_op);
      unsigned HOST_WIDE_INT mask = UINTVAL (mask_op);

      if (shift < precision
          /* If doing this transform works for an X with all bits set,
             it works for any X.  */
          && ((GET_MODE_MASK (mode) >> shift) & mask)
             == ((GET_MODE_MASK (op_mode) >> shift) & mask)
          && (op0 = simplify_gen_unary (TRUNCATE, mode, op0, op_mode))
          && (op0 = simplify_gen_binary (LSHIFTRT, mode, op0, shift_op)))
        {
          mask_op = GEN_INT (trunc_int_for_mode (mask, mode));
          return simplify_gen_binary (AND, mode, op0, mask_op);
        }
    }

  /* Turn (truncate:M1 (*_extract:M2 (reg:M2) (len) (pos))) into
     (*_extract:M1 (truncate:M1 (reg:M2)) (len) (pos')) if possible without
     changing len.  */
  if ((GET_CODE (op) == ZERO_EXTRACT || GET_CODE (op) == SIGN_EXTRACT)
      && REG_P (XEXP (op, 0))
      && GET_MODE (XEXP (op, 0)) == GET_MODE (op)
      && CONST_INT_P (XEXP (op, 1))
      && CONST_INT_P (XEXP (op, 2)))
    {
      rtx op0 = XEXP (op, 0);
      unsigned HOST_WIDE_INT len = UINTVAL (XEXP (op, 1));
      unsigned HOST_WIDE_INT pos = UINTVAL (XEXP (op, 2));
      if (BITS_BIG_ENDIAN && pos >= op_precision - precision)
        {
          op0 = simplify_gen_unary (TRUNCATE, mode, op0, GET_MODE (op0));
          if (op0)
            {
              pos -= op_precision - precision;
              return simplify_gen_ternary (GET_CODE (op), mode, mode, op0,
                                           XEXP (op, 1), GEN_INT (pos));
            }
        }
      else if (!BITS_BIG_ENDIAN && precision >= len + pos)
        {
          op0 = simplify_gen_unary (TRUNCATE, mode, op0, GET_MODE (op0));
          if (op0)
            return simplify_gen_ternary (GET_CODE (op), mode, mode, op0,
                                         XEXP (op, 1), XEXP (op, 2));
        }
    }

  /* Recognize a word extraction from a multi-word subreg.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (mode)
      && SCALAR_INT_MODE_P (op_mode)
      && precision >= BITS_PER_WORD
      && 2 * precision <= op_precision
      && CONST_INT_P (XEXP (op, 1))
      && (INTVAL (XEXP (op, 1)) & (precision - 1)) == 0
      && UINTVAL (XEXP (op, 1)) < op_precision)
    {
      int byte = subreg_lowpart_offset (mode, op_mode);
      int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
      return simplify_gen_subreg (mode, XEXP (op, 0), op_mode,
                                  (WORDS_BIG_ENDIAN
                                   ? byte - shifted_bytes
                                   : byte + shifted_bytes));
    }

  /* If we have a TRUNCATE of a right shift of MEM, make a new MEM
     and try replacing the TRUNCATE and shift with it.  Don't do this
     if the MEM has a mode-dependent address.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && is_a <scalar_int_mode> (mode, &int_mode)
      && is_a <scalar_int_mode> (op_mode, &int_op_mode)
      && MEM_P (XEXP (op, 0))
      && CONST_INT_P (XEXP (op, 1))
      && INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (int_mode) == 0
      && INTVAL (XEXP (op, 1)) > 0
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (int_op_mode)
      && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0),
                                     MEM_ADDR_SPACE (XEXP (op, 0)))
      && ! MEM_VOLATILE_P (XEXP (op, 0))
      && (GET_MODE_SIZE (int_mode) >= UNITS_PER_WORD
          || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
    {
      int byte = subreg_lowpart_offset (int_mode, int_op_mode);
      int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
      return adjust_address_nv (XEXP (op, 0), int_mode,
                                (WORDS_BIG_ENDIAN
                                 ? byte - shifted_bytes
                                 : byte + shifted_bytes));
    }

  /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
     (OP:SI foo:SI) if OP is NEG or ABS.  */
  if ((GET_CODE (op) == ABS
       || GET_CODE (op) == NEG)
      && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
          || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
    return simplify_gen_unary (GET_CODE (op), mode,
                               XEXP (XEXP (op, 0), 0), mode);

  /* (truncate:A (subreg:B (truncate:C X) 0)) is
     (truncate:A X).  */
  if (GET_CODE (op) == SUBREG
      && is_a <scalar_int_mode> (mode, &int_mode)
      && SCALAR_INT_MODE_P (op_mode)
      && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (op)), &subreg_mode)
      && GET_CODE (SUBREG_REG (op)) == TRUNCATE
      && subreg_lowpart_p (op))
    {
      rtx inner = XEXP (SUBREG_REG (op), 0);
      if (GET_MODE_PRECISION (int_mode) <= GET_MODE_PRECISION (subreg_mode))
        return simplify_gen_unary (TRUNCATE, int_mode, inner,
                                   GET_MODE (inner));
      else
        /* If the subreg above is paradoxical and C is narrower
           than A, return (subreg:A (truncate:C X) 0).  */
        return simplify_gen_subreg (int_mode, SUBREG_REG (op), subreg_mode, 0);
    }

  /* (truncate:A (truncate:B X)) is (truncate:A X).  */
  if (GET_CODE (op) == TRUNCATE)
    return simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0),
                               GET_MODE (XEXP (op, 0)));

  /* (truncate:A (ior X C)) is (const_int -1) if C is equal to that already,
     in mode A.  */
  if (GET_CODE (op) == IOR
      && SCALAR_INT_MODE_P (mode)
      && SCALAR_INT_MODE_P (op_mode)
      && CONST_INT_P (XEXP (op, 1))
      && trunc_int_for_mode (INTVAL (XEXP (op, 1)), mode) == -1)
    return constm1_rtx;

  return NULL_RTX;
}
/* Try to simplify a unary operation CODE whose output mode is to be
   MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if no simplification can be made.  */
rtx
simplify_unary_operation (enum rtx_code code, machine_mode mode,
                          rtx op, machine_mode op_mode)
{
  rtx trueop, tem;

  trueop = avoid_constant_pool_reference (op);

  tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
  if (tem)
    return tem;

  return simplify_unary_operation_1 (code, mode, op);
}
/* Return true if FLOAT or UNSIGNED_FLOAT operation OP is known
   to be exact.  */

static bool
exact_int_to_float_conversion_p (const_rtx op)
{
  int out_bits = significand_size (GET_MODE_INNER (GET_MODE (op)));
  machine_mode op0_mode = GET_MODE (XEXP (op, 0));
  /* Constants shouldn't reach here.  */
  gcc_assert (op0_mode != VOIDmode);
  int in_prec = GET_MODE_UNIT_PRECISION (op0_mode);
  int in_bits = in_prec;
  if (HWI_COMPUTABLE_MODE_P (op0_mode))
    {
      unsigned HOST_WIDE_INT nonzero = nonzero_bits (XEXP (op, 0), op0_mode);
      if (GET_CODE (op) == FLOAT)
        in_bits -= num_sign_bit_copies (XEXP (op, 0), op0_mode);
      else if (GET_CODE (op) == UNSIGNED_FLOAT)
        in_bits = wi::min_precision (wi::uhwi (nonzero, in_prec), UNSIGNED);
      else
        gcc_unreachable ();
      in_bits -= wi::ctz (wi::uhwi (nonzero, in_prec));
    }
  return in_bits <= out_bits;
}
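/* For example, a FLOAT from SImode into DFmode is always exact, since at
   most 31 significant bits must fit into DFmode's 53-bit significand,
   whereas a FLOAT from DImode is exact only when enough leading sign-bit
   copies or trailing zero bits are known in the operand.  */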
/* Perform some simplifications we can do even if the operands
   aren't constant.  */
static rtx
simplify_unary_operation_1 (enum rtx_code code, machine_mode mode, rtx op)
{
  enum rtx_code reversed;
  rtx temp, elt, base, step;
  scalar_int_mode inner, int_mode, op_mode, op0_mode;

  switch (code)
    {
    case NOT:
      /* (not (not X)) == X.  */
      if (GET_CODE (op) == NOT)
        return XEXP (op, 0);

      /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
         comparison is all ones.  */
      if (COMPARISON_P (op)
          && (mode == BImode || STORE_FLAG_VALUE == -1)
          && ((reversed = reversed_comparison_code (op, NULL)) != UNKNOWN))
        return simplify_gen_relational (reversed, mode, VOIDmode,
                                        XEXP (op, 0), XEXP (op, 1));

      /* (not (plus X -1)) can become (neg X).  */
      if (GET_CODE (op) == PLUS
          && XEXP (op, 1) == constm1_rtx)
        return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);

      /* Similarly, (not (neg X)) is (plus X -1).  Only do this for
         modes that have CONSTM1_RTX, i.e. MODE_INT, MODE_PARTIAL_INT
         and MODE_VECTOR_INT.  */
      if (GET_CODE (op) == NEG && CONSTM1_RTX (mode))
        return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
                                    CONSTM1_RTX (mode));

      /* (not (xor X C)) for C constant is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == XOR
          && CONST_INT_P (XEXP (op, 1))
          && (temp = simplify_unary_operation (NOT, mode,
                                               XEXP (op, 1), mode)) != 0)
        return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (plus X C)) for signbit C is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == PLUS
          && CONST_INT_P (XEXP (op, 1))
          && mode_signbit_p (mode, XEXP (op, 1))
          && (temp = simplify_unary_operation (NOT, mode,
                                               XEXP (op, 1), mode)) != 0)
        return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (ashift 1 X)) is (rotate ~1 X).  We used to do this for
         operands other than 1, but that is not valid.  We could do a
         similar simplification for (not (lshiftrt C X)) where C is
         just the sign bit, but this doesn't seem common enough to
         bother with.  */
      if (GET_CODE (op) == ASHIFT
          && XEXP (op, 0) == const1_rtx)
        {
          temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
          return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
        }

      /* (not (ashiftrt foo C)) where C is the number of bits in FOO
         minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
         so we can perform the above simplification.  */
      if (STORE_FLAG_VALUE == -1
          && is_a <scalar_int_mode> (mode, &int_mode)
          && GET_CODE (op) == ASHIFTRT
          && CONST_INT_P (XEXP (op, 1))
          && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (int_mode) - 1)
        return simplify_gen_relational (GE, int_mode, VOIDmode,
                                        XEXP (op, 0), const0_rtx);

      if (partial_subreg_p (op)
          && subreg_lowpart_p (op)
          && GET_CODE (SUBREG_REG (op)) == ASHIFT
          && XEXP (SUBREG_REG (op), 0) == const1_rtx)
        {
          machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
          rtx x;

          x = gen_rtx_ROTATE (inner_mode,
                              simplify_gen_unary (NOT, inner_mode, const1_rtx,
                                                  inner_mode),
                              XEXP (SUBREG_REG (op), 1));
          temp = rtl_hooks.gen_lowpart_no_emit (mode, x);
          if (temp)
            return temp;
        }

      /* Apply De Morgan's laws to reduce number of patterns for machines
         with negating logical insns (and-not, nand, etc.).  If result has
         only one NOT, put it first, since that is how the patterns are
         coded.  */
      if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
        {
          rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
          machine_mode op_mode;

          op_mode = GET_MODE (in1);
          in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);

          op_mode = GET_MODE (in2);
          if (op_mode == VOIDmode)
            op_mode = mode;
          in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);

          if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
            std::swap (in1, in2);

          return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
                                 mode, in1, in2);
        }
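      /* For example, the De Morgan rewrite above turns (not (and X Y))
         into (ior (not X) (not Y)) and (not (ior X Y)) into
         (and (not X) (not Y)), with any single surviving NOT placed
         first.  */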
      /* (not (bswap x)) -> (bswap (not x)).  */
      if (GET_CODE (op) == BSWAP)
        {
          rtx x = simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
          return simplify_gen_unary (BSWAP, mode, x, mode);
        }
      break;

    case NEG:
      /* (neg (neg X)) == X.  */
      if (GET_CODE (op) == NEG)
        return XEXP (op, 0);

      /* (neg (x ? (neg y) : y)) == !x ? (neg y) : y.
         If the comparison is not reversible, use
         x ? y : (neg y).  */
      if (GET_CODE (op) == IF_THEN_ELSE)
        {
          rtx cond = XEXP (op, 0);
          rtx true_rtx = XEXP (op, 1);
          rtx false_rtx = XEXP (op, 2);

          if ((GET_CODE (true_rtx) == NEG
               && rtx_equal_p (XEXP (true_rtx, 0), false_rtx))
               || (GET_CODE (false_rtx) == NEG
                   && rtx_equal_p (XEXP (false_rtx, 0), true_rtx)))
            {
              if (reversed_comparison_code (cond, NULL) != UNKNOWN)
                temp = reversed_comparison (cond, mode);
              else
                {
                  temp = cond;
                  std::swap (true_rtx, false_rtx);
                }
              return simplify_gen_ternary (IF_THEN_ELSE, mode,
                                           mode, temp, true_rtx, false_rtx);
            }
        }

      /* (neg (plus X 1)) can become (not X).  */
      if (GET_CODE (op) == PLUS
          && XEXP (op, 1) == const1_rtx)
        return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);

      /* Similarly, (neg (not X)) is (plus X 1).  */
      if (GET_CODE (op) == NOT)
        return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
                                    CONST1_RTX (mode));

      /* (neg (minus X Y)) can become (minus Y X).  This transformation
         isn't safe for modes with signed zeros, since if X and Y are
         both +0, (minus Y X) is the same as (minus X Y).  If the
         rounding mode is towards +infinity (or -infinity) then the two
         expressions will be rounded differently.  */
      if (GET_CODE (op) == MINUS
          && !HONOR_SIGNED_ZEROS (mode)
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));

      if (GET_CODE (op) == PLUS
          && !HONOR_SIGNED_ZEROS (mode)
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        {
          /* (neg (plus A C)) is simplified to (minus -C A).  */
          if (CONST_SCALAR_INT_P (XEXP (op, 1))
              || CONST_DOUBLE_AS_FLOAT_P (XEXP (op, 1)))
            {
              temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
              if (temp)
                return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
            }

          /* (neg (plus A B)) is canonicalized to (minus (neg A) B).  */
          temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
          return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
        }

      /* (neg (mult A B)) becomes (mult A (neg B)).
         This works even for floating-point values.  */
      if (GET_CODE (op) == MULT
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        {
          temp = simplify_gen_unary (NEG, mode, XEXP (op, 1), mode);
          return simplify_gen_binary (MULT, mode, XEXP (op, 0), temp);
        }

      /* NEG commutes with ASHIFT since it is multiplication.  Only do
         this if we can then eliminate the NEG (e.g., if the operand
         is a constant).  */
      if (GET_CODE (op) == ASHIFT)
        {
          temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
          if (temp)
            return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
        }

      /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
         C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == ASHIFTRT
          && CONST_INT_P (XEXP (op, 1))
          && INTVAL (XEXP (op, 1)) == GET_MODE_UNIT_PRECISION (mode) - 1)
        return simplify_gen_binary (LSHIFTRT, mode,
                                    XEXP (op, 0), XEXP (op, 1));

      /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
         C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == LSHIFTRT
          && CONST_INT_P (XEXP (op, 1))
          && INTVAL (XEXP (op, 1)) == GET_MODE_UNIT_PRECISION (mode) - 1)
        return simplify_gen_binary (ASHIFTRT, mode,
                                    XEXP (op, 0), XEXP (op, 1));

      /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1.  */
      if (GET_CODE (op) == XOR
          && XEXP (op, 1) == const1_rtx
          && nonzero_bits (XEXP (op, 0), mode) == 1)
        return plus_constant (mode, XEXP (op, 0), -1);

      /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1.  */
      /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1.  */
      if (GET_CODE (op) == LT
          && XEXP (op, 1) == const0_rtx
          && is_a <scalar_int_mode> (GET_MODE (XEXP (op, 0)), &inner))
        {
          int_mode = as_a <scalar_int_mode> (mode);
          int isize = GET_MODE_PRECISION (inner);
          if (STORE_FLAG_VALUE == 1)
            {
              temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
                                          GEN_INT (isize - 1));
              if (int_mode == inner)
                return temp;
              if (GET_MODE_PRECISION (int_mode) > isize)
                return simplify_gen_unary (SIGN_EXTEND, int_mode, temp, inner);
              return simplify_gen_unary (TRUNCATE, int_mode, temp, inner);
            }
          else if (STORE_FLAG_VALUE == -1)
            {
              temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
                                          GEN_INT (isize - 1));
              if (int_mode == inner)
                return temp;
              if (GET_MODE_PRECISION (int_mode) > isize)
                return simplify_gen_unary (ZERO_EXTEND, int_mode, temp, inner);
              return simplify_gen_unary (TRUNCATE, int_mode, temp, inner);
            }
        }

      if (vec_series_p (op, &base, &step))
        {
          /* Only create a new series if we can simplify both parts.  In other
             cases this isn't really a simplification, and it's not necessarily
             a win to replace a vector operation with a scalar operation.  */
          scalar_mode inner_mode = GET_MODE_INNER (mode);
          base = simplify_unary_operation (NEG, inner_mode, base, inner_mode);
          if (base)
            {
              step = simplify_unary_operation (NEG, inner_mode,
                                               step, inner_mode);
              if (step)
                return gen_vec_series (mode, base, step);
            }
        }
      break;

    case TRUNCATE:
      /* Don't optimize (lshiftrt (mult ...)) as it would interfere
         with the umulXi3_highpart patterns.  */
      if (GET_CODE (op) == LSHIFTRT
          && GET_CODE (XEXP (op, 0)) == MULT)
        break;

      if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
        {
          if (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op)))
            {
              temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
              if (temp)
                return temp;
            }
          /* We can't handle truncation to a partial integer mode here
             because we don't know the real bitsize of the partial
             integer mode.  */
          break;
        }

      if (GET_MODE (op) != VOIDmode)
        {
          temp = simplify_truncation (mode, op, GET_MODE (op));
          if (temp)
            return temp;
        }

      /* If we know that the value is already truncated, we can
         replace the TRUNCATE with a SUBREG.  */
      if (GET_MODE_NUNITS (mode) == 1
          && (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op))
              || truncated_to_mode (mode, op)))
        {
          temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
          if (temp)
            return temp;
        }

      /* A truncate of a comparison can be replaced with a subreg if
         STORE_FLAG_VALUE permits.  This is like the previous test,
         but it works even if the comparison is done in a mode larger
         than HOST_BITS_PER_WIDE_INT.  */
      if (HWI_COMPUTABLE_MODE_P (mode)
          && COMPARISON_P (op)
          && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
        {
          temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
          if (temp)
            return temp;
        }

      /* A truncate of a memory is just loading the low part of the memory
         if we are not changing the meaning of the address.  */
      if (GET_CODE (op) == MEM
          && !VECTOR_MODE_P (mode)
          && !MEM_VOLATILE_P (op)
          && !mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op)))
        {
          temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
          if (temp)
            return temp;
        }

      break;

    case FLOAT_TRUNCATE:
      if (DECIMAL_FLOAT_MODE_P (mode))
        break;

      /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF.  */
      if (GET_CODE (op) == FLOAT_EXTEND
          && GET_MODE (XEXP (op, 0)) == mode)
        return XEXP (op, 0);

      /* (float_truncate:SF (float_truncate:DF foo:XF))
         = (float_truncate:SF foo:XF).
         This may eliminate double rounding, so it is unsafe.

         (float_truncate:SF (float_extend:XF foo:DF))
         = (float_truncate:SF foo:DF).

         (float_truncate:DF (float_extend:XF foo:SF))
         = (float_extend:DF foo:SF).  */
      if ((GET_CODE (op) == FLOAT_TRUNCATE
           && flag_unsafe_math_optimizations)
          || GET_CODE (op) == FLOAT_EXTEND)
        return simplify_gen_unary (GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)))
                                   > GET_MODE_UNIT_SIZE (mode)
                                   ? FLOAT_TRUNCATE : FLOAT_EXTEND,
                                   mode,
                                   XEXP (op, 0), mode);

      /* (float_truncate (float x)) is (float x).  */
      if ((GET_CODE (op) == FLOAT || GET_CODE (op) == UNSIGNED_FLOAT)
          && (flag_unsafe_math_optimizations
              || exact_int_to_float_conversion_p (op)))
        return simplify_gen_unary (GET_CODE (op), mode,
                                   XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));

      /* (float_truncate:SF (OP:DF (float_extend:DF foo:SF))) is
         (OP:SF foo:SF) if OP is NEG or ABS.  */
      if ((GET_CODE (op) == ABS
           || GET_CODE (op) == NEG)
          && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
          && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
        return simplify_gen_unary (GET_CODE (op), mode,
                                   XEXP (XEXP (op, 0), 0), mode);

      /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
         is (float_truncate:SF x).  */
      if (GET_CODE (op) == SUBREG
          && subreg_lowpart_p (op)
          && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
        return SUBREG_REG (op);
      break;

    case FLOAT_EXTEND:
      if (DECIMAL_FLOAT_MODE_P (mode))
        break;

      /* (float_extend (float_extend x)) is (float_extend x).

         (float_extend (float x)) is (float x) assuming that double
         rounding can't happen.  */
      if (GET_CODE (op) == FLOAT_EXTEND
          || ((GET_CODE (op) == FLOAT || GET_CODE (op) == UNSIGNED_FLOAT)
              && exact_int_to_float_conversion_p (op)))
        return simplify_gen_unary (GET_CODE (op), mode,
                                   XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));

      break;

    case ABS:
      /* (abs (neg <foo>)) -> (abs <foo>).  */
      if (GET_CODE (op) == NEG)
        return simplify_gen_unary (ABS, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));

      /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
         do nothing.  */
      if (GET_MODE (op) == VOIDmode)
        break;

      /* If the operand is something known to be positive, ignore the ABS.  */
      if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
          || val_signbit_known_clear_p (GET_MODE (op),
                                        nonzero_bits (op, GET_MODE (op))))
        return op;

      /* If the operand is known to be only -1 or 0, convert ABS to NEG.  */
      if (is_a <scalar_int_mode> (mode, &int_mode)
          && (num_sign_bit_copies (op, int_mode)
              == GET_MODE_PRECISION (int_mode)))
        return gen_rtx_NEG (int_mode, op);

      break;

    case FFS:
      /* (ffs (*_extend <X>)) = (ffs <X>).  */
      if (GET_CODE (op) == SIGN_EXTEND
          || GET_CODE (op) == ZERO_EXTEND)
        return simplify_gen_unary (FFS, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));
      break;

    case POPCOUNT:
      switch (GET_CODE (op))
        {
        case BSWAP:
        case ZERO_EXTEND:
          /* (popcount (zero_extend <X>)) = (popcount <X>).  */
          return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
                                     GET_MODE (XEXP (op, 0)));

        case ROTATE:
        case ROTATERT:
          /* Rotations don't affect popcount.  */
          if (!side_effects_p (XEXP (op, 1)))
            return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
                                       GET_MODE (XEXP (op, 0)));
          break;

        default:
          break;
        }
      break;

    case PARITY:
      switch (GET_CODE (op))
        {
        case NOT:
        case BSWAP:
        case ZERO_EXTEND:
        case SIGN_EXTEND:
          return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
                                     GET_MODE (XEXP (op, 0)));

        case ROTATE:
        case ROTATERT:
          /* Rotations don't affect parity.  */
          if (!side_effects_p (XEXP (op, 1)))
            return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
                                       GET_MODE (XEXP (op, 0)));
          break;

        default:
          break;
        }
      break;

    case BSWAP:
      /* (bswap (bswap x)) -> x.  */
      if (GET_CODE (op) == BSWAP)
        return XEXP (op, 0);
      break;

    case FLOAT:
      /* (float (sign_extend <X>)) = (float <X>).  */
      if (GET_CODE (op) == SIGN_EXTEND)
        return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));
      break;

    case SIGN_EXTEND:
      /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
         becomes just the MINUS if its mode is MODE.  This allows
         folding switch statements on machines using casesi (such as
         the VAX).  */
      if (GET_CODE (op) == TRUNCATE
          && GET_MODE (XEXP (op, 0)) == mode
          && GET_CODE (XEXP (op, 0)) == MINUS
          && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
          && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
        return XEXP (op, 0);

      /* Extending a widening multiplication should be canonicalized to
         a wider widening multiplication.  */
      if (GET_CODE (op) == MULT)
        {
          rtx lhs = XEXP (op, 0);
          rtx rhs = XEXP (op, 1);
          enum rtx_code lcode = GET_CODE (lhs);
          enum rtx_code rcode = GET_CODE (rhs);

          /* Widening multiplies usually extend both operands, but sometimes
             they use a shift to extract a portion of a register.  */
          if ((lcode == SIGN_EXTEND
               || (lcode == ASHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
              && (rcode == SIGN_EXTEND
                  || (rcode == ASHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
            {
              machine_mode lmode = GET_MODE (lhs);
              machine_mode rmode = GET_MODE (rhs);
              int bits;

              if (lcode == ASHIFTRT)
                /* Number of bits not shifted off the end.  */
                bits = (GET_MODE_UNIT_PRECISION (lmode)
                        - INTVAL (XEXP (lhs, 1)));
              else /* lcode == SIGN_EXTEND */
                /* Size of inner mode.  */
                bits = GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (lhs, 0)));

              if (rcode == ASHIFTRT)
                bits += (GET_MODE_UNIT_PRECISION (rmode)
                         - INTVAL (XEXP (rhs, 1)));
              else /* rcode == SIGN_EXTEND */
                bits += GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (rhs, 0)));

              /* We can only widen multiplies if the result is mathematically
                 equivalent.  I.e. if overflow was impossible.  */
              if (bits <= GET_MODE_UNIT_PRECISION (GET_MODE (op)))
                return simplify_gen_binary
                  (MULT, mode,
                   simplify_gen_unary (SIGN_EXTEND, mode, lhs, lmode),
                   simplify_gen_unary (SIGN_EXTEND, mode, rhs, rmode));
            }
        }
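      /* For example, (sign_extend:DI (mult:SI (sign_extend:SI x:HI)
         (sign_extend:SI y:HI))) needs at most 16 + 16 = 32 bits, so it
         can be rewritten as the wider widening multiply
         (mult:DI (sign_extend:DI x:HI) (sign_extend:DI y:HI)).  */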
      /* Check for a sign extension of a subreg of a promoted
         variable, where the promotion is sign-extended, and the
         target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
          && SUBREG_PROMOTED_VAR_P (op)
          && SUBREG_PROMOTED_SIGNED_P (op)
          && !paradoxical_subreg_p (mode, GET_MODE (SUBREG_REG (op))))
        {
          temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
          if (temp)
            return temp;
        }

      /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
         (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>).  */
      if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
        {
          gcc_assert (GET_MODE_UNIT_PRECISION (mode)
                      > GET_MODE_UNIT_PRECISION (GET_MODE (op)));
          return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
                                     GET_MODE (XEXP (op, 0)));
        }

      /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
         is (sign_extend:M (subreg:O <X>)) if there is a mode with
         GET_MODE_BITSIZE (N) - I bits.
         (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
         is similarly (zero_extend:M (subreg:O <X>)).  */
      if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT)
          && GET_CODE (XEXP (op, 0)) == ASHIFT
          && is_a <scalar_int_mode> (mode, &int_mode)
          && CONST_INT_P (XEXP (op, 1))
          && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
          && (op_mode = as_a <scalar_int_mode> (GET_MODE (op)),
              GET_MODE_BITSIZE (op_mode) > INTVAL (XEXP (op, 1))))
        {
          scalar_int_mode tmode;
          gcc_assert (GET_MODE_BITSIZE (int_mode)
                      > GET_MODE_BITSIZE (op_mode));
          if (int_mode_for_size (GET_MODE_BITSIZE (op_mode)
                                 - INTVAL (XEXP (op, 1)), 1).exists (&tmode))
            {
              rtx inner =
                rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
              if (inner)
                return simplify_gen_unary (GET_CODE (op) == ASHIFTRT
                                           ? SIGN_EXTEND : ZERO_EXTEND,
                                           int_mode, inner, tmode);
            }
        }

      /* (sign_extend:M (lshiftrt:N <X> (const_int I))) is better as
         (zero_extend:M (lshiftrt:N <X> (const_int I))) if I is not 0.  */
      if (GET_CODE (op) == LSHIFTRT
          && CONST_INT_P (XEXP (op, 1))
          && XEXP (op, 1) != const0_rtx)
        return simplify_gen_unary (ZERO_EXTEND, mode, op, GET_MODE (op));

#if defined(POINTERS_EXTEND_UNSIGNED)
      /* As we do not know which address space the pointer is referring to,
         we can do this only if the target does not support different pointer
         or address modes depending on the address space.  */
      if (target_default_pointer_address_modes_p ()
          && ! POINTERS_EXTEND_UNSIGNED
          && mode == Pmode && GET_MODE (op) == ptr_mode
          && (CONSTANT_P (op)
              || (GET_CODE (op) == SUBREG
                  && REG_P (SUBREG_REG (op))
                  && REG_POINTER (SUBREG_REG (op))
                  && GET_MODE (SUBREG_REG (op)) == Pmode))
          && !targetm.have_ptr_extend ())
        {
          temp
            = convert_memory_address_addr_space_1 (Pmode, op,
                                                   ADDR_SPACE_GENERIC, false,
                                                   true);
          if (temp)
            return temp;
        }
#endif
      break;

    case ZERO_EXTEND:
      /* Check for a zero extension of a subreg of a promoted
         variable, where the promotion is zero-extended, and the
         target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
          && SUBREG_PROMOTED_VAR_P (op)
          && SUBREG_PROMOTED_UNSIGNED_P (op)
          && !paradoxical_subreg_p (mode, GET_MODE (SUBREG_REG (op))))
        {
          temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
          if (temp)
            return temp;
        }

      /* Extending a widening multiplication should be canonicalized to
         a wider widening multiplication.  */
      if (GET_CODE (op) == MULT)
        {
          rtx lhs = XEXP (op, 0);
          rtx rhs = XEXP (op, 1);
          enum rtx_code lcode = GET_CODE (lhs);
          enum rtx_code rcode = GET_CODE (rhs);

          /* Widening multiplies usually extend both operands, but sometimes
             they use a shift to extract a portion of a register.  */
          if ((lcode == ZERO_EXTEND
               || (lcode == LSHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
              && (rcode == ZERO_EXTEND
                  || (rcode == LSHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
            {
              machine_mode lmode = GET_MODE (lhs);
              machine_mode rmode = GET_MODE (rhs);
              int bits;

              if (lcode == LSHIFTRT)
                /* Number of bits not shifted off the end.  */
                bits = (GET_MODE_UNIT_PRECISION (lmode)
                        - INTVAL (XEXP (lhs, 1)));
              else /* lcode == ZERO_EXTEND */
                /* Size of inner mode.  */
                bits = GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (lhs, 0)));

              if (rcode == LSHIFTRT)
                bits += (GET_MODE_UNIT_PRECISION (rmode)
                         - INTVAL (XEXP (rhs, 1)));
              else /* rcode == ZERO_EXTEND */
                bits += GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (rhs, 0)));

              /* We can only widen multiplies if the result is mathematically
                 equivalent.  I.e. if overflow was impossible.  */
              if (bits <= GET_MODE_UNIT_PRECISION (GET_MODE (op)))
                return simplify_gen_binary
                  (MULT, mode,
                   simplify_gen_unary (ZERO_EXTEND, mode, lhs, lmode),
                   simplify_gen_unary (ZERO_EXTEND, mode, rhs, rmode));
            }
        }

      /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>).  */
      if (GET_CODE (op) == ZERO_EXTEND)
        return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));

      /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
         is (zero_extend:M (subreg:O <X>)) if there is a mode with
         GET_MODE_PRECISION (N) - I bits.  */
      if (GET_CODE (op) == LSHIFTRT
          && GET_CODE (XEXP (op, 0)) == ASHIFT
          && is_a <scalar_int_mode> (mode, &int_mode)
          && CONST_INT_P (XEXP (op, 1))
          && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
          && (op_mode = as_a <scalar_int_mode> (GET_MODE (op)),
              GET_MODE_PRECISION (op_mode) > INTVAL (XEXP (op, 1))))
        {
          scalar_int_mode tmode;
          if (int_mode_for_size (GET_MODE_PRECISION (op_mode)
                                 - INTVAL (XEXP (op, 1)), 1).exists (&tmode))
            {
              rtx inner =
                rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
              if (inner)
                return simplify_gen_unary (ZERO_EXTEND, int_mode,
                                           inner, tmode);
            }
        }

      /* (zero_extend:M (subreg:N <X:O>)) is <X:O> (for M == O) or
         (zero_extend:M <X:O>), if X doesn't have any non-zero bits outside
         of mode N.  E.g.
         (zero_extend:SI (subreg:QI (and:SI (reg:SI) (const_int 63)) 0)) is
         (and:SI (reg:SI) (const_int 63)).  */
      if (partial_subreg_p (op)
          && is_a <scalar_int_mode> (mode, &int_mode)
          && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (op)), &op0_mode)
          && GET_MODE_PRECISION (op0_mode) <= HOST_BITS_PER_WIDE_INT
          && GET_MODE_PRECISION (int_mode) >= GET_MODE_PRECISION (op0_mode)
          && subreg_lowpart_p (op)
          && (nonzero_bits (SUBREG_REG (op), op0_mode)
              & ~GET_MODE_MASK (GET_MODE (op))) == 0)
        {
          if (GET_MODE_PRECISION (int_mode) == GET_MODE_PRECISION (op0_mode))
            return SUBREG_REG (op);
          return simplify_gen_unary (ZERO_EXTEND, int_mode, SUBREG_REG (op),
                                     op0_mode);
        }

#if defined(POINTERS_EXTEND_UNSIGNED)
      /* As we do not know which address space the pointer is referring to,
         we can do this only if the target does not support different pointer
         or address modes depending on the address space.  */
      if (target_default_pointer_address_modes_p ()
          && POINTERS_EXTEND_UNSIGNED > 0
          && mode == Pmode && GET_MODE (op) == ptr_mode
          && (CONSTANT_P (op)
              || (GET_CODE (op) == SUBREG
                  && REG_P (SUBREG_REG (op))
                  && REG_POINTER (SUBREG_REG (op))
                  && GET_MODE (SUBREG_REG (op)) == Pmode))
          && !targetm.have_ptr_extend ())
        {
          temp
            = convert_memory_address_addr_space_1 (Pmode, op,
                                                   ADDR_SPACE_GENERIC, false,
                                                   true);
          if (temp)
            return temp;
        }
#endif
      break;

    default:
      break;
    }

  if (VECTOR_MODE_P (mode) && vec_duplicate_p (op, &elt))
    {
      /* Try applying the operator to ELT and see if that simplifies.
         We can duplicate the result if so.

         The reason we don't use simplify_gen_unary is that it isn't
         necessarily a win to convert things like:

           (neg:V (vec_duplicate:V (reg:S R)))

         to:

           (vec_duplicate:V (neg:S (reg:S R)))

         The first might be done entirely in vector registers while the
         second might need a move between register files.  */
      temp = simplify_unary_operation (code, GET_MODE_INNER (mode),
                                       elt, GET_MODE_INNER (GET_MODE (op)));
      if (temp)
        return gen_vec_duplicate (mode, temp);
    }

  return 0;
}
1728 /* Try to compute the value of a unary operation CODE whose output mode is to
1729 be MODE with input operand OP whose mode was originally OP_MODE.
1730 Return zero if the value cannot be computed. */
1732 simplify_const_unary_operation (enum rtx_code code, machine_mode mode,
1733 rtx op, machine_mode op_mode)
1735 scalar_int_mode result_mode;
1737 if (code == VEC_DUPLICATE)
1739 gcc_assert (VECTOR_MODE_P (mode));
1740 if (GET_MODE (op) != VOIDmode)
1742 if (!VECTOR_MODE_P (GET_MODE (op)))
1743 gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
1744 else
1745 gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
1746 (GET_MODE (op)));
1748 if (CONST_SCALAR_INT_P (op) || CONST_DOUBLE_AS_FLOAT_P (op))
1749 return gen_const_vec_duplicate (mode, op);
1750 if (GET_CODE (op) == CONST_VECTOR)
1752 unsigned int n_elts = GET_MODE_NUNITS (mode);
1753 unsigned int in_n_elts = CONST_VECTOR_NUNITS (op);
1754 gcc_assert (in_n_elts < n_elts);
1755 gcc_assert ((n_elts % in_n_elts) == 0);
1756 rtvec v = rtvec_alloc (n_elts);
1757 for (unsigned i = 0; i < n_elts; i++)
1758 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
1759 return gen_rtx_CONST_VECTOR (mode, v);
1763 if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
1765 int elt_size = GET_MODE_UNIT_SIZE (mode);
1766 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1767 machine_mode opmode = GET_MODE (op);
1768 int op_elt_size = GET_MODE_UNIT_SIZE (opmode);
1769 unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
1770 rtvec v = rtvec_alloc (n_elts);
1771 unsigned int i;
1773 gcc_assert (op_n_elts == n_elts);
1774 for (i = 0; i < n_elts; i++)
1776 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
1777 CONST_VECTOR_ELT (op, i),
1778 GET_MODE_INNER (opmode));
1779 if (!x)
1780 return 0;
1781 RTVEC_ELT (v, i) = x;
1783 return gen_rtx_CONST_VECTOR (mode, v);
1786 /* The order of these tests is critical so that, for example, we don't
1787 check the wrong mode (input vs. output) for a conversion operation,
1788 such as FIX. At some point, this should be simplified. */
1790 if (code == FLOAT && CONST_SCALAR_INT_P (op))
1792 REAL_VALUE_TYPE d;
1794 if (op_mode == VOIDmode)
1796 /* A CONST_INT has VOIDmode as its mode. We assume that all
1797 the bits of the constant are significant, though this is
1798 a dangerous assumption, as CONST_INTs are often
1799 created and used with garbage in the bits outside of the
1800 precision of the implied mode of the const_int. */
1801 op_mode = MAX_MODE_INT;
1804 real_from_integer (&d, mode, rtx_mode_t (op, op_mode), SIGNED);
1806 /* Avoid the folding if flag_signaling_nans is on and
1807 operand is a signaling NaN. */
1808 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1809 return 0;
1811 d = real_value_truncate (mode, d);
1812 return const_double_from_real_value (d, mode);
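/* For example, (float:SF (const_int 3)) folds to the SFmode
   CONST_DOUBLE 3.0, with the result truncated to SFmode
   precision.  */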
1814 else if (code == UNSIGNED_FLOAT && CONST_SCALAR_INT_P (op))
1816 REAL_VALUE_TYPE d;
1818 if (op_mode == VOIDmode)
1820 /* A CONST_INT has VOIDmode as its mode. We assume that all
1821 the bits of the constant are significant, though this is
1822 a dangerous assumption, as CONST_INTs are often
1823 created and used with garbage in the bits outside of the
1824 precision of the implied mode of the const_int. */
1825 op_mode = MAX_MODE_INT;
1828 real_from_integer (&d, mode, rtx_mode_t (op, op_mode), UNSIGNED);
1830 /* Avoid the folding if flag_signaling_nans is on and
1831 operand is a signaling NaN. */
1832 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1833 return 0;
1835 d = real_value_truncate (mode, d);
1836 return const_double_from_real_value (d, mode);
1839 if (CONST_SCALAR_INT_P (op) && is_a <scalar_int_mode> (mode, &result_mode))
1841 unsigned int width = GET_MODE_PRECISION (result_mode);
1842 wide_int result;
1843 scalar_int_mode imode = (op_mode == VOIDmode
1844 ? result_mode
1845 : as_a <scalar_int_mode> (op_mode));
1846 rtx_mode_t op0 = rtx_mode_t (op, imode);
1847 int int_value;
1849 #if TARGET_SUPPORTS_WIDE_INT == 0
1850 /* This assert keeps the simplification from producing a result
1851 that cannot be represented in a CONST_DOUBLE, but a lot of
1852 upstream callers expect that this function never fails to
1853 simplify something, so if you added this to the test
1854 above, the code would die later anyway. If this assert
1855 triggers, you just need to make the port support wide int. */
1856 gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT);
1857 #endif
1859 switch (code)
1861 case NOT:
1862 result = wi::bit_not (op0);
1863 break;
1865 case NEG:
1866 result = wi::neg (op0);
1867 break;
1869 case ABS:
1870 result = wi::abs (op0);
1871 break;
1873 case FFS:
1874 result = wi::shwi (wi::ffs (op0), result_mode);
1875 break;
1877 case CLZ:
1878 if (wi::ne_p (op0, 0))
1879 int_value = wi::clz (op0);
1880 else if (! CLZ_DEFINED_VALUE_AT_ZERO (imode, int_value))
1881 int_value = GET_MODE_PRECISION (imode);
1882 result = wi::shwi (int_value, result_mode);
1883 break;
1885 case CLRSB:
1886 result = wi::shwi (wi::clrsb (op0), result_mode);
1887 break;
1889 case CTZ:
1890 if (wi::ne_p (op0, 0))
1891 int_value = wi::ctz (op0);
1892 else if (! CTZ_DEFINED_VALUE_AT_ZERO (imode, int_value))
1893 int_value = GET_MODE_PRECISION (imode);
1894 result = wi::shwi (int_value, result_mode);
1895 break;
1897 case POPCOUNT:
1898 result = wi::shwi (wi::popcount (op0), result_mode);
1899 break;
1901 case PARITY:
1902 result = wi::shwi (wi::parity (op0), result_mode);
1903 break;
1905 case BSWAP:
1906 result = wide_int (op0).bswap ();
1907 break;
1909 case TRUNCATE:
1910 case ZERO_EXTEND:
1911 result = wide_int::from (op0, width, UNSIGNED);
1912 break;
1914 case SIGN_EXTEND:
1915 result = wide_int::from (op0, width, SIGNED);
1916 break;
1918 case SQRT:
1919 default:
1920 return 0;
1923 return immed_wide_int_const (result, result_mode);
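/* The wide-int cases above fold, for example,
     (popcount:SI (const_int 7)) to (const_int 3)
   and, with SImode OP_MODE,
     (zero_extend:DI (const_int -1)) to (const_int 0xffffffff).  */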
1926 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1927 && SCALAR_FLOAT_MODE_P (mode)
1928 && SCALAR_FLOAT_MODE_P (GET_MODE (op)))
1930 REAL_VALUE_TYPE d = *CONST_DOUBLE_REAL_VALUE (op);
1931 switch (code)
1933 case SQRT:
1934 return 0;
1935 case ABS:
1936 d = real_value_abs (&d);
1937 break;
1938 case NEG:
1939 d = real_value_negate (&d);
1940 break;
1941 case FLOAT_TRUNCATE:
1942 /* Don't perform the operation if flag_signaling_nans is on
1943 and the operand is a signaling NaN. */
1944 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1945 return NULL_RTX;
1946 d = real_value_truncate (mode, d);
1947 break;
1948 case FLOAT_EXTEND:
1949 /* Don't perform the operation if flag_signaling_nans is on
1950 and the operand is a signaling NaN. */
1951 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1952 return NULL_RTX;
1953 /* All this does is change the mode, unless changing
1954 mode class. */
1955 if (GET_MODE_CLASS (mode) != GET_MODE_CLASS (GET_MODE (op)))
1956 real_convert (&d, mode, &d);
1957 break;
1958 case FIX:
1959 /* Don't perform the operation if flag_signaling_nans is on
1960 and the operand is a signaling NaN. */
1961 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1962 return NULL_RTX;
1963 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
1964 break;
1965 case NOT:
1967 long tmp[4];
1968 int i;
1970 real_to_target (tmp, &d, GET_MODE (op));
1971 for (i = 0; i < 4; i++)
1972 tmp[i] = ~tmp[i];
1973 real_from_target (&d, tmp, mode);
1974 break;
1976 default:
1977 gcc_unreachable ();
1979 return const_double_from_real_value (d, mode);
1981 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1982 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1983 && is_int_mode (mode, &result_mode))
1985 unsigned int width = GET_MODE_PRECISION (result_mode);
1986 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1987 operators are intentionally left unspecified (to ease implementation
1988 by target backends), for consistency, this routine implements the
1989 same semantics for constant folding as used by the middle-end. */
1991 /* This was formerly used only for non-IEEE float.
1992 eggert@twinsun.com says it is safe for IEEE also. */
1993 REAL_VALUE_TYPE t;
1994 const REAL_VALUE_TYPE *x = CONST_DOUBLE_REAL_VALUE (op);
1995 wide_int wmax, wmin;
1996 /* FAIL is part of the ABI of real_to_integer, but we check
1997 the bounds before making this call. */
1998 bool fail;
2000 switch (code)
2002 case FIX:
2003 if (REAL_VALUE_ISNAN (*x))
2004 return const0_rtx;
2006 /* Test against the signed upper bound. */
2007 wmax = wi::max_value (width, SIGNED);
2008 real_from_integer (&t, VOIDmode, wmax, SIGNED);
2009 if (real_less (&t, x))
2010 return immed_wide_int_const (wmax, mode);
2012 /* Test against the signed lower bound. */
2013 wmin = wi::min_value (width, SIGNED);
2014 real_from_integer (&t, VOIDmode, wmin, SIGNED);
2015 if (real_less (x, &t))
2016 return immed_wide_int_const (wmin, mode);
2018 return immed_wide_int_const (real_to_integer (x, &fail, width),
2019 mode);
2021 case UNSIGNED_FIX:
2022 if (REAL_VALUE_ISNAN (*x) || REAL_VALUE_NEGATIVE (*x))
2023 return const0_rtx;
2025 /* Test against the unsigned upper bound. */
2026 wmax = wi::max_value (width, UNSIGNED);
2027 real_from_integer (&t, VOIDmode, wmax, UNSIGNED);
2028 if (real_less (&t, x))
2029 return immed_wide_int_const (wmax, mode);
2031 return immed_wide_int_const (real_to_integer (x, &fail, width),
2032 mode);
2034 default:
2035 gcc_unreachable ();
2039 return NULL_RTX;
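/* Note that the FIX and UNSIGNED_FIX cases above saturate
   out-of-range inputs: in SImode, (fix:SI (const_double 3.0e10))
   folds to (const_int 0x7fffffff) and a NaN operand folds to
   (const_int 0), matching the middle-end semantics.  */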
2042 /* Subroutine of simplify_binary_operation to simplify a binary operation
2043 CODE that can commute with byte swapping, with result mode MODE and
2044 operating on OP0 and OP1. CODE is currently one of AND, IOR or XOR.
2045 Return zero if no simplification or canonicalization is possible. */
2047 static rtx
2048 simplify_byte_swapping_operation (enum rtx_code code, machine_mode mode,
2049 rtx op0, rtx op1)
2051 rtx tem;
2053 /* (op (bswap x) C1) -> (bswap (op x C2)), where C2 is C1 with its bytes swapped. */
2054 if (GET_CODE (op0) == BSWAP && CONST_SCALAR_INT_P (op1))
2056 tem = simplify_gen_binary (code, mode, XEXP (op0, 0),
2057 simplify_gen_unary (BSWAP, mode, op1, mode));
2058 return simplify_gen_unary (BSWAP, mode, tem, mode);
2061 /* (op (bswap x) (bswap y)) -> (bswap (op x y)). */
2062 if (GET_CODE (op0) == BSWAP && GET_CODE (op1) == BSWAP)
2064 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), XEXP (op1, 0));
2065 return simplify_gen_unary (BSWAP, mode, tem, mode);
2068 return NULL_RTX;
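/* For example, in SImode
     (and:SI (bswap:SI x) (const_int 0xff))
   becomes
     (bswap:SI (and:SI x (const_int 0xff000000)))
   since byte-swapping the constant mask preserves equivalence.  */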
2071 /* Subroutine of simplify_binary_operation to simplify a commutative,
2072 associative binary operation CODE with result mode MODE, operating
2073 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
2074 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
2075 canonicalization is possible. */
2077 static rtx
2078 simplify_associative_operation (enum rtx_code code, machine_mode mode,
2079 rtx op0, rtx op1)
2081 rtx tem;
2083 /* Linearize the operator to the left. */
2084 if (GET_CODE (op1) == code)
2086 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)". */
2087 if (GET_CODE (op0) == code)
2089 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
2090 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
2093 /* "a op (b op c)" becomes "(b op c) op a". */
2094 if (! swap_commutative_operands_p (op1, op0))
2095 return simplify_gen_binary (code, mode, op1, op0);
2097 std::swap (op0, op1);
2100 if (GET_CODE (op0) == code)
2102 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
2103 if (swap_commutative_operands_p (XEXP (op0, 1), op1))
2105 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
2106 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
2109 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
2110 tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
2111 if (tem != 0)
2112 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
2114 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
2115 tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
2116 if (tem != 0)
2117 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
2120 return 0;
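/* For example, (plus (plus x (const_int 4)) (const_int 3))
   simplifies to (plus x (const_int 7)) via the "(a op b) op c"
   rule above, while (plus (plus x (const_int 4)) y) is merely
   canonicalized to (plus (plus x y) (const_int 4)).  */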
2124 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
2125 and OP1. Return 0 if no simplification is possible.
2127 Don't use this for relational operations such as EQ or LT.
2128 Use simplify_relational_operation instead. */
2129 rtx
2130 simplify_binary_operation (enum rtx_code code, machine_mode mode,
2131 rtx op0, rtx op1)
2133 rtx trueop0, trueop1;
2134 rtx tem;
2136 /* Relational operations don't work here. We must know the mode
2137 of the operands in order to do the comparison correctly.
2138 Assuming a full word can give incorrect results.
2139 Consider comparing 128 with -128 in QImode. */
2140 gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
2141 gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
2143 /* Make sure the constant is second. */
2144 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
2145 && swap_commutative_operands_p (op0, op1))
2146 std::swap (op0, op1);
2148 trueop0 = avoid_constant_pool_reference (op0);
2149 trueop1 = avoid_constant_pool_reference (op1);
2151 tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
2152 if (tem)
2153 return tem;
2154 tem = simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
2156 if (tem)
2157 return tem;
2159 /* If the above steps did not result in a simplification and op0 or op1
2160 were constant pool references, use the referenced constants directly. */
2161 if (trueop0 != op0 || trueop1 != op1)
2162 return simplify_gen_binary (code, mode, trueop0, trueop1);
2164 return NULL_RTX;
2167 /* Subroutine of simplify_binary_operation_1 that looks for cases in
2168 which OP0 and OP1 are both vector series or vector duplicates
2169 (which are really just series with a step of 0). If so, try to
2170 form a new series by applying CODE to the bases and to the steps.
2171 Return null if no simplification is possible.
2173 MODE is the mode of the operation and is known to be a vector
2174 integer mode. */
2176 static rtx
2177 simplify_binary_operation_series (rtx_code code, machine_mode mode,
2178 rtx op0, rtx op1)
2180 rtx base0, step0;
2181 if (vec_duplicate_p (op0, &base0))
2182 step0 = const0_rtx;
2183 else if (!vec_series_p (op0, &base0, &step0))
2184 return NULL_RTX;
2186 rtx base1, step1;
2187 if (vec_duplicate_p (op1, &base1))
2188 step1 = const0_rtx;
2189 else if (!vec_series_p (op1, &base1, &step1))
2190 return NULL_RTX;
2192 /* Only create a new series if we can simplify both parts. In other
2193 cases this isn't really a simplification, and it's not necessarily
2194 a win to replace a vector operation with a scalar operation. */
2195 scalar_mode inner_mode = GET_MODE_INNER (mode);
2196 rtx new_base = simplify_binary_operation (code, inner_mode, base0, base1);
2197 if (!new_base)
2198 return NULL_RTX;
2200 rtx new_step = simplify_binary_operation (code, inner_mode, step0, step1);
2201 if (!new_step)
2202 return NULL_RTX;
2204 return gen_vec_series (mode, new_base, new_step);
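/* For example, adding a duplicated scalar to a linear series
   offsets the base of the series:
     (plus (vec_series:V4SI (const_int 0) (const_int 1))
           (vec_duplicate:V4SI (const_int 5)))
   becomes (vec_series:V4SI (const_int 5) (const_int 1)).  */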
2207 /* Subroutine of simplify_binary_operation. Simplify a binary operation
2208 CODE with result mode MODE, operating on OP0 and OP1. If OP0 and/or
2209 OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
2210 actual constants. */
2212 static rtx
2213 simplify_binary_operation_1 (enum rtx_code code, machine_mode mode,
2214 rtx op0, rtx op1, rtx trueop0, rtx trueop1)
2216 rtx tem, reversed, opleft, opright, elt0, elt1;
2217 HOST_WIDE_INT val;
2218 scalar_int_mode int_mode, inner_mode;
2220 /* Even if we can't compute a constant result,
2221 there are some cases worth simplifying. */
2223 switch (code)
2225 case PLUS:
2226 /* Maybe simplify x + 0 to x. The two expressions are equivalent
2227 when x is NaN, infinite, or finite and nonzero. They aren't
2228 when x is -0 and the rounding mode is not towards -infinity,
2229 since (-0) + 0 is then 0. */
2230 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
2231 return op0;
2233 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
2234 transformations are safe even for IEEE. */
2235 if (GET_CODE (op0) == NEG)
2236 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
2237 else if (GET_CODE (op1) == NEG)
2238 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
2240 /* (~a) + 1 -> -a */
2241 if (INTEGRAL_MODE_P (mode)
2242 && GET_CODE (op0) == NOT
2243 && trueop1 == const1_rtx)
2244 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
2246 /* Handle both-operands-constant cases. We can only add
2247 CONST_INTs to constants since the sum of relocatable symbols
2248 can't be handled by most assemblers. Don't add CONST_INT
2249 to CONST_INT since overflow won't be computed properly if wider
2250 than HOST_BITS_PER_WIDE_INT. */
2252 if ((GET_CODE (op0) == CONST
2253 || GET_CODE (op0) == SYMBOL_REF
2254 || GET_CODE (op0) == LABEL_REF)
2255 && CONST_INT_P (op1))
2256 return plus_constant (mode, op0, INTVAL (op1));
2257 else if ((GET_CODE (op1) == CONST
2258 || GET_CODE (op1) == SYMBOL_REF
2259 || GET_CODE (op1) == LABEL_REF)
2260 && CONST_INT_P (op0))
2261 return plus_constant (mode, op1, INTVAL (op0));
2263 /* See if this is something like X * C - X or vice versa or
2264 if the multiplication is written as a shift. If so, we can
2265 distribute and make a new multiply, shift, or maybe just
2266 have X (if C is 2 in the example above). But don't make
2267 something more expensive than we had before. */
2269 if (is_a <scalar_int_mode> (mode, &int_mode))
2271 rtx lhs = op0, rhs = op1;
2273 wide_int coeff0 = wi::one (GET_MODE_PRECISION (int_mode));
2274 wide_int coeff1 = wi::one (GET_MODE_PRECISION (int_mode));
2276 if (GET_CODE (lhs) == NEG)
2278 coeff0 = wi::minus_one (GET_MODE_PRECISION (int_mode));
2279 lhs = XEXP (lhs, 0);
2281 else if (GET_CODE (lhs) == MULT
2282 && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
2284 coeff0 = rtx_mode_t (XEXP (lhs, 1), int_mode);
2285 lhs = XEXP (lhs, 0);
2287 else if (GET_CODE (lhs) == ASHIFT
2288 && CONST_INT_P (XEXP (lhs, 1))
2289 && INTVAL (XEXP (lhs, 1)) >= 0
2290 && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (int_mode))
2292 coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
2293 GET_MODE_PRECISION (int_mode));
2294 lhs = XEXP (lhs, 0);
2297 if (GET_CODE (rhs) == NEG)
2299 coeff1 = wi::minus_one (GET_MODE_PRECISION (int_mode));
2300 rhs = XEXP (rhs, 0);
2302 else if (GET_CODE (rhs) == MULT
2303 && CONST_INT_P (XEXP (rhs, 1)))
2305 coeff1 = rtx_mode_t (XEXP (rhs, 1), int_mode);
2306 rhs = XEXP (rhs, 0);
2308 else if (GET_CODE (rhs) == ASHIFT
2309 && CONST_INT_P (XEXP (rhs, 1))
2310 && INTVAL (XEXP (rhs, 1)) >= 0
2311 && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (int_mode))
2313 coeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
2314 GET_MODE_PRECISION (int_mode));
2315 rhs = XEXP (rhs, 0);
2318 if (rtx_equal_p (lhs, rhs))
2320 rtx orig = gen_rtx_PLUS (int_mode, op0, op1);
2321 rtx coeff;
2322 bool speed = optimize_function_for_speed_p (cfun);
2324 coeff = immed_wide_int_const (coeff0 + coeff1, int_mode);
2326 tem = simplify_gen_binary (MULT, int_mode, lhs, coeff);
2327 return (set_src_cost (tem, int_mode, speed)
2328 <= set_src_cost (orig, int_mode, speed) ? tem : 0);
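/* For example, (plus (mult x (const_int 3)) x) combines the
   coefficients 3 and 1 into (mult x (const_int 4)), and
   (plus (ashift x (const_int 2)) x) gives
   (mult x (const_int 5)); either result is kept only if it is
   no more expensive than the original expression.  */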
2332 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
2333 if (CONST_SCALAR_INT_P (op1)
2334 && GET_CODE (op0) == XOR
2335 && CONST_SCALAR_INT_P (XEXP (op0, 1))
2336 && mode_signbit_p (mode, op1))
2337 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2338 simplify_gen_binary (XOR, mode, op1,
2339 XEXP (op0, 1)));
2341 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
2342 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2343 && GET_CODE (op0) == MULT
2344 && GET_CODE (XEXP (op0, 0)) == NEG)
2346 rtx in1, in2;
2348 in1 = XEXP (XEXP (op0, 0), 0);
2349 in2 = XEXP (op0, 1);
2350 return simplify_gen_binary (MINUS, mode, op1,
2351 simplify_gen_binary (MULT, mode,
2352 in1, in2));
2355 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
2356 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
2357 is 1. */
2358 if (COMPARISON_P (op0)
2359 && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
2360 || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
2361 && (reversed = reversed_comparison (op0, mode)))
2362 return
2363 simplify_gen_unary (NEG, mode, reversed, mode);
2365 /* If one of the operands is a PLUS or a MINUS, see if we can
2366 simplify this by the associative law.
2367 Don't use the associative law for floating point.
2368 The inaccuracy makes it nonassociative,
2369 and subtle programs can break if operations are associated. */
2371 if (INTEGRAL_MODE_P (mode)
2372 && (plus_minus_operand_p (op0)
2373 || plus_minus_operand_p (op1))
2374 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2375 return tem;
2377 /* Reassociate floating point addition only when the user
2378 specifies associative math operations. */
2379 if (FLOAT_MODE_P (mode)
2380 && flag_associative_math)
2382 tem = simplify_associative_operation (code, mode, op0, op1);
2383 if (tem)
2384 return tem;
2387 /* Handle vector series. */
2388 if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
2390 tem = simplify_binary_operation_series (code, mode, op0, op1);
2391 if (tem)
2392 return tem;
2394 break;
2396 case COMPARE:
2397 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
2398 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
2399 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
2400 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
2402 rtx xop00 = XEXP (op0, 0);
2403 rtx xop10 = XEXP (op1, 0);
2405 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
2406 return xop00;
2408 if (REG_P (xop00) && REG_P (xop10)
2409 && REGNO (xop00) == REGNO (xop10)
2410 && GET_MODE (xop00) == mode
2411 && GET_MODE (xop10) == mode
2412 && GET_MODE_CLASS (mode) == MODE_CC)
2413 return xop00;
2415 break;
2417 case MINUS:
2418 /* We can't assume x-x is 0 even with non-IEEE floating point,
2419 but since it is zero except in very strange circumstances, we
2420 will treat it as zero with -ffinite-math-only. */
2421 if (rtx_equal_p (trueop0, trueop1)
2422 && ! side_effects_p (op0)
2423 && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
2424 return CONST0_RTX (mode);
2426 /* Change subtraction from zero into negation. (0 - x) is the
2427 same as -x when x is NaN, infinite, or finite and nonzero.
2428 But if the mode has signed zeros, and does not round towards
2429 -infinity, then 0 - 0 is 0, not -0. */
2430 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
2431 return simplify_gen_unary (NEG, mode, op1, mode);
2433 /* (-1 - a) is ~a, unless the expression contains symbolic
2434 constants, in which case not retaining additions and
2435 subtractions could cause invalid assembly to be produced. */
2436 if (trueop0 == constm1_rtx
2437 && !contains_symbolic_reference_p (op1))
2438 return simplify_gen_unary (NOT, mode, op1, mode);
2440 /* Subtracting 0 has no effect unless the mode has signed zeros
2441 and supports rounding towards -infinity. In such a case,
2442 0 - 0 is -0. */
2443 if (!(HONOR_SIGNED_ZEROS (mode)
2444 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
2445 && trueop1 == CONST0_RTX (mode))
2446 return op0;
2448 /* See if this is something like X * C - X or vice versa or
2449 if the multiplication is written as a shift. If so, we can
2450 distribute and make a new multiply, shift, or maybe just
2451 have X (if C is 2 in the example above). But don't make
2452 something more expensive than we had before. */
2454 if (is_a <scalar_int_mode> (mode, &int_mode))
2456 rtx lhs = op0, rhs = op1;
2458 wide_int coeff0 = wi::one (GET_MODE_PRECISION (int_mode));
2459 wide_int negcoeff1 = wi::minus_one (GET_MODE_PRECISION (int_mode));
2461 if (GET_CODE (lhs) == NEG)
2463 coeff0 = wi::minus_one (GET_MODE_PRECISION (int_mode));
2464 lhs = XEXP (lhs, 0);
2466 else if (GET_CODE (lhs) == MULT
2467 && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
2469 coeff0 = rtx_mode_t (XEXP (lhs, 1), int_mode);
2470 lhs = XEXP (lhs, 0);
2472 else if (GET_CODE (lhs) == ASHIFT
2473 && CONST_INT_P (XEXP (lhs, 1))
2474 && INTVAL (XEXP (lhs, 1)) >= 0
2475 && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (int_mode))
2477 coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
2478 GET_MODE_PRECISION (int_mode));
2479 lhs = XEXP (lhs, 0);
2482 if (GET_CODE (rhs) == NEG)
2484 negcoeff1 = wi::one (GET_MODE_PRECISION (int_mode));
2485 rhs = XEXP (rhs, 0);
2487 else if (GET_CODE (rhs) == MULT
2488 && CONST_INT_P (XEXP (rhs, 1)))
2490 negcoeff1 = wi::neg (rtx_mode_t (XEXP (rhs, 1), int_mode));
2491 rhs = XEXP (rhs, 0);
2493 else if (GET_CODE (rhs) == ASHIFT
2494 && CONST_INT_P (XEXP (rhs, 1))
2495 && INTVAL (XEXP (rhs, 1)) >= 0
2496 && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (int_mode))
2498 negcoeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
2499 GET_MODE_PRECISION (int_mode));
2500 negcoeff1 = -negcoeff1;
2501 rhs = XEXP (rhs, 0);
2504 if (rtx_equal_p (lhs, rhs))
2506 rtx orig = gen_rtx_MINUS (int_mode, op0, op1);
2507 rtx coeff;
2508 bool speed = optimize_function_for_speed_p (cfun);
2510 coeff = immed_wide_int_const (coeff0 + negcoeff1, int_mode);
2512 tem = simplify_gen_binary (MULT, int_mode, lhs, coeff);
2513 return (set_src_cost (tem, int_mode, speed)
2514 <= set_src_cost (orig, int_mode, speed) ? tem : 0);
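/* Analogously to the PLUS case, (minus (mult x (const_int 3)) x)
   combines the coefficients 3 and -1 into (mult x (const_int 2)),
   again subject to the same cost comparison.  */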
2518 /* (a - (-b)) -> (a + b). True even for IEEE. */
2519 if (GET_CODE (op1) == NEG)
2520 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
2522 /* (-x - c) may be simplified as (-c - x). */
2523 if (GET_CODE (op0) == NEG
2524 && (CONST_SCALAR_INT_P (op1) || CONST_DOUBLE_AS_FLOAT_P (op1)))
2526 tem = simplify_unary_operation (NEG, mode, op1, mode);
2527 if (tem)
2528 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
2531 /* Don't let a relocatable value get a negative coeff. */
2532 if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode)
2533 return simplify_gen_binary (PLUS, mode,
2534 op0,
2535 neg_const_int (mode, op1));
2537 /* (x - (x & y)) -> (x & ~y) */
2538 if (INTEGRAL_MODE_P (mode) && GET_CODE (op1) == AND)
2540 if (rtx_equal_p (op0, XEXP (op1, 0)))
2542 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
2543 GET_MODE (XEXP (op1, 1)));
2544 return simplify_gen_binary (AND, mode, op0, tem);
2546 if (rtx_equal_p (op0, XEXP (op1, 1)))
2548 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
2549 GET_MODE (XEXP (op1, 0)));
2550 return simplify_gen_binary (AND, mode, op0, tem);
2554 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
2555 by reversing the comparison code if valid. */
2556 if (STORE_FLAG_VALUE == 1
2557 && trueop0 == const1_rtx
2558 && COMPARISON_P (op1)
2559 && (reversed = reversed_comparison (op1, mode)))
2560 return reversed;
2562 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
2563 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2564 && GET_CODE (op1) == MULT
2565 && GET_CODE (XEXP (op1, 0)) == NEG)
2567 rtx in1, in2;
2569 in1 = XEXP (XEXP (op1, 0), 0);
2570 in2 = XEXP (op1, 1);
2571 return simplify_gen_binary (PLUS, mode,
2572 simplify_gen_binary (MULT, mode,
2573 in1, in2),
2574 op0);
2577 /* Canonicalize (minus (neg A) (mult B C)) to
2578 (minus (mult (neg B) C) A). */
2579 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2580 && GET_CODE (op1) == MULT
2581 && GET_CODE (op0) == NEG)
2583 rtx in1, in2;
2585 in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
2586 in2 = XEXP (op1, 1);
2587 return simplify_gen_binary (MINUS, mode,
2588 simplify_gen_binary (MULT, mode,
2589 in1, in2),
2590 XEXP (op0, 0));
2593 /* If one of the operands is a PLUS or a MINUS, see if we can
2594 simplify this by the associative law. This will, for example,
2595 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
2596 Don't use the associative law for floating point.
2597 The inaccuracy makes it nonassociative,
2598 and subtle programs can break if operations are associated. */
2600 if (INTEGRAL_MODE_P (mode)
2601 && (plus_minus_operand_p (op0)
2602 || plus_minus_operand_p (op1))
2603 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2604 return tem;
2606 /* Handle vector series. */
2607 if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
2609 tem = simplify_binary_operation_series (code, mode, op0, op1);
2610 if (tem)
2611 return tem;
2613 break;
2615 case MULT:
2616 if (trueop1 == constm1_rtx)
2617 return simplify_gen_unary (NEG, mode, op0, mode);
2619 if (GET_CODE (op0) == NEG)
2621 rtx temp = simplify_unary_operation (NEG, mode, op1, mode);
2622 /* If op1 is a MULT as well and simplify_unary_operation
2623 just moved the NEG to the second operand, simplify_gen_binary
2624 below could, through simplify_associative_operation, move
2625 the NEG around again and recurse endlessly. */
2626 if (temp
2627 && GET_CODE (op1) == MULT
2628 && GET_CODE (temp) == MULT
2629 && XEXP (op1, 0) == XEXP (temp, 0)
2630 && GET_CODE (XEXP (temp, 1)) == NEG
2631 && XEXP (op1, 1) == XEXP (XEXP (temp, 1), 0))
2632 temp = NULL_RTX;
2633 if (temp)
2634 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), temp);
2636 if (GET_CODE (op1) == NEG)
2638 rtx temp = simplify_unary_operation (NEG, mode, op0, mode);
2639 /* If op0 is a MULT as well and simplify_unary_operation
2640 just moved the NEG to the second operand, simplify_gen_binary
2641 below could, through simplify_associative_operation, move
2642 the NEG around again and recurse endlessly. */
2643 if (temp
2644 && GET_CODE (op0) == MULT
2645 && GET_CODE (temp) == MULT
2646 && XEXP (op0, 0) == XEXP (temp, 0)
2647 && GET_CODE (XEXP (temp, 1)) == NEG
2648 && XEXP (op0, 1) == XEXP (XEXP (temp, 1), 0))
2649 temp = NULL_RTX;
2650 if (temp)
2651 return simplify_gen_binary (MULT, mode, temp, XEXP (op1, 0));
2654 /* Maybe simplify x * 0 to 0. The reduction is not valid if
2655 x is NaN, since x * 0 is then also NaN. Nor is it valid
2656 when the mode has signed zeros, since multiplying a negative
2657 number by 0 will give -0, not 0. */
2658 if (!HONOR_NANS (mode)
2659 && !HONOR_SIGNED_ZEROS (mode)
2660 && trueop1 == CONST0_RTX (mode)
2661 && ! side_effects_p (op0))
2662 return op1;
2664 /* In IEEE floating point, x*1 is not equivalent to x for
2665 signalling NaNs. */
2666 if (!HONOR_SNANS (mode)
2667 && trueop1 == CONST1_RTX (mode))
2668 return op0;
2670 /* Convert multiply by constant power of two into shift. */
2671 if (CONST_SCALAR_INT_P (trueop1))
2673 val = wi::exact_log2 (rtx_mode_t (trueop1, mode));
2674 if (val >= 0)
2675 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
2678 /* x*2 is x+x and x*(-1) is -x */
2679 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
2680 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
2681 && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
2682 && GET_MODE (op0) == mode)
2684 const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);
2686 if (real_equal (d1, &dconst2))
2687 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
2689 if (!HONOR_SNANS (mode)
2690 && real_equal (d1, &dconstm1))
2691 return simplify_gen_unary (NEG, mode, op0, mode);
2694 /* Optimize -x * -x as x * x. */
2695 if (FLOAT_MODE_P (mode)
2696 && GET_CODE (op0) == NEG
2697 && GET_CODE (op1) == NEG
2698 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2699 && !side_effects_p (XEXP (op0, 0)))
2700 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2702 /* Likewise, optimize abs(x) * abs(x) as x * x. */
2703 if (SCALAR_FLOAT_MODE_P (mode)
2704 && GET_CODE (op0) == ABS
2705 && GET_CODE (op1) == ABS
2706 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2707 && !side_effects_p (XEXP (op0, 0)))
2708 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2710 /* Reassociate multiplication, but for floating point MULTs
2711 only when the user specifies unsafe math optimizations. */
2712 if (! FLOAT_MODE_P (mode)
2713 || flag_unsafe_math_optimizations)
2715 tem = simplify_associative_operation (code, mode, op0, op1);
2716 if (tem)
2717 return tem;
2719 break;
2721 case IOR:
2722 if (trueop1 == CONST0_RTX (mode))
2723 return op0;
2724 if (INTEGRAL_MODE_P (mode)
2725 && trueop1 == CONSTM1_RTX (mode)
2726 && !side_effects_p (op0))
2727 return op1;
2728 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2729 return op0;
2730 /* A | (~A) -> -1 */
2731 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2732 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2733 && ! side_effects_p (op0)
2734 && SCALAR_INT_MODE_P (mode))
2735 return constm1_rtx;
2737 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
2738 if (CONST_INT_P (op1)
2739 && HWI_COMPUTABLE_MODE_P (mode)
2740 && (nonzero_bits (op0, mode) & ~UINTVAL (op1)) == 0
2741 && !side_effects_p (op0))
2742 return op1;
2744 /* Canonicalize (X & C1) | C2. */
2745 if (GET_CODE (op0) == AND
2746 && CONST_INT_P (trueop1)
2747 && CONST_INT_P (XEXP (op0, 1)))
2749 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
2750 HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
2751 HOST_WIDE_INT c2 = INTVAL (trueop1);
2753 /* If (C1&C2) == C1, then (X&C1)|C2 becomes C2. */
2754 if ((c1 & c2) == c1
2755 && !side_effects_p (XEXP (op0, 0)))
2756 return trueop1;
2758 /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
2759 if (((c1|c2) & mask) == mask)
2760 return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);
2763 /* Convert (A & B) | A to A. */
2764 if (GET_CODE (op0) == AND
2765 && (rtx_equal_p (XEXP (op0, 0), op1)
2766 || rtx_equal_p (XEXP (op0, 1), op1))
2767 && ! side_effects_p (XEXP (op0, 0))
2768 && ! side_effects_p (XEXP (op0, 1)))
2769 return op1;
2771 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2772 mode size to (rotate A CX). */
2774 if (GET_CODE (op1) == ASHIFT
2775 || GET_CODE (op1) == SUBREG)
2777 opleft = op1;
2778 opright = op0;
2780 else
2782 opright = op1;
2783 opleft = op0;
2786 if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
2787 && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
2788 && CONST_INT_P (XEXP (opleft, 1))
2789 && CONST_INT_P (XEXP (opright, 1))
2790 && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
2791 == GET_MODE_UNIT_PRECISION (mode)))
2792 return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
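/* For example, in SImode, where the shift counts 24 and 8 sum
   to the precision of 32,
     (ior (ashift:SI x (const_int 24)) (lshiftrt:SI x (const_int 8)))
   becomes (rotate:SI x (const_int 24)).  */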
2794 /* Same, but for ashift that has been "simplified" to a wider mode
2795 by simplify_shift_const. */
2797 if (GET_CODE (opleft) == SUBREG
2798 && is_a <scalar_int_mode> (mode, &int_mode)
2799 && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (opleft)),
2800 &inner_mode)
2801 && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
2802 && GET_CODE (opright) == LSHIFTRT
2803 && GET_CODE (XEXP (opright, 0)) == SUBREG
2804 && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
2805 && GET_MODE_SIZE (int_mode) < GET_MODE_SIZE (inner_mode)
2806 && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
2807 SUBREG_REG (XEXP (opright, 0)))
2808 && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
2809 && CONST_INT_P (XEXP (opright, 1))
2810 && (INTVAL (XEXP (SUBREG_REG (opleft), 1))
2811 + INTVAL (XEXP (opright, 1))
2812 == GET_MODE_PRECISION (int_mode)))
2813 return gen_rtx_ROTATE (int_mode, XEXP (opright, 0),
2814 XEXP (SUBREG_REG (opleft), 1));
2816 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2817 a (sign_extend (plus ...)). Then check if OP1 is a CONST_INT and
2818 the PLUS does not affect any of the bits in OP1: then we can do
2819 the IOR as a PLUS and we can associate. This is valid if OP1
2820 can be safely shifted left C bits. */
2821 if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
2822 && GET_CODE (XEXP (op0, 0)) == PLUS
2823 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
2824 && CONST_INT_P (XEXP (op0, 1))
2825 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
2827 int count = INTVAL (XEXP (op0, 1));
2828 HOST_WIDE_INT mask = UINTVAL (trueop1) << count;
2830 if (mask >> count == INTVAL (trueop1)
2831 && trunc_int_for_mode (mask, mode) == mask
2832 && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
2833 return simplify_gen_binary (ASHIFTRT, mode,
2834 plus_constant (mode, XEXP (op0, 0),
2835 mask),
2836 XEXP (op0, 1));
2839 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
2840 if (tem)
2841 return tem;
2843 tem = simplify_associative_operation (code, mode, op0, op1);
2844 if (tem)
2845 return tem;
2846 break;
2848 case XOR:
2849 if (trueop1 == CONST0_RTX (mode))
2850 return op0;
2851 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2852 return simplify_gen_unary (NOT, mode, op0, mode);
2853 if (rtx_equal_p (trueop0, trueop1)
2854 && ! side_effects_p (op0)
2855 && GET_MODE_CLASS (mode) != MODE_CC)
2856 return CONST0_RTX (mode);
2858 /* Canonicalize XOR of the most significant bit to PLUS. */
2859 if (CONST_SCALAR_INT_P (op1)
2860 && mode_signbit_p (mode, op1))
2861 return simplify_gen_binary (PLUS, mode, op0, op1);
2862 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2863 if (CONST_SCALAR_INT_P (op1)
2864 && GET_CODE (op0) == PLUS
2865 && CONST_SCALAR_INT_P (XEXP (op0, 1))
2866 && mode_signbit_p (mode, XEXP (op0, 1)))
2867 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2868 simplify_gen_binary (XOR, mode, op1,
2869 XEXP (op0, 1)));
2871 /* If we are XORing two things that have no bits in common,
2872 convert them into an IOR. This helps to detect rotation encoded
2873 using those methods and possibly other simplifications. */
2875 if (HWI_COMPUTABLE_MODE_P (mode)
2876 && (nonzero_bits (op0, mode)
2877 & nonzero_bits (op1, mode)) == 0)
2878 return (simplify_gen_binary (IOR, mode, op0, op1));
2880 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2881 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2882 (NOT y). */
2884 int num_negated = 0;
2886 if (GET_CODE (op0) == NOT)
2887 num_negated++, op0 = XEXP (op0, 0);
2888 if (GET_CODE (op1) == NOT)
2889 num_negated++, op1 = XEXP (op1, 0);
2891 if (num_negated == 2)
2892 return simplify_gen_binary (XOR, mode, op0, op1);
2893 else if (num_negated == 1)
2894 return simplify_gen_unary (NOT, mode,
2895 simplify_gen_binary (XOR, mode, op0, op1),
2896 mode);
2899 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2900 correspond to a machine insn or result in further simplifications
2901 if B is a constant. */
2903 if (GET_CODE (op0) == AND
2904 && rtx_equal_p (XEXP (op0, 1), op1)
2905 && ! side_effects_p (op1))
2906 return simplify_gen_binary (AND, mode,
2907 simplify_gen_unary (NOT, mode,
2908 XEXP (op0, 0), mode),
2909 op1);
2911 else if (GET_CODE (op0) == AND
2912 && rtx_equal_p (XEXP (op0, 0), op1)
2913 && ! side_effects_p (op1))
2914 return simplify_gen_binary (AND, mode,
2915 simplify_gen_unary (NOT, mode,
2916 XEXP (op0, 1), mode),
2917 op1);
2919 /* Given (xor (ior (xor A B) C) D), where B, C and D are
2920 constants, simplify to (xor (ior A C) (B&~C)^D), canceling
2921 out bits inverted twice and not set by C. Similarly, given
2922 (xor (and (xor A B) C) D), simplify without inverting C in
2923 the xor operand: (xor (and A C) (B&C)^D). */
2925 else if ((GET_CODE (op0) == IOR || GET_CODE (op0) == AND)
2926 && GET_CODE (XEXP (op0, 0)) == XOR
2927 && CONST_INT_P (op1)
2928 && CONST_INT_P (XEXP (op0, 1))
2929 && CONST_INT_P (XEXP (XEXP (op0, 0), 1)))
2931 enum rtx_code op = GET_CODE (op0);
2932 rtx a = XEXP (XEXP (op0, 0), 0);
2933 rtx b = XEXP (XEXP (op0, 0), 1);
2934 rtx c = XEXP (op0, 1);
2935 rtx d = op1;
2936 HOST_WIDE_INT bval = INTVAL (b);
2937 HOST_WIDE_INT cval = INTVAL (c);
2938 HOST_WIDE_INT dval = INTVAL (d);
2939 HOST_WIDE_INT xcval;
2941 if (op == IOR)
2942 xcval = ~cval;
2943 else
2944 xcval = cval;
2946 return simplify_gen_binary (XOR, mode,
2947 simplify_gen_binary (op, mode, a, c),
2948 gen_int_mode ((bval & xcval) ^ dval,
2949 mode));
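/* For instance, with OP == IOR and B == 0x0c, C == 0x0a,
   D == 0x06, we get (B&~C)^D == 0x04^0x06 == 0x02, so the
   result is (xor (ior A (const_int 0x0a)) (const_int 0x02)).  */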
2952 /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
2953 we can transform like this:
2954 (A&B)^C == ~(A&B)&C | ~C&(A&B)
2955 == (~A|~B)&C | ~C&(A&B) * DeMorgan's Law
2956 == ~A&C | ~B&C | A&(~C&B) * Distribute and re-order
2957 Attempt a few simplifications when B and C are both constants. */
2958 if (GET_CODE (op0) == AND
2959 && CONST_INT_P (op1)
2960 && CONST_INT_P (XEXP (op0, 1)))
2962 rtx a = XEXP (op0, 0);
2963 rtx b = XEXP (op0, 1);
2964 rtx c = op1;
2965 HOST_WIDE_INT bval = INTVAL (b);
2966 HOST_WIDE_INT cval = INTVAL (c);
2968 /* Instead of computing ~A&C, we compute its negated value,
2969 ~(A|~C). If it yields -1, ~A&C is zero, so we can
2970 optimize for sure. If it does not simplify, we still try
2971 to compute ~A&C below, but since that always allocates
2972 RTL, we don't try that before committing to returning a
2973 simplified expression. */
2974 rtx n_na_c = simplify_binary_operation (IOR, mode, a,
2975 GEN_INT (~cval));
2977 if ((~cval & bval) == 0)
2979 rtx na_c = NULL_RTX;
2980 if (n_na_c)
2981 na_c = simplify_gen_unary (NOT, mode, n_na_c, mode);
2982 else
2984 /* If ~A does not simplify, don't bother: we don't
2985 want to simplify 2 operations into 3, and if na_c
2986 were to simplify with na, n_na_c would have
2987 simplified as well. */
2988 rtx na = simplify_unary_operation (NOT, mode, a, mode);
2989 if (na)
2990 na_c = simplify_gen_binary (AND, mode, na, c);
2993 /* Try to simplify ~A&C | ~B&C. */
2994 if (na_c != NULL_RTX)
2995 return simplify_gen_binary (IOR, mode, na_c,
2996 gen_int_mode (~bval & cval, mode));
2998 else
3000 /* If ~A&C is zero, simplify A&(~C&B) | ~B&C. */
3001 if (n_na_c == CONSTM1_RTX (mode))
3003 rtx a_nc_b = simplify_gen_binary (AND, mode, a,
3004 gen_int_mode (~cval & bval,
3005 mode));
3006 return simplify_gen_binary (IOR, mode, a_nc_b,
3007 gen_int_mode (~bval & cval,
3008 mode));
3013 /* If we have (xor (and (xor A B) C) A) with C a constant we can instead
3014 do (ior (and A ~C) (and B C)) which is a machine instruction on some
3015 machines, and also has shorter instruction path length. */
3016 if (GET_CODE (op0) == AND
3017 && GET_CODE (XEXP (op0, 0)) == XOR
3018 && CONST_INT_P (XEXP (op0, 1))
3019 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), trueop1))
3021 rtx a = trueop1;
3022 rtx b = XEXP (XEXP (op0, 0), 1);
3023 rtx c = XEXP (op0, 1);
3024 rtx nc = simplify_gen_unary (NOT, mode, c, mode);
3025 rtx a_nc = simplify_gen_binary (AND, mode, a, nc);
3026 rtx bc = simplify_gen_binary (AND, mode, b, c);
3027 return simplify_gen_binary (IOR, mode, a_nc, bc);
3029 /* Similarly, (xor (and (xor A B) C) B) as (ior (and A C) (and B ~C)) */
3030 else if (GET_CODE (op0) == AND
3031 && GET_CODE (XEXP (op0, 0)) == XOR
3032 && CONST_INT_P (XEXP (op0, 1))
3033 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), trueop1))
3035 rtx a = XEXP (XEXP (op0, 0), 0);
3036 rtx b = trueop1;
3037 rtx c = XEXP (op0, 1);
3038 rtx nc = simplify_gen_unary (NOT, mode, c, mode);
3039 rtx b_nc = simplify_gen_binary (AND, mode, b, nc);
3040 rtx ac = simplify_gen_binary (AND, mode, a, c);
3041 return simplify_gen_binary (IOR, mode, ac, b_nc);
3044 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
3045 comparison if STORE_FLAG_VALUE is 1. */
3046 if (STORE_FLAG_VALUE == 1
3047 && trueop1 == const1_rtx
3048 && COMPARISON_P (op0)
3049 && (reversed = reversed_comparison (op0, mode)))
3050 return reversed;
3052 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
3053 is (lt foo (const_int 0)), so we can perform the above
3054 simplification if STORE_FLAG_VALUE is 1. */
3056 if (is_a <scalar_int_mode> (mode, &int_mode)
3057 && STORE_FLAG_VALUE == 1
3058 && trueop1 == const1_rtx
3059 && GET_CODE (op0) == LSHIFTRT
3060 && CONST_INT_P (XEXP (op0, 1))
3061 && INTVAL (XEXP (op0, 1)) == GET_MODE_PRECISION (int_mode) - 1)
3062 return gen_rtx_GE (int_mode, XEXP (op0, 0), const0_rtx);
3064 /* (xor (comparison foo bar) (const_int sign-bit))
3065 when STORE_FLAG_VALUE is the sign bit. */
3066 if (is_a <scalar_int_mode> (mode, &int_mode)
3067 && val_signbit_p (int_mode, STORE_FLAG_VALUE)
3068 && trueop1 == const_true_rtx
3069 && COMPARISON_P (op0)
3070 && (reversed = reversed_comparison (op0, int_mode)))
3071 return reversed;
3073 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
3074 if (tem)
3075 return tem;
3077 tem = simplify_associative_operation (code, mode, op0, op1);
3078 if (tem)
3079 return tem;
3080 break;
3082 case AND:
3083 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
3084 return trueop1;
3085 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
3086 return op0;
3087 if (HWI_COMPUTABLE_MODE_P (mode))
3089 HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
3090 HOST_WIDE_INT nzop1;
3091 if (CONST_INT_P (trueop1))
3093 HOST_WIDE_INT val1 = INTVAL (trueop1);
3094 /* If we are turning off bits already known off in OP0, we need
3095 not do an AND. */
3096 if ((nzop0 & ~val1) == 0)
3097 return op0;
3099 nzop1 = nonzero_bits (trueop1, mode);
3100 /* If we are clearing all the nonzero bits, the result is zero. */
3101 if ((nzop1 & nzop0) == 0
3102 && !side_effects_p (op0) && !side_effects_p (op1))
3103 return CONST0_RTX (mode);
3105 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
3106 && GET_MODE_CLASS (mode) != MODE_CC)
3107 return op0;
3108 /* A & (~A) -> 0 */
3109 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
3110 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
3111 && ! side_effects_p (op0)
3112 && GET_MODE_CLASS (mode) != MODE_CC)
3113 return CONST0_RTX (mode);
3115 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
3116 there are no nonzero bits of C outside of X's mode. */
3117 if ((GET_CODE (op0) == SIGN_EXTEND
3118 || GET_CODE (op0) == ZERO_EXTEND)
3119 && CONST_INT_P (trueop1)
3120 && HWI_COMPUTABLE_MODE_P (mode)
3121 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
3122 & UINTVAL (trueop1)) == 0)
3124 machine_mode imode = GET_MODE (XEXP (op0, 0));
3125 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
3126 gen_int_mode (INTVAL (trueop1),
3127 imode));
3128 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
3131 /* Transform (and (truncate X) C) into (truncate (and X C)). This way
3132 we might be able to further simplify the AND with X and potentially
3133 remove the truncation altogether. */
3134 if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
3136 rtx x = XEXP (op0, 0);
3137 machine_mode xmode = GET_MODE (x);
3138 tem = simplify_gen_binary (AND, xmode, x,
3139 gen_int_mode (INTVAL (trueop1), xmode));
3140 return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
3143 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
3144 if (GET_CODE (op0) == IOR
3145 && CONST_INT_P (trueop1)
3146 && CONST_INT_P (XEXP (op0, 1)))
3148 HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
3149 return simplify_gen_binary (IOR, mode,
3150 simplify_gen_binary (AND, mode,
3151 XEXP (op0, 0), op1),
3152 gen_int_mode (tmp, mode));
3155 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
3156 insn (and may simplify more). */
3157 if (GET_CODE (op0) == XOR
3158 && rtx_equal_p (XEXP (op0, 0), op1)
3159 && ! side_effects_p (op1))
3160 return simplify_gen_binary (AND, mode,
3161 simplify_gen_unary (NOT, mode,
3162 XEXP (op0, 1), mode),
3163 op1);
3165 if (GET_CODE (op0) == XOR
3166 && rtx_equal_p (XEXP (op0, 1), op1)
3167 && ! side_effects_p (op1))
3168 return simplify_gen_binary (AND, mode,
3169 simplify_gen_unary (NOT, mode,
3170 XEXP (op0, 0), mode),
3171 op1);
3173 /* Similarly for (~(A ^ B)) & A. */
3174 if (GET_CODE (op0) == NOT
3175 && GET_CODE (XEXP (op0, 0)) == XOR
3176 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
3177 && ! side_effects_p (op1))
3178 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
3180 if (GET_CODE (op0) == NOT
3181 && GET_CODE (XEXP (op0, 0)) == XOR
3182 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
3183 && ! side_effects_p (op1))
3184 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
3186 /* Convert (A | B) & A to A. */
3187 if (GET_CODE (op0) == IOR
3188 && (rtx_equal_p (XEXP (op0, 0), op1)
3189 || rtx_equal_p (XEXP (op0, 1), op1))
3190 && ! side_effects_p (XEXP (op0, 0))
3191 && ! side_effects_p (XEXP (op0, 1)))
3192 return op1;
3194 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
3195 ((A & N) + B) & M -> (A + B) & M
3196 Similarly if (N & M) == 0,
3197 ((A | N) + B) & M -> (A + B) & M
3198 and for - instead of + and/or ^ instead of |.
3199 Also, if (N & M) == 0, then
3200 (A +- N) & M -> A & M. */
3201 if (CONST_INT_P (trueop1)
3202 && HWI_COMPUTABLE_MODE_P (mode)
3203 && ~UINTVAL (trueop1)
3204 && (UINTVAL (trueop1) & (UINTVAL (trueop1) + 1)) == 0
3205 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
3207 rtx pmop[2];
3208 int which;
3210 pmop[0] = XEXP (op0, 0);
3211 pmop[1] = XEXP (op0, 1);
3213 if (CONST_INT_P (pmop[1])
3214 && (UINTVAL (pmop[1]) & UINTVAL (trueop1)) == 0)
3215 return simplify_gen_binary (AND, mode, pmop[0], op1);
3217 for (which = 0; which < 2; which++)
3219 tem = pmop[which];
3220 switch (GET_CODE (tem))
3222 case AND:
3223 if (CONST_INT_P (XEXP (tem, 1))
3224 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1))
3225 == UINTVAL (trueop1))
3226 pmop[which] = XEXP (tem, 0);
3227 break;
3228 case IOR:
3229 case XOR:
3230 if (CONST_INT_P (XEXP (tem, 1))
3231 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1)) == 0)
3232 pmop[which] = XEXP (tem, 0);
3233 break;
3234 default:
3235 break;
3239 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
3241 tem = simplify_gen_binary (GET_CODE (op0), mode,
3242 pmop[0], pmop[1]);
3243 return simplify_gen_binary (code, mode, tem, op1);
3247 /* (and X (ior (not X) Y)) -> (and X Y) */
3248 if (GET_CODE (op1) == IOR
3249 && GET_CODE (XEXP (op1, 0)) == NOT
3250 && rtx_equal_p (op0, XEXP (XEXP (op1, 0), 0)))
3251 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));
3253 /* (and (ior (not X) Y) X) -> (and X Y) */
3254 if (GET_CODE (op0) == IOR
3255 && GET_CODE (XEXP (op0, 0)) == NOT
3256 && rtx_equal_p (op1, XEXP (XEXP (op0, 0), 0)))
3257 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));
3259 /* (and X (ior Y (not X))) -> (and X Y) */
3260 if (GET_CODE (op1) == IOR
3261 && GET_CODE (XEXP (op1, 1)) == NOT
3262 && rtx_equal_p (op0, XEXP (XEXP (op1, 1), 0)))
3263 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 0));
3265 /* (and (ior Y (not X)) X) -> (and X Y) */
3266 if (GET_CODE (op0) == IOR
3267 && GET_CODE (XEXP (op0, 1)) == NOT
3268 && rtx_equal_p (op1, XEXP (XEXP (op0, 1), 0)))
3269 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 0));
3271 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
3272 if (tem)
3273 return tem;
3275 tem = simplify_associative_operation (code, mode, op0, op1);
3276 if (tem)
3277 return tem;
3278 break;
3280 case UDIV:
3281 /* 0/x is 0 (or x&0 if x has side-effects). */
3282 if (trueop0 == CONST0_RTX (mode)
3283 && !cfun->can_throw_non_call_exceptions)
3285 if (side_effects_p (op1))
3286 return simplify_gen_binary (AND, mode, op1, trueop0);
3287 return trueop0;
3289 /* x/1 is x. */
3290 if (trueop1 == CONST1_RTX (mode))
3292 tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3293 if (tem)
3294 return tem;
3296 /* Convert divide by power of two into shift. */
3297 if (CONST_INT_P (trueop1)
3298 && (val = exact_log2 (UINTVAL (trueop1))) > 0)
3299 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
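/* For example, (udiv:SI x (const_int 8)) becomes
   (lshiftrt:SI x (const_int 3)).  */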
3300 break;
3302 case DIV:
3303 /* Handle floating point and integers separately. */
3304 if (SCALAR_FLOAT_MODE_P (mode))
3306 /* Maybe change 0.0 / x to 0.0. This transformation isn't
3307 safe for modes with NaNs, since 0.0 / 0.0 will then be
3308 NaN rather than 0.0. Nor is it safe for modes with signed
3309 zeros, since dividing 0 by a negative number gives -0.0 */
3310 if (trueop0 == CONST0_RTX (mode)
3311 && !HONOR_NANS (mode)
3312 && !HONOR_SIGNED_ZEROS (mode)
3313 && ! side_effects_p (op1))
3314 return op0;
3315 /* x/1.0 is x. */
3316 if (trueop1 == CONST1_RTX (mode)
3317 && !HONOR_SNANS (mode))
3318 return op0;
3320 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
3321 && trueop1 != CONST0_RTX (mode))
3323 const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);
3325 /* x/-1.0 is -x. */
3326 if (real_equal (d1, &dconstm1)
3327 && !HONOR_SNANS (mode))
3328 return simplify_gen_unary (NEG, mode, op0, mode);
3330 /* Change FP division by a constant into multiplication.
3331 Only do this with -freciprocal-math. */
3332 if (flag_reciprocal_math
3333 && !real_equal (d1, &dconst0))
3335 REAL_VALUE_TYPE d;
3336 real_arithmetic (&d, RDIV_EXPR, &dconst1, d1);
3337 tem = const_double_from_real_value (d, mode);
3338 return simplify_gen_binary (MULT, mode, op0, tem);
3342 else if (SCALAR_INT_MODE_P (mode))
3344 /* 0/x is 0 (or x&0 if x has side-effects). */
3345 if (trueop0 == CONST0_RTX (mode)
3346 && !cfun->can_throw_non_call_exceptions)
3348 if (side_effects_p (op1))
3349 return simplify_gen_binary (AND, mode, op1, trueop0);
3350 return trueop0;
3352 /* x/1 is x. */
3353 if (trueop1 == CONST1_RTX (mode))
3355 tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3356 if (tem)
3357 return tem;
3359 /* x/-1 is -x. */
3360 if (trueop1 == constm1_rtx)
3362 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3363 if (x)
3364 return simplify_gen_unary (NEG, mode, x, mode);
3367 break;
3369 case UMOD:
3370 /* 0%x is 0 (or x&0 if x has side-effects). */
3371 if (trueop0 == CONST0_RTX (mode))
3373 if (side_effects_p (op1))
3374 return simplify_gen_binary (AND, mode, op1, trueop0);
3375 return trueop0;
3377 /* x%1 is 0 (or x&0 if x has side-effects). */
3378 if (trueop1 == CONST1_RTX (mode))
3380 if (side_effects_p (op0))
3381 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3382 return CONST0_RTX (mode);
3384 /* Implement modulus by power of two as AND. */
3385 if (CONST_INT_P (trueop1)
3386 && exact_log2 (UINTVAL (trueop1)) > 0)
3387 return simplify_gen_binary (AND, mode, op0,
3388 gen_int_mode (INTVAL (op1) - 1, mode));
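/* For example, (umod:SI x (const_int 8)) becomes
   (and:SI x (const_int 7)).  */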
3389 break;
3391 case MOD:
3392 /* 0%x is 0 (or x&0 if x has side-effects). */
3393 if (trueop0 == CONST0_RTX (mode))
3395 if (side_effects_p (op1))
3396 return simplify_gen_binary (AND, mode, op1, trueop0);
3397 return trueop0;
3399 /* x%1 and x%-1 is 0 (or x&0 if x has side-effects). */
3400 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
3402 if (side_effects_p (op0))
3403 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3404 return CONST0_RTX (mode);
3406 break;
3408 case ROTATERT:
3409 case ROTATE:
3410 /* Canonicalize rotates by constant amount. If op1 is bitsize / 2,
3411 prefer left rotation, if op1 is from bitsize / 2 + 1 to
3412 bitsize - 1, use other direction of rotate with 1 .. bitsize / 2 - 1
3413 amount instead. */
3414 #if defined(HAVE_rotate) && defined(HAVE_rotatert)
3415 if (CONST_INT_P (trueop1)
3416 && IN_RANGE (INTVAL (trueop1),
3417 GET_MODE_UNIT_PRECISION (mode) / 2 + (code == ROTATE),
3418 GET_MODE_UNIT_PRECISION (mode) - 1))
3419 return simplify_gen_binary (code == ROTATE ? ROTATERT : ROTATE,
3420 mode, op0,
3421 GEN_INT (GET_MODE_UNIT_PRECISION (mode)
3422 - INTVAL (trueop1)));
3423 #endif
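/* For example, in SImode, (rotate:SI x (const_int 25)) is
   canonicalized to (rotatert:SI x (const_int 7)), while
   (rotatert:SI x (const_int 16)) becomes
   (rotate:SI x (const_int 16)).  */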
3424 /* FALLTHRU */
3425 case ASHIFTRT:
3426 if (trueop1 == CONST0_RTX (mode))
3427 return op0;
3428 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3429 return op0;
3430 /* Rotating ~0 always results in ~0. */
3431 if (CONST_INT_P (trueop0)
3432 && HWI_COMPUTABLE_MODE_P (mode)
3433 && UINTVAL (trueop0) == GET_MODE_MASK (mode)
3434 && ! side_effects_p (op1))
3435 return op0;
3437 canonicalize_shift:
3438 /* Given:
3439 scalar modes M1, M2
3440 scalar constants c1, c2
3441 size (M2) > size (M1)
3442 c1 == size (M2) - size (M1)
3443 optimize:
3444 ([a|l]shiftrt:M1 (subreg:M1 (lshiftrt:M2 (reg:M2) (const_int <c1>))
3445 <low_part>)
3446 (const_int <c2>))
3448 (subreg:M1 ([a|l]shiftrt:M2 (reg:M2) (const_int <c1 + c2>))
3449 <low_part>). */
3450 if ((code == ASHIFTRT || code == LSHIFTRT)
3451 && is_a <scalar_int_mode> (mode, &int_mode)
3452 && SUBREG_P (op0)
3453 && CONST_INT_P (op1)
3454 && GET_CODE (SUBREG_REG (op0)) == LSHIFTRT
3455 && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (op0)),
3456 &inner_mode)
3457 && CONST_INT_P (XEXP (SUBREG_REG (op0), 1))
3458 && GET_MODE_BITSIZE (inner_mode) > GET_MODE_BITSIZE (int_mode)
3459 && (INTVAL (XEXP (SUBREG_REG (op0), 1))
3460 == GET_MODE_BITSIZE (inner_mode) - GET_MODE_BITSIZE (int_mode))
3461 && subreg_lowpart_p (op0))
3463 rtx tmp = GEN_INT (INTVAL (XEXP (SUBREG_REG (op0), 1))
3464 + INTVAL (op1));
3465 tmp = simplify_gen_binary (code, inner_mode,
3466 XEXP (SUBREG_REG (op0), 0),
3467 tmp);
3468 return lowpart_subreg (int_mode, tmp, inner_mode);
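/* With M1 == SImode and M2 == DImode, so that c1 == 32, this
   turns, e.g.,
     (lshiftrt:SI (subreg:SI (lshiftrt:DI r (const_int 32)) 0)
                  (const_int 2))
   into (subreg:SI (lshiftrt:DI r (const_int 34)) 0), assuming a
   little-endian target where byte offset 0 is the low part.  */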
3471 if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
3473 val = INTVAL (op1) & (GET_MODE_UNIT_PRECISION (mode) - 1);
3474 if (val != INTVAL (op1))
3475 return simplify_gen_binary (code, mode, op0, GEN_INT (val));
3477 break;
3479 case ASHIFT:
3480 case SS_ASHIFT:
3481 case US_ASHIFT:
3482 if (trueop1 == CONST0_RTX (mode))
3483 return op0;
3484 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3485 return op0;
3486 goto canonicalize_shift;
3488 case LSHIFTRT:
3489 if (trueop1 == CONST0_RTX (mode))
3490 return op0;
3491 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3492 return op0;
3493 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
3494 if (GET_CODE (op0) == CLZ
3495 && is_a <scalar_int_mode> (GET_MODE (XEXP (op0, 0)), &inner_mode)
3496 && CONST_INT_P (trueop1)
3497 && STORE_FLAG_VALUE == 1
3498 && INTVAL (trueop1) < GET_MODE_UNIT_PRECISION (mode))
3500 unsigned HOST_WIDE_INT zero_val = 0;
3502 if (CLZ_DEFINED_VALUE_AT_ZERO (inner_mode, zero_val)
3503 && zero_val == GET_MODE_PRECISION (inner_mode)
3504 && INTVAL (trueop1) == exact_log2 (zero_val))
3505 return simplify_gen_relational (EQ, mode, inner_mode,
3506 XEXP (op0, 0), const0_rtx);
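/* For example, if CLZ_DEFINED_VALUE_AT_ZERO yields 32 for
   SImode, the only CLZ result with bit 5 set is 32 itself, so
   (lshiftrt:SI (clz:SI x) (const_int 5)) becomes
   (eq:SI x (const_int 0)).  */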
3508 goto canonicalize_shift;
3510 case SMIN:
3511 if (HWI_COMPUTABLE_MODE_P (mode)
3512 && mode_signbit_p (mode, trueop1)
3513 && ! side_effects_p (op0))
3514 return op1;
3515 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3516 return op0;
3517 tem = simplify_associative_operation (code, mode, op0, op1);
3518 if (tem)
3519 return tem;
3520 break;
3522 case SMAX:
3523 if (HWI_COMPUTABLE_MODE_P (mode)
3524 && CONST_INT_P (trueop1)
3525 && (UINTVAL (trueop1) == GET_MODE_MASK (mode) >> 1)
3526 && ! side_effects_p (op0))
3527 return op1;
3528 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3529 return op0;
3530 tem = simplify_associative_operation (code, mode, op0, op1);
3531 if (tem)
3532 return tem;
3533 break;
3535 case UMIN:
3536 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
3537 return op1;
3538 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3539 return op0;
3540 tem = simplify_associative_operation (code, mode, op0, op1);
3541 if (tem)
3542 return tem;
3543 break;
3545 case UMAX:
3546 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
3547 return op1;
3548 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3549 return op0;
3550 tem = simplify_associative_operation (code, mode, op0, op1);
3551 if (tem)
3552 return tem;
3553 break;
3555 case SS_PLUS:
3556 case US_PLUS:
3557 case SS_MINUS:
3558 case US_MINUS:
3559 case SS_MULT:
3560 case US_MULT:
3561 case SS_DIV:
3562 case US_DIV:
3563 /* ??? There are simplifications that can be done. */
3564 return 0;
3566 case VEC_SERIES:
3567 if (op1 == CONST0_RTX (GET_MODE_INNER (mode)))
3568 return gen_vec_duplicate (mode, op0);
3569 if (CONSTANT_P (op0) && CONSTANT_P (op1))
3570 return gen_const_vec_series (mode, op0, op1);
3571 return 0;
3573 case VEC_SELECT:
3574 if (!VECTOR_MODE_P (mode))
3576 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3577 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
3578 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3579 gcc_assert (XVECLEN (trueop1, 0) == 1);
3580 gcc_assert (CONST_INT_P (XVECEXP (trueop1, 0, 0)));
3582 if (vec_duplicate_p (trueop0, &elt0))
3583 return elt0;
3585 if (GET_CODE (trueop0) == CONST_VECTOR)
3586 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
3587 (trueop1, 0, 0)));
3589 /* Extract a scalar element from a nested VEC_SELECT expression
3590 (with an optional nested VEC_CONCAT expression). Some targets
3591 (i386) extract a scalar element from a vector using a chain of
3592 nested VEC_SELECT expressions. When the input operand is a memory
3593 operand, this operation can be simplified to a simple scalar
3594 load from the appropriately offset memory address. */
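/* Illustrative example (hypothetical modes):
   (vec_select:SF (vec_select:V2SF (reg:V4SF x)
   (parallel [(const_int 2) (const_int 3)]))
   (parallel [(const_int 1)]))
   selects element 3 of x, so it folds to
   (vec_select:SF (reg:V4SF x) (parallel [(const_int 3)])). */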
3595 if (GET_CODE (trueop0) == VEC_SELECT)
3597 rtx op0 = XEXP (trueop0, 0);
3598 rtx op1 = XEXP (trueop0, 1);
3600 int n_elts = GET_MODE_NUNITS (GET_MODE (op0));
3602 int i = INTVAL (XVECEXP (trueop1, 0, 0));
3603 int elem;
3605 rtvec vec;
3606 rtx tmp_op, tmp;
3608 gcc_assert (GET_CODE (op1) == PARALLEL);
3609 gcc_assert (i < n_elts);
3611 /* Select the element pointed to by the nested selector. */
3612 elem = INTVAL (XVECEXP (op1, 0, i));
3614 /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */
3615 if (GET_CODE (op0) == VEC_CONCAT)
3617 rtx op00 = XEXP (op0, 0);
3618 rtx op01 = XEXP (op0, 1);
3620 machine_mode mode00, mode01;
3621 int n_elts00, n_elts01;
3623 mode00 = GET_MODE (op00);
3624 mode01 = GET_MODE (op01);
3626 /* Find the number of elements in each operand. */
3627 n_elts00 = GET_MODE_NUNITS (mode00);
3628 n_elts01 = GET_MODE_NUNITS (mode01);
3630 gcc_assert (n_elts == n_elts00 + n_elts01);
3632 /* Select the correct operand of the VEC_CONCAT
3633 and adjust the selector. */
3634 if (elem < n_elts01)
3635 tmp_op = op00;
3636 else
3638 tmp_op = op01;
3639 elem -= n_elts00;
3642 else
3643 tmp_op = op0;
3645 vec = rtvec_alloc (1);
3646 RTVEC_ELT (vec, 0) = GEN_INT (elem);
3648 tmp = gen_rtx_fmt_ee (code, mode,
3649 tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
3650 return tmp;
3653 else
3655 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3656 gcc_assert (GET_MODE_INNER (mode)
3657 == GET_MODE_INNER (GET_MODE (trueop0)));
3658 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3660 if (vec_duplicate_p (trueop0, &elt0))
3661 /* It doesn't matter which elements are selected by trueop1,
3662 because they are all the same. */
3663 return gen_vec_duplicate (mode, elt0);
3665 if (GET_CODE (trueop0) == CONST_VECTOR)
3667 int elt_size = GET_MODE_UNIT_SIZE (mode);
3668 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3669 rtvec v = rtvec_alloc (n_elts);
3670 unsigned int i;
3672 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
3673 for (i = 0; i < n_elts; i++)
3675 rtx x = XVECEXP (trueop1, 0, i);
3677 gcc_assert (CONST_INT_P (x));
3678 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
3679 INTVAL (x));
3682 return gen_rtx_CONST_VECTOR (mode, v);
3685 /* Recognize the identity permutation. */
3686 if (GET_MODE (trueop0) == mode)
3688 bool maybe_ident = true;
3689 for (int i = 0; i < XVECLEN (trueop1, 0); i++)
3691 rtx j = XVECEXP (trueop1, 0, i);
3692 if (!CONST_INT_P (j) || INTVAL (j) != i)
3694 maybe_ident = false;
3695 break;
3698 if (maybe_ident)
3699 return trueop0;
3702 /* If we build {a,b} then permute it, build the result directly. */
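/* Illustrative example (hypothetical modes):
   (vec_select:V2DF (vec_concat:V4DF (vec_concat:V2DF a b)
   (vec_concat:V2DF c d))
   (parallel [(const_int 3) (const_int 0)]))
   is rebuilt directly as (vec_concat:V2DF d a). */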
3703 if (XVECLEN (trueop1, 0) == 2
3704 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3705 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3706 && GET_CODE (trueop0) == VEC_CONCAT
3707 && GET_CODE (XEXP (trueop0, 0)) == VEC_CONCAT
3708 && GET_MODE (XEXP (trueop0, 0)) == mode
3709 && GET_CODE (XEXP (trueop0, 1)) == VEC_CONCAT
3710 && GET_MODE (XEXP (trueop0, 1)) == mode)
3712 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3713 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3714 rtx subop0, subop1;
3716 gcc_assert (i0 < 4 && i1 < 4);
3717 subop0 = XEXP (XEXP (trueop0, i0 / 2), i0 % 2);
3718 subop1 = XEXP (XEXP (trueop0, i1 / 2), i1 % 2);
3720 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3723 if (XVECLEN (trueop1, 0) == 2
3724 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3725 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3726 && GET_CODE (trueop0) == VEC_CONCAT
3727 && GET_MODE (trueop0) == mode)
3729 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3730 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3731 rtx subop0, subop1;
3733 gcc_assert (i0 < 2 && i1 < 2);
3734 subop0 = XEXP (trueop0, i0);
3735 subop1 = XEXP (trueop0, i1);
3737 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3740 /* If we select one half of a vec_concat, return that. */
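/* Illustrative example: when x and y are V2SI,
   (vec_select:V2SI (vec_concat:V4SI x y)
   (parallel [(const_int 0) (const_int 1)])) is simply x, and
   selecting (parallel [(const_int 2) (const_int 3)]) is simply y. */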
3741 if (GET_CODE (trueop0) == VEC_CONCAT
3742 && CONST_INT_P (XVECEXP (trueop1, 0, 0)))
3744 rtx subop0 = XEXP (trueop0, 0);
3745 rtx subop1 = XEXP (trueop0, 1);
3746 machine_mode mode0 = GET_MODE (subop0);
3747 machine_mode mode1 = GET_MODE (subop1);
3748 int l0 = GET_MODE_NUNITS (mode0);
3749 int l1 = GET_MODE_NUNITS (mode1);
3750 int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3751 if (i0 == 0 && !side_effects_p (op1) && mode == mode0)
3753 bool success = true;
3754 for (int i = 1; i < l0; ++i)
3756 rtx j = XVECEXP (trueop1, 0, i);
3757 if (!CONST_INT_P (j) || INTVAL (j) != i)
3759 success = false;
3760 break;
3763 if (success)
3764 return subop0;
3766 if (i0 == l0 && !side_effects_p (op0) && mode == mode1)
3768 bool success = true;
3769 for (int i = 1; i < l1; ++i)
3771 rtx j = XVECEXP (trueop1, 0, i);
3772 if (!CONST_INT_P (j) || INTVAL (j) != i0 + i)
3774 success = false;
3775 break;
3778 if (success)
3779 return subop1;
3784 if (XVECLEN (trueop1, 0) == 1
3785 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3786 && GET_CODE (trueop0) == VEC_CONCAT)
3788 rtx vec = trueop0;
3789 int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
3791 /* Try to find the element in the VEC_CONCAT. */
3792 while (GET_MODE (vec) != mode
3793 && GET_CODE (vec) == VEC_CONCAT)
3795 HOST_WIDE_INT vec_size;
3797 if (CONST_INT_P (XEXP (vec, 0)))
3799 /* vec_concat of two const_ints doesn't make sense with
3800 respect to modes. */
3801 if (CONST_INT_P (XEXP (vec, 1)))
3802 return 0;
3804 vec_size = GET_MODE_SIZE (GET_MODE (trueop0))
3805 - GET_MODE_SIZE (GET_MODE (XEXP (vec, 1)));
3807 else
3808 vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
3810 if (offset < vec_size)
3811 vec = XEXP (vec, 0);
3812 else
3814 offset -= vec_size;
3815 vec = XEXP (vec, 1);
3817 vec = avoid_constant_pool_reference (vec);
3820 if (GET_MODE (vec) == mode)
3821 return vec;
3824 /* If we select elements in a vec_merge that all come from the same
3825 operand, select from that operand directly. */
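/* Illustrative example: with a zero merge mask every element comes
   from the second operand, so
   (vec_select (vec_merge a b (const_int 0)) sel) becomes
   (vec_select b sel). */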
3826 if (GET_CODE (op0) == VEC_MERGE)
3828 rtx trueop02 = avoid_constant_pool_reference (XEXP (op0, 2));
3829 if (CONST_INT_P (trueop02))
3831 unsigned HOST_WIDE_INT sel = UINTVAL (trueop02);
3832 bool all_operand0 = true;
3833 bool all_operand1 = true;
3834 for (int i = 0; i < XVECLEN (trueop1, 0); i++)
3836 rtx j = XVECEXP (trueop1, 0, i);
3837 if (sel & (HOST_WIDE_INT_1U << UINTVAL (j)))
3838 all_operand1 = false;
3839 else
3840 all_operand0 = false;
3842 if (all_operand0 && !side_effects_p (XEXP (op0, 1)))
3843 return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 0), op1);
3844 if (all_operand1 && !side_effects_p (XEXP (op0, 0)))
3845 return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 1), op1);
3849 /* If we have two nested selects that are inverses of each
3850 other, replace them with the source operand. */
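/* Illustrative example: the permutation [1 0 3 2] is its own inverse,
   so (vec_select:V4SI (vec_select:V4SI x (parallel [1 0 3 2]))
   (parallel [1 0 3 2])) is just x. */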
3851 if (GET_CODE (trueop0) == VEC_SELECT
3852 && GET_MODE (XEXP (trueop0, 0)) == mode)
3854 rtx op0_subop1 = XEXP (trueop0, 1);
3855 gcc_assert (GET_CODE (op0_subop1) == PARALLEL);
3856 gcc_assert (XVECLEN (trueop1, 0) == GET_MODE_NUNITS (mode));
3858 /* Apply the outer ordering vector to the inner one. (The inner
3859 ordering vector is expressly permitted to be of a different
3860 length than the outer one.) If the result is { 0, 1, ..., n-1 }
3861 then the two VEC_SELECTs cancel. */
3862 for (int i = 0; i < XVECLEN (trueop1, 0); ++i)
3864 rtx x = XVECEXP (trueop1, 0, i);
3865 if (!CONST_INT_P (x))
3866 return 0;
3867 rtx y = XVECEXP (op0_subop1, 0, INTVAL (x));
3868 if (!CONST_INT_P (y) || i != INTVAL (y))
3869 return 0;
3871 return XEXP (trueop0, 0);
3874 return 0;
3875 case VEC_CONCAT:
3877 machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
3878 ? GET_MODE (trueop0)
3879 : GET_MODE_INNER (mode));
3880 machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
3881 ? GET_MODE (trueop1)
3882 : GET_MODE_INNER (mode));
3884 gcc_assert (VECTOR_MODE_P (mode));
3885 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
3886 == GET_MODE_SIZE (mode));
3888 if (VECTOR_MODE_P (op0_mode))
3889 gcc_assert (GET_MODE_INNER (mode)
3890 == GET_MODE_INNER (op0_mode));
3891 else
3892 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
3894 if (VECTOR_MODE_P (op1_mode))
3895 gcc_assert (GET_MODE_INNER (mode)
3896 == GET_MODE_INNER (op1_mode));
3897 else
3898 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
3900 if ((GET_CODE (trueop0) == CONST_VECTOR
3901 || CONST_SCALAR_INT_P (trueop0)
3902 || CONST_DOUBLE_AS_FLOAT_P (trueop0))
3903 && (GET_CODE (trueop1) == CONST_VECTOR
3904 || CONST_SCALAR_INT_P (trueop1)
3905 || CONST_DOUBLE_AS_FLOAT_P (trueop1)))
3907 unsigned n_elts = GET_MODE_NUNITS (mode);
3908 unsigned in_n_elts = GET_MODE_NUNITS (op0_mode);
3909 rtvec v = rtvec_alloc (n_elts);
3910 unsigned int i;
3911 for (i = 0; i < n_elts; i++)
3913 if (i < in_n_elts)
3915 if (!VECTOR_MODE_P (op0_mode))
3916 RTVEC_ELT (v, i) = trueop0;
3917 else
3918 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
3920 else
3922 if (!VECTOR_MODE_P (op1_mode))
3923 RTVEC_ELT (v, i) = trueop1;
3924 else
3925 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
3926 i - in_n_elts);
3930 return gen_rtx_CONST_VECTOR (mode, v);
3933 /* Try to merge two VEC_SELECTs from the same vector into a single one.
3934 Restrict the transformation to avoid generating a VEC_SELECT with a
3935 mode unrelated to its operand. */
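/* Illustrative example: if x has mode V4SI,
   (vec_concat:V4SI (vec_select:V2SI x (parallel [0 2]))
   (vec_select:V2SI x (parallel [1 3])))
   becomes (vec_select:V4SI x (parallel [0 2 1 3])). */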
3936 if (GET_CODE (trueop0) == VEC_SELECT
3937 && GET_CODE (trueop1) == VEC_SELECT
3938 && rtx_equal_p (XEXP (trueop0, 0), XEXP (trueop1, 0))
3939 && GET_MODE (XEXP (trueop0, 0)) == mode)
3941 rtx par0 = XEXP (trueop0, 1);
3942 rtx par1 = XEXP (trueop1, 1);
3943 int len0 = XVECLEN (par0, 0);
3944 int len1 = XVECLEN (par1, 0);
3945 rtvec vec = rtvec_alloc (len0 + len1);
3946 for (int i = 0; i < len0; i++)
3947 RTVEC_ELT (vec, i) = XVECEXP (par0, 0, i);
3948 for (int i = 0; i < len1; i++)
3949 RTVEC_ELT (vec, len0 + i) = XVECEXP (par1, 0, i);
3950 return simplify_gen_binary (VEC_SELECT, mode, XEXP (trueop0, 0),
3951 gen_rtx_PARALLEL (VOIDmode, vec));
3954 return 0;
3956 default:
3957 gcc_unreachable ();
3960 if (mode == GET_MODE (op0)
3961 && mode == GET_MODE (op1)
3962 && vec_duplicate_p (op0, &elt0)
3963 && vec_duplicate_p (op1, &elt1))
3965 /* Try applying the operator to ELT and see if that simplifies.
3966 We can duplicate the result if so.
3968 The reason we don't use simplify_gen_binary is that it isn't
3969 necessarily a win to convert things like:
3971 (plus:V (vec_duplicate:V (reg:S R1))
3972 (vec_duplicate:V (reg:S R2)))
3976 (vec_duplicate:V (plus:S (reg:S R1) (reg:S R2)))
3978 The first might be done entirely in vector registers while the
3979 second might need a move between register files. */
3980 tem = simplify_binary_operation (code, GET_MODE_INNER (mode),
3981 elt0, elt1);
3982 if (tem)
3983 return gen_vec_duplicate (mode, tem);
3986 return 0;
3990 simplify_const_binary_operation (enum rtx_code code, machine_mode mode,
3991 rtx op0, rtx op1)
3993 if (VECTOR_MODE_P (mode)
3994 && code != VEC_CONCAT
3995 && GET_CODE (op0) == CONST_VECTOR
3996 && GET_CODE (op1) == CONST_VECTOR)
3998 unsigned int n_elts = CONST_VECTOR_NUNITS (op0);
3999 gcc_assert (n_elts == (unsigned int) CONST_VECTOR_NUNITS (op1));
4000 gcc_assert (n_elts == GET_MODE_NUNITS (mode));
4001 rtvec v = rtvec_alloc (n_elts);
4002 unsigned int i;
4004 for (i = 0; i < n_elts; i++)
4006 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
4007 CONST_VECTOR_ELT (op0, i),
4008 CONST_VECTOR_ELT (op1, i));
4009 if (!x)
4010 return 0;
4011 RTVEC_ELT (v, i) = x;
4014 return gen_rtx_CONST_VECTOR (mode, v);
4017 if (VECTOR_MODE_P (mode)
4018 && code == VEC_CONCAT
4019 && (CONST_SCALAR_INT_P (op0)
4020 || GET_CODE (op0) == CONST_FIXED
4021 || CONST_DOUBLE_AS_FLOAT_P (op0))
4022 && (CONST_SCALAR_INT_P (op1)
4023 || CONST_DOUBLE_AS_FLOAT_P (op1)
4024 || GET_CODE (op1) == CONST_FIXED))
4026 unsigned n_elts = GET_MODE_NUNITS (mode);
4027 rtvec v = rtvec_alloc (n_elts);
4029 gcc_assert (n_elts >= 2);
4030 if (n_elts == 2)
4032 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
4033 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
4035 RTVEC_ELT (v, 0) = op0;
4036 RTVEC_ELT (v, 1) = op1;
4038 else
4040 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
4041 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
4042 unsigned i;
4044 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
4045 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
4046 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
4048 for (i = 0; i < op0_n_elts; ++i)
4049 RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
4050 for (i = 0; i < op1_n_elts; ++i)
4051 RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
4054 return gen_rtx_CONST_VECTOR (mode, v);
4057 if (SCALAR_FLOAT_MODE_P (mode)
4058 && CONST_DOUBLE_AS_FLOAT_P (op0)
4059 && CONST_DOUBLE_AS_FLOAT_P (op1)
4060 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
4062 if (code == AND
4063 || code == IOR
4064 || code == XOR)
4066 long tmp0[4];
4067 long tmp1[4];
4068 REAL_VALUE_TYPE r;
4069 int i;
4071 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
4072 GET_MODE (op0));
4073 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
4074 GET_MODE (op1));
4075 for (i = 0; i < 4; i++)
4077 switch (code)
4079 case AND:
4080 tmp0[i] &= tmp1[i];
4081 break;
4082 case IOR:
4083 tmp0[i] |= tmp1[i];
4084 break;
4085 case XOR:
4086 tmp0[i] ^= tmp1[i];
4087 break;
4088 default:
4089 gcc_unreachable ();
4092 real_from_target (&r, tmp0, mode);
4093 return const_double_from_real_value (r, mode);
4095 else
4097 REAL_VALUE_TYPE f0, f1, value, result;
4098 const REAL_VALUE_TYPE *opr0, *opr1;
4099 bool inexact;
4101 opr0 = CONST_DOUBLE_REAL_VALUE (op0);
4102 opr1 = CONST_DOUBLE_REAL_VALUE (op1);
4104 if (HONOR_SNANS (mode)
4105 && (REAL_VALUE_ISSIGNALING_NAN (*opr0)
4106 || REAL_VALUE_ISSIGNALING_NAN (*opr1)))
4107 return 0;
4109 real_convert (&f0, mode, opr0);
4110 real_convert (&f1, mode, opr1);
4112 if (code == DIV
4113 && real_equal (&f1, &dconst0)
4114 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
4115 return 0;
4117 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
4118 && flag_trapping_math
4119 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
4121 int s0 = REAL_VALUE_NEGATIVE (f0);
4122 int s1 = REAL_VALUE_NEGATIVE (f1);
4124 switch (code)
4126 case PLUS:
4127 /* Inf + -Inf = NaN plus exception. */
4128 if (s0 != s1)
4129 return 0;
4130 break;
4131 case MINUS:
4132 /* Inf - Inf = NaN plus exception. */
4133 if (s0 == s1)
4134 return 0;
4135 break;
4136 case DIV:
4137 /* Inf / Inf = NaN plus exception. */
4138 return 0;
4139 default:
4140 break;
4144 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
4145 && flag_trapping_math
4146 && ((REAL_VALUE_ISINF (f0) && real_equal (&f1, &dconst0))
4147 || (REAL_VALUE_ISINF (f1)
4148 && real_equal (&f0, &dconst0))))
4149 /* Inf * 0 = NaN plus exception. */
4150 return 0;
4152 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
4153 &f0, &f1);
4154 real_convert (&result, mode, &value);
4156 /* Don't constant fold this floating point operation if
4157 the result has overflowed and flag_trapping_math is set. */
4159 if (flag_trapping_math
4160 && MODE_HAS_INFINITIES (mode)
4161 && REAL_VALUE_ISINF (result)
4162 && !REAL_VALUE_ISINF (f0)
4163 && !REAL_VALUE_ISINF (f1))
4164 /* Overflow plus exception. */
4165 return 0;
4167 /* Don't constant fold this floating point operation if the
4168 result may depend upon the run-time rounding mode and
4169 flag_rounding_math is set, or if GCC's software emulation
4170 is unable to accurately represent the result. */
4172 if ((flag_rounding_math
4173 || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
4174 && (inexact || !real_identical (&result, &value)))
4175 return NULL_RTX;
4177 return const_double_from_real_value (result, mode);
4181 /* We can fold some multi-word operations. */
4182 scalar_int_mode int_mode;
4183 if (is_a <scalar_int_mode> (mode, &int_mode)
4184 && CONST_SCALAR_INT_P (op0)
4185 && CONST_SCALAR_INT_P (op1))
4187 wide_int result;
4188 bool overflow;
4189 rtx_mode_t pop0 = rtx_mode_t (op0, int_mode);
4190 rtx_mode_t pop1 = rtx_mode_t (op1, int_mode);
4192 #if TARGET_SUPPORTS_WIDE_INT == 0
4193 /* This assert keeps the simplification from producing a result
4194 that cannot be represented in a CONST_DOUBLE. A lot of
4195 upstream callers expect that this function never fails to
4196 simplify something, so if you added this check to the test
4197 above, the code would die later anyway. If this assert
4198 fires, you just need to make the port support wide int. */
4199 gcc_assert (GET_MODE_PRECISION (int_mode) <= HOST_BITS_PER_DOUBLE_INT);
4200 #endif
4201 switch (code)
4203 case MINUS:
4204 result = wi::sub (pop0, pop1);
4205 break;
4207 case PLUS:
4208 result = wi::add (pop0, pop1);
4209 break;
4211 case MULT:
4212 result = wi::mul (pop0, pop1);
4213 break;
4215 case DIV:
4216 result = wi::div_trunc (pop0, pop1, SIGNED, &overflow);
4217 if (overflow)
4218 return NULL_RTX;
4219 break;
4221 case MOD:
4222 result = wi::mod_trunc (pop0, pop1, SIGNED, &overflow);
4223 if (overflow)
4224 return NULL_RTX;
4225 break;
4227 case UDIV:
4228 result = wi::div_trunc (pop0, pop1, UNSIGNED, &overflow);
4229 if (overflow)
4230 return NULL_RTX;
4231 break;
4233 case UMOD:
4234 result = wi::mod_trunc (pop0, pop1, UNSIGNED, &overflow);
4235 if (overflow)
4236 return NULL_RTX;
4237 break;
4239 case AND:
4240 result = wi::bit_and (pop0, pop1);
4241 break;
4243 case IOR:
4244 result = wi::bit_or (pop0, pop1);
4245 break;
4247 case XOR:
4248 result = wi::bit_xor (pop0, pop1);
4249 break;
4251 case SMIN:
4252 result = wi::smin (pop0, pop1);
4253 break;
4255 case SMAX:
4256 result = wi::smax (pop0, pop1);
4257 break;
4259 case UMIN:
4260 result = wi::umin (pop0, pop1);
4261 break;
4263 case UMAX:
4264 result = wi::umax (pop0, pop1);
4265 break;
4267 case LSHIFTRT:
4268 case ASHIFTRT:
4269 case ASHIFT:
4271 wide_int wop1 = pop1;
4272 if (SHIFT_COUNT_TRUNCATED)
4273 wop1 = wi::umod_trunc (wop1, GET_MODE_PRECISION (int_mode));
4274 else if (wi::geu_p (wop1, GET_MODE_PRECISION (int_mode)))
4275 return NULL_RTX;
4277 switch (code)
4279 case LSHIFTRT:
4280 result = wi::lrshift (pop0, wop1);
4281 break;
4283 case ASHIFTRT:
4284 result = wi::arshift (pop0, wop1);
4285 break;
4287 case ASHIFT:
4288 result = wi::lshift (pop0, wop1);
4289 break;
4291 default:
4292 gcc_unreachable ();
4294 break;
4296 case ROTATE:
4297 case ROTATERT:
4299 if (wi::neg_p (pop1))
4300 return NULL_RTX;
4302 switch (code)
4304 case ROTATE:
4305 result = wi::lrotate (pop0, pop1);
4306 break;
4308 case ROTATERT:
4309 result = wi::rrotate (pop0, pop1);
4310 break;
4312 default:
4313 gcc_unreachable ();
4315 break;
4317 default:
4318 return NULL_RTX;
4320 return immed_wide_int_const (result, int_mode);
4323 return NULL_RTX;
4328 /* Return a positive integer if X should sort after Y. The value
4329 returned is 1 if and only if X and Y are both regs. */
4331 static int
4332 simplify_plus_minus_op_data_cmp (rtx x, rtx y)
4334 int result;
4336 result = (commutative_operand_precedence (y)
4337 - commutative_operand_precedence (x));
4338 if (result)
4339 return result + result;
4341 /* Group together equal REGs to do more simplification. */
4342 if (REG_P (x) && REG_P (y))
4343 return REGNO (x) > REGNO (y);
4345 return 0;
4348 /* Simplify and canonicalize a PLUS or MINUS, at least one of whose
4349 operands may be another PLUS or MINUS.
4351 Rather than test for specific cases, we do this by a brute-force method
4352 and do all possible simplifications until no more changes occur. Then
4353 we rebuild the operation.
4355 May return NULL_RTX when no changes were made. */
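/* Illustrative examples (not from the original source): here
   (plus (minus (reg A) (reg B)) (reg B)) collapses to (reg A), and
   (plus (plus (const_int 2) (reg X)) (const_int 3)) becomes
   (plus (reg X) (const_int 5)). */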
4357 static rtx
4358 simplify_plus_minus (enum rtx_code code, machine_mode mode, rtx op0,
4359 rtx op1)
4361 struct simplify_plus_minus_op_data
4363 rtx op;
4364 short neg;
4365 } ops[16];
4366 rtx result, tem;
4367 int n_ops = 2;
4368 int changed, n_constants, canonicalized = 0;
4369 int i, j;
4371 memset (ops, 0, sizeof ops);
4373 /* Set up the two operands and then expand them until nothing has been
4374 changed. If we run out of room in our array, give up; this should
4375 almost never happen. */
4377 ops[0].op = op0;
4378 ops[0].neg = 0;
4379 ops[1].op = op1;
4380 ops[1].neg = (code == MINUS);
4382 do
4384 changed = 0;
4385 n_constants = 0;
4387 for (i = 0; i < n_ops; i++)
4389 rtx this_op = ops[i].op;
4390 int this_neg = ops[i].neg;
4391 enum rtx_code this_code = GET_CODE (this_op);
4393 switch (this_code)
4395 case PLUS:
4396 case MINUS:
4397 if (n_ops == ARRAY_SIZE (ops))
4398 return NULL_RTX;
4400 ops[n_ops].op = XEXP (this_op, 1);
4401 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
4402 n_ops++;
4404 ops[i].op = XEXP (this_op, 0);
4405 changed = 1;
4406 /* If this operand was negated then we will potentially
4407 canonicalize the expression. Similarly if we don't
4408 place the operands adjacent we're re-ordering the
4409 expression and thus might be performing a
4410 canonicalization. Ignore register re-ordering.
4411 ??? It might be better to shuffle the ops array here,
4412 but then (plus (plus (A, B), plus (C, D))) wouldn't
4413 be seen as non-canonical. */
4414 if (this_neg
4415 || (i != n_ops - 2
4416 && !(REG_P (ops[i].op) && REG_P (ops[n_ops - 1].op))))
4417 canonicalized = 1;
4418 break;
4420 case NEG:
4421 ops[i].op = XEXP (this_op, 0);
4422 ops[i].neg = ! this_neg;
4423 changed = 1;
4424 canonicalized = 1;
4425 break;
4427 case CONST:
4428 if (n_ops != ARRAY_SIZE (ops)
4429 && GET_CODE (XEXP (this_op, 0)) == PLUS
4430 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
4431 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
4433 ops[i].op = XEXP (XEXP (this_op, 0), 0);
4434 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
4435 ops[n_ops].neg = this_neg;
4436 n_ops++;
4437 changed = 1;
4438 canonicalized = 1;
4440 break;
4442 case NOT:
4443 /* ~a -> (-a - 1) */
4444 if (n_ops != ARRAY_SIZE (ops))
4446 ops[n_ops].op = CONSTM1_RTX (mode);
4447 ops[n_ops++].neg = this_neg;
4448 ops[i].op = XEXP (this_op, 0);
4449 ops[i].neg = !this_neg;
4450 changed = 1;
4451 canonicalized = 1;
4453 break;
4455 case CONST_INT:
4456 n_constants++;
4457 if (this_neg)
4459 ops[i].op = neg_const_int (mode, this_op);
4460 ops[i].neg = 0;
4461 changed = 1;
4462 canonicalized = 1;
4464 break;
4466 default:
4467 break;
4471 while (changed);
4473 if (n_constants > 1)
4474 canonicalized = 1;
4476 gcc_assert (n_ops >= 2);
4478 /* If we only have two operands, we can avoid the loops. */
4479 if (n_ops == 2)
4481 enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
4482 rtx lhs, rhs;
4484 /* Get the two operands. Be careful with the order, especially for
4485 the cases where code == MINUS. */
4486 if (ops[0].neg && ops[1].neg)
4488 lhs = gen_rtx_NEG (mode, ops[0].op);
4489 rhs = ops[1].op;
4491 else if (ops[0].neg)
4493 lhs = ops[1].op;
4494 rhs = ops[0].op;
4496 else
4498 lhs = ops[0].op;
4499 rhs = ops[1].op;
4502 return simplify_const_binary_operation (code, mode, lhs, rhs);
4505 /* Now simplify each pair of operands until nothing changes. */
4506 while (1)
4508 /* Insertion sort is good enough for a small array. */
4509 for (i = 1; i < n_ops; i++)
4511 struct simplify_plus_minus_op_data save;
4512 int cmp;
4514 j = i - 1;
4515 cmp = simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op);
4516 if (cmp <= 0)
4517 continue;
4518 /* Just swapping registers doesn't count as canonicalization. */
4519 if (cmp != 1)
4520 canonicalized = 1;
4522 save = ops[i];
4523 do
4524 ops[j + 1] = ops[j];
4525 while (j--
4526 && simplify_plus_minus_op_data_cmp (ops[j].op, save.op) > 0);
4527 ops[j + 1] = save;
4530 changed = 0;
4531 for (i = n_ops - 1; i > 0; i--)
4532 for (j = i - 1; j >= 0; j--)
4534 rtx lhs = ops[j].op, rhs = ops[i].op;
4535 int lneg = ops[j].neg, rneg = ops[i].neg;
4537 if (lhs != 0 && rhs != 0)
4539 enum rtx_code ncode = PLUS;
4541 if (lneg != rneg)
4543 ncode = MINUS;
4544 if (lneg)
4545 std::swap (lhs, rhs);
4547 else if (swap_commutative_operands_p (lhs, rhs))
4548 std::swap (lhs, rhs);
4550 if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
4551 && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
4553 rtx tem_lhs, tem_rhs;
4555 tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
4556 tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
4557 tem = simplify_binary_operation (ncode, mode, tem_lhs,
4558 tem_rhs);
4560 if (tem && !CONSTANT_P (tem))
4561 tem = gen_rtx_CONST (GET_MODE (tem), tem);
4563 else
4564 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
4566 if (tem)
4568 /* Reject "simplifications" that just wrap the two
4569 arguments in a CONST. Failure to do so can result
4570 in infinite recursion with simplify_binary_operation
4571 when it calls us to simplify CONST operations.
4572 Also, if we find such a simplification, don't try
4573 any more combinations with this rhs: we must have
4574 something like symbol+offset, i.e. one of the
4575 trivial CONST expressions we handle later. */
4576 if (GET_CODE (tem) == CONST
4577 && GET_CODE (XEXP (tem, 0)) == ncode
4578 && XEXP (XEXP (tem, 0), 0) == lhs
4579 && XEXP (XEXP (tem, 0), 1) == rhs)
4580 break;
4581 lneg &= rneg;
4582 if (GET_CODE (tem) == NEG)
4583 tem = XEXP (tem, 0), lneg = !lneg;
4584 if (CONST_INT_P (tem) && lneg)
4585 tem = neg_const_int (mode, tem), lneg = 0;
4587 ops[i].op = tem;
4588 ops[i].neg = lneg;
4589 ops[j].op = NULL_RTX;
4590 changed = 1;
4591 canonicalized = 1;
4596 if (!changed)
4597 break;
4599 /* Pack all the operands to the lower-numbered entries. */
4600 for (i = 0, j = 0; j < n_ops; j++)
4601 if (ops[j].op)
4603 ops[i] = ops[j];
4604 i++;
4606 n_ops = i;
4609 /* If nothing changed, check whether rematerialization of the rtl
4610 instructions is still required. */
4611 if (!canonicalized)
4613 /* Perform rematerialization only if all operands are registers and
4614 all operations are PLUS. */
4615 /* ??? Also disallow (non-global, non-frame) fixed registers to work
4616 around rs6000 and how it uses the CA register. See PR67145. */
4617 for (i = 0; i < n_ops; i++)
4618 if (ops[i].neg
4619 || !REG_P (ops[i].op)
4620 || (REGNO (ops[i].op) < FIRST_PSEUDO_REGISTER
4621 && fixed_regs[REGNO (ops[i].op)]
4622 && !global_regs[REGNO (ops[i].op)]
4623 && ops[i].op != frame_pointer_rtx
4624 && ops[i].op != arg_pointer_rtx
4625 && ops[i].op != stack_pointer_rtx))
4626 return NULL_RTX;
4627 goto gen_result;
4630 /* Create (minus -C X) instead of (neg (const (plus X C))). */
4631 if (n_ops == 2
4632 && CONST_INT_P (ops[1].op)
4633 && CONSTANT_P (ops[0].op)
4634 && ops[0].neg)
4635 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
4637 /* We suppressed creation of trivial CONST expressions in the
4638 combination loop to avoid recursion. Create one manually now.
4639 The combination loop should have ensured that there is exactly
4640 one CONST_INT, and the sort will have ensured that it is last
4641 in the array and that any other constant will be next-to-last. */
4643 if (n_ops > 1
4644 && CONST_INT_P (ops[n_ops - 1].op)
4645 && CONSTANT_P (ops[n_ops - 2].op))
4647 rtx value = ops[n_ops - 1].op;
4648 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
4649 value = neg_const_int (mode, value);
4650 if (CONST_INT_P (value))
4652 ops[n_ops - 2].op = plus_constant (mode, ops[n_ops - 2].op,
4653 INTVAL (value));
4654 n_ops--;
4658 /* Put a non-negated operand first, if possible. */
4660 for (i = 0; i < n_ops && ops[i].neg; i++)
4661 continue;
4662 if (i == n_ops)
4663 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
4664 else if (i != 0)
4666 tem = ops[0].op;
4667 ops[0] = ops[i];
4668 ops[i].op = tem;
4669 ops[i].neg = 1;
4672 /* Now make the result by performing the requested operations. */
4673 gen_result:
4674 result = ops[0].op;
4675 for (i = 1; i < n_ops; i++)
4676 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
4677 mode, result, ops[i].op);
4679 return result;
4682 /* Check whether an operand is suitable for calling simplify_plus_minus. */
4683 static bool
4684 plus_minus_operand_p (const_rtx x)
4686 return GET_CODE (x) == PLUS
4687 || GET_CODE (x) == MINUS
4688 || (GET_CODE (x) == CONST
4689 && GET_CODE (XEXP (x, 0)) == PLUS
4690 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
4691 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
4694 /* Like simplify_binary_operation except used for relational operators.
4695 MODE is the mode of the result. If MODE is VOIDmode, the operands
4696 must not both be VOIDmode.
4698 CMP_MODE specifies the mode in which the comparison is done, so it is
4699 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
4700 the operands or, if both are VOIDmode, the operands are compared in
4701 "infinite precision". */
4703 simplify_relational_operation (enum rtx_code code, machine_mode mode,
4704 machine_mode cmp_mode, rtx op0, rtx op1)
4706 rtx tem, trueop0, trueop1;
4708 if (cmp_mode == VOIDmode)
4709 cmp_mode = GET_MODE (op0);
4710 if (cmp_mode == VOIDmode)
4711 cmp_mode = GET_MODE (op1);
4713 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
4714 if (tem)
4716 if (SCALAR_FLOAT_MODE_P (mode))
4718 if (tem == const0_rtx)
4719 return CONST0_RTX (mode);
4720 #ifdef FLOAT_STORE_FLAG_VALUE
4722 REAL_VALUE_TYPE val;
4723 val = FLOAT_STORE_FLAG_VALUE (mode);
4724 return const_double_from_real_value (val, mode);
4726 #else
4727 return NULL_RTX;
4728 #endif
4730 if (VECTOR_MODE_P (mode))
4732 if (tem == const0_rtx)
4733 return CONST0_RTX (mode);
4734 #ifdef VECTOR_STORE_FLAG_VALUE
4736 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
4737 if (val == NULL_RTX)
4738 return NULL_RTX;
4739 if (val == const1_rtx)
4740 return CONST1_RTX (mode);
4742 return gen_const_vec_duplicate (mode, val);
4744 #else
4745 return NULL_RTX;
4746 #endif
4749 return tem;
4752 /* For the following tests, ensure const0_rtx is op1. */
4753 if (swap_commutative_operands_p (op0, op1)
4754 || (op0 == const0_rtx && op1 != const0_rtx))
4755 std::swap (op0, op1), code = swap_condition (code);
4757 /* If op0 is a compare, extract the comparison arguments from it. */
4758 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4759 return simplify_gen_relational (code, mode, VOIDmode,
4760 XEXP (op0, 0), XEXP (op0, 1));
4762 if (GET_MODE_CLASS (cmp_mode) == MODE_CC
4763 || CC0_P (op0))
4764 return NULL_RTX;
4766 trueop0 = avoid_constant_pool_reference (op0);
4767 trueop1 = avoid_constant_pool_reference (op1);
4768 return simplify_relational_operation_1 (code, mode, cmp_mode,
4769 trueop0, trueop1);
4772 /* This part of simplify_relational_operation is only used when CMP_MODE
4773 is not in class MODE_CC (i.e. it is a real comparison).
4775 MODE is the mode of the result, while CMP_MODE specifies the mode
4776 in which the comparison is done, so it is the mode of the operands. */
4778 static rtx
4779 simplify_relational_operation_1 (enum rtx_code code, machine_mode mode,
4780 machine_mode cmp_mode, rtx op0, rtx op1)
4782 enum rtx_code op0code = GET_CODE (op0);
4784 if (op1 == const0_rtx && COMPARISON_P (op0))
4786 /* If op0 is a comparison, extract the comparison arguments
4787 from it. */
4788 if (code == NE)
4790 if (GET_MODE (op0) == mode)
4791 return simplify_rtx (op0);
4792 else
4793 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
4794 XEXP (op0, 0), XEXP (op0, 1));
4796 else if (code == EQ)
4798 enum rtx_code new_code = reversed_comparison_code (op0, NULL);
4799 if (new_code != UNKNOWN)
4800 return simplify_gen_relational (new_code, mode, VOIDmode,
4801 XEXP (op0, 0), XEXP (op0, 1));
4805 /* (LTU/GEU (PLUS a C) C), where C is a constant, can be simplified to
4806 (GEU/LTU a -C). Likewise for (LTU/GEU (PLUS a C) a). */
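/* Illustrative example: (ltu:SI (plus:SI a (const_int 5)) (const_int 5)),
   the usual unsigned wrap-around test for a + 5, becomes
   (geu:SI a (const_int -5)). */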
4807 if ((code == LTU || code == GEU)
4808 && GET_CODE (op0) == PLUS
4809 && CONST_INT_P (XEXP (op0, 1))
4810 && (rtx_equal_p (op1, XEXP (op0, 0))
4811 || rtx_equal_p (op1, XEXP (op0, 1)))
4812 /* (LTU/GEU (PLUS a 0) 0) is not the same as (GEU/LTU a 0). */
4813 && XEXP (op0, 1) != const0_rtx)
4815 rtx new_cmp
4816 = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
4817 return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
4818 cmp_mode, XEXP (op0, 0), new_cmp);
4821 /* (GTU (PLUS a C) (C - 1)) where C is a non-zero constant can be
4822 transformed into (LTU a -C). */
4823 if (code == GTU && GET_CODE (op0) == PLUS && CONST_INT_P (op1)
4824 && CONST_INT_P (XEXP (op0, 1))
4825 && (UINTVAL (op1) == UINTVAL (XEXP (op0, 1)) - 1)
4826 && XEXP (op0, 1) != const0_rtx)
4828 rtx new_cmp
4829 = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
4830 return simplify_gen_relational (LTU, mode, cmp_mode,
4831 XEXP (op0, 0), new_cmp);
4834 /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a). */
4835 if ((code == LTU || code == GEU)
4836 && GET_CODE (op0) == PLUS
4837 && rtx_equal_p (op1, XEXP (op0, 1))
4838 /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b). */
4839 && !rtx_equal_p (op1, XEXP (op0, 0)))
4840 return simplify_gen_relational (code, mode, cmp_mode, op0,
4841 copy_rtx (XEXP (op0, 0)));
4843 if (op1 == const0_rtx)
4845 /* Canonicalize (GTU x 0) as (NE x 0). */
4846 if (code == GTU)
4847 return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
4848 /* Canonicalize (LEU x 0) as (EQ x 0). */
4849 if (code == LEU)
4850 return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
4852 else if (op1 == const1_rtx)
4854 switch (code)
4856 case GE:
4857 /* Canonicalize (GE x 1) as (GT x 0). */
4858 return simplify_gen_relational (GT, mode, cmp_mode,
4859 op0, const0_rtx);
4860 case GEU:
4861 /* Canonicalize (GEU x 1) as (NE x 0). */
4862 return simplify_gen_relational (NE, mode, cmp_mode,
4863 op0, const0_rtx);
4864 case LT:
4865 /* Canonicalize (LT x 1) as (LE x 0). */
4866 return simplify_gen_relational (LE, mode, cmp_mode,
4867 op0, const0_rtx);
4868 case LTU:
4869 /* Canonicalize (LTU x 1) as (EQ x 0). */
4870 return simplify_gen_relational (EQ, mode, cmp_mode,
4871 op0, const0_rtx);
4872 default:
4873 break;
4876 else if (op1 == constm1_rtx)
4878 /* Canonicalize (LE x -1) as (LT x 0). */
4879 if (code == LE)
4880 return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
4881 /* Canonicalize (GT x -1) as (GE x 0). */
4882 if (code == GT)
4883 return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
4886 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
4887 if ((code == EQ || code == NE)
4888 && (op0code == PLUS || op0code == MINUS)
4889 && CONSTANT_P (op1)
4890 && CONSTANT_P (XEXP (op0, 1))
4891 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
4893 rtx x = XEXP (op0, 0);
4894 rtx c = XEXP (op0, 1);
4895 enum rtx_code invcode = op0code == PLUS ? MINUS : PLUS;
4896 rtx tem = simplify_gen_binary (invcode, cmp_mode, op1, c);
4898 /* Detect an infinite recursive condition, where we oscillate at this
4899 simplification case between:
4900 A + B == C <---> C - B == A,
4901 where A, B, and C are all constants with non-simplifiable expressions,
4902 usually SYMBOL_REFs. */
4903 if (GET_CODE (tem) == invcode
4904 && CONSTANT_P (x)
4905 && rtx_equal_p (c, XEXP (tem, 1)))
4906 return NULL_RTX;
4908 return simplify_gen_relational (code, mode, cmp_mode, x, tem);
4911 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0))) is
4912 the same as (zero_extract:SI FOO (const_int 1) BAR). */
4913 scalar_int_mode int_mode, int_cmp_mode;
4914 if (code == NE
4915 && op1 == const0_rtx
4916 && is_int_mode (mode, &int_mode)
4917 && is_a <scalar_int_mode> (cmp_mode, &int_cmp_mode)
4918 /* ??? Work-around BImode bugs in the ia64 backend. */
4919 && int_mode != BImode
4920 && int_cmp_mode != BImode
4921 && nonzero_bits (op0, int_cmp_mode) == 1
4922 && STORE_FLAG_VALUE == 1)
4923 return GET_MODE_SIZE (int_mode) > GET_MODE_SIZE (int_cmp_mode)
4924 ? simplify_gen_unary (ZERO_EXTEND, int_mode, op0, int_cmp_mode)
4925 : lowpart_subreg (int_mode, op0, int_cmp_mode);
4927 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
4928 if ((code == EQ || code == NE)
4929 && op1 == const0_rtx
4930 && op0code == XOR)
4931 return simplify_gen_relational (code, mode, cmp_mode,
4932 XEXP (op0, 0), XEXP (op0, 1));
4934 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
4935 if ((code == EQ || code == NE)
4936 && op0code == XOR
4937 && rtx_equal_p (XEXP (op0, 0), op1)
4938 && !side_effects_p (XEXP (op0, 0)))
4939 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 1),
4940 CONST0_RTX (mode));
4942 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
4943 if ((code == EQ || code == NE)
4944 && op0code == XOR
4945 && rtx_equal_p (XEXP (op0, 1), op1)
4946 && !side_effects_p (XEXP (op0, 1)))
4947 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4948 CONST0_RTX (mode));
4950 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
4951 if ((code == EQ || code == NE)
4952 && op0code == XOR
4953 && CONST_SCALAR_INT_P (op1)
4954 && CONST_SCALAR_INT_P (XEXP (op0, 1)))
4955 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4956 simplify_gen_binary (XOR, cmp_mode,
4957 XEXP (op0, 1), op1));
4959 /* (eq/ne (and x y) x) simplifies to (eq/ne (and (not y) x) 0), which
4960 can be implemented with a BICS instruction on some targets, or
4961 constant-folded if y is a constant. */
4962 if ((code == EQ || code == NE)
4963 && op0code == AND
4964 && rtx_equal_p (XEXP (op0, 0), op1)
4965 && !side_effects_p (op1)
4966 && op1 != CONST0_RTX (cmp_mode))
4968 rtx not_y = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 1), cmp_mode);
4969 rtx lhs = simplify_gen_binary (AND, cmp_mode, not_y, XEXP (op0, 0));
4971 return simplify_gen_relational (code, mode, cmp_mode, lhs,
4972 CONST0_RTX (cmp_mode));
4975 /* Likewise for (eq/ne (and x y) y). */
4976 if ((code == EQ || code == NE)
4977 && op0code == AND
4978 && rtx_equal_p (XEXP (op0, 1), op1)
4979 && !side_effects_p (op1)
4980 && op1 != CONST0_RTX (cmp_mode))
4982 rtx not_x = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 0), cmp_mode);
4983 rtx lhs = simplify_gen_binary (AND, cmp_mode, not_x, XEXP (op0, 1));
4985 return simplify_gen_relational (code, mode, cmp_mode, lhs,
4986 CONST0_RTX (cmp_mode));
4989 /* (eq/ne (bswap x) C1) simplifies to (eq/ne x C2), where C2 is C1 with its bytes swapped. */
4990 if ((code == EQ || code == NE)
4991 && GET_CODE (op0) == BSWAP
4992 && CONST_SCALAR_INT_P (op1))
4993 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4994 simplify_gen_unary (BSWAP, cmp_mode,
4995 op1, cmp_mode));
4997 /* (eq/ne (bswap x) (bswap y)) simplifies to (eq/ne x y). */
4998 if ((code == EQ || code == NE)
4999 && GET_CODE (op0) == BSWAP
5000 && GET_CODE (op1) == BSWAP)
5001 return simplify_gen_relational (code, mode, cmp_mode,
5002 XEXP (op0, 0), XEXP (op1, 0));
5004 if (op0code == POPCOUNT && op1 == const0_rtx)
5005 switch (code)
5007 case EQ:
5008 case LE:
5009 case LEU:
5010 /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */
5011 return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
5012 XEXP (op0, 0), const0_rtx);
5014 case NE:
5015 case GT:
5016 case GTU:
5017 /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */
5018 return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
5019 XEXP (op0, 0), const0_rtx);
5021 default:
5022 break;
5025 return NULL_RTX;
5028 enum
5030 CMP_EQ = 1,
5031 CMP_LT = 2,
5032 CMP_GT = 4,
5033 CMP_LTU = 8,
5034 CMP_GTU = 16
5038 /* Convert the known results for EQ, LT, GT, LTU, GTU contained in
5039 KNOWN_RESULTS to a CONST_INT, based on the requested comparison CODE.
5040 For KNOWN_RESULTS to make sense it should be either CMP_EQ, or the
5041 logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
5042 For floating-point comparisons, assume that the operands were ordered. */
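/* Illustrative example: comparison_result (LEU, CMP_GTU) is const0_rtx,
   since x <= y (unsigned) is false once the operands are known to
   compare unsigned-greater. */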
5044 static rtx
5045 comparison_result (enum rtx_code code, int known_results)
5047 switch (code)
5049 case EQ:
5050 case UNEQ:
5051 return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
5052 case NE:
5053 case LTGT:
5054 return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;
5056 case LT:
5057 case UNLT:
5058 return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
5059 case GE:
5060 case UNGE:
5061 return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;
5063 case GT:
5064 case UNGT:
5065 return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
5066 case LE:
5067 case UNLE:
5068 return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;
5070 case LTU:
5071 return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
5072 case GEU:
5073 return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;
5075 case GTU:
5076 return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
5077 case LEU:
5078 return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;
5080 case ORDERED:
5081 return const_true_rtx;
5082 case UNORDERED:
5083 return const0_rtx;
5084 default:
5085 gcc_unreachable ();
5089 /* Check if the given comparison (done in the given MODE) is actually
5090 a tautology or a contradiction. If the mode is VOIDmode, the
5091 comparison is done in "infinite precision". If no simplification
5092 is possible, this function returns zero. Otherwise, it returns
5093 either const_true_rtx or const0_rtx. */
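/* Illustrative examples: (gtu (const_int 4) (const_int 7)) is a
   contradiction and folds to const0_rtx, while (eq (reg:SI x) (reg:SI x))
   is a tautology and folds to const_true_rtx. */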
5096 simplify_const_relational_operation (enum rtx_code code,
5097 machine_mode mode,
5098 rtx op0, rtx op1)
5100 rtx tem;
5101 rtx trueop0;
5102 rtx trueop1;
5104 gcc_assert (mode != VOIDmode
5105 || (GET_MODE (op0) == VOIDmode
5106 && GET_MODE (op1) == VOIDmode));
5108 /* If op0 is a compare, extract the comparison arguments from it. */
5109 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
5111 op1 = XEXP (op0, 1);
5112 op0 = XEXP (op0, 0);
5114 if (GET_MODE (op0) != VOIDmode)
5115 mode = GET_MODE (op0);
5116 else if (GET_MODE (op1) != VOIDmode)
5117 mode = GET_MODE (op1);
5118 else
5119 return 0;
5122 /* We can't simplify MODE_CC values since we don't know what the
5123 actual comparison is. */
5124 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
5125 return 0;
5127 /* Make sure the constant is second. */
5128 if (swap_commutative_operands_p (op0, op1))
5130 std::swap (op0, op1);
5131 code = swap_condition (code);
5134 trueop0 = avoid_constant_pool_reference (op0);
5135 trueop1 = avoid_constant_pool_reference (op1);
5137 /* For integer comparisons of A and B maybe we can simplify A - B and
5138 then simplify a comparison of that with zero. If A and B are both either
5139 a register or a CONST_INT, this can't help; testing for these cases will
5140 prevent infinite recursion here and speed things up.
5142 We can only do this for EQ and NE comparisons; otherwise we may
5143 lose or introduce overflow, which we cannot disregard as undefined
5144 since we do not know the signedness of the operation on either the
5145 left or the right hand side of the comparison. */
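/* Illustrative example: comparing (plus (reg x) (const_int 4)) with
   (plus (reg x) (const_int 2)) for EQ: the difference simplifies to
   (const_int 2), and (eq (const_int 2) (const_int 0)) folds to
   const0_rtx. */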
5147 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
5148 && (code == EQ || code == NE)
5149 && ! ((REG_P (op0) || CONST_INT_P (trueop0))
5150 && (REG_P (op1) || CONST_INT_P (trueop1)))
5151 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
5152 /* We cannot do this if tem is a nonzero address. */
5153 && ! nonzero_address_p (tem))
5154 return simplify_const_relational_operation (signed_condition (code),
5155 mode, tem, const0_rtx);
5157 if (! HONOR_NANS (mode) && code == ORDERED)
5158 return const_true_rtx;
5160 if (! HONOR_NANS (mode) && code == UNORDERED)
5161 return const0_rtx;
5163 /* For modes without NaNs, if the two operands are equal, we know the
5164 result except if they have side-effects. Even with NaNs we know
5165 the result of unordered comparisons and, if signaling NaNs are
5166 irrelevant, also the result of LT/GT/LTGT. */
5167 if ((! HONOR_NANS (trueop0)
5168 || code == UNEQ || code == UNLE || code == UNGE
5169 || ((code == LT || code == GT || code == LTGT)
5170 && ! HONOR_SNANS (trueop0)))
5171 && rtx_equal_p (trueop0, trueop1)
5172 && ! side_effects_p (trueop0))
5173 return comparison_result (code, CMP_EQ);
5175 /* If the operands are floating-point constants, see if we can fold
5176 the result. */
5177 if (CONST_DOUBLE_AS_FLOAT_P (trueop0)
5178 && CONST_DOUBLE_AS_FLOAT_P (trueop1)
5179 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
5181 const REAL_VALUE_TYPE *d0 = CONST_DOUBLE_REAL_VALUE (trueop0);
5182 const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);
5184 /* Comparisons are unordered iff at least one of the values is NaN. */
5185 if (REAL_VALUE_ISNAN (*d0) || REAL_VALUE_ISNAN (*d1))
5186 switch (code)
5188 case UNEQ:
5189 case UNLT:
5190 case UNGT:
5191 case UNLE:
5192 case UNGE:
5193 case NE:
5194 case UNORDERED:
5195 return const_true_rtx;
5196 case EQ:
5197 case LT:
5198 case GT:
5199 case LE:
5200 case GE:
5201 case LTGT:
5202 case ORDERED:
5203 return const0_rtx;
5204 default:
5205 return 0;
5208 return comparison_result (code,
5209 (real_equal (d0, d1) ? CMP_EQ :
5210 real_less (d0, d1) ? CMP_LT : CMP_GT));
5213 /* Otherwise, see if the operands are both integers. */
5214 if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
5215 && CONST_SCALAR_INT_P (trueop0) && CONST_SCALAR_INT_P (trueop1))
5217 /* It would be nice if we really had a mode here. However, the
5218 largest int representable on the target is as good as
5219 infinite. */
5220 machine_mode cmode = (mode == VOIDmode) ? MAX_MODE_INT : mode;
5221 rtx_mode_t ptrueop0 = rtx_mode_t (trueop0, cmode);
5222 rtx_mode_t ptrueop1 = rtx_mode_t (trueop1, cmode);
5224 if (wi::eq_p (ptrueop0, ptrueop1))
5225 return comparison_result (code, CMP_EQ);
5226 else
5228 int cr = wi::lts_p (ptrueop0, ptrueop1) ? CMP_LT : CMP_GT;
5229 cr |= wi::ltu_p (ptrueop0, ptrueop1) ? CMP_LTU : CMP_GTU;
5230 return comparison_result (code, cr);
5234 /* Optimize comparisons with upper and lower bounds. */
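/* Illustrative example: if nonzero_bits proves TRUEOP0 fits in 8 bits
   (mmin == 0, mmax == 255), then (gtu trueop0 (const_int 255)) folds
   to const0_rtx and (leu trueop0 (const_int 255)) to const_true_rtx. */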
5235 scalar_int_mode int_mode;
5236 if (CONST_INT_P (trueop1)
5237 && is_a <scalar_int_mode> (mode, &int_mode)
5238 && HWI_COMPUTABLE_MODE_P (int_mode)
5239 && !side_effects_p (trueop0))
5241 int sign;
5242 unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, int_mode);
5243 HOST_WIDE_INT val = INTVAL (trueop1);
5244 HOST_WIDE_INT mmin, mmax;
5246 if (code == GEU
5247 || code == LEU
5248 || code == GTU
5249 || code == LTU)
5250 sign = 0;
5251 else
5252 sign = 1;
5254 /* Get a reduced range if the sign bit is zero. */
5255 if (nonzero <= (GET_MODE_MASK (int_mode) >> 1))
5257 mmin = 0;
5258 mmax = nonzero;
5260 else
5262 rtx mmin_rtx, mmax_rtx;
5263 get_mode_bounds (int_mode, sign, int_mode, &mmin_rtx, &mmax_rtx);
5265 mmin = INTVAL (mmin_rtx);
5266 mmax = INTVAL (mmax_rtx);
5267 if (sign)
5269 unsigned int sign_copies
5270 = num_sign_bit_copies (trueop0, int_mode);
5272 mmin >>= (sign_copies - 1);
5273 mmax >>= (sign_copies - 1);
5277 switch (code)
5279 /* x >= y is always true for y <= mmin, always false for y > mmax. */
5280 case GEU:
5281 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
5282 return const_true_rtx;
5283 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
5284 return const0_rtx;
5285 break;
5286 case GE:
5287 if (val <= mmin)
5288 return const_true_rtx;
5289 if (val > mmax)
5290 return const0_rtx;
5291 break;
5293 /* x <= y is always true for y >= mmax, always false for y < mmin. */
5294 case LEU:
5295 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
5296 return const_true_rtx;
5297 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
5298 return const0_rtx;
5299 break;
5300 case LE:
5301 if (val >= mmax)
5302 return const_true_rtx;
5303 if (val < mmin)
5304 return const0_rtx;
5305 break;
5307 case EQ:
5308 /* x == y is always false for y out of range. */
5309 if (val < mmin || val > mmax)
5310 return const0_rtx;
5311 break;
5313 /* x > y is always false for y >= mmax, always true for y < mmin. */
5314 case GTU:
5315 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
5316 return const0_rtx;
5317 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
5318 return const_true_rtx;
5319 break;
5320 case GT:
5321 if (val >= mmax)
5322 return const0_rtx;
5323 if (val < mmin)
5324 return const_true_rtx;
5325 break;
5327 /* x < y is always false for y <= mmin, always true for y > mmax. */
5328 case LTU:
5329 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
5330 return const0_rtx;
5331 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
5332 return const_true_rtx;
5333 break;
5334 case LT:
5335 if (val <= mmin)
5336 return const0_rtx;
5337 if (val > mmax)
5338 return const_true_rtx;
5339 break;
5341 case NE:
5342 /* x != y is always true for y out of range. */
5343 if (val < mmin || val > mmax)
5344 return const_true_rtx;
5345 break;
5347 default:
5348 break;
5352 /* Optimize integer comparisons with zero. */
5353 if (is_a <scalar_int_mode> (mode, &int_mode)
5354 && trueop1 == const0_rtx
5355 && !side_effects_p (trueop0))
5357 /* Some addresses are known to be nonzero. We don't know
5358 their sign, but equality comparisons are known. */
5359 if (nonzero_address_p (trueop0))
5361 if (code == EQ || code == LEU)
5362 return const0_rtx;
5363 if (code == NE || code == GTU)
5364 return const_true_rtx;
5367 /* See if the first operand is an IOR with a constant. If so, we
5368 may be able to determine the result of this comparison. */
5369 if (GET_CODE (op0) == IOR)
5371 rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
5372 if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
5374 int sign_bitnum = GET_MODE_PRECISION (int_mode) - 1;
5375 int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
5376 && (UINTVAL (inner_const)
5377 & (HOST_WIDE_INT_1U
5378 << sign_bitnum)));
5380 switch (code)
5382 case EQ:
5383 case LEU:
5384 return const0_rtx;
5385 case NE:
5386 case GTU:
5387 return const_true_rtx;
5388 case LT:
5389 case LE:
5390 if (has_sign)
5391 return const_true_rtx;
5392 break;
5393 case GT:
5394 case GE:
5395 if (has_sign)
5396 return const0_rtx;
5397 break;
5398 default:
5399 break;
5405 /* Optimize comparison of ABS with zero. */
5406 if (trueop1 == CONST0_RTX (mode) && !side_effects_p (trueop0)
5407 && (GET_CODE (trueop0) == ABS
5408 || (GET_CODE (trueop0) == FLOAT_EXTEND
5409 && GET_CODE (XEXP (trueop0, 0)) == ABS)))
5411 switch (code)
5413 case LT:
5414 /* Optimize abs(x) < 0.0. */
5415 if (!INTEGRAL_MODE_P (mode) && !HONOR_SNANS (mode))
5416 return const0_rtx;
5417 break;
5419 case GE:
5420 /* Optimize abs(x) >= 0.0. */
5421 if (!INTEGRAL_MODE_P (mode) && !HONOR_NANS (mode))
5422 return const_true_rtx;
5423 break;
5425 case UNGE:
5426 /* Optimize ! (abs(x) < 0.0). */
5427 return const_true_rtx;
5429 default:
5430 break;
5434 return 0;
5437 /* Recognize expressions of the form (X CMP 0) ? VAL : OP (X)
5438 where OP is CLZ or CTZ and VAL is the value from CLZ_DEFINED_VALUE_AT_ZERO
5439 or CTZ_DEFINED_VALUE_AT_ZERO, respectively. Return OP (X) if the
5440 expression can be simplified to that, or NULL_RTX if not.
5441 Assume X is compared against zero with CMP_CODE and the true
5442 arm is TRUE_VAL and the false arm is FALSE_VAL. */
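/* Illustrative example (assuming CLZ_DEFINED_VALUE_AT_ZERO yields 32
   for SImode): (if_then_else (eq x (const_int 0)) (const_int 32)
   (clz:SI x)) collapses to (clz:SI x), since clz already produces 32
   at zero on such a target. */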
5444 static rtx
5445 simplify_cond_clz_ctz (rtx x, rtx_code cmp_code, rtx true_val, rtx false_val)
5447 if (cmp_code != EQ && cmp_code != NE)
5448 return NULL_RTX;
5450 /* Result on X == 0 and X != 0 respectively. */
5451 rtx on_zero, on_nonzero;
5452 if (cmp_code == EQ)
5454 on_zero = true_val;
5455 on_nonzero = false_val;
5457 else
5459 on_zero = false_val;
5460 on_nonzero = true_val;
5463 rtx_code op_code = GET_CODE (on_nonzero);
5464 if ((op_code != CLZ && op_code != CTZ)
5465 || !rtx_equal_p (XEXP (on_nonzero, 0), x)
5466 || !CONST_INT_P (on_zero))
5467 return NULL_RTX;
5469 HOST_WIDE_INT op_val;
5470 scalar_int_mode mode ATTRIBUTE_UNUSED
5471 = as_a <scalar_int_mode> (GET_MODE (XEXP (on_nonzero, 0)));
5472 if (((op_code == CLZ && CLZ_DEFINED_VALUE_AT_ZERO (mode, op_val))
5473 || (op_code == CTZ && CTZ_DEFINED_VALUE_AT_ZERO (mode, op_val)))
5474 && op_val == INTVAL (on_zero))
5475 return on_nonzero;
5477 return NULL_RTX;
5481 /* Simplify CODE, an operation with result mode MODE and three operands,
5482 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
5483 a constant. Return 0 if no simplification is possible. */
5486 simplify_ternary_operation (enum rtx_code code, machine_mode mode,
5487 machine_mode op0_mode, rtx op0, rtx op1,
5488 rtx op2)
5490 bool any_change = false;
5491 rtx tem, trueop2;
5492 scalar_int_mode int_mode, int_op0_mode;
5494 switch (code)
5496 case FMA:
5497 /* Simplify negations around the multiplication. */
5498 /* -a * -b + c => a * b + c. */
5499 if (GET_CODE (op0) == NEG)
5501 tem = simplify_unary_operation (NEG, mode, op1, mode);
5502 if (tem)
5503 op1 = tem, op0 = XEXP (op0, 0), any_change = true;
5505 else if (GET_CODE (op1) == NEG)
5507 tem = simplify_unary_operation (NEG, mode, op0, mode);
5508 if (tem)
5509 op0 = tem, op1 = XEXP (op1, 0), any_change = true;
5512 /* Canonicalize the two multiplication operands. */
5513 /* a * -b + c => -b * a + c. */
5514 if (swap_commutative_operands_p (op0, op1))
5515 std::swap (op0, op1), any_change = true;
5517 if (any_change)
5518 return gen_rtx_FMA (mode, op0, op1, op2);
5519 return NULL_RTX;
5521 case SIGN_EXTRACT:
5522 case ZERO_EXTRACT:
5523 if (CONST_INT_P (op0)
5524 && CONST_INT_P (op1)
5525 && CONST_INT_P (op2)
5526 && is_a <scalar_int_mode> (mode, &int_mode)
5527 && INTVAL (op1) + INTVAL (op2) <= GET_MODE_PRECISION (int_mode)
5528 && HWI_COMPUTABLE_MODE_P (int_mode))
5530 /* Extracting a bit-field from a constant. */
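/* Illustrative example: (zero_extract:SI (const_int 0xa5) (const_int 4)
   (const_int 0)) with !BITS_BIG_ENDIAN shifts right by 0 and keeps the
   low 4 bits, yielding (const_int 5). */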
5531 unsigned HOST_WIDE_INT val = UINTVAL (op0);
5532 HOST_WIDE_INT op1val = INTVAL (op1);
5533 HOST_WIDE_INT op2val = INTVAL (op2);
5534 if (!BITS_BIG_ENDIAN)
5535 val >>= op2val;
5536 else if (is_a <scalar_int_mode> (op0_mode, &int_op0_mode))
5537 val >>= GET_MODE_PRECISION (int_op0_mode) - op2val - op1val;
5538 else
5539 /* Not enough information to calculate the bit position. */
5540 break;
5542 if (HOST_BITS_PER_WIDE_INT != op1val)
5544 /* First zero-extend. */
5545 val &= (HOST_WIDE_INT_1U << op1val) - 1;
5546 /* If desired, propagate sign bit. */
5547 if (code == SIGN_EXTRACT
5548 && (val & (HOST_WIDE_INT_1U << (op1val - 1)))
5549 != 0)
5550 val |= ~ ((HOST_WIDE_INT_1U << op1val) - 1);
5553 return gen_int_mode (val, int_mode);
5555 break;
5557 case IF_THEN_ELSE:
5558 if (CONST_INT_P (op0))
5559 return op0 != const0_rtx ? op1 : op2;
5561 /* Convert c ? a : a into "a". */
5562 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
5563 return op1;
5565 /* Convert a != b ? a : b into "a". */
5566 if (GET_CODE (op0) == NE
5567 && ! side_effects_p (op0)
5568 && ! HONOR_NANS (mode)
5569 && ! HONOR_SIGNED_ZEROS (mode)
5570 && ((rtx_equal_p (XEXP (op0, 0), op1)
5571 && rtx_equal_p (XEXP (op0, 1), op2))
5572 || (rtx_equal_p (XEXP (op0, 0), op2)
5573 && rtx_equal_p (XEXP (op0, 1), op1))))
5574 return op1;
5576 /* Convert a == b ? a : b into "b". */
5577 if (GET_CODE (op0) == EQ
5578 && ! side_effects_p (op0)
5579 && ! HONOR_NANS (mode)
5580 && ! HONOR_SIGNED_ZEROS (mode)
5581 && ((rtx_equal_p (XEXP (op0, 0), op1)
5582 && rtx_equal_p (XEXP (op0, 1), op2))
5583 || (rtx_equal_p (XEXP (op0, 0), op2)
5584 && rtx_equal_p (XEXP (op0, 1), op1))))
5585 return op2;
5587 /* Convert (!c) != {0,...,0} ? a : b into
5588 c != {0,...,0} ? b : a for vector modes. */
5589 if (VECTOR_MODE_P (GET_MODE (op1))
5590 && GET_CODE (op0) == NE
5591 && GET_CODE (XEXP (op0, 0)) == NOT
5592 && GET_CODE (XEXP (op0, 1)) == CONST_VECTOR)
5594 rtx cv = XEXP (op0, 1);
5595 int nunits = CONST_VECTOR_NUNITS (cv);
5596 bool ok = true;
5597 for (int i = 0; i < nunits; ++i)
5598 if (CONST_VECTOR_ELT (cv, i) != const0_rtx)
5600 ok = false;
5601 break;
5603 if (ok)
5605 rtx new_op0 = gen_rtx_NE (GET_MODE (op0),
5606 XEXP (XEXP (op0, 0), 0),
5607 XEXP (op0, 1));
5608 rtx retval = gen_rtx_IF_THEN_ELSE (mode, new_op0, op2, op1);
5609 return retval;
5613 /* Convert x == 0 ? N : clz (x) into clz (x) when
5614 CLZ_DEFINED_VALUE_AT_ZERO is defined to N for the mode of x.
5615 Similarly for ctz (x). */
5616 if (COMPARISON_P (op0) && !side_effects_p (op0)
5617 && XEXP (op0, 1) == const0_rtx)
5619 rtx simplified
5620 = simplify_cond_clz_ctz (XEXP (op0, 0), GET_CODE (op0),
5621 op1, op2);
5622 if (simplified)
5623 return simplified;
5626 if (COMPARISON_P (op0) && ! side_effects_p (op0))
5628 machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
5629 ? GET_MODE (XEXP (op0, 1))
5630 : GET_MODE (XEXP (op0, 0)));
5631 rtx temp;
5633 /* Look for constants in op1 and op2 that let the whole IF_THEN_ELSE collapse to the comparison itself, possibly reversed. */
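/* E.g. on a target where STORE_FLAG_VALUE is 1,
   (if_then_else (lt x y) (const_int 1) (const_int 0)) becomes
   (lt x y); with the two constants swapped, the comparison is
   reversed to (ge x y).  */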
5634 if (CONST_INT_P (op1) && CONST_INT_P (op2))
5636 HOST_WIDE_INT t = INTVAL (op1);
5637 HOST_WIDE_INT f = INTVAL (op2);
5639 if (t == STORE_FLAG_VALUE && f == 0)
5640 code = GET_CODE (op0);
5641 else if (t == 0 && f == STORE_FLAG_VALUE)
5643 enum rtx_code tmp;
5644 tmp = reversed_comparison_code (op0, NULL);
5645 if (tmp == UNKNOWN)
5646 break;
5647 code = tmp;
5649 else
5650 break;
5652 return simplify_gen_relational (code, mode, cmp_mode,
5653 XEXP (op0, 0), XEXP (op0, 1));
5656 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
5657 cmp_mode, XEXP (op0, 0),
5658 XEXP (op0, 1));
5660 /* See if any simplifications were possible. */
5661 if (temp)
5663 if (CONST_INT_P (temp))
5664 return temp == const0_rtx ? op2 : op1;
5665 else
5666 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
5669 break;
5671 case VEC_MERGE:
5672 gcc_assert (GET_MODE (op0) == mode);
5673 gcc_assert (GET_MODE (op1) == mode);
5674 gcc_assert (VECTOR_MODE_P (mode));
5675 trueop2 = avoid_constant_pool_reference (op2);
5676 if (CONST_INT_P (trueop2))
5678 unsigned n_elts = GET_MODE_NUNITS (mode);
5679 unsigned HOST_WIDE_INT sel = UINTVAL (trueop2);
5680 unsigned HOST_WIDE_INT mask;
5681 if (n_elts == HOST_BITS_PER_WIDE_INT)
5682 mask = -1;
5683 else
5684 mask = (HOST_WIDE_INT_1U << n_elts) - 1;
5686 if (!(sel & mask) && !side_effects_p (op0))
5687 return op1;
5688 if ((sel & mask) == mask && !side_effects_p (op1))
5689 return op0;
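/* When both operands are constant vectors, the merge below is done
   element by element; for illustration,
   (vec_merge (const_vector [1 2 3 4]) (const_vector [5 6 7 8])
   (const_int 5)) takes elements 0 and 2 from the first operand,
   giving (const_vector [1 6 3 8]).  */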
5691 rtx trueop0 = avoid_constant_pool_reference (op0);
5692 rtx trueop1 = avoid_constant_pool_reference (op1);
5693 if (GET_CODE (trueop0) == CONST_VECTOR
5694 && GET_CODE (trueop1) == CONST_VECTOR)
5696 rtvec v = rtvec_alloc (n_elts);
5697 unsigned int i;
5699 for (i = 0; i < n_elts; i++)
5700 RTVEC_ELT (v, i) = ((sel & (HOST_WIDE_INT_1U << i))
5701 ? CONST_VECTOR_ELT (trueop0, i)
5702 : CONST_VECTOR_ELT (trueop1, i));
5703 return gen_rtx_CONST_VECTOR (mode, v);
5706 /* Replace (vec_merge (vec_merge a b m) c n) with (vec_merge b c n)
5707 if no element from a appears in the result. */
5708 if (GET_CODE (op0) == VEC_MERGE)
5710 tem = avoid_constant_pool_reference (XEXP (op0, 2));
5711 if (CONST_INT_P (tem))
5713 unsigned HOST_WIDE_INT sel0 = UINTVAL (tem);
5714 if (!(sel & sel0 & mask) && !side_effects_p (XEXP (op0, 0)))
5715 return simplify_gen_ternary (code, mode, mode,
5716 XEXP (op0, 1), op1, op2);
5717 if (!(sel & ~sel0 & mask) && !side_effects_p (XEXP (op0, 1)))
5718 return simplify_gen_ternary (code, mode, mode,
5719 XEXP (op0, 0), op1, op2);
5722 if (GET_CODE (op1) == VEC_MERGE)
5724 tem = avoid_constant_pool_reference (XEXP (op1, 2));
5725 if (CONST_INT_P (tem))
5727 unsigned HOST_WIDE_INT sel1 = UINTVAL (tem);
5728 if (!(~sel & sel1 & mask) && !side_effects_p (XEXP (op1, 0)))
5729 return simplify_gen_ternary (code, mode, mode,
5730 op0, XEXP (op1, 1), op2);
5731 if (!(~sel & ~sel1 & mask) && !side_effects_p (XEXP (op1, 1)))
5732 return simplify_gen_ternary (code, mode, mode,
5733 op0, XEXP (op1, 0), op2);
5737 /* Replace (vec_merge (vec_duplicate (vec_select a parallel (i))) a 1 << i)
5738 with a. */
5739 if (GET_CODE (op0) == VEC_DUPLICATE
5740 && GET_CODE (XEXP (op0, 0)) == VEC_SELECT
5741 && GET_CODE (XEXP (XEXP (op0, 0), 1)) == PARALLEL
5742 && GET_MODE_NUNITS (GET_MODE (XEXP (op0, 0))) == 1)
5744 tem = XVECEXP ((XEXP (XEXP (op0, 0), 1)), 0, 0);
5745 if (CONST_INT_P (tem) && CONST_INT_P (op2))
5747 if (XEXP (XEXP (op0, 0), 0) == op1
5748 && UINTVAL (op2) == HOST_WIDE_INT_1U << UINTVAL (tem))
5749 return op1;
5752 /* Replace (vec_merge (vec_duplicate (X)) (const_vector [A, B])
5753 (const_int N))
5754 with (vec_concat (X) (B)) if N == 1 or
5755 (vec_concat (A) (X)) if N == 2. */
5756 if (GET_CODE (op0) == VEC_DUPLICATE
5757 && GET_CODE (op1) == CONST_VECTOR
5758 && CONST_VECTOR_NUNITS (op1) == 2
5759 && GET_MODE_NUNITS (GET_MODE (op0)) == 2
5760 && IN_RANGE (sel, 1, 2))
5762 rtx newop0 = XEXP (op0, 0);
5763 rtx newop1 = CONST_VECTOR_ELT (op1, 2 - sel);
5764 if (sel == 2)
5765 std::swap (newop0, newop1);
5766 return simplify_gen_binary (VEC_CONCAT, mode, newop0, newop1);
5768 /* Replace (vec_merge (vec_duplicate x) (vec_concat (y) (z)) (const_int N))
5769 with (vec_concat x z) if N == 1, or (vec_concat y x) if N == 2.
5770 Only applies for vectors of two elements. */
5771 if (GET_CODE (op0) == VEC_DUPLICATE
5772 && GET_CODE (op1) == VEC_CONCAT
5773 && GET_MODE_NUNITS (GET_MODE (op0)) == 2
5774 && GET_MODE_NUNITS (GET_MODE (op1)) == 2
5775 && IN_RANGE (sel, 1, 2))
5777 rtx newop0 = XEXP (op0, 0);
5778 rtx newop1 = XEXP (op1, 2 - sel);
5779 rtx otherop = XEXP (op1, sel - 1);
5780 if (sel == 2)
5781 std::swap (newop0, newop1);
5782 /* Don't want to throw away the other part of the vec_concat if
5783 it has side-effects. */
5784 if (!side_effects_p (otherop))
5785 return simplify_gen_binary (VEC_CONCAT, mode, newop0, newop1);
5788 /* Replace (vec_merge (vec_duplicate x) (vec_duplicate y)
5789 (const_int n))
5790 with (vec_concat x y) or (vec_concat y x) depending on value
5791 of N. */
5792 if (GET_CODE (op0) == VEC_DUPLICATE
5793 && GET_CODE (op1) == VEC_DUPLICATE
5794 && GET_MODE_NUNITS (GET_MODE (op0)) == 2
5795 && GET_MODE_NUNITS (GET_MODE (op1)) == 2
5796 && IN_RANGE (sel, 1, 2))
5798 rtx newop0 = XEXP (op0, 0);
5799 rtx newop1 = XEXP (op1, 0);
5800 if (sel == 2)
5801 std::swap (newop0, newop1);
5803 return simplify_gen_binary (VEC_CONCAT, mode, newop0, newop1);
5807 if (rtx_equal_p (op0, op1)
5808 && !side_effects_p (op2) && !side_effects_p (op1))
5809 return op0;
5811 break;
5813 default:
5814 gcc_unreachable ();
5817 return 0;
5820 /* Evaluate a SUBREG of a CONST_INT or CONST_WIDE_INT or CONST_DOUBLE
5821 or CONST_FIXED or CONST_VECTOR, returning another CONST_INT or
5822 CONST_WIDE_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.
5824 Works by unpacking OP into a collection of 8-bit values
5825 represented as a little-endian array of 'unsigned char', selecting by BYTE,
5826 and then repacking them for OUTERMODE. */
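/* For example, on a little-endian target the lowpart SImode subreg of
   (const_int 0x1122334455667788) in DImode unpacks the byte array
   88 77 66 55 44 33 22 11, selects the first four bytes, and repacks
   them as (const_int 0x55667788).  (Illustrative sketch only.)  */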
5828 static rtx
5829 simplify_immed_subreg (fixed_size_mode outermode, rtx op,
5830 fixed_size_mode innermode, unsigned int byte)
5832 enum {
5833 value_bit = 8,
5834 value_mask = (1 << value_bit) - 1
5836 unsigned char value[MAX_BITSIZE_MODE_ANY_MODE / value_bit];
5837 int value_start;
5838 int i;
5839 int elem;
5841 int num_elem;
5842 rtx * elems;
5843 int elem_bitsize;
5844 rtx result_s = NULL;
5845 rtvec result_v = NULL;
5846 enum mode_class outer_class;
5847 scalar_mode outer_submode;
5848 int max_bitsize;
5850 /* Some ports misuse CCmode. */
5851 if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
5852 return op;
5854 /* We have no way to represent a complex constant at the rtl level. */
5855 if (COMPLEX_MODE_P (outermode))
5856 return NULL_RTX;
5858 /* We support any size mode. */
5859 max_bitsize = MAX (GET_MODE_BITSIZE (outermode),
5860 GET_MODE_BITSIZE (innermode));
5862 /* Unpack the value. */
5864 if (GET_CODE (op) == CONST_VECTOR)
5866 num_elem = CONST_VECTOR_NUNITS (op);
5867 elems = &CONST_VECTOR_ELT (op, 0);
5868 elem_bitsize = GET_MODE_UNIT_BITSIZE (innermode);
5870 else
5872 num_elem = 1;
5873 elems = &op;
5874 elem_bitsize = max_bitsize;
5876 /* If this asserts, it is too complicated; reducing value_bit may help. */
5877 gcc_assert (BITS_PER_UNIT % value_bit == 0);
5878 /* I don't know how to handle endianness of sub-units. */
5879 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
5881 for (elem = 0; elem < num_elem; elem++)
5883 unsigned char * vp;
5884 rtx el = elems[elem];
5886 /* Vectors are kept in target memory order. (This is probably
5887 a mistake.) */
5889 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5890 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5891 / BITS_PER_UNIT);
5892 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5893 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5894 unsigned bytele = (subword_byte % UNITS_PER_WORD
5895 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5896 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
5899 switch (GET_CODE (el))
5901 case CONST_INT:
5902 for (i = 0;
5903 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5904 i += value_bit)
5905 *vp++ = INTVAL (el) >> i;
5906 /* CONST_INTs are always logically sign-extended. */
5907 for (; i < elem_bitsize; i += value_bit)
5908 *vp++ = INTVAL (el) < 0 ? -1 : 0;
5909 break;
5911 case CONST_WIDE_INT:
5913 rtx_mode_t val = rtx_mode_t (el, GET_MODE_INNER (innermode));
5914 unsigned char extend = wi::sign_mask (val);
5915 int prec = wi::get_precision (val);
5917 for (i = 0; i < prec && i < elem_bitsize; i += value_bit)
5918 *vp++ = wi::extract_uhwi (val, i, value_bit);
5919 for (; i < elem_bitsize; i += value_bit)
5920 *vp++ = extend;
5922 break;
5924 case CONST_DOUBLE:
5925 if (TARGET_SUPPORTS_WIDE_INT == 0 && GET_MODE (el) == VOIDmode)
5927 unsigned char extend = 0;
5928 /* If this triggers, someone should have generated a
5929 CONST_INT instead. */
5930 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
5932 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5933 *vp++ = CONST_DOUBLE_LOW (el) >> i;
5934 while (i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize)
5936 *vp++
5937 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
5938 i += value_bit;
5941 if (CONST_DOUBLE_HIGH (el) >> (HOST_BITS_PER_WIDE_INT - 1))
5942 extend = -1;
5943 for (; i < elem_bitsize; i += value_bit)
5944 *vp++ = extend;
5946 else
5948 /* This is big enough for anything on the platform. */
5949 long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32];
5950 scalar_float_mode el_mode;
5952 el_mode = as_a <scalar_float_mode> (GET_MODE (el));
5953 int bitsize = GET_MODE_BITSIZE (el_mode);
5955 gcc_assert (bitsize <= elem_bitsize);
5956 gcc_assert (bitsize % value_bit == 0);
5958 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
5959 GET_MODE (el));
5961 /* real_to_target produces its result in words affected by
5962 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5963 and use WORDS_BIG_ENDIAN instead; see the documentation
5964 of SUBREG in rtl.texi. */
5965 for (i = 0; i < bitsize; i += value_bit)
5967 int ibase;
5968 if (WORDS_BIG_ENDIAN)
5969 ibase = bitsize - 1 - i;
5970 else
5971 ibase = i;
5972 *vp++ = tmp[ibase / 32] >> i % 32;
5975 /* It shouldn't matter what's done here, so fill it with
5976 zero. */
5977 for (; i < elem_bitsize; i += value_bit)
5978 *vp++ = 0;
5980 break;
5982 case CONST_FIXED:
5983 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
5985 for (i = 0; i < elem_bitsize; i += value_bit)
5986 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5988 else
5990 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5991 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5992 for (; i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize;
5993 i += value_bit)
5994 *vp++ = CONST_FIXED_VALUE_HIGH (el)
5995 >> (i - HOST_BITS_PER_WIDE_INT);
5996 for (; i < elem_bitsize; i += value_bit)
5997 *vp++ = 0;
5999 break;
6001 default:
6002 gcc_unreachable ();
6006 /* Now, pick the right byte to start with. */
6007 /* Renumber BYTE so that the least-significant byte is byte 0. A special
6008 case is paradoxical SUBREGs, which shouldn't be adjusted since they
6009 will already have offset 0. */
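/* For illustration, with 8-byte words on a fully big-endian target,
   an SImode SUBREG of a DImode value at SUBREG_BYTE 4 covers the
   least-significant four bytes, so BYTE is renumbered from 4 to 0.  */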
6010 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
6012 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
6013 - byte);
6014 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
6015 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
6016 byte = (subword_byte % UNITS_PER_WORD
6017 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
6020 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
6021 so if it's become negative it will instead be very large.) */
6022 gcc_assert (byte < GET_MODE_SIZE (innermode));
6024 /* Convert from bytes to chunks of size value_bit. */
6025 value_start = byte * (BITS_PER_UNIT / value_bit);
6027 /* Re-pack the value. */
6028 num_elem = GET_MODE_NUNITS (outermode);
6030 if (VECTOR_MODE_P (outermode))
6032 result_v = rtvec_alloc (num_elem);
6033 elems = &RTVEC_ELT (result_v, 0);
6035 else
6036 elems = &result_s;
6038 outer_submode = GET_MODE_INNER (outermode);
6039 outer_class = GET_MODE_CLASS (outer_submode);
6040 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
6042 gcc_assert (elem_bitsize % value_bit == 0);
6043 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
6045 for (elem = 0; elem < num_elem; elem++)
6047 unsigned char *vp;
6049 /* Vectors are stored in target memory order. (This is probably
6050 a mistake.) */
6052 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
6053 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
6054 / BITS_PER_UNIT);
6055 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
6056 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
6057 unsigned bytele = (subword_byte % UNITS_PER_WORD
6058 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
6059 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
6062 switch (outer_class)
6064 case MODE_INT:
6065 case MODE_PARTIAL_INT:
6067 int u;
6068 int base = 0;
6069 int units
6070 = (GET_MODE_BITSIZE (outer_submode) + HOST_BITS_PER_WIDE_INT - 1)
6071 / HOST_BITS_PER_WIDE_INT;
6072 HOST_WIDE_INT tmp[MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_WIDE_INT];
6073 wide_int r;
6075 if (GET_MODE_PRECISION (outer_submode) > MAX_BITSIZE_MODE_ANY_INT)
6076 return NULL_RTX;
6077 for (u = 0; u < units; u++)
6079 unsigned HOST_WIDE_INT buf = 0;
6080 for (i = 0;
6081 i < HOST_BITS_PER_WIDE_INT && base + i < elem_bitsize;
6082 i += value_bit)
6083 buf |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
6085 tmp[u] = buf;
6086 base += HOST_BITS_PER_WIDE_INT;
6088 r = wide_int::from_array (tmp, units,
6089 GET_MODE_PRECISION (outer_submode));
6090 #if TARGET_SUPPORTS_WIDE_INT == 0
6091 /* Make sure r will fit into CONST_INT or CONST_DOUBLE. */
6092 if (wi::min_precision (r, SIGNED) > HOST_BITS_PER_DOUBLE_INT)
6093 return NULL_RTX;
6094 #endif
6095 elems[elem] = immed_wide_int_const (r, outer_submode);
6097 break;
6099 case MODE_FLOAT:
6100 case MODE_DECIMAL_FLOAT:
6102 REAL_VALUE_TYPE r;
6103 long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32] = { 0 };
6105 /* real_from_target wants its input in words affected by
6106 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
6107 and use WORDS_BIG_ENDIAN instead; see the documentation
6108 of SUBREG in rtl.texi. */
6109 for (i = 0; i < elem_bitsize; i += value_bit)
6111 int ibase;
6112 if (WORDS_BIG_ENDIAN)
6113 ibase = elem_bitsize - 1 - i;
6114 else
6115 ibase = i;
6116 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
6119 real_from_target (&r, tmp, outer_submode);
6120 elems[elem] = const_double_from_real_value (r, outer_submode);
6122 break;
6124 case MODE_FRACT:
6125 case MODE_UFRACT:
6126 case MODE_ACCUM:
6127 case MODE_UACCUM:
6129 FIXED_VALUE_TYPE f;
6130 f.data.low = 0;
6131 f.data.high = 0;
6132 f.mode = outer_submode;
6134 for (i = 0;
6135 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
6136 i += value_bit)
6137 f.data.low |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
6138 for (; i < elem_bitsize; i += value_bit)
6139 f.data.high |= ((unsigned HOST_WIDE_INT)(*vp++ & value_mask)
6140 << (i - HOST_BITS_PER_WIDE_INT));
6142 elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
6144 break;
6146 default:
6147 gcc_unreachable ();
6150 if (VECTOR_MODE_P (outermode))
6151 return gen_rtx_CONST_VECTOR (outermode, result_v);
6152 else
6153 return result_s;
6156 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
6157 Return 0 if no simplifications are possible. */
6159 simplify_subreg (machine_mode outermode, rtx op,
6160 machine_mode innermode, unsigned int byte)
6162 /* Little bit of sanity checking. */
6163 gcc_assert (innermode != VOIDmode);
6164 gcc_assert (outermode != VOIDmode);
6165 gcc_assert (innermode != BLKmode);
6166 gcc_assert (outermode != BLKmode);
6168 gcc_assert (GET_MODE (op) == innermode
6169 || GET_MODE (op) == VOIDmode);
6171 if ((byte % GET_MODE_SIZE (outermode)) != 0)
6172 return NULL_RTX;
6174 if (byte >= GET_MODE_SIZE (innermode))
6175 return NULL_RTX;
6177 if (outermode == innermode && !byte)
6178 return op;
6180 if (byte % GET_MODE_UNIT_SIZE (innermode) == 0)
6182 rtx elt;
6184 if (VECTOR_MODE_P (outermode)
6185 && GET_MODE_INNER (outermode) == GET_MODE_INNER (innermode)
6186 && vec_duplicate_p (op, &elt))
6187 return gen_vec_duplicate (outermode, elt);
6189 if (outermode == GET_MODE_INNER (innermode)
6190 && vec_duplicate_p (op, &elt))
6191 return elt;
6194 if (CONST_SCALAR_INT_P (op)
6195 || CONST_DOUBLE_AS_FLOAT_P (op)
6196 || GET_CODE (op) == CONST_FIXED
6197 || GET_CODE (op) == CONST_VECTOR)
6199 /* simplify_immed_subreg deconstructs OP into bytes and constructs
6200 the result from bytes, so it only works if the sizes of the modes
6201 are known at compile time. Cases that apply to general modes
6202 should be handled here before calling simplify_immed_subreg. */
6203 fixed_size_mode fs_outermode, fs_innermode;
6204 if (is_a <fixed_size_mode> (outermode, &fs_outermode)
6205 && is_a <fixed_size_mode> (innermode, &fs_innermode))
6206 return simplify_immed_subreg (fs_outermode, op, fs_innermode, byte);
6208 return NULL_RTX;
6211 /* Changing mode twice with SUBREG => just change it once,
6212 or not at all if changing back to the starting mode of OP. */
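/* E.g., assuming a little-endian target,
   (subreg:QI (subreg:HI (reg:SI x) 0) 0) becomes
   (subreg:QI (reg:SI x) 0), while (subreg:SI (subreg:HI (reg:SI x) 0) 0)
   collapses back to (reg:SI x).  */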
6213 if (GET_CODE (op) == SUBREG)
6215 machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
6216 rtx newx;
6218 if (outermode == innermostmode
6219 && byte == 0 && SUBREG_BYTE (op) == 0)
6220 return SUBREG_REG (op);
6222 /* Work out the memory offset of the final OUTERMODE value relative
6223 to the inner value of OP. */
6224 HOST_WIDE_INT mem_offset = subreg_memory_offset (outermode,
6225 innermode, byte);
6226 HOST_WIDE_INT op_mem_offset = subreg_memory_offset (op);
6227 HOST_WIDE_INT final_offset = mem_offset + op_mem_offset;
6229 /* See whether resulting subreg will be paradoxical. */
6230 if (!paradoxical_subreg_p (outermode, innermostmode))
6232 /* In nonparadoxical subregs we can't handle negative offsets. */
6233 if (final_offset < 0)
6234 return NULL_RTX;
6235 /* Bail out in case resulting subreg would be incorrect. */
6236 if (final_offset % GET_MODE_SIZE (outermode)
6237 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
6238 return NULL_RTX;
6240 else
6242 HOST_WIDE_INT required_offset
6243 = subreg_memory_offset (outermode, innermostmode, 0);
6244 if (final_offset != required_offset)
6245 return NULL_RTX;
6246 /* Paradoxical subregs always have byte offset 0. */
6247 final_offset = 0;
6250 /* Recurse for further possible simplifications. */
6251 newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
6252 final_offset);
6253 if (newx)
6254 return newx;
6255 if (validate_subreg (outermode, innermostmode,
6256 SUBREG_REG (op), final_offset))
6258 newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
6259 if (SUBREG_PROMOTED_VAR_P (op)
6260 && SUBREG_PROMOTED_SIGN (op) >= 0
6261 && GET_MODE_CLASS (outermode) == MODE_INT
6262 && IN_RANGE (GET_MODE_SIZE (outermode),
6263 GET_MODE_SIZE (innermode),
6264 GET_MODE_SIZE (innermostmode))
6265 && subreg_lowpart_p (newx))
6267 SUBREG_PROMOTED_VAR_P (newx) = 1;
6268 SUBREG_PROMOTED_SET (newx, SUBREG_PROMOTED_GET (op));
6270 return newx;
6272 return NULL_RTX;
6275 /* SUBREG of a hard register => just change the register number
6276 and/or mode. If the hard register is not valid in that mode,
6277 suppress this simplification. If the hard register is the stack,
6278 frame, or argument pointer, leave this as a SUBREG. */
6280 if (REG_P (op) && HARD_REGISTER_P (op))
6282 unsigned int regno, final_regno;
6284 regno = REGNO (op);
6285 final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
6286 if (HARD_REGISTER_NUM_P (final_regno))
6288 rtx x = gen_rtx_REG_offset (op, outermode, final_regno,
6289 subreg_memory_offset (outermode,
6290 innermode, byte));
6292 /* Propagate original regno. We don't have any way to specify
6293 the offset inside original regno, so do so only for lowpart.
6294 The information is used only by alias analysis, which cannot
6295 grok partial registers anyway. */
6297 if (subreg_lowpart_offset (outermode, innermode) == byte)
6298 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
6299 return x;
6303 /* If we have a SUBREG of a register that we are replacing and we are
6304 replacing it with a MEM, make a new MEM and try replacing the
6305 SUBREG with it. Don't do this if the MEM has a mode-dependent address
6306 or if we would be widening it. */
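/* For illustration, (subreg:QI (mem:SI a) 0) is narrowed to (mem:QI a)
   by adjust_address_nv, which folds the byte offset into the address,
   provided the address is not mode-dependent.  */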
6308 if (MEM_P (op)
6309 && ! mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op))
6310 /* Allow splitting of volatile memory references in case we don't
6311 have an instruction to move the whole thing. */
6312 && (! MEM_VOLATILE_P (op)
6313 || ! have_insn_for (SET, innermode))
6314 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
6315 return adjust_address_nv (op, outermode, byte);
6317 /* Handle complex or vector values represented as CONCAT or VEC_CONCAT
6318 of two parts. */
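/* For illustration, (subreg:SF (concat:SC (reg:SF r) (reg:SF i)) 0)
   selects the first part and simplifies to (reg:SF r); with BYTE 4 it
   would select (reg:SF i).  */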
6319 if (GET_CODE (op) == CONCAT
6320 || GET_CODE (op) == VEC_CONCAT)
6322 unsigned int part_size, final_offset;
6323 rtx part, res;
6325 machine_mode part_mode = GET_MODE (XEXP (op, 0));
6326 if (part_mode == VOIDmode)
6327 part_mode = GET_MODE_INNER (GET_MODE (op));
6328 part_size = GET_MODE_SIZE (part_mode);
6329 if (byte < part_size)
6331 part = XEXP (op, 0);
6332 final_offset = byte;
6334 else
6336 part = XEXP (op, 1);
6337 final_offset = byte - part_size;
6340 if (final_offset + GET_MODE_SIZE (outermode) > part_size)
6341 return NULL_RTX;
6343 part_mode = GET_MODE (part);
6344 if (part_mode == VOIDmode)
6345 part_mode = GET_MODE_INNER (GET_MODE (op));
6346 res = simplify_subreg (outermode, part, part_mode, final_offset);
6347 if (res)
6348 return res;
6349 if (validate_subreg (outermode, part_mode, part, final_offset))
6350 return gen_rtx_SUBREG (outermode, part, final_offset);
6351 return NULL_RTX;
6354 /* A SUBREG resulting from a zero extension may fold to zero if
6355 it extracts higher bits than the ZERO_EXTEND's source provides. */
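/* For illustration, on a little-endian target
   (subreg:SI (zero_extend:DI (reg:HI x)) 4) reads only bits 32..63,
   which the extension guarantees are zero, so it folds to
   (const_int 0).  */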
6356 if (GET_CODE (op) == ZERO_EXTEND && SCALAR_INT_MODE_P (innermode))
6358 unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
6359 if (bitpos >= GET_MODE_PRECISION (GET_MODE (XEXP (op, 0))))
6360 return CONST0_RTX (outermode);
6363 scalar_int_mode int_outermode, int_innermode;
6364 if (is_a <scalar_int_mode> (outermode, &int_outermode)
6365 && is_a <scalar_int_mode> (innermode, &int_innermode)
6366 && (GET_MODE_PRECISION (int_outermode)
6367 < GET_MODE_PRECISION (int_innermode))
6368 && byte == subreg_lowpart_offset (int_outermode, int_innermode))
6370 rtx tem = simplify_truncation (int_outermode, op, int_innermode);
6371 if (tem)
6372 return tem;
6375 return NULL_RTX;
6378 /* Make a SUBREG operation or equivalent if it folds. */
6381 simplify_gen_subreg (machine_mode outermode, rtx op,
6382 machine_mode innermode, unsigned int byte)
6384 rtx newx;
6386 newx = simplify_subreg (outermode, op, innermode, byte);
6387 if (newx)
6388 return newx;
6390 if (GET_CODE (op) == SUBREG
6391 || GET_CODE (op) == CONCAT
6392 || GET_MODE (op) == VOIDmode)
6393 return NULL_RTX;
6395 if (validate_subreg (outermode, innermode, op, byte))
6396 return gen_rtx_SUBREG (outermode, op, byte);
6398 return NULL_RTX;
6401 /* Generate a subreg that extracts the least significant part of EXPR
6402 (in mode INNER_MODE) as a value of mode OUTER_MODE. */
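/* For example, lowpart_subreg (SImode, x, DImode) yields the low half
   of the DImode value X: (subreg:SI (reg:DI x) 0) on a little-endian
   target, (subreg:SI (reg:DI x) 4) on a big-endian one, or a
   further-simplified form when the subreg folds.  */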
6405 lowpart_subreg (machine_mode outer_mode, rtx expr,
6406 machine_mode inner_mode)
6408 return simplify_gen_subreg (outer_mode, expr, inner_mode,
6409 subreg_lowpart_offset (outer_mode, inner_mode));
6412 /* Simplify X, an rtx expression.
6414 Return the simplified expression or NULL if no simplifications
6415 were possible.
6417 This is the preferred entry point into the simplification routines;
6418 however, we still allow passes to call the more specific routines.
6420 Right now GCC has three (yes, three) major bodies of RTL simplification
6421 code that need to be unified.
6423 1. fold_rtx in cse.c. This code uses various CSE specific
6424 information to aid in RTL simplification.
6426 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
6427 it uses combine specific information to aid in RTL
6428 simplification.
6430 3. The routines in this file.
6433 Long term we want to have only one body of simplification code; to
6434 get to that state I recommend the following steps:
6436 1. Pore over fold_rtx & simplify_rtx and move any simplifications
6437 which do not depend on pass-specific state into these routines.
6439 2. As code is moved by #1, change fold_rtx & simplify_rtx to
6440 use this routine whenever possible.
6442 3. Allow for pass dependent state to be provided to these
6443 routines and add simplifications based on the pass dependent
6444 state. Remove code from cse.c & combine.c that becomes
6445 redundant/dead.
6447 It will take time, but ultimately the compiler will be easier to
6448 maintain and improve. It's totally silly that when we add a
6449 simplification it needs to be added to 4 places (3 for RTL
6450 simplification and 1 for tree simplification). */
6453 simplify_rtx (const_rtx x)
6455 const enum rtx_code code = GET_CODE (x);
6456 const machine_mode mode = GET_MODE (x);
6458 switch (GET_RTX_CLASS (code))
6460 case RTX_UNARY:
6461 return simplify_unary_operation (code, mode,
6462 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
6463 case RTX_COMM_ARITH:
6464 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
6465 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
6467 /* Fall through. */
6469 case RTX_BIN_ARITH:
6470 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
6472 case RTX_TERNARY:
6473 case RTX_BITFIELD_OPS:
6474 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
6475 XEXP (x, 0), XEXP (x, 1),
6476 XEXP (x, 2));
6478 case RTX_COMPARE:
6479 case RTX_COMM_COMPARE:
6480 return simplify_relational_operation (code, mode,
6481 ((GET_MODE (XEXP (x, 0))
6482 != VOIDmode)
6483 ? GET_MODE (XEXP (x, 0))
6484 : GET_MODE (XEXP (x, 1))),
6485 XEXP (x, 0),
6486 XEXP (x, 1));
6488 case RTX_EXTRA:
6489 if (code == SUBREG)
6490 return simplify_subreg (mode, SUBREG_REG (x),
6491 GET_MODE (SUBREG_REG (x)),
6492 SUBREG_BYTE (x));
6493 break;
6495 case RTX_OBJ:
6496 if (code == LO_SUM)
6498 /* Convert (lo_sum (high FOO) FOO) to FOO. */
6499 if (GET_CODE (XEXP (x, 0)) == HIGH
6500 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
6501 return XEXP (x, 1);
6503 break;
6505 default:
6506 break;
6508 return NULL;
6511 #if CHECKING_P
6513 namespace selftest {
6515 /* Make a unique pseudo REG of mode MODE for use by selftests. */
6517 static rtx
6518 make_test_reg (machine_mode mode)
6520 static int test_reg_num = LAST_VIRTUAL_REGISTER + 1;
6522 return gen_rtx_REG (mode, test_reg_num++);
6525 /* Test vector simplifications involving VEC_DUPLICATE in which the
6526 operands and result have vector mode MODE. SCALAR_REG is a pseudo
6527 register that holds one element of MODE. */
6529 static void
6530 test_vector_ops_duplicate (machine_mode mode, rtx scalar_reg)
6532 scalar_mode inner_mode = GET_MODE_INNER (mode);
6533 rtx duplicate = gen_rtx_VEC_DUPLICATE (mode, scalar_reg);
6534 unsigned int nunits = GET_MODE_NUNITS (mode);
6535 if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
6537 /* Test some simple unary cases with VEC_DUPLICATE arguments. */
6538 rtx not_scalar_reg = gen_rtx_NOT (inner_mode, scalar_reg);
6539 rtx duplicate_not = gen_rtx_VEC_DUPLICATE (mode, not_scalar_reg);
6540 ASSERT_RTX_EQ (duplicate,
6541 simplify_unary_operation (NOT, mode,
6542 duplicate_not, mode));
6544 rtx neg_scalar_reg = gen_rtx_NEG (inner_mode, scalar_reg);
6545 rtx duplicate_neg = gen_rtx_VEC_DUPLICATE (mode, neg_scalar_reg);
6546 ASSERT_RTX_EQ (duplicate,
6547 simplify_unary_operation (NEG, mode,
6548 duplicate_neg, mode));
6550 /* Test some simple binary cases with VEC_DUPLICATE arguments. */
6551 ASSERT_RTX_EQ (duplicate,
6552 simplify_binary_operation (PLUS, mode, duplicate,
6553 CONST0_RTX (mode)));
6555 ASSERT_RTX_EQ (duplicate,
6556 simplify_binary_operation (MINUS, mode, duplicate,
6557 CONST0_RTX (mode)));
6559 ASSERT_RTX_PTR_EQ (CONST0_RTX (mode),
6560 simplify_binary_operation (MINUS, mode, duplicate,
6561 duplicate));
6564 /* Test a scalar VEC_SELECT of a VEC_DUPLICATE. */
6565 rtx zero_par = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const0_rtx));
6566 ASSERT_RTX_PTR_EQ (scalar_reg,
6567 simplify_binary_operation (VEC_SELECT, inner_mode,
6568 duplicate, zero_par));
6570 /* And again with the final element. */
6571 rtx last_index = gen_int_mode (GET_MODE_NUNITS (mode) - 1, word_mode);
6572 rtx last_par = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, last_index));
6573 ASSERT_RTX_PTR_EQ (scalar_reg,
6574 simplify_binary_operation (VEC_SELECT, inner_mode,
6575 duplicate, last_par));
6577 /* Test a scalar subreg of a VEC_DUPLICATE. */
6578 unsigned int offset = subreg_lowpart_offset (inner_mode, mode);
6579 ASSERT_RTX_EQ (scalar_reg,
6580 simplify_gen_subreg (inner_mode, duplicate,
6581 mode, offset));
6583 machine_mode narrower_mode;
6584 if (nunits > 2
6585 && mode_for_vector (inner_mode, 2).exists (&narrower_mode)
6586 && VECTOR_MODE_P (narrower_mode))
6588 /* Test VEC_SELECT of a vector. */
6589 rtx vec_par
6590 = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, const1_rtx, const0_rtx));
6591 rtx narrower_duplicate
6592 = gen_rtx_VEC_DUPLICATE (narrower_mode, scalar_reg);
6593 ASSERT_RTX_EQ (narrower_duplicate,
6594 simplify_binary_operation (VEC_SELECT, narrower_mode,
6595 duplicate, vec_par));
6597 /* Test a vector subreg of a VEC_DUPLICATE. */
6598 unsigned int offset = subreg_lowpart_offset (narrower_mode, mode);
6599 ASSERT_RTX_EQ (narrower_duplicate,
6600 simplify_gen_subreg (narrower_mode, duplicate,
6601 mode, offset));
6605 /* Test vector simplifications involving VEC_SERIES in which the
6606 operands and result have vector mode MODE. SCALAR_REG is a pseudo
6607 register that holds one element of MODE. */
6609 static void
6610 test_vector_ops_series (machine_mode mode, rtx scalar_reg)
6612 /* Test unary cases with VEC_SERIES arguments. */
6613 scalar_mode inner_mode = GET_MODE_INNER (mode);
6614 rtx duplicate = gen_rtx_VEC_DUPLICATE (mode, scalar_reg);
6615 rtx neg_scalar_reg = gen_rtx_NEG (inner_mode, scalar_reg);
6616 rtx series_0_r = gen_rtx_VEC_SERIES (mode, const0_rtx, scalar_reg);
6617 rtx series_0_nr = gen_rtx_VEC_SERIES (mode, const0_rtx, neg_scalar_reg);
6618 rtx series_nr_1 = gen_rtx_VEC_SERIES (mode, neg_scalar_reg, const1_rtx);
6619 rtx series_r_m1 = gen_rtx_VEC_SERIES (mode, scalar_reg, constm1_rtx);
6620 rtx series_r_r = gen_rtx_VEC_SERIES (mode, scalar_reg, scalar_reg);
6621 rtx series_nr_nr = gen_rtx_VEC_SERIES (mode, neg_scalar_reg,
6622 neg_scalar_reg);
6623 ASSERT_RTX_EQ (series_0_r,
6624 simplify_unary_operation (NEG, mode, series_0_nr, mode));
6625 ASSERT_RTX_EQ (series_r_m1,
6626 simplify_unary_operation (NEG, mode, series_nr_1, mode));
6627 ASSERT_RTX_EQ (series_r_r,
6628 simplify_unary_operation (NEG, mode, series_nr_nr, mode));
6630 /* Test that a VEC_SERIES with a zero step folds to a VEC_DUPLICATE. */
6631 ASSERT_RTX_EQ (duplicate,
6632 simplify_binary_operation (VEC_SERIES, mode,
6633 scalar_reg, const0_rtx));
6635 /* Test PLUS and MINUS with VEC_SERIES. */
6636 rtx series_0_1 = gen_const_vec_series (mode, const0_rtx, const1_rtx);
6637 rtx series_0_m1 = gen_const_vec_series (mode, const0_rtx, constm1_rtx);
6638 rtx series_r_1 = gen_rtx_VEC_SERIES (mode, scalar_reg, const1_rtx);
6639 ASSERT_RTX_EQ (series_r_r,
6640 simplify_binary_operation (PLUS, mode, series_0_r,
6641 duplicate));
6642 ASSERT_RTX_EQ (series_r_1,
6643 simplify_binary_operation (PLUS, mode, duplicate,
6644 series_0_1));
6645 ASSERT_RTX_EQ (series_r_m1,
6646 simplify_binary_operation (PLUS, mode, duplicate,
6647 series_0_m1));
6648 ASSERT_RTX_EQ (series_0_r,
6649 simplify_binary_operation (MINUS, mode, series_r_r,
6650 duplicate));
6651 ASSERT_RTX_EQ (series_r_m1,
6652 simplify_binary_operation (MINUS, mode, duplicate,
6653 series_0_1));
6654 ASSERT_RTX_EQ (series_r_1,
6655 simplify_binary_operation (MINUS, mode, duplicate,
6656 series_0_m1));
6657 ASSERT_RTX_EQ (series_0_m1,
6658 simplify_binary_operation (VEC_SERIES, mode, const0_rtx,
6659 constm1_rtx));
6662 /* Verify some simplifications involving vectors. */
6664 static void
6665 test_vector_ops ()
6667 for (unsigned int i = 0; i < NUM_MACHINE_MODES; ++i)
6669 machine_mode mode = (machine_mode) i;
6670 if (VECTOR_MODE_P (mode))
6672 rtx scalar_reg = make_test_reg (GET_MODE_INNER (mode));
6673 test_vector_ops_duplicate (mode, scalar_reg);
6674 if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT
6675 && GET_MODE_NUNITS (mode) > 2)
6676 test_vector_ops_series (mode, scalar_reg);
6681 /* Run all of the selftests within this file. */
6683 void
6684 simplify_rtx_c_tests ()
6686 test_vector_ops ();
6689 } // namespace selftest
6691 #endif /* CHECKING_P */