[official-gcc.git] / gcc-4_9 / gcc / simplify-rtx.c  (blob b98e507628dfe5948209f1d9248acbbb31ce91f0)
1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987-2014 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "tm.h"
25 #include "rtl.h"
26 #include "tree.h"
27 #include "varasm.h"
28 #include "tm_p.h"
29 #include "regs.h"
30 #include "hard-reg-set.h"
31 #include "flags.h"
32 #include "insn-config.h"
33 #include "recog.h"
34 #include "function.h"
35 #include "expr.h"
36 #include "diagnostic-core.h"
37 #include "ggc.h"
38 #include "target.h"
40 /* Simplification and canonicalization of RTL. */
42 /* Much code operates on (low, high) pairs; the low value is an
43 unsigned wide int, the high value a signed wide int. We
44 occasionally need to sign extend from low to high as if low were a
45 signed wide int. */
46 #define HWI_SIGN_EXTEND(low) \
47 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
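/* Editorial illustration (not part of the original file): how the
   macro behaves, assuming a 64-bit HOST_WIDE_INT.  */
#if 0
  unsigned HOST_WIDE_INT low1 = (unsigned HOST_WIDE_INT) -5;
  HOST_WIDE_INT high1 = HWI_SIGN_EXTEND (low1);  /* -1: sign bit of LOW1 set.  */
  unsigned HOST_WIDE_INT low2 = 7;
  HOST_WIDE_INT high2 = HWI_SIGN_EXTEND (low2);  /* 0: sign bit of LOW2 clear.  */
#endif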
49 static rtx neg_const_int (enum machine_mode, const_rtx);
50 static bool plus_minus_operand_p (const_rtx);
51 static bool simplify_plus_minus_op_data_cmp (rtx, rtx);
52 static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx, rtx);
53 static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
54 unsigned int);
55 static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
56 rtx, rtx);
57 static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
58 enum machine_mode, rtx, rtx);
59 static rtx simplify_unary_operation_1 (enum rtx_code, enum machine_mode, rtx);
60 static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode,
61 rtx, rtx, rtx, rtx);
63 /* Negate a CONST_INT rtx, truncating (because a conversion from a
64 maximally negative number can overflow). */
65 static rtx
66 neg_const_int (enum machine_mode mode, const_rtx i)
68 return gen_int_mode (-(unsigned HOST_WIDE_INT) INTVAL (i), mode);
71 /* Test whether expression, X, is an immediate constant that represents
72 the most significant bit of machine mode MODE. */
74 bool
75 mode_signbit_p (enum machine_mode mode, const_rtx x)
77 unsigned HOST_WIDE_INT val;
78 unsigned int width;
80 if (GET_MODE_CLASS (mode) != MODE_INT)
81 return false;
83 width = GET_MODE_PRECISION (mode);
84 if (width == 0)
85 return false;
87 if (width <= HOST_BITS_PER_WIDE_INT
88 && CONST_INT_P (x))
89 val = INTVAL (x);
90 else if (width <= HOST_BITS_PER_DOUBLE_INT
91 && CONST_DOUBLE_AS_INT_P (x)
92 && CONST_DOUBLE_LOW (x) == 0)
94 val = CONST_DOUBLE_HIGH (x);
95 width -= HOST_BITS_PER_WIDE_INT;
97 else
98 /* FIXME: We don't yet have a representation for wider modes. */
99 return false;
101 if (width < HOST_BITS_PER_WIDE_INT)
102 val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
103 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
106 /* Test whether VAL is equal to the most significant bit of mode MODE
107 (after masking with the mode mask of MODE). Returns false if the
108 precision of MODE is too large to handle. */
110 bool
111 val_signbit_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
113 unsigned int width;
115 if (GET_MODE_CLASS (mode) != MODE_INT)
116 return false;
118 width = GET_MODE_PRECISION (mode);
119 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
120 return false;
122 val &= GET_MODE_MASK (mode);
123 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
126 /* Test whether the most significant bit of mode MODE is set in VAL.
127 Returns false if the precision of MODE is too large to handle. */
128 bool
129 val_signbit_known_set_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
131 unsigned int width;
133 if (GET_MODE_CLASS (mode) != MODE_INT)
134 return false;
136 width = GET_MODE_PRECISION (mode);
137 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
138 return false;
140 val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
141 return val != 0;
144 /* Test whether the most significant bit of mode MODE is clear in VAL.
145 Returns false if the precision of MODE is too large to handle. */
146 bool
147 val_signbit_known_clear_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
149 unsigned int width;
151 if (GET_MODE_CLASS (mode) != MODE_INT)
152 return false;
154 width = GET_MODE_PRECISION (mode);
155 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
156 return false;
158 val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
159 return val == 0;
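/* Editorial examples (not part of the original file): assuming 32-bit
   SImode and a 64-bit HOST_WIDE_INT,

     mode_signbit_p (SImode, x) is true for x == (const_int 0x80000000),
     val_signbit_p (SImode, 0x80000000) is true,
     val_signbit_known_set_p (SImode, 0xffffffff) is true (bit 31 is set),
     val_signbit_known_clear_p (SImode, 0x7fffffff) is true (bit 31 is clear).  */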
162 /* Make a binary operation by properly ordering the operands and
163 seeing if the expression folds. */
166 simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
167 rtx op1)
169 rtx tem;
171 /* If this simplifies, do it. */
172 tem = simplify_binary_operation (code, mode, op0, op1);
173 if (tem)
174 return tem;
176 /* Put complex operands first and constants second if commutative. */
177 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
178 && swap_commutative_operands_p (op0, op1))
179 tem = op0, op0 = op1, op1 = tem;
181 return gen_rtx_fmt_ee (code, mode, op0, op1);
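/* Editorial usage sketch (not part of the original file): callers get
   back either a folded constant or a canonically ordered expression,
   e.g.

     simplify_gen_binary (PLUS, SImode,
                          gen_int_mode (2, SImode), gen_int_mode (3, SImode))
       -> (const_int 5)                  (folded by simplify_binary_operation)

     simplify_gen_binary (PLUS, SImode, const1_rtx, reg)
       -> (plus:SI reg (const_int 1))    (constant ordered second).  */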
184 /* If X is a MEM referencing the constant pool, return the real value.
185 Otherwise return X. */
187 avoid_constant_pool_reference (rtx x)
189 rtx c, tmp, addr;
190 enum machine_mode cmode;
191 HOST_WIDE_INT offset = 0;
193 switch (GET_CODE (x))
195 case MEM:
196 break;
198 case FLOAT_EXTEND:
199 /* Handle float extensions of constant pool references. */
200 tmp = XEXP (x, 0);
201 c = avoid_constant_pool_reference (tmp);
202 if (c != tmp && CONST_DOUBLE_AS_FLOAT_P (c))
204 REAL_VALUE_TYPE d;
206 REAL_VALUE_FROM_CONST_DOUBLE (d, c);
207 return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
209 return x;
211 default:
212 return x;
215 if (GET_MODE (x) == BLKmode)
216 return x;
218 addr = XEXP (x, 0);
220 /* Call target hook to avoid the effects of -fpic etc.... */
221 addr = targetm.delegitimize_address (addr);
223 /* Split the address into a base and integer offset. */
224 if (GET_CODE (addr) == CONST
225 && GET_CODE (XEXP (addr, 0)) == PLUS
226 && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
228 offset = INTVAL (XEXP (XEXP (addr, 0), 1));
229 addr = XEXP (XEXP (addr, 0), 0);
232 if (GET_CODE (addr) == LO_SUM)
233 addr = XEXP (addr, 1);
235 /* If this is a constant pool reference, we can turn it into its
236 constant and hope that simplifications happen. */
237 if (GET_CODE (addr) == SYMBOL_REF
238 && CONSTANT_POOL_ADDRESS_P (addr))
240 c = get_pool_constant (addr);
241 cmode = get_pool_mode (addr);
243 /* If we're accessing the constant in a different mode than it was
244 originally stored, attempt to fix that up via subreg simplifications.
245 If that fails we have no choice but to return the original memory. */
246 if ((offset != 0 || cmode != GET_MODE (x))
247 && offset >= 0 && offset < GET_MODE_SIZE (cmode))
249 rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
250 if (tem && CONSTANT_P (tem))
251 return tem;
253 else
254 return c;
257 return x;
260 /* Simplify a MEM based on its attributes. This is the default
261 delegitimize_address target hook, and it's recommended that every
262 overrider call it. */
265 delegitimize_mem_from_attrs (rtx x)
267 /* MEMs without MEM_OFFSETs may have been offset, so we can't just
268 use their base addresses as equivalent. */
269 if (MEM_P (x)
270 && MEM_EXPR (x)
271 && MEM_OFFSET_KNOWN_P (x))
273 tree decl = MEM_EXPR (x);
274 enum machine_mode mode = GET_MODE (x);
275 HOST_WIDE_INT offset = 0;
277 switch (TREE_CODE (decl))
279 default:
280 decl = NULL;
281 break;
283 case VAR_DECL:
284 break;
286 case ARRAY_REF:
287 case ARRAY_RANGE_REF:
288 case COMPONENT_REF:
289 case BIT_FIELD_REF:
290 case REALPART_EXPR:
291 case IMAGPART_EXPR:
292 case VIEW_CONVERT_EXPR:
294 HOST_WIDE_INT bitsize, bitpos;
295 tree toffset;
296 int unsignedp, volatilep = 0;
298 decl = get_inner_reference (decl, &bitsize, &bitpos, &toffset,
299 &mode, &unsignedp, &volatilep, false);
300 if (bitsize != GET_MODE_BITSIZE (mode)
301 || (bitpos % BITS_PER_UNIT)
302 || (toffset && !tree_fits_shwi_p (toffset)))
303 decl = NULL;
304 else
306 offset += bitpos / BITS_PER_UNIT;
307 if (toffset)
308 offset += tree_to_shwi (toffset);
310 break;
314 if (decl
315 && mode == GET_MODE (x)
316 && TREE_CODE (decl) == VAR_DECL
317 && (TREE_STATIC (decl)
318 || DECL_THREAD_LOCAL_P (decl))
319 && DECL_RTL_SET_P (decl)
320 && MEM_P (DECL_RTL (decl)))
322 rtx newx;
324 offset += MEM_OFFSET (x);
326 newx = DECL_RTL (decl);
328 if (MEM_P (newx))
330 rtx n = XEXP (newx, 0), o = XEXP (x, 0);
332 /* Avoid creating a new MEM needlessly if we already had
333 the same address. We do if there's no OFFSET and the
334 old address X is identical to NEWX, or if X is of the
335 form (plus NEWX OFFSET), or the NEWX is of the form
336 (plus Y (const_int Z)) and X is that with the offset
337 added: (plus Y (const_int Z+OFFSET)). */
338 if (!((offset == 0
339 || (GET_CODE (o) == PLUS
340 && GET_CODE (XEXP (o, 1)) == CONST_INT
341 && (offset == INTVAL (XEXP (o, 1))
342 || (GET_CODE (n) == PLUS
343 && GET_CODE (XEXP (n, 1)) == CONST_INT
344 && (INTVAL (XEXP (n, 1)) + offset
345 == INTVAL (XEXP (o, 1)))
346 && (n = XEXP (n, 0))))
347 && (o = XEXP (o, 0))))
348 && rtx_equal_p (o, n)))
349 x = adjust_address_nv (newx, mode, offset);
351 else if (GET_MODE (x) == GET_MODE (newx)
352 && offset == 0)
353 x = newx;
357 return x;
360 /* Make a unary operation by first seeing if it folds and otherwise making
361 the specified operation. */
364 simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
365 enum machine_mode op_mode)
367 rtx tem;
369 /* If this simplifies, use it. */
370 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
371 return tem;
373 return gen_rtx_fmt_e (code, mode, op);
376 /* Likewise for ternary operations. */
379 simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
380 enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
382 rtx tem;
384 /* If this simplifies, use it. */
385 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
386 op0, op1, op2)))
387 return tem;
389 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
392 /* Likewise, for relational operations.
393 CMP_MODE specifies mode comparison is done in. */
396 simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
397 enum machine_mode cmp_mode, rtx op0, rtx op1)
399 rtx tem;
401 if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
402 op0, op1)))
403 return tem;
405 return gen_rtx_fmt_ee (code, mode, op0, op1);
408 /* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
409 and simplify the result. If FN is non-NULL, call this callback on each
410 X, if it returns non-NULL, replace X with its return value and simplify the
411 result. */
414 simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
415 rtx (*fn) (rtx, const_rtx, void *), void *data)
417 enum rtx_code code = GET_CODE (x);
418 enum machine_mode mode = GET_MODE (x);
419 enum machine_mode op_mode;
420 const char *fmt;
421 rtx op0, op1, op2, newx, op;
422 rtvec vec, newvec;
423 int i, j;
425 if (__builtin_expect (fn != NULL, 0))
427 newx = fn (x, old_rtx, data);
428 if (newx)
429 return newx;
431 else if (rtx_equal_p (x, old_rtx))
432 return copy_rtx ((rtx) data);
434 switch (GET_RTX_CLASS (code))
436 case RTX_UNARY:
437 op0 = XEXP (x, 0);
438 op_mode = GET_MODE (op0);
439 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
440 if (op0 == XEXP (x, 0))
441 return x;
442 return simplify_gen_unary (code, mode, op0, op_mode);
444 case RTX_BIN_ARITH:
445 case RTX_COMM_ARITH:
446 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
447 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
448 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
449 return x;
450 return simplify_gen_binary (code, mode, op0, op1);
452 case RTX_COMPARE:
453 case RTX_COMM_COMPARE:
454 op0 = XEXP (x, 0);
455 op1 = XEXP (x, 1);
456 op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
457 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
458 op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
459 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
460 return x;
461 return simplify_gen_relational (code, mode, op_mode, op0, op1);
463 case RTX_TERNARY:
464 case RTX_BITFIELD_OPS:
465 op0 = XEXP (x, 0);
466 op_mode = GET_MODE (op0);
467 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
468 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
469 op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
470 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
471 return x;
472 if (op_mode == VOIDmode)
473 op_mode = GET_MODE (op0);
474 return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
476 case RTX_EXTRA:
477 if (code == SUBREG)
479 op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
480 if (op0 == SUBREG_REG (x))
481 return x;
482 op0 = simplify_gen_subreg (GET_MODE (x), op0,
483 GET_MODE (SUBREG_REG (x)),
484 SUBREG_BYTE (x));
485 return op0 ? op0 : x;
487 break;
489 case RTX_OBJ:
490 if (code == MEM)
492 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
493 if (op0 == XEXP (x, 0))
494 return x;
495 return replace_equiv_address_nv (x, op0);
497 else if (code == LO_SUM)
499 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
500 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
502 /* (lo_sum (high x) x) -> x */
503 if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
504 return op1;
506 /* (lo_sum (high x) (const (plus x ofs))) -> (const (plus x ofs)) */
507 if (GET_CODE (op0) == HIGH && GET_CODE (op1) == CONST
508 && GET_CODE(XEXP (op1, 0)) == PLUS
509 && rtx_equal_p (XEXP (XEXP (op1, 0), 0), XEXP (op0, 0)))
510 return op1;
512 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
513 return x;
514 return gen_rtx_LO_SUM (mode, op0, op1);
516 break;
518 default:
519 break;
522 newx = x;
523 fmt = GET_RTX_FORMAT (code);
524 for (i = 0; fmt[i]; i++)
525 switch (fmt[i])
527 case 'E':
528 vec = XVEC (x, i);
529 newvec = XVEC (newx, i);
530 for (j = 0; j < GET_NUM_ELEM (vec); j++)
532 op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
533 old_rtx, fn, data);
534 if (op != RTVEC_ELT (vec, j))
536 if (newvec == vec)
538 newvec = shallow_copy_rtvec (vec);
539 if (x == newx)
540 newx = shallow_copy_rtx (x);
541 XVEC (newx, i) = newvec;
543 RTVEC_ELT (newvec, j) = op;
546 break;
548 case 'e':
549 if (XEXP (x, i))
551 op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
552 if (op != XEXP (x, i))
554 if (x == newx)
555 newx = shallow_copy_rtx (x);
556 XEXP (newx, i) = op;
559 break;
561 return newx;
564 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
565 resulting RTX. Return a new RTX which is as simplified as possible. */
568 simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
570 return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
573 /* Try to simplify a MODE truncation of OP, which has OP_MODE.
574 Only handle cases where the truncated value is inherently an rvalue.
576 RTL provides two ways of truncating a value:
578 1. a lowpart subreg. This form is only a truncation when both
579 the outer and inner modes (here MODE and OP_MODE respectively)
580 are scalar integers, and only then when the subreg is used as
581 an rvalue.
583 It is only valid to form such truncating subregs if the
584 truncation requires no action by the target. The onus for
585 proving this is on the creator of the subreg -- e.g. the
586 caller to simplify_subreg or simplify_gen_subreg -- and typically
587 involves either TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode.
589 2. a TRUNCATE. This form handles both scalar and compound integers.
591 The first form is preferred where valid. However, the TRUNCATE
592 handling in simplify_unary_operation turns the second form into the
593 first form when TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode allow,
594 so it is generally safe to form rvalue truncations using:
596 simplify_gen_unary (TRUNCATE, ...)
598 and leave simplify_unary_operation to work out which representation
599 should be used.
601 Because of the proof requirements on (1), simplify_truncation must
602 also use simplify_gen_unary (TRUNCATE, ...) to truncate parts of OP,
603 regardless of whether the outer truncation came from a SUBREG or a
604 TRUNCATE. For example, if the caller has proven that an SImode
605 truncation of:
607 (and:DI X Y)
609 is a no-op and can be represented as a subreg, it does not follow
610 that SImode truncations of X and Y are also no-ops. On a target
611 like 64-bit MIPS that requires SImode values to be stored in
612 sign-extended form, an SImode truncation of:
614 (and:DI (reg:DI X) (const_int 63))
616 is trivially a no-op because only the lower 6 bits can be set.
617 However, X is still an arbitrary 64-bit number and so we cannot
618 assume that truncating it too is a no-op. */
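/* Editorial illustration (not part of the original file): the two
   rvalue truncation forms described above, for a DImode register X
   truncated to SImode on a little-endian target:

     (subreg:SI (reg:DI X) 0)    ; form 1 -- only valid once the caller
                                 ; has proven the truncation is a no-op
     (truncate:SI (reg:DI X))    ; form 2 -- always safe to request

   simplify_gen_unary (TRUNCATE, SImode, x, DImode) is the convenient
   entry point; it is rewritten into form 1 automatically when
   TRULY_NOOP_TRUNCATION_MODES_P (SImode, DImode) holds.  */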
620 static rtx
621 simplify_truncation (enum machine_mode mode, rtx op,
622 enum machine_mode op_mode)
624 unsigned int precision = GET_MODE_UNIT_PRECISION (mode);
625 unsigned int op_precision = GET_MODE_UNIT_PRECISION (op_mode);
626 gcc_assert (precision <= op_precision);
628 /* Optimize truncations of zero and sign extended values. */
629 if (GET_CODE (op) == ZERO_EXTEND
630 || GET_CODE (op) == SIGN_EXTEND)
632 /* There are three possibilities. If MODE is the same as the
633 origmode, we can omit both the extension and the subreg.
634 If MODE is not larger than the origmode, we can apply the
635 truncation without the extension. Finally, if the outermode
636 is larger than the origmode, we can just extend to the appropriate
637 mode. */
638 enum machine_mode origmode = GET_MODE (XEXP (op, 0));
639 if (mode == origmode)
640 return XEXP (op, 0);
641 else if (precision <= GET_MODE_UNIT_PRECISION (origmode))
642 return simplify_gen_unary (TRUNCATE, mode,
643 XEXP (op, 0), origmode);
644 else
645 return simplify_gen_unary (GET_CODE (op), mode,
646 XEXP (op, 0), origmode);
649 /* If the machine can perform operations in the truncated mode, distribute
650 the truncation, i.e. simplify (truncate:QI (op:SI (x:SI) (y:SI))) into
651 (op:QI (truncate:QI (x:SI)) (truncate:QI (y:SI))). */
652 if (1
653 #ifdef WORD_REGISTER_OPERATIONS
654 && precision >= BITS_PER_WORD
655 #endif
656 && (GET_CODE (op) == PLUS
657 || GET_CODE (op) == MINUS
658 || GET_CODE (op) == MULT))
660 rtx op0 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0), op_mode);
661 if (op0)
663 rtx op1 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 1), op_mode);
664 if (op1)
665 return simplify_gen_binary (GET_CODE (op), mode, op0, op1);
669 /* Simplify (truncate:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C)) into
670 (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
671 the outer subreg is effectively a truncation to the original mode. */
672 if ((GET_CODE (op) == LSHIFTRT
673 || GET_CODE (op) == ASHIFTRT)
674 /* Ensure that OP_MODE is at least twice as wide as MODE
675 to avoid the possibility that an outer LSHIFTRT shifts by more
676 than the sign extension's sign_bit_copies and introduces zeros
677 into the high bits of the result. */
678 && 2 * precision <= op_precision
679 && CONST_INT_P (XEXP (op, 1))
680 && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
681 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
682 && UINTVAL (XEXP (op, 1)) < precision)
683 return simplify_gen_binary (ASHIFTRT, mode,
684 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
686 /* Likewise (truncate:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C)) into
687 (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
688 the outer subreg is effectively a truncation to the original mode. */
689 if ((GET_CODE (op) == LSHIFTRT
690 || GET_CODE (op) == ASHIFTRT)
691 && CONST_INT_P (XEXP (op, 1))
692 && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
693 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
694 && UINTVAL (XEXP (op, 1)) < precision)
695 return simplify_gen_binary (LSHIFTRT, mode,
696 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
698 /* Likewise (truncate:QI (ashift:SI (zero_extend:SI (x:QI)) C)) into
699 (ashift:QI (x:QI) C), where C is a suitable small constant and
700 the outer subreg is effectively a truncation to the original mode. */
701 if (GET_CODE (op) == ASHIFT
702 && CONST_INT_P (XEXP (op, 1))
703 && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
704 || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
705 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
706 && UINTVAL (XEXP (op, 1)) < precision)
707 return simplify_gen_binary (ASHIFT, mode,
708 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
710 /* Recognize a word extraction from a multi-word subreg. */
711 if ((GET_CODE (op) == LSHIFTRT
712 || GET_CODE (op) == ASHIFTRT)
713 && SCALAR_INT_MODE_P (mode)
714 && SCALAR_INT_MODE_P (op_mode)
715 && precision >= BITS_PER_WORD
716 && 2 * precision <= op_precision
717 && CONST_INT_P (XEXP (op, 1))
718 && (INTVAL (XEXP (op, 1)) & (precision - 1)) == 0
719 && UINTVAL (XEXP (op, 1)) < op_precision)
721 int byte = subreg_lowpart_offset (mode, op_mode);
722 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
723 return simplify_gen_subreg (mode, XEXP (op, 0), op_mode,
724 (WORDS_BIG_ENDIAN
725 ? byte - shifted_bytes
726 : byte + shifted_bytes));
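/* Editorial worked example (not part of the original file): with 32-bit
   words, little-endian, truncating (lshiftrt:DI (reg:DI X) (const_int 32))
   to SImode matches the case above: byte = subreg_lowpart_offset = 0,
   shifted_bytes = 32 / BITS_PER_UNIT = 4, so the result is
   (subreg:SI (reg:DI X) 4), i.e. the high word of X.  */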
729 /* If we have a TRUNCATE of a right shift of MEM, make a new MEM
730 and try replacing the TRUNCATE and shift with it. Don't do this
731 if the MEM has a mode-dependent address. */
732 if ((GET_CODE (op) == LSHIFTRT
733 || GET_CODE (op) == ASHIFTRT)
734 && SCALAR_INT_MODE_P (op_mode)
735 && MEM_P (XEXP (op, 0))
736 && CONST_INT_P (XEXP (op, 1))
737 && (INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (mode)) == 0
738 && INTVAL (XEXP (op, 1)) > 0
739 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (op_mode)
740 && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0),
741 MEM_ADDR_SPACE (XEXP (op, 0)))
742 && ! MEM_VOLATILE_P (XEXP (op, 0))
743 && (GET_MODE_SIZE (mode) >= UNITS_PER_WORD
744 || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
746 int byte = subreg_lowpart_offset (mode, op_mode);
747 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
748 return adjust_address_nv (XEXP (op, 0), mode,
749 (WORDS_BIG_ENDIAN
750 ? byte - shifted_bytes
751 : byte + shifted_bytes));
754 /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
755 (OP:SI foo:SI) if OP is NEG or ABS. */
756 if ((GET_CODE (op) == ABS
757 || GET_CODE (op) == NEG)
758 && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
759 || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
760 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
761 return simplify_gen_unary (GET_CODE (op), mode,
762 XEXP (XEXP (op, 0), 0), mode);
764 /* (truncate:A (subreg:B (truncate:C X) 0)) is
765 (truncate:A X). */
766 if (GET_CODE (op) == SUBREG
767 && SCALAR_INT_MODE_P (mode)
768 && SCALAR_INT_MODE_P (op_mode)
769 && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (op)))
770 && GET_CODE (SUBREG_REG (op)) == TRUNCATE
771 && subreg_lowpart_p (op))
773 rtx inner = XEXP (SUBREG_REG (op), 0);
774 if (GET_MODE_PRECISION (mode)
775 <= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op))))
776 return simplify_gen_unary (TRUNCATE, mode, inner, GET_MODE (inner));
777 else
778 /* If subreg above is paradoxical and C is narrower
779 than A, return (subreg:A (truncate:C X) 0). */
780 return simplify_gen_subreg (mode, SUBREG_REG (op),
781 GET_MODE (SUBREG_REG (op)), 0);
784 /* (truncate:A (truncate:B X)) is (truncate:A X). */
785 if (GET_CODE (op) == TRUNCATE)
786 return simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0),
787 GET_MODE (XEXP (op, 0)));
789 return NULL_RTX;
792 /* Try to simplify a unary operation CODE whose output mode is to be
793 MODE with input operand OP whose mode was originally OP_MODE.
794 Return zero if no simplification can be made. */
796 simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
797 rtx op, enum machine_mode op_mode)
799 rtx trueop, tem;
801 trueop = avoid_constant_pool_reference (op);
803 tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
804 if (tem)
805 return tem;
807 return simplify_unary_operation_1 (code, mode, op);
810 /* Perform some simplifications we can do even if the operands
811 aren't constant. */
812 static rtx
813 simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
815 enum rtx_code reversed;
816 rtx temp;
818 switch (code)
820 case NOT:
821 /* (not (not X)) == X. */
822 if (GET_CODE (op) == NOT)
823 return XEXP (op, 0);
825 /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
826 comparison is all ones. */
827 if (COMPARISON_P (op)
828 && (mode == BImode || STORE_FLAG_VALUE == -1)
829 && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
830 return simplify_gen_relational (reversed, mode, VOIDmode,
831 XEXP (op, 0), XEXP (op, 1));
833 /* (not (plus X -1)) can become (neg X). */
834 if (GET_CODE (op) == PLUS
835 && XEXP (op, 1) == constm1_rtx)
836 return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
838 /* Similarly, (not (neg X)) is (plus X -1). */
839 if (GET_CODE (op) == NEG)
840 return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
841 CONSTM1_RTX (mode));
843 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
844 if (GET_CODE (op) == XOR
845 && CONST_INT_P (XEXP (op, 1))
846 && (temp = simplify_unary_operation (NOT, mode,
847 XEXP (op, 1), mode)) != 0)
848 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
850 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
851 if (GET_CODE (op) == PLUS
852 && CONST_INT_P (XEXP (op, 1))
853 && mode_signbit_p (mode, XEXP (op, 1))
854 && (temp = simplify_unary_operation (NOT, mode,
855 XEXP (op, 1), mode)) != 0)
856 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
859 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
860 operands other than 1, but that is not valid. We could do a
861 similar simplification for (not (lshiftrt C X)) where C is
862 just the sign bit, but this doesn't seem common enough to
863 bother with. */
864 if (GET_CODE (op) == ASHIFT
865 && XEXP (op, 0) == const1_rtx)
867 temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
868 return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
871 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
872 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
873 so we can perform the above simplification. */
874 if (STORE_FLAG_VALUE == -1
875 && GET_CODE (op) == ASHIFTRT
876 && CONST_INT_P (XEXP (op, 1))
877 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
878 return simplify_gen_relational (GE, mode, VOIDmode,
879 XEXP (op, 0), const0_rtx);
882 if (GET_CODE (op) == SUBREG
883 && subreg_lowpart_p (op)
884 && (GET_MODE_SIZE (GET_MODE (op))
885 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
886 && GET_CODE (SUBREG_REG (op)) == ASHIFT
887 && XEXP (SUBREG_REG (op), 0) == const1_rtx)
889 enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
890 rtx x;
892 x = gen_rtx_ROTATE (inner_mode,
893 simplify_gen_unary (NOT, inner_mode, const1_rtx,
894 inner_mode),
895 XEXP (SUBREG_REG (op), 1));
896 temp = rtl_hooks.gen_lowpart_no_emit (mode, x);
897 if (temp)
898 return temp;
901 /* Apply De Morgan's laws to reduce number of patterns for machines
902 with negating logical insns (and-not, nand, etc.). If result has
903 only one NOT, put it first, since that is how the patterns are
904 coded. */
905 if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
907 rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
908 enum machine_mode op_mode;
910 op_mode = GET_MODE (in1);
911 in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);
913 op_mode = GET_MODE (in2);
914 if (op_mode == VOIDmode)
915 op_mode = mode;
916 in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);
918 if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
920 rtx tem = in2;
921 in2 = in1; in1 = tem;
924 return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
925 mode, in1, in2);
928 /* (not (bswap x)) -> (bswap (not x)). */
929 if (GET_CODE (op) == BSWAP)
931 rtx x = simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
932 return simplify_gen_unary (BSWAP, mode, x, mode);
934 break;
936 case NEG:
937 /* (neg (neg X)) == X. */
938 if (GET_CODE (op) == NEG)
939 return XEXP (op, 0);
941 /* (neg (plus X 1)) can become (not X). */
942 if (GET_CODE (op) == PLUS
943 && XEXP (op, 1) == const1_rtx)
944 return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
946 /* Similarly, (neg (not X)) is (plus X 1). */
947 if (GET_CODE (op) == NOT)
948 return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
949 CONST1_RTX (mode));
951 /* (neg (minus X Y)) can become (minus Y X). This transformation
952 isn't safe for modes with signed zeros, since if X and Y are
953 both +0, (minus Y X) is the same as (minus X Y). If the
954 rounding mode is towards +infinity (or -infinity) then the two
955 expressions will be rounded differently. */
956 if (GET_CODE (op) == MINUS
957 && !HONOR_SIGNED_ZEROS (mode)
958 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
959 return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
961 if (GET_CODE (op) == PLUS
962 && !HONOR_SIGNED_ZEROS (mode)
963 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
965 /* (neg (plus A C)) is simplified to (minus -C A). */
966 if (CONST_SCALAR_INT_P (XEXP (op, 1))
967 || CONST_DOUBLE_AS_FLOAT_P (XEXP (op, 1)))
969 temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
970 if (temp)
971 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
974 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
975 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
976 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
979 /* (neg (mult A B)) becomes (mult A (neg B)).
980 This works even for floating-point values. */
981 if (GET_CODE (op) == MULT
982 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
984 temp = simplify_gen_unary (NEG, mode, XEXP (op, 1), mode);
985 return simplify_gen_binary (MULT, mode, XEXP (op, 0), temp);
988 /* NEG commutes with ASHIFT since it is multiplication. Only do
989 this if we can then eliminate the NEG (e.g., if the operand
990 is a constant). */
991 if (GET_CODE (op) == ASHIFT)
993 temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
994 if (temp)
995 return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
998 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
999 C is equal to the width of MODE minus 1. */
1000 if (GET_CODE (op) == ASHIFTRT
1001 && CONST_INT_P (XEXP (op, 1))
1002 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
1003 return simplify_gen_binary (LSHIFTRT, mode,
1004 XEXP (op, 0), XEXP (op, 1));
1006 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
1007 C is equal to the width of MODE minus 1. */
1008 if (GET_CODE (op) == LSHIFTRT
1009 && CONST_INT_P (XEXP (op, 1))
1010 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
1011 return simplify_gen_binary (ASHIFTRT, mode,
1012 XEXP (op, 0), XEXP (op, 1));
1014 /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
1015 if (GET_CODE (op) == XOR
1016 && XEXP (op, 1) == const1_rtx
1017 && nonzero_bits (XEXP (op, 0), mode) == 1)
1018 return plus_constant (mode, XEXP (op, 0), -1);
1020 /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1. */
1021 /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1. */
1022 if (GET_CODE (op) == LT
1023 && XEXP (op, 1) == const0_rtx
1024 && SCALAR_INT_MODE_P (GET_MODE (XEXP (op, 0))))
1026 enum machine_mode inner = GET_MODE (XEXP (op, 0));
1027 int isize = GET_MODE_PRECISION (inner);
1028 if (STORE_FLAG_VALUE == 1)
1030 temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
1031 GEN_INT (isize - 1));
1032 if (mode == inner)
1033 return temp;
1034 if (GET_MODE_PRECISION (mode) > isize)
1035 return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
1036 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
1038 else if (STORE_FLAG_VALUE == -1)
1040 temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
1041 GEN_INT (isize - 1));
1042 if (mode == inner)
1043 return temp;
1044 if (GET_MODE_PRECISION (mode) > isize)
1045 return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
1046 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
1049 break;
1051 case TRUNCATE:
1052 /* Don't optimize (lshiftrt (mult ...)) as it would interfere
1053 with the umulXi3_highpart patterns. */
1054 if (GET_CODE (op) == LSHIFTRT
1055 && GET_CODE (XEXP (op, 0)) == MULT)
1056 break;
1058 if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
1060 if (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op)))
1062 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1063 if (temp)
1064 return temp;
1066 /* We can't handle truncation to a partial integer mode here
1067 because we don't know the real bitsize of the partial
1068 integer mode. */
1069 break;
1072 if (GET_MODE (op) != VOIDmode)
1074 temp = simplify_truncation (mode, op, GET_MODE (op));
1075 if (temp)
1076 return temp;
1079 /* If we know that the value is already truncated, we can
1080 replace the TRUNCATE with a SUBREG. */
1081 if (GET_MODE_NUNITS (mode) == 1
1082 && (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op))
1083 || truncated_to_mode (mode, op)))
1085 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1086 if (temp)
1087 return temp;
1090 /* A truncate of a comparison can be replaced with a subreg if
1091 STORE_FLAG_VALUE permits. This is like the previous test,
1092 but it works even if the comparison is done in a mode larger
1093 than HOST_BITS_PER_WIDE_INT. */
1094 if (HWI_COMPUTABLE_MODE_P (mode)
1095 && COMPARISON_P (op)
1096 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
1098 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1099 if (temp)
1100 return temp;
1103 /* A truncate of a memory is just loading the low part of the memory
1104 if we are not changing the meaning of the address. */
1105 if (GET_CODE (op) == MEM
1106 && !VECTOR_MODE_P (mode)
1107 && !MEM_VOLATILE_P (op)
1108 && !mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op)))
1110 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1111 if (temp)
1112 return temp;
1115 break;
1117 case FLOAT_TRUNCATE:
1118 if (DECIMAL_FLOAT_MODE_P (mode))
1119 break;
1121 /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */
1122 if (GET_CODE (op) == FLOAT_EXTEND
1123 && GET_MODE (XEXP (op, 0)) == mode)
1124 return XEXP (op, 0);
1126 /* (float_truncate:SF (float_truncate:DF foo:XF))
1127 = (float_truncate:SF foo:XF).
1128 This may eliminate double rounding, so it is unsafe.
1130 (float_truncate:SF (float_extend:XF foo:DF))
1131 = (float_truncate:SF foo:DF).
1133 (float_truncate:DF (float_extend:XF foo:SF))
1134 = (float_extend:DF foo:SF). */
1135 if ((GET_CODE (op) == FLOAT_TRUNCATE
1136 && flag_unsafe_math_optimizations)
1137 || GET_CODE (op) == FLOAT_EXTEND)
1138 return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op,
1139 0)))
1140 > GET_MODE_SIZE (mode)
1141 ? FLOAT_TRUNCATE : FLOAT_EXTEND,
1142 mode,
1143 XEXP (op, 0), mode);
1145 /* (float_truncate (float x)) is (float x) */
1146 if (GET_CODE (op) == FLOAT
1147 && (flag_unsafe_math_optimizations
1148 || (SCALAR_FLOAT_MODE_P (GET_MODE (op))
1149 && ((unsigned)significand_size (GET_MODE (op))
1150 >= (GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))
1151 - num_sign_bit_copies (XEXP (op, 0),
1152 GET_MODE (XEXP (op, 0))))))))
1153 return simplify_gen_unary (FLOAT, mode,
1154 XEXP (op, 0),
1155 GET_MODE (XEXP (op, 0)));
1157 /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
1158 (OP:SF foo:SF) if OP is NEG or ABS. */
1159 if ((GET_CODE (op) == ABS
1160 || GET_CODE (op) == NEG)
1161 && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
1162 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
1163 return simplify_gen_unary (GET_CODE (op), mode,
1164 XEXP (XEXP (op, 0), 0), mode);
1166 /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
1167 is (float_truncate:SF x). */
1168 if (GET_CODE (op) == SUBREG
1169 && subreg_lowpart_p (op)
1170 && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
1171 return SUBREG_REG (op);
1172 break;
1174 case FLOAT_EXTEND:
1175 if (DECIMAL_FLOAT_MODE_P (mode))
1176 break;
1178 /* (float_extend (float_extend x)) is (float_extend x)
1180 (float_extend (float x)) is (float x) assuming that double
1181 rounding can't happen. */
1183 if (GET_CODE (op) == FLOAT_EXTEND
1184 || (GET_CODE (op) == FLOAT
1185 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1186 && ((unsigned)significand_size (GET_MODE (op))
1187 >= (GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))
1188 - num_sign_bit_copies (XEXP (op, 0),
1189 GET_MODE (XEXP (op, 0)))))))
1190 return simplify_gen_unary (GET_CODE (op), mode,
1191 XEXP (op, 0),
1192 GET_MODE (XEXP (op, 0)));
1194 break;
1196 case ABS:
1197 /* (abs (neg <foo>)) -> (abs <foo>) */
1198 if (GET_CODE (op) == NEG)
1199 return simplify_gen_unary (ABS, mode, XEXP (op, 0),
1200 GET_MODE (XEXP (op, 0)));
1202 /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
1203 do nothing. */
1204 if (GET_MODE (op) == VOIDmode)
1205 break;
1207 /* If operand is something known to be positive, ignore the ABS. */
1208 if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
1209 || val_signbit_known_clear_p (GET_MODE (op),
1210 nonzero_bits (op, GET_MODE (op))))
1211 return op;
1213 /* If operand is known to be only -1 or 0, convert ABS to NEG. */
1214 if (num_sign_bit_copies (op, mode) == GET_MODE_PRECISION (mode))
1215 return gen_rtx_NEG (mode, op);
1217 break;
1219 case FFS:
1220 /* (ffs (*_extend <X>)) = (ffs <X>) */
1221 if (GET_CODE (op) == SIGN_EXTEND
1222 || GET_CODE (op) == ZERO_EXTEND)
1223 return simplify_gen_unary (FFS, mode, XEXP (op, 0),
1224 GET_MODE (XEXP (op, 0)));
1225 break;
1227 case POPCOUNT:
1228 switch (GET_CODE (op))
1230 case BSWAP:
1231 case ZERO_EXTEND:
1232 /* (popcount (zero_extend <X>)) = (popcount <X>) */
1233 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1234 GET_MODE (XEXP (op, 0)));
1236 case ROTATE:
1237 case ROTATERT:
1238 /* Rotations don't affect popcount. */
1239 if (!side_effects_p (XEXP (op, 1)))
1240 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1241 GET_MODE (XEXP (op, 0)));
1242 break;
1244 default:
1245 break;
1247 break;
1249 case PARITY:
1250 switch (GET_CODE (op))
1252 case NOT:
1253 case BSWAP:
1254 case ZERO_EXTEND:
1255 case SIGN_EXTEND:
1256 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1257 GET_MODE (XEXP (op, 0)));
1259 case ROTATE:
1260 case ROTATERT:
1261 /* Rotations don't affect parity. */
1262 if (!side_effects_p (XEXP (op, 1)))
1263 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1264 GET_MODE (XEXP (op, 0)));
1265 break;
1267 default:
1268 break;
1270 break;
1272 case BSWAP:
1273 /* (bswap (bswap x)) -> x. */
1274 if (GET_CODE (op) == BSWAP)
1275 return XEXP (op, 0);
1276 break;
1278 case FLOAT:
1279 /* (float (sign_extend <X>)) = (float <X>). */
1280 if (GET_CODE (op) == SIGN_EXTEND)
1281 return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
1282 GET_MODE (XEXP (op, 0)));
1283 break;
1285 case SIGN_EXTEND:
1286 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
1287 becomes just the MINUS if its mode is MODE. This allows
1288 folding switch statements on machines using casesi (such as
1289 the VAX). */
1290 if (GET_CODE (op) == TRUNCATE
1291 && GET_MODE (XEXP (op, 0)) == mode
1292 && GET_CODE (XEXP (op, 0)) == MINUS
1293 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
1294 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
1295 return XEXP (op, 0);
1297 /* Extending a widening multiplication should be canonicalized to
1298 a wider widening multiplication. */
1299 if (GET_CODE (op) == MULT)
1301 rtx lhs = XEXP (op, 0);
1302 rtx rhs = XEXP (op, 1);
1303 enum rtx_code lcode = GET_CODE (lhs);
1304 enum rtx_code rcode = GET_CODE (rhs);
1306 /* Widening multiplies usually extend both operands, but sometimes
1307 they use a shift to extract a portion of a register. */
1308 if ((lcode == SIGN_EXTEND
1309 || (lcode == ASHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1310 && (rcode == SIGN_EXTEND
1311 || (rcode == ASHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1313 enum machine_mode lmode = GET_MODE (lhs);
1314 enum machine_mode rmode = GET_MODE (rhs);
1315 int bits;
1317 if (lcode == ASHIFTRT)
1318 /* Number of bits not shifted off the end. */
1319 bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
1320 else /* lcode == SIGN_EXTEND */
1321 /* Size of inner mode. */
1322 bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
1324 if (rcode == ASHIFTRT)
1325 bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
1326 else /* rcode == SIGN_EXTEND */
1327 bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
1329 /* We can only widen multiplies if the result is mathematically
1330 equivalent. I.e. if overflow was impossible. */
1331 if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
1332 return simplify_gen_binary
1333 (MULT, mode,
1334 simplify_gen_unary (SIGN_EXTEND, mode, lhs, lmode),
1335 simplify_gen_unary (SIGN_EXTEND, mode, rhs, rmode));
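/* Editorial worked example (not part of the original file): for
     (sign_extend:DI (mult:SI (sign_extend:SI (reg:HI a))
                              (sign_extend:SI (reg:HI b))))
   bits = 16 + 16 = 32 <= GET_MODE_PRECISION (SImode), so the SImode
   product cannot overflow and the whole expression becomes
     (mult:DI (sign_extend:DI (reg:HI a)) (sign_extend:DI (reg:HI b))).  */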
1339 /* Check for a sign extension of a subreg of a promoted
1340 variable, where the promotion is sign-extended, and the
1341 target mode is the same as the variable's promotion. */
1342 if (GET_CODE (op) == SUBREG
1343 && SUBREG_PROMOTED_VAR_P (op)
1344 && ! SUBREG_PROMOTED_UNSIGNED_P (op)
1345 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
1347 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1348 if (temp)
1349 return temp;
1352 /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
1353 (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1354 if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
1356 gcc_assert (GET_MODE_BITSIZE (mode)
1357 > GET_MODE_BITSIZE (GET_MODE (op)));
1358 return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
1359 GET_MODE (XEXP (op, 0)));
1362 /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
1363 is (sign_extend:M (subreg:O <X>)) if there is a mode with
1364 GET_MODE_BITSIZE (N) - I bits.
1365 (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1366 is similarly (zero_extend:M (subreg:O <X>)). */
1367 if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT)
1368 && GET_CODE (XEXP (op, 0)) == ASHIFT
1369 && CONST_INT_P (XEXP (op, 1))
1370 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1371 && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
1373 enum machine_mode tmode
1374 = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
1375 - INTVAL (XEXP (op, 1)), MODE_INT, 1);
1376 gcc_assert (GET_MODE_BITSIZE (mode)
1377 > GET_MODE_BITSIZE (GET_MODE (op)));
1378 if (tmode != BLKmode)
1380 rtx inner =
1381 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1382 if (inner)
1383 return simplify_gen_unary (GET_CODE (op) == ASHIFTRT
1384 ? SIGN_EXTEND : ZERO_EXTEND,
1385 mode, inner, tmode);
1389 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1390 /* As we do not know which address space the pointer is referring to,
1391 we can do this only if the target does not support different pointer
1392 or address modes depending on the address space. */
1393 if (target_default_pointer_address_modes_p ()
1394 && ! POINTERS_EXTEND_UNSIGNED
1395 && mode == Pmode && GET_MODE (op) == ptr_mode
1396 && (CONSTANT_P (op)
1397 || (GET_CODE (op) == SUBREG
1398 && REG_P (SUBREG_REG (op))
1399 && REG_POINTER (SUBREG_REG (op))
1400 && GET_MODE (SUBREG_REG (op)) == Pmode)))
1401 return convert_memory_address (Pmode, op);
1402 #endif
1403 break;
1405 case ZERO_EXTEND:
1406 /* Check for a zero extension of a subreg of a promoted
1407 variable, where the promotion is zero-extended, and the
1408 target mode is the same as the variable's promotion. */
1409 if (GET_CODE (op) == SUBREG
1410 && SUBREG_PROMOTED_VAR_P (op)
1411 && SUBREG_PROMOTED_UNSIGNED_P (op) > 0
1412 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
1414 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1415 if (temp)
1416 return temp;
1419 /* Extending a widening multiplication should be canonicalized to
1420 a wider widening multiplication. */
1421 if (GET_CODE (op) == MULT)
1423 rtx lhs = XEXP (op, 0);
1424 rtx rhs = XEXP (op, 1);
1425 enum rtx_code lcode = GET_CODE (lhs);
1426 enum rtx_code rcode = GET_CODE (rhs);
1428 /* Widening multiplies usually extend both operands, but sometimes
1429 they use a shift to extract a portion of a register. */
1430 if ((lcode == ZERO_EXTEND
1431 || (lcode == LSHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1432 && (rcode == ZERO_EXTEND
1433 || (rcode == LSHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1435 enum machine_mode lmode = GET_MODE (lhs);
1436 enum machine_mode rmode = GET_MODE (rhs);
1437 int bits;
1439 if (lcode == LSHIFTRT)
1440 /* Number of bits not shifted off the end. */
1441 bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
1442 else /* lcode == ZERO_EXTEND */
1443 /* Size of inner mode. */
1444 bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
1446 if (rcode == LSHIFTRT)
1447 bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
1448 else /* rcode == ZERO_EXTEND */
1449 bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
1451 /* We can only widen multiplies if the result is mathematically
1452 equivalent. I.e. if overflow was impossible. */
1453 if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
1454 return simplify_gen_binary
1455 (MULT, mode,
1456 simplify_gen_unary (ZERO_EXTEND, mode, lhs, lmode),
1457 simplify_gen_unary (ZERO_EXTEND, mode, rhs, rmode));
1461 /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1462 if (GET_CODE (op) == ZERO_EXTEND)
1463 return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0),
1464 GET_MODE (XEXP (op, 0)));
1466 /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1467 is (zero_extend:M (subreg:O <X>)) if there is a mode with
1468 GET_MODE_BITSIZE (N) - I bits. */
1469 if (GET_CODE (op) == LSHIFTRT
1470 && GET_CODE (XEXP (op, 0)) == ASHIFT
1471 && CONST_INT_P (XEXP (op, 1))
1472 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1473 && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
1475 enum machine_mode tmode
1476 = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
1477 - INTVAL (XEXP (op, 1)), MODE_INT, 1);
1478 if (tmode != BLKmode)
1480 rtx inner =
1481 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1482 if (inner)
1483 return simplify_gen_unary (ZERO_EXTEND, mode, inner, tmode);
1487 /* (zero_extend:M (subreg:N <X:O>)) is <X:O> (for M == O) or
1488 (zero_extend:M <X:O>), if X doesn't have any non-zero bits outside
1489 of mode N. E.g.
1490 (zero_extend:SI (subreg:QI (and:SI (reg:SI) (const_int 63)) 0)) is
1491 (and:SI (reg:SI) (const_int 63)). */
1492 if (GET_CODE (op) == SUBREG
1493 && GET_MODE_PRECISION (GET_MODE (op))
1494 < GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
1495 && GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
1496 <= HOST_BITS_PER_WIDE_INT
1497 && GET_MODE_PRECISION (mode)
1498 >= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
1499 && subreg_lowpart_p (op)
1500 && (nonzero_bits (SUBREG_REG (op), GET_MODE (SUBREG_REG (op)))
1501 & ~GET_MODE_MASK (GET_MODE (op))) == 0)
1503 if (GET_MODE_PRECISION (mode)
1504 == GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op))))
1505 return SUBREG_REG (op);
1506 return simplify_gen_unary (ZERO_EXTEND, mode, SUBREG_REG (op),
1507 GET_MODE (SUBREG_REG (op)));
1510 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1511 /* As we do not know which address space the pointer is referring to,
1512 we can do this only if the target does not support different pointer
1513 or address modes depending on the address space. */
1514 if (target_default_pointer_address_modes_p ()
1515 && POINTERS_EXTEND_UNSIGNED > 0
1516 && mode == Pmode && GET_MODE (op) == ptr_mode
1517 && (CONSTANT_P (op)
1518 || (GET_CODE (op) == SUBREG
1519 && REG_P (SUBREG_REG (op))
1520 && REG_POINTER (SUBREG_REG (op))
1521 && GET_MODE (SUBREG_REG (op)) == Pmode)))
1522 return convert_memory_address (Pmode, op);
1523 #endif
1524 break;
1526 default:
1527 break;
1530 return 0;
1533 /* Try to compute the value of a unary operation CODE whose output mode is to
1534 be MODE with input operand OP whose mode was originally OP_MODE.
1535 Return zero if the value cannot be computed. */
1537 simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
1538 rtx op, enum machine_mode op_mode)
1540 unsigned int width = GET_MODE_PRECISION (mode);
1541 unsigned int op_width = GET_MODE_PRECISION (op_mode);
1543 if (code == VEC_DUPLICATE)
1545 gcc_assert (VECTOR_MODE_P (mode));
1546 if (GET_MODE (op) != VOIDmode)
1548 if (!VECTOR_MODE_P (GET_MODE (op)))
1549 gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
1550 else
1551 gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
1552 (GET_MODE (op)));
1554 if (CONST_SCALAR_INT_P (op) || CONST_DOUBLE_AS_FLOAT_P (op)
1555 || GET_CODE (op) == CONST_VECTOR)
1557 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1558 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1559 rtvec v = rtvec_alloc (n_elts);
1560 unsigned int i;
1562 if (GET_CODE (op) != CONST_VECTOR)
1563 for (i = 0; i < n_elts; i++)
1564 RTVEC_ELT (v, i) = op;
1565 else
1567 enum machine_mode inmode = GET_MODE (op);
1568 int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
1569 unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
1571 gcc_assert (in_n_elts < n_elts);
1572 gcc_assert ((n_elts % in_n_elts) == 0);
1573 for (i = 0; i < n_elts; i++)
1574 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
1576 return gen_rtx_CONST_VECTOR (mode, v);
1580 if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
1582 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1583 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1584 enum machine_mode opmode = GET_MODE (op);
1585 int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
1586 unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
1587 rtvec v = rtvec_alloc (n_elts);
1588 unsigned int i;
1590 gcc_assert (op_n_elts == n_elts);
1591 for (i = 0; i < n_elts; i++)
1593 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
1594 CONST_VECTOR_ELT (op, i),
1595 GET_MODE_INNER (opmode));
1596 if (!x)
1597 return 0;
1598 RTVEC_ELT (v, i) = x;
1600 return gen_rtx_CONST_VECTOR (mode, v);
1603 /* The order of these tests is critical so that, for example, we don't
1604 check the wrong mode (input vs. output) for a conversion operation,
1605 such as FIX. At some point, this should be simplified. */
1607 if (code == FLOAT && CONST_SCALAR_INT_P (op))
1609 HOST_WIDE_INT hv, lv;
1610 REAL_VALUE_TYPE d;
1612 if (CONST_INT_P (op))
1613 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
1614 else
1615 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
1617 REAL_VALUE_FROM_INT (d, lv, hv, mode);
1618 d = real_value_truncate (mode, d);
1619 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1621 else if (code == UNSIGNED_FLOAT && CONST_SCALAR_INT_P (op))
1623 HOST_WIDE_INT hv, lv;
1624 REAL_VALUE_TYPE d;
1626 if (CONST_INT_P (op))
1627 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
1628 else
1629 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
1631 if (op_mode == VOIDmode
1632 || GET_MODE_PRECISION (op_mode) > HOST_BITS_PER_DOUBLE_INT)
1633 /* We should never get a negative number. */
1634 gcc_assert (hv >= 0);
1635 else if (GET_MODE_PRECISION (op_mode) <= HOST_BITS_PER_WIDE_INT)
1636 hv = 0, lv &= GET_MODE_MASK (op_mode);
1638 REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
1639 d = real_value_truncate (mode, d);
1640 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1643 if (CONST_INT_P (op)
1644 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
1646 HOST_WIDE_INT arg0 = INTVAL (op);
1647 HOST_WIDE_INT val;
1649 switch (code)
1651 case NOT:
1652 val = ~ arg0;
1653 break;
1655 case NEG:
1656 val = - (unsigned HOST_WIDE_INT) arg0;
1657 break;
1659 case ABS:
1660 val = (arg0 >= 0 ? arg0 : - arg0);
1661 break;
1663 case FFS:
1664 arg0 &= GET_MODE_MASK (mode);
1665 val = ffs_hwi (arg0);
1666 break;
1668 case CLZ:
1669 arg0 &= GET_MODE_MASK (mode);
1670 if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
1671 ; /* VAL was set by CLZ_DEFINED_VALUE_AT_ZERO. */
1672 else
1673 val = GET_MODE_PRECISION (mode) - floor_log2 (arg0) - 1;
1674 break;
1676 case CLRSB:
1677 arg0 &= GET_MODE_MASK (mode);
1678 if (arg0 == 0)
1679 val = GET_MODE_PRECISION (mode) - 1;
1680 else if (arg0 >= 0)
1681 val = GET_MODE_PRECISION (mode) - floor_log2 (arg0) - 2;
1682 else if (arg0 < 0)
1683 val = GET_MODE_PRECISION (mode) - floor_log2 (~arg0) - 2;
1684 break;
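/* Editorial check of the CLRSB formulas above (not part of the original
   file): in SImode, arg0 == 1 gives 32 - floor_log2 (1) - 2 == 30
   redundant sign bits, and arg0 == 0xffff gives 32 - 15 - 2 == 15.  */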
1686 case CTZ:
1687 arg0 &= GET_MODE_MASK (mode);
1688 if (arg0 == 0)
1690 /* Even if the value at zero is undefined, we have to come
1691 up with some replacement. Seems good enough. */
1692 if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
1693 val = GET_MODE_PRECISION (mode);
1695 else
1696 val = ctz_hwi (arg0);
1697 break;
1699 case POPCOUNT:
1700 arg0 &= GET_MODE_MASK (mode);
1701 val = 0;
1702 while (arg0)
1703 val++, arg0 &= arg0 - 1;
1704 break;
1706 case PARITY:
1707 arg0 &= GET_MODE_MASK (mode);
1708 val = 0;
1709 while (arg0)
1710 val++, arg0 &= arg0 - 1;
1711 val &= 1;
1712 break;
1714 case BSWAP:
1716 unsigned int s;
1718 val = 0;
1719 for (s = 0; s < width; s += 8)
1721 unsigned int d = width - s - 8;
1722 unsigned HOST_WIDE_INT byte;
1723 byte = (arg0 >> s) & 0xff;
1724 val |= byte << d;
1727 break;
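/* Editorial worked example (not part of the original file): for
   width == 32 and arg0 == 0x12345678, the loop above places the bytes
   0x78, 0x56, 0x34, 0x12 at bit offsets 24, 16, 8 and 0 respectively,
   giving val == 0x78563412.  */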
1729 case TRUNCATE:
1730 val = arg0;
1731 break;
1733 case ZERO_EXTEND:
1734 /* When zero-extending a CONST_INT, we need to know its
1735 original mode. */
1736 gcc_assert (op_mode != VOIDmode);
1737 if (op_width == HOST_BITS_PER_WIDE_INT)
1739 /* If we were really extending the mode,
1740 we would have to distinguish between zero-extension
1741 and sign-extension. */
1742 gcc_assert (width == op_width);
1743 val = arg0;
1745 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
1746 val = arg0 & GET_MODE_MASK (op_mode);
1747 else
1748 return 0;
1749 break;
1751 case SIGN_EXTEND:
1752 if (op_mode == VOIDmode)
1753 op_mode = mode;
1754 op_width = GET_MODE_PRECISION (op_mode);
1755 if (op_width == HOST_BITS_PER_WIDE_INT)
1757 /* If we were really extending the mode,
1758 we would have to distinguish between zero-extension
1759 and sign-extension. */
1760 gcc_assert (width == op_width);
1761 val = arg0;
1763 else if (op_width < HOST_BITS_PER_WIDE_INT)
1765 val = arg0 & GET_MODE_MASK (op_mode);
1766 if (val_signbit_known_set_p (op_mode, val))
1767 val |= ~GET_MODE_MASK (op_mode);
1769 else
1770 return 0;
1771 break;
1773 case SQRT:
1774 case FLOAT_EXTEND:
1775 case FLOAT_TRUNCATE:
1776 case SS_TRUNCATE:
1777 case US_TRUNCATE:
1778 case SS_NEG:
1779 case US_NEG:
1780 case SS_ABS:
1781 return 0;
1783 default:
1784 gcc_unreachable ();
1787 return gen_int_mode (val, mode);
1790 /* We can do some operations on integer CONST_DOUBLEs. Also allow
1791 for a DImode operation on a CONST_INT. */
1792 else if (width <= HOST_BITS_PER_DOUBLE_INT
1793 && (CONST_DOUBLE_AS_INT_P (op) || CONST_INT_P (op)))
1795 double_int first, value;
1797 if (CONST_DOUBLE_AS_INT_P (op))
1798 first = double_int::from_pair (CONST_DOUBLE_HIGH (op),
1799 CONST_DOUBLE_LOW (op));
1800 else
1801 first = double_int::from_shwi (INTVAL (op));
1803 switch (code)
1805 case NOT:
1806 value = ~first;
1807 break;
1809 case NEG:
1810 value = -first;
1811 break;
1813 case ABS:
1814 if (first.is_negative ())
1815 value = -first;
1816 else
1817 value = first;
1818 break;
1820 case FFS:
1821 value.high = 0;
1822 if (first.low != 0)
1823 value.low = ffs_hwi (first.low);
1824 else if (first.high != 0)
1825 value.low = HOST_BITS_PER_WIDE_INT + ffs_hwi (first.high);
1826 else
1827 value.low = 0;
1828 break;
1830 case CLZ:
1831 value.high = 0;
1832 if (first.high != 0)
1833 value.low = GET_MODE_PRECISION (mode) - floor_log2 (first.high) - 1
1834 - HOST_BITS_PER_WIDE_INT;
1835 else if (first.low != 0)
1836 value.low = GET_MODE_PRECISION (mode) - floor_log2 (first.low) - 1;
1837 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, value.low))
1838 value.low = GET_MODE_PRECISION (mode);
1839 break;
1841 case CTZ:
1842 value.high = 0;
1843 if (first.low != 0)
1844 value.low = ctz_hwi (first.low);
1845 else if (first.high != 0)
1846 value.low = HOST_BITS_PER_WIDE_INT + ctz_hwi (first.high);
1847 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, value.low))
1848 value.low = GET_MODE_PRECISION (mode);
1849 break;
1851 case POPCOUNT:
1852 value = double_int_zero;
1853 while (first.low)
1855 value.low++;
1856 first.low &= first.low - 1;
1858 while (first.high)
1860 value.low++;
1861 first.high &= first.high - 1;
1863 break;
1865 case PARITY:
1866 value = double_int_zero;
1867 while (first.low)
1869 value.low++;
1870 first.low &= first.low - 1;
1872 while (first.high)
1874 value.low++;
1875 first.high &= first.high - 1;
1877 value.low &= 1;
1878 break;
1880 case BSWAP:
1882 unsigned int s;
1884 value = double_int_zero;
1885 for (s = 0; s < width; s += 8)
1887 unsigned int d = width - s - 8;
1888 unsigned HOST_WIDE_INT byte;
1890 if (s < HOST_BITS_PER_WIDE_INT)
1891 byte = (first.low >> s) & 0xff;
1892 else
1893 byte = (first.high >> (s - HOST_BITS_PER_WIDE_INT)) & 0xff;
1895 if (d < HOST_BITS_PER_WIDE_INT)
1896 value.low |= byte << d;
1897 else
1898 value.high |= byte << (d - HOST_BITS_PER_WIDE_INT);
1901 break;
1903 case TRUNCATE:
1904 /* This is just a change-of-mode, so do nothing. */
1905 value = first;
1906 break;
1908 case ZERO_EXTEND:
1909 gcc_assert (op_mode != VOIDmode);
1911 if (op_width > HOST_BITS_PER_WIDE_INT)
1912 return 0;
1914 value = double_int::from_uhwi (first.low & GET_MODE_MASK (op_mode));
1915 break;
1917 case SIGN_EXTEND:
1918 if (op_mode == VOIDmode
1919 || op_width > HOST_BITS_PER_WIDE_INT)
1920 return 0;
1921 else
1923 value.low = first.low & GET_MODE_MASK (op_mode);
1924 if (val_signbit_known_set_p (op_mode, value.low))
1925 value.low |= ~GET_MODE_MASK (op_mode);
1927 value.high = HWI_SIGN_EXTEND (value.low);
1929 break;
1931 case SQRT:
1932 return 0;
1934 default:
1935 return 0;
1938 return immed_double_int_const (value, mode);
1941 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1942 && SCALAR_FLOAT_MODE_P (mode)
1943 && SCALAR_FLOAT_MODE_P (GET_MODE (op)))
1945 REAL_VALUE_TYPE d;
1946 REAL_VALUE_FROM_CONST_DOUBLE (d, op);
1948 switch (code)
1950 case SQRT:
1951 return 0;
1952 case ABS:
1953 d = real_value_abs (&d);
1954 break;
1955 case NEG:
1956 d = real_value_negate (&d);
1957 break;
1958 case FLOAT_TRUNCATE:
1959 d = real_value_truncate (mode, d);
1960 break;
1961 case FLOAT_EXTEND:
1962 /* All this does is change the mode, unless the mode class
1963 changes as well. */
1964 if (GET_MODE_CLASS (mode) != GET_MODE_CLASS (GET_MODE (op)))
1965 real_convert (&d, mode, &d);
1966 break;
1967 case FIX:
1968 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
1969 break;
1970 case NOT:
1972 long tmp[4];
1973 int i;
1975 real_to_target (tmp, &d, GET_MODE (op));
1976 for (i = 0; i < 4; i++)
1977 tmp[i] = ~tmp[i];
1978 real_from_target (&d, tmp, mode);
1979 break;
1981 default:
1982 gcc_unreachable ();
1984 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1987 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1988 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1989 && GET_MODE_CLASS (mode) == MODE_INT
1990 && width <= HOST_BITS_PER_DOUBLE_INT && width > 0)
1992 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1993 operators are intentionally left unspecified (to ease implementation
1994 by target backends), for consistency, this routine implements the
1995 same semantics for constant folding as used by the middle-end. */
1997 /* This was formerly used only for non-IEEE float.
1998 eggert@twinsun.com says it is safe for IEEE also. */
1999 HOST_WIDE_INT xh, xl, th, tl;
2000 REAL_VALUE_TYPE x, t;
2001 REAL_VALUE_FROM_CONST_DOUBLE (x, op);
2002 switch (code)
2004 case FIX:
2005 if (REAL_VALUE_ISNAN (x))
2006 return const0_rtx;
2008 /* Test against the signed upper bound. */
2009 if (width > HOST_BITS_PER_WIDE_INT)
2011 th = ((unsigned HOST_WIDE_INT) 1
2012 << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
2013 tl = -1;
2015 else
2017 th = 0;
2018 tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
2020 real_from_integer (&t, VOIDmode, tl, th, 0);
2021 if (REAL_VALUES_LESS (t, x))
2023 xh = th;
2024 xl = tl;
2025 break;
2028 /* Test against the signed lower bound. */
2029 if (width > HOST_BITS_PER_WIDE_INT)
2031 th = HOST_WIDE_INT_M1U << (width - HOST_BITS_PER_WIDE_INT - 1);
2032 tl = 0;
2034 else
2036 th = -1;
2037 tl = HOST_WIDE_INT_M1U << (width - 1);
2039 real_from_integer (&t, VOIDmode, tl, th, 0);
2040 if (REAL_VALUES_LESS (x, t))
2042 xh = th;
2043 xl = tl;
2044 break;
2046 REAL_VALUE_TO_INT (&xl, &xh, x);
2047 break;
2049 case UNSIGNED_FIX:
2050 if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
2051 return const0_rtx;
2053 /* Test against the unsigned upper bound. */
2054 if (width == HOST_BITS_PER_DOUBLE_INT)
2056 th = -1;
2057 tl = -1;
2059 else if (width >= HOST_BITS_PER_WIDE_INT)
2061 th = ((unsigned HOST_WIDE_INT) 1
2062 << (width - HOST_BITS_PER_WIDE_INT)) - 1;
2063 tl = -1;
2065 else
2067 th = 0;
2068 tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
2070 real_from_integer (&t, VOIDmode, tl, th, 1);
2071 if (REAL_VALUES_LESS (t, x))
2073 xh = th;
2074 xl = tl;
2075 break;
2078 REAL_VALUE_TO_INT (&xl, &xh, x);
2079 break;
2081 default:
2082 gcc_unreachable ();
2084 return immed_double_const (xl, xh, mode);
2087 return NULL_RTX;
2090 /* Subroutine of simplify_binary_operation to simplify a binary operation
2091 CODE that can commute with byte swapping, with result mode MODE and
2092 operating on OP0 and OP1. CODE is currently one of AND, IOR or XOR.
2093 Return zero if no simplification or canonicalization is possible. */
2095 static rtx
2096 simplify_byte_swapping_operation (enum rtx_code code, enum machine_mode mode,
2097 rtx op0, rtx op1)
2099 rtx tem;
2101 /* (op (bswap x) C1) -> (bswap (op x C2)) with C2 swapped. */
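/* E.g. for SImode, (and (bswap X) 0xff) becomes
(bswap (and X 0xff000000)): byte-swapping the constant first yields
the same result. */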
2102 if (GET_CODE (op0) == BSWAP && CONST_SCALAR_INT_P (op1))
2104 tem = simplify_gen_binary (code, mode, XEXP (op0, 0),
2105 simplify_gen_unary (BSWAP, mode, op1, mode));
2106 return simplify_gen_unary (BSWAP, mode, tem, mode);
2109 /* (op (bswap x) (bswap y)) -> (bswap (op x y)). */
2110 if (GET_CODE (op0) == BSWAP && GET_CODE (op1) == BSWAP)
2112 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), XEXP (op1, 0));
2113 return simplify_gen_unary (BSWAP, mode, tem, mode);
2116 return NULL_RTX;
2119 /* Subroutine of simplify_binary_operation to simplify a commutative,
2120 associative binary operation CODE with result mode MODE, operating
2121 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
2122 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
2123 canonicalization is possible. */
2125 static rtx
2126 simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
2127 rtx op0, rtx op1)
2129 rtx tem;
2131 /* Linearize the operator to the left. */
2132 if (GET_CODE (op1) == code)
2134 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)". */
2135 if (GET_CODE (op0) == code)
2137 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
2138 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
2141 /* "a op (b op c)" becomes "(b op c) op a". */
2142 if (! swap_commutative_operands_p (op1, op0))
2143 return simplify_gen_binary (code, mode, op1, op0);
2145 tem = op0;
2146 op0 = op1;
2147 op1 = tem;
2150 if (GET_CODE (op0) == code)
2152 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
2153 if (swap_commutative_operands_p (XEXP (op0, 1), op1))
2155 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
2156 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
2159 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
2160 tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
2161 if (tem != 0)
2162 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
2164 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
2165 tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
2166 if (tem != 0)
2167 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
2170 return 0;
2174 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
2175 and OP1. Return 0 if no simplification is possible.
2177 Don't use this for relational operations such as EQ or LT.
2178 Use simplify_relational_operation instead. */
2180 simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
2181 rtx op0, rtx op1)
2183 rtx trueop0, trueop1;
2184 rtx tem;
2186 /* Relational operations don't work here. We must know the mode
2187 of the operands in order to do the comparison correctly.
2188 Assuming a full word can give incorrect results.
2189 Consider comparing 128 with -128 in QImode. */
2190 gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
2191 gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
2193 /* Make sure the constant is second. */
2194 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
2195 && swap_commutative_operands_p (op0, op1))
2197 tem = op0, op0 = op1, op1 = tem;
2200 trueop0 = avoid_constant_pool_reference (op0);
2201 trueop1 = avoid_constant_pool_reference (op1);
2203 tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
2204 if (tem)
2205 return tem;
2206 return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
2209 /* Subroutine of simplify_binary_operation. Simplify a binary operation
2210 CODE with result mode MODE, operating on OP0 and OP1. If OP0 and/or
2211 OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
2212 actual constants. */
2214 static rtx
2215 simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
2216 rtx op0, rtx op1, rtx trueop0, rtx trueop1)
2218 rtx tem, reversed, opleft, opright;
2219 HOST_WIDE_INT val;
2220 unsigned int width = GET_MODE_PRECISION (mode);
2222 /* Even if we can't compute a constant result,
2223 there are some cases worth simplifying. */
2225 switch (code)
2227 case PLUS:
2228 /* Maybe simplify x + 0 to x. The two expressions are equivalent
2229 when x is NaN, infinite, or finite and nonzero. They aren't
2230 when x is -0 and the rounding mode is not towards -infinity,
2231 since (-0) + 0 is then 0. */
2232 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
2233 return op0;
2235 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
2236 transformations are safe even for IEEE. */
2237 if (GET_CODE (op0) == NEG)
2238 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
2239 else if (GET_CODE (op1) == NEG)
2240 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
2242 /* (~a) + 1 -> -a */
2243 if (INTEGRAL_MODE_P (mode)
2244 && GET_CODE (op0) == NOT
2245 && trueop1 == const1_rtx)
2246 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
2248 /* Handle both-operands-constant cases. We can only add
2249 CONST_INTs to constants since the sum of relocatable symbols
2250 can't be handled by most assemblers. Don't add CONST_INT
2251 to CONST_INT since overflow won't be computed properly if wider
2252 than HOST_BITS_PER_WIDE_INT. */
2254 if ((GET_CODE (op0) == CONST
2255 || GET_CODE (op0) == SYMBOL_REF
2256 || GET_CODE (op0) == LABEL_REF)
2257 && CONST_INT_P (op1))
2258 return plus_constant (mode, op0, INTVAL (op1));
2259 else if ((GET_CODE (op1) == CONST
2260 || GET_CODE (op1) == SYMBOL_REF
2261 || GET_CODE (op1) == LABEL_REF)
2262 && CONST_INT_P (op0))
2263 return plus_constant (mode, op1, INTVAL (op0));
2265 /* See if this is something like X * C - X or vice versa or
2266 if the multiplication is written as a shift. If so, we can
2267 distribute and make a new multiply, shift, or maybe just
2268 have X (if C is 2 in the example above). But don't make
2269 something more expensive than we had before. */
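/* For instance, (plus (mult X 3) X) can become (mult X 4) and
(plus (ashift X 2) X) can become (mult X 5), provided the new form
is not costlier than the original. */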
2271 if (SCALAR_INT_MODE_P (mode))
2273 double_int coeff0, coeff1;
2274 rtx lhs = op0, rhs = op1;
2276 coeff0 = double_int_one;
2277 coeff1 = double_int_one;
2279 if (GET_CODE (lhs) == NEG)
2281 coeff0 = double_int_minus_one;
2282 lhs = XEXP (lhs, 0);
2284 else if (GET_CODE (lhs) == MULT
2285 && CONST_INT_P (XEXP (lhs, 1)))
2287 coeff0 = double_int::from_shwi (INTVAL (XEXP (lhs, 1)));
2288 lhs = XEXP (lhs, 0);
2290 else if (GET_CODE (lhs) == ASHIFT
2291 && CONST_INT_P (XEXP (lhs, 1))
2292 && INTVAL (XEXP (lhs, 1)) >= 0
2293 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
2295 coeff0 = double_int_zero.set_bit (INTVAL (XEXP (lhs, 1)));
2296 lhs = XEXP (lhs, 0);
2299 if (GET_CODE (rhs) == NEG)
2301 coeff1 = double_int_minus_one;
2302 rhs = XEXP (rhs, 0);
2304 else if (GET_CODE (rhs) == MULT
2305 && CONST_INT_P (XEXP (rhs, 1)))
2307 coeff1 = double_int::from_shwi (INTVAL (XEXP (rhs, 1)));
2308 rhs = XEXP (rhs, 0);
2310 else if (GET_CODE (rhs) == ASHIFT
2311 && CONST_INT_P (XEXP (rhs, 1))
2312 && INTVAL (XEXP (rhs, 1)) >= 0
2313 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
2315 coeff1 = double_int_zero.set_bit (INTVAL (XEXP (rhs, 1)));
2316 rhs = XEXP (rhs, 0);
2319 if (rtx_equal_p (lhs, rhs))
2321 rtx orig = gen_rtx_PLUS (mode, op0, op1);
2322 rtx coeff;
2323 double_int val;
2324 bool speed = optimize_function_for_speed_p (cfun);
2326 val = coeff0 + coeff1;
2327 coeff = immed_double_int_const (val, mode);
2329 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2330 return set_src_cost (tem, speed) <= set_src_cost (orig, speed)
2331 ? tem : 0;
2335 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
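/* Adding the sign bit is equivalent to XORing it in (modulo the mode
precision), so the two XOR constants combine; e.g. in QImode,
(plus (xor X 0x01) 0x80) is (xor X 0x81). */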
2336 if (CONST_SCALAR_INT_P (op1)
2337 && GET_CODE (op0) == XOR
2338 && CONST_SCALAR_INT_P (XEXP (op0, 1))
2339 && mode_signbit_p (mode, op1))
2340 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2341 simplify_gen_binary (XOR, mode, op1,
2342 XEXP (op0, 1)));
2344 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
2345 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2346 && GET_CODE (op0) == MULT
2347 && GET_CODE (XEXP (op0, 0)) == NEG)
2349 rtx in1, in2;
2351 in1 = XEXP (XEXP (op0, 0), 0);
2352 in2 = XEXP (op0, 1);
2353 return simplify_gen_binary (MINUS, mode, op1,
2354 simplify_gen_binary (MULT, mode,
2355 in1, in2));
2358 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
2359 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
2360 is 1. */
2361 if (COMPARISON_P (op0)
2362 && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
2363 || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
2364 && (reversed = reversed_comparison (op0, mode)))
2365 return
2366 simplify_gen_unary (NEG, mode, reversed, mode);
2368 /* If one of the operands is a PLUS or a MINUS, see if we can
2369 simplify this by the associative law.
2370 Don't use the associative law for floating point.
2371 The inaccuracy makes it nonassociative,
2372 and subtle programs can break if operations are associated. */
2374 if (INTEGRAL_MODE_P (mode)
2375 && (plus_minus_operand_p (op0)
2376 || plus_minus_operand_p (op1))
2377 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2378 return tem;
2380 /* Reassociate floating point addition only when the user
2381 specifies associative math operations. */
2382 if (FLOAT_MODE_P (mode)
2383 && flag_associative_math)
2385 tem = simplify_associative_operation (code, mode, op0, op1);
2386 if (tem)
2387 return tem;
2389 break;
2391 case COMPARE:
2392 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
2393 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
2394 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
2395 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
2397 rtx xop00 = XEXP (op0, 0);
2398 rtx xop10 = XEXP (op1, 0);
2400 #ifdef HAVE_cc0
2401 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
2402 #else
2403 if (REG_P (xop00) && REG_P (xop10)
2404 && GET_MODE (xop00) == GET_MODE (xop10)
2405 && REGNO (xop00) == REGNO (xop10)
2406 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
2407 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
2408 #endif
2409 return xop00;
2411 break;
2413 case MINUS:
2414 /* We can't assume x-x is 0 even with non-IEEE floating point,
2415 but since it is zero except in very strange circumstances, we
2416 will treat it as zero with -ffinite-math-only. */
2417 if (rtx_equal_p (trueop0, trueop1)
2418 && ! side_effects_p (op0)
2419 && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
2420 return CONST0_RTX (mode);
2422 /* Change subtraction from zero into negation. (0 - x) is the
2423 same as -x when x is NaN, infinite, or finite and nonzero.
2424 But if the mode has signed zeros, and does not round towards
2425 -infinity, then 0 - 0 is 0, not -0. */
2426 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
2427 return simplify_gen_unary (NEG, mode, op1, mode);
2429 /* (-1 - a) is ~a. */
2430 if (trueop0 == constm1_rtx)
2431 return simplify_gen_unary (NOT, mode, op1, mode);
2433 /* Subtracting 0 has no effect unless the mode has signed zeros
2434 and supports rounding towards -infinity. In such a case,
2435 0 - 0 is -0. */
2436 if (!(HONOR_SIGNED_ZEROS (mode)
2437 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
2438 && trueop1 == CONST0_RTX (mode))
2439 return op0;
2441 /* See if this is something like X * C - X or vice versa or
2442 if the multiplication is written as a shift. If so, we can
2443 distribute and make a new multiply, shift, or maybe just
2444 have X (if C is 2 in the example above). But don't make
2445 something more expensive than we had before. */
2447 if (SCALAR_INT_MODE_P (mode))
2449 double_int coeff0, negcoeff1;
2450 rtx lhs = op0, rhs = op1;
2452 coeff0 = double_int_one;
2453 negcoeff1 = double_int_minus_one;
2455 if (GET_CODE (lhs) == NEG)
2457 coeff0 = double_int_minus_one;
2458 lhs = XEXP (lhs, 0);
2460 else if (GET_CODE (lhs) == MULT
2461 && CONST_INT_P (XEXP (lhs, 1)))
2463 coeff0 = double_int::from_shwi (INTVAL (XEXP (lhs, 1)));
2464 lhs = XEXP (lhs, 0);
2466 else if (GET_CODE (lhs) == ASHIFT
2467 && CONST_INT_P (XEXP (lhs, 1))
2468 && INTVAL (XEXP (lhs, 1)) >= 0
2469 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
2471 coeff0 = double_int_zero.set_bit (INTVAL (XEXP (lhs, 1)));
2472 lhs = XEXP (lhs, 0);
2475 if (GET_CODE (rhs) == NEG)
2477 negcoeff1 = double_int_one;
2478 rhs = XEXP (rhs, 0);
2480 else if (GET_CODE (rhs) == MULT
2481 && CONST_INT_P (XEXP (rhs, 1)))
2483 negcoeff1 = double_int::from_shwi (-INTVAL (XEXP (rhs, 1)));
2484 rhs = XEXP (rhs, 0);
2486 else if (GET_CODE (rhs) == ASHIFT
2487 && CONST_INT_P (XEXP (rhs, 1))
2488 && INTVAL (XEXP (rhs, 1)) >= 0
2489 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
2491 negcoeff1 = double_int_zero.set_bit (INTVAL (XEXP (rhs, 1)));
2492 negcoeff1 = -negcoeff1;
2493 rhs = XEXP (rhs, 0);
2496 if (rtx_equal_p (lhs, rhs))
2498 rtx orig = gen_rtx_MINUS (mode, op0, op1);
2499 rtx coeff;
2500 double_int val;
2501 bool speed = optimize_function_for_speed_p (cfun);
2503 val = coeff0 + negcoeff1;
2504 coeff = immed_double_int_const (val, mode);
2506 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2507 return set_src_cost (tem, speed) <= set_src_cost (orig, speed)
2508 ? tem : 0;
2512 /* (a - (-b)) -> (a + b). True even for IEEE. */
2513 if (GET_CODE (op1) == NEG)
2514 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
2516 /* (-x - c) may be simplified as (-c - x). */
2517 if (GET_CODE (op0) == NEG
2518 && (CONST_SCALAR_INT_P (op1) || CONST_DOUBLE_AS_FLOAT_P (op1)))
2520 tem = simplify_unary_operation (NEG, mode, op1, mode);
2521 if (tem)
2522 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
2525 /* Don't let a relocatable value get a negative coeff. */
2526 if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode)
2527 return simplify_gen_binary (PLUS, mode,
2528 op0,
2529 neg_const_int (mode, op1));
2531 /* (x - (x & y)) -> (x & ~y) */
2532 if (INTEGRAL_MODE_P (mode) && GET_CODE (op1) == AND)
2534 if (rtx_equal_p (op0, XEXP (op1, 0)))
2536 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
2537 GET_MODE (XEXP (op1, 1)));
2538 return simplify_gen_binary (AND, mode, op0, tem);
2540 if (rtx_equal_p (op0, XEXP (op1, 1)))
2542 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
2543 GET_MODE (XEXP (op1, 0)));
2544 return simplify_gen_binary (AND, mode, op0, tem);
2548 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
2549 by reversing the comparison code if valid. */
2550 if (STORE_FLAG_VALUE == 1
2551 && trueop0 == const1_rtx
2552 && COMPARISON_P (op1)
2553 && (reversed = reversed_comparison (op1, mode)))
2554 return reversed;
2556 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
2557 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2558 && GET_CODE (op1) == MULT
2559 && GET_CODE (XEXP (op1, 0)) == NEG)
2561 rtx in1, in2;
2563 in1 = XEXP (XEXP (op1, 0), 0);
2564 in2 = XEXP (op1, 1);
2565 return simplify_gen_binary (PLUS, mode,
2566 simplify_gen_binary (MULT, mode,
2567 in1, in2),
2568 op0);
2571 /* Canonicalize (minus (neg A) (mult B C)) to
2572 (minus (mult (neg B) C) A). */
2573 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2574 && GET_CODE (op1) == MULT
2575 && GET_CODE (op0) == NEG)
2577 rtx in1, in2;
2579 in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
2580 in2 = XEXP (op1, 1);
2581 return simplify_gen_binary (MINUS, mode,
2582 simplify_gen_binary (MULT, mode,
2583 in1, in2),
2584 XEXP (op0, 0));
2587 /* If one of the operands is a PLUS or a MINUS, see if we can
2588 simplify this by the associative law. This will, for example,
2589 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
2590 Don't use the associative law for floating point.
2591 The inaccuracy makes it nonassociative,
2592 and subtle programs can break if operations are associated. */
2594 if (INTEGRAL_MODE_P (mode)
2595 && (plus_minus_operand_p (op0)
2596 || plus_minus_operand_p (op1))
2597 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2598 return tem;
2599 break;
2601 case MULT:
2602 if (trueop1 == constm1_rtx)
2603 return simplify_gen_unary (NEG, mode, op0, mode);
2605 if (GET_CODE (op0) == NEG)
2607 rtx temp = simplify_unary_operation (NEG, mode, op1, mode);
2608 /* If op1 is a MULT as well and simplify_unary_operation
2609 just moved the NEG to the second operand, simplify_gen_binary
2610 below could, through simplify_associative_operation, move
2611 the NEG around again and recurse endlessly. */
2612 if (temp
2613 && GET_CODE (op1) == MULT
2614 && GET_CODE (temp) == MULT
2615 && XEXP (op1, 0) == XEXP (temp, 0)
2616 && GET_CODE (XEXP (temp, 1)) == NEG
2617 && XEXP (op1, 1) == XEXP (XEXP (temp, 1), 0))
2618 temp = NULL_RTX;
2619 if (temp)
2620 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), temp);
2622 if (GET_CODE (op1) == NEG)
2624 rtx temp = simplify_unary_operation (NEG, mode, op0, mode);
2625 /* If op0 is a MULT as well and simplify_unary_operation
2626 just moved the NEG to the second operand, simplify_gen_binary
2627 below could, through simplify_associative_operation, move
2628 the NEG around again and recurse endlessly. */
2629 if (temp
2630 && GET_CODE (op0) == MULT
2631 && GET_CODE (temp) == MULT
2632 && XEXP (op0, 0) == XEXP (temp, 0)
2633 && GET_CODE (XEXP (temp, 1)) == NEG
2634 && XEXP (op0, 1) == XEXP (XEXP (temp, 1), 0))
2635 temp = NULL_RTX;
2636 if (temp)
2637 return simplify_gen_binary (MULT, mode, temp, XEXP (op1, 0));
2640 /* Maybe simplify x * 0 to 0. The reduction is not valid if
2641 x is NaN, since x * 0 is then also NaN. Nor is it valid
2642 when the mode has signed zeros, since multiplying a negative
2643 number by 0 will give -0, not 0. */
2644 if (!HONOR_NANS (mode)
2645 && !HONOR_SIGNED_ZEROS (mode)
2646 && trueop1 == CONST0_RTX (mode)
2647 && ! side_effects_p (op0))
2648 return op1;
2650 /* In IEEE floating point, x*1 is not equivalent to x for
2651 signalling NaNs. */
2652 if (!HONOR_SNANS (mode)
2653 && trueop1 == CONST1_RTX (mode))
2654 return op0;
2656 /* Convert multiply by constant power of two into shift unless
2657 we are still generating RTL. This test is a kludge. */
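/* E.g. (mult X 8) becomes (ashift X 3). */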
2658 if (CONST_INT_P (trueop1)
2659 && (val = exact_log2 (UINTVAL (trueop1))) >= 0
2660 /* If the mode is larger than the host word size, and the
2661 uppermost bit is set, then this isn't a power of two due
2662 to implicit sign extension. */
2663 && (width <= HOST_BITS_PER_WIDE_INT
2664 || val != HOST_BITS_PER_WIDE_INT - 1))
2665 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
2667 /* Likewise for multipliers wider than a word. */
2668 if (CONST_DOUBLE_AS_INT_P (trueop1)
2669 && GET_MODE (op0) == mode
2670 && CONST_DOUBLE_LOW (trueop1) == 0
2671 && (val = exact_log2 (CONST_DOUBLE_HIGH (trueop1))) >= 0
2672 && (val < HOST_BITS_PER_DOUBLE_INT - 1
2673 || GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_DOUBLE_INT))
2674 return simplify_gen_binary (ASHIFT, mode, op0,
2675 GEN_INT (val + HOST_BITS_PER_WIDE_INT));
2677 /* x*2 is x+x and x*(-1) is -x */
2678 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
2679 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
2680 && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
2681 && GET_MODE (op0) == mode)
2683 REAL_VALUE_TYPE d;
2684 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2686 if (REAL_VALUES_EQUAL (d, dconst2))
2687 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
2689 if (!HONOR_SNANS (mode)
2690 && REAL_VALUES_EQUAL (d, dconstm1))
2691 return simplify_gen_unary (NEG, mode, op0, mode);
2694 /* Optimize -x * -x as x * x. */
2695 if (FLOAT_MODE_P (mode)
2696 && GET_CODE (op0) == NEG
2697 && GET_CODE (op1) == NEG
2698 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2699 && !side_effects_p (XEXP (op0, 0)))
2700 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2702 /* Likewise, optimize abs(x) * abs(x) as x * x. */
2703 if (SCALAR_FLOAT_MODE_P (mode)
2704 && GET_CODE (op0) == ABS
2705 && GET_CODE (op1) == ABS
2706 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2707 && !side_effects_p (XEXP (op0, 0)))
2708 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2710 /* Reassociate multiplication, but for floating point MULTs
2711 only when the user specifies unsafe math optimizations. */
2712 if (! FLOAT_MODE_P (mode)
2713 || flag_unsafe_math_optimizations)
2715 tem = simplify_associative_operation (code, mode, op0, op1);
2716 if (tem)
2717 return tem;
2719 break;
2721 case IOR:
2722 if (trueop1 == CONST0_RTX (mode))
2723 return op0;
2724 if (INTEGRAL_MODE_P (mode)
2725 && trueop1 == CONSTM1_RTX (mode)
2726 && !side_effects_p (op0))
2727 return op1;
2728 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2729 return op0;
2730 /* A | (~A) -> -1 */
2731 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2732 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2733 && ! side_effects_p (op0)
2734 && SCALAR_INT_MODE_P (mode))
2735 return constm1_rtx;
2737 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
2738 if (CONST_INT_P (op1)
2739 && HWI_COMPUTABLE_MODE_P (mode)
2740 && (nonzero_bits (op0, mode) & ~UINTVAL (op1)) == 0
2741 && !side_effects_p (op0))
2742 return op1;
2744 /* Canonicalize (X & C1) | C2. */
2745 if (GET_CODE (op0) == AND
2746 && CONST_INT_P (trueop1)
2747 && CONST_INT_P (XEXP (op0, 1)))
2749 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
2750 HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
2751 HOST_WIDE_INT c2 = INTVAL (trueop1);
2753 /* If (C1&C2) == C1, then (X&C1)|C2 becomes C2. */
2754 if ((c1 & c2) == c1
2755 && !side_effects_p (XEXP (op0, 0)))
2756 return trueop1;
2758 /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
2759 if (((c1|c2) & mask) == mask)
2760 return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);
2762 /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2. */
2763 if (((c1 & ~c2) & mask) != (c1 & mask))
2765 tem = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2766 gen_int_mode (c1 & ~c2, mode));
2767 return simplify_gen_binary (IOR, mode, tem, op1);
2771 /* Convert (A & B) | A to A. */
2772 if (GET_CODE (op0) == AND
2773 && (rtx_equal_p (XEXP (op0, 0), op1)
2774 || rtx_equal_p (XEXP (op0, 1), op1))
2775 && ! side_effects_p (XEXP (op0, 0))
2776 && ! side_effects_p (XEXP (op0, 1)))
2777 return op1;
2779 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2780 mode size to (rotate A CX). */
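/* E.g. in SImode, (ior (ashift X 24) (lshiftrt X 8)) becomes
(rotate X 24), since 24 + 8 equals the mode precision. */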
2782 if (GET_CODE (op1) == ASHIFT
2783 || GET_CODE (op1) == SUBREG)
2785 opleft = op1;
2786 opright = op0;
2788 else
2790 opright = op1;
2791 opleft = op0;
2794 if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
2795 && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
2796 && CONST_INT_P (XEXP (opleft, 1))
2797 && CONST_INT_P (XEXP (opright, 1))
2798 && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
2799 == GET_MODE_PRECISION (mode)))
2800 return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
2802 /* Same, but for ashift that has been "simplified" to a wider mode
2803 by simplify_shift_const. */
2805 if (GET_CODE (opleft) == SUBREG
2806 && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
2807 && GET_CODE (opright) == LSHIFTRT
2808 && GET_CODE (XEXP (opright, 0)) == SUBREG
2809 && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
2810 && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
2811 && (GET_MODE_SIZE (GET_MODE (opleft))
2812 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
2813 && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
2814 SUBREG_REG (XEXP (opright, 0)))
2815 && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
2816 && CONST_INT_P (XEXP (opright, 1))
2817 && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
2818 == GET_MODE_PRECISION (mode)))
2819 return gen_rtx_ROTATE (mode, XEXP (opright, 0),
2820 XEXP (SUBREG_REG (opleft), 1));
2822 /* If we have (ior (and X C1) C2), simplify this by making
2823 C1 as small as possible if C1 actually changes. */
2824 if (CONST_INT_P (op1)
2825 && (HWI_COMPUTABLE_MODE_P (mode)
2826 || INTVAL (op1) > 0)
2827 && GET_CODE (op0) == AND
2828 && CONST_INT_P (XEXP (op0, 1))
2829 && CONST_INT_P (op1)
2830 && (UINTVAL (XEXP (op0, 1)) & UINTVAL (op1)) != 0)
2832 rtx tmp = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2833 gen_int_mode (UINTVAL (XEXP (op0, 1))
2834 & ~UINTVAL (op1),
2835 mode));
2836 return simplify_gen_binary (IOR, mode, tmp, op1);
2839 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2840 a (sign_extend (plus ...)). If OP1 is a CONST_INT and the
2841 PLUS does not affect any of the bits in OP1, we can do the
2842 IOR as a PLUS and associate. This is valid if OP1
2843 can be safely shifted left C bits. */
2844 if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
2845 && GET_CODE (XEXP (op0, 0)) == PLUS
2846 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
2847 && CONST_INT_P (XEXP (op0, 1))
2848 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
2850 int count = INTVAL (XEXP (op0, 1));
2851 HOST_WIDE_INT mask = INTVAL (trueop1) << count;
2853 if (mask >> count == INTVAL (trueop1)
2854 && trunc_int_for_mode (mask, mode) == mask
2855 && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
2856 return simplify_gen_binary (ASHIFTRT, mode,
2857 plus_constant (mode, XEXP (op0, 0),
2858 mask),
2859 XEXP (op0, 1));
2862 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
2863 if (tem)
2864 return tem;
2866 tem = simplify_associative_operation (code, mode, op0, op1);
2867 if (tem)
2868 return tem;
2869 break;
2871 case XOR:
2872 if (trueop1 == CONST0_RTX (mode))
2873 return op0;
2874 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2875 return simplify_gen_unary (NOT, mode, op0, mode);
2876 if (rtx_equal_p (trueop0, trueop1)
2877 && ! side_effects_p (op0)
2878 && GET_MODE_CLASS (mode) != MODE_CC)
2879 return CONST0_RTX (mode);
2881 /* Canonicalize XOR of the most significant bit to PLUS. */
2882 if (CONST_SCALAR_INT_P (op1)
2883 && mode_signbit_p (mode, op1))
2884 return simplify_gen_binary (PLUS, mode, op0, op1);
2885 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2886 if (CONST_SCALAR_INT_P (op1)
2887 && GET_CODE (op0) == PLUS
2888 && CONST_SCALAR_INT_P (XEXP (op0, 1))
2889 && mode_signbit_p (mode, XEXP (op0, 1)))
2890 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2891 simplify_gen_binary (XOR, mode, op1,
2892 XEXP (op0, 1)));
2894 /* If we are XORing two things that have no bits in common,
2895 convert them into an IOR. This helps to detect rotation encoded
2896 using those methods and possibly other simplifications. */
2898 if (HWI_COMPUTABLE_MODE_P (mode)
2899 && (nonzero_bits (op0, mode)
2900 & nonzero_bits (op1, mode)) == 0)
2901 return (simplify_gen_binary (IOR, mode, op0, op1));
2903 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2904 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2905 (NOT y). */
2907 int num_negated = 0;
2909 if (GET_CODE (op0) == NOT)
2910 num_negated++, op0 = XEXP (op0, 0);
2911 if (GET_CODE (op1) == NOT)
2912 num_negated++, op1 = XEXP (op1, 0);
2914 if (num_negated == 2)
2915 return simplify_gen_binary (XOR, mode, op0, op1);
2916 else if (num_negated == 1)
2917 return simplify_gen_unary (NOT, mode,
2918 simplify_gen_binary (XOR, mode, op0, op1),
2919 mode);
2922 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2923 correspond to a machine insn or result in further simplifications
2924 if B is a constant. */
2926 if (GET_CODE (op0) == AND
2927 && rtx_equal_p (XEXP (op0, 1), op1)
2928 && ! side_effects_p (op1))
2929 return simplify_gen_binary (AND, mode,
2930 simplify_gen_unary (NOT, mode,
2931 XEXP (op0, 0), mode),
2932 op1);
2934 else if (GET_CODE (op0) == AND
2935 && rtx_equal_p (XEXP (op0, 0), op1)
2936 && ! side_effects_p (op1))
2937 return simplify_gen_binary (AND, mode,
2938 simplify_gen_unary (NOT, mode,
2939 XEXP (op0, 1), mode),
2940 op1);
2942 /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
2943 we can transform like this:
2944 (A&B)^C == ~(A&B)&C | ~C&(A&B)
2945 == (~A|~B)&C | ~C&(A&B) * DeMorgan's Law
2946 == ~A&C | ~B&C | A&(~C&B) * Distribute and re-order
2947 Attempt a few simplifications when B and C are both constants. */
2948 if (GET_CODE (op0) == AND
2949 && CONST_INT_P (op1)
2950 && CONST_INT_P (XEXP (op0, 1)))
2952 rtx a = XEXP (op0, 0);
2953 rtx b = XEXP (op0, 1);
2954 rtx c = op1;
2955 HOST_WIDE_INT bval = INTVAL (b);
2956 HOST_WIDE_INT cval = INTVAL (c);
2958 rtx na_c
2959 = simplify_binary_operation (AND, mode,
2960 simplify_gen_unary (NOT, mode, a, mode),
2962 if ((~cval & bval) == 0)
2964 /* Try to simplify ~A&C | ~B&C. */
2965 if (na_c != NULL_RTX)
2966 return simplify_gen_binary (IOR, mode, na_c,
2967 gen_int_mode (~bval & cval, mode));
2969 else
2971 /* If ~A&C is zero, simplify A&(~C&B) | ~B&C. */
2972 if (na_c == const0_rtx)
2974 rtx a_nc_b = simplify_gen_binary (AND, mode, a,
2975 gen_int_mode (~cval & bval,
2976 mode));
2977 return simplify_gen_binary (IOR, mode, a_nc_b,
2978 gen_int_mode (~bval & cval,
2979 mode));
2984 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2985 comparison if STORE_FLAG_VALUE is 1. */
2986 if (STORE_FLAG_VALUE == 1
2987 && trueop1 == const1_rtx
2988 && COMPARISON_P (op0)
2989 && (reversed = reversed_comparison (op0, mode)))
2990 return reversed;
2992 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2993 is (lt foo (const_int 0)), so we can perform the above
2994 simplification if STORE_FLAG_VALUE is 1. */
2996 if (STORE_FLAG_VALUE == 1
2997 && trueop1 == const1_rtx
2998 && GET_CODE (op0) == LSHIFTRT
2999 && CONST_INT_P (XEXP (op0, 1))
3000 && INTVAL (XEXP (op0, 1)) == GET_MODE_PRECISION (mode) - 1)
3001 return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);
3003 /* (xor (comparison foo bar) (const_int sign-bit))
3004 when STORE_FLAG_VALUE is the sign bit. */
3005 if (val_signbit_p (mode, STORE_FLAG_VALUE)
3006 && trueop1 == const_true_rtx
3007 && COMPARISON_P (op0)
3008 && (reversed = reversed_comparison (op0, mode)))
3009 return reversed;
3011 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
3012 if (tem)
3013 return tem;
3015 tem = simplify_associative_operation (code, mode, op0, op1);
3016 if (tem)
3017 return tem;
3018 break;
3020 case AND:
3021 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
3022 return trueop1;
3023 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
3024 return op0;
3025 if (HWI_COMPUTABLE_MODE_P (mode))
3027 HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
3028 HOST_WIDE_INT nzop1;
3029 if (CONST_INT_P (trueop1))
3031 HOST_WIDE_INT val1 = INTVAL (trueop1);
3032 /* If we are turning off bits already known off in OP0, we need
3033 not do an AND. */
3034 if ((nzop0 & ~val1) == 0)
3035 return op0;
3037 nzop1 = nonzero_bits (trueop1, mode);
3038 /* If we are clearing all the nonzero bits, the result is zero. */
3039 if ((nzop1 & nzop0) == 0
3040 && !side_effects_p (op0) && !side_effects_p (op1))
3041 return CONST0_RTX (mode);
3043 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
3044 && GET_MODE_CLASS (mode) != MODE_CC)
3045 return op0;
3046 /* A & (~A) -> 0 */
3047 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
3048 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
3049 && ! side_effects_p (op0)
3050 && GET_MODE_CLASS (mode) != MODE_CC)
3051 return CONST0_RTX (mode);
3053 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
3054 there are no nonzero bits of C outside of X's mode. */
3055 if ((GET_CODE (op0) == SIGN_EXTEND
3056 || GET_CODE (op0) == ZERO_EXTEND)
3057 && CONST_INT_P (trueop1)
3058 && HWI_COMPUTABLE_MODE_P (mode)
3059 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
3060 & UINTVAL (trueop1)) == 0)
3062 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
3063 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
3064 gen_int_mode (INTVAL (trueop1),
3065 imode));
3066 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
3069 /* Transform (and (truncate X) C) into (truncate (and X C)). This way
3070 we might be able to further simplify the AND with X and potentially
3071 remove the truncation altogether. */
3072 if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
3074 rtx x = XEXP (op0, 0);
3075 enum machine_mode xmode = GET_MODE (x);
3076 tem = simplify_gen_binary (AND, xmode, x,
3077 gen_int_mode (INTVAL (trueop1), xmode));
3078 return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
3081 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
3082 if (GET_CODE (op0) == IOR
3083 && CONST_INT_P (trueop1)
3084 && CONST_INT_P (XEXP (op0, 1)))
3086 HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
3087 return simplify_gen_binary (IOR, mode,
3088 simplify_gen_binary (AND, mode,
3089 XEXP (op0, 0), op1),
3090 gen_int_mode (tmp, mode));
3093 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
3094 insn (and may simplify more). */
3095 if (GET_CODE (op0) == XOR
3096 && rtx_equal_p (XEXP (op0, 0), op1)
3097 && ! side_effects_p (op1))
3098 return simplify_gen_binary (AND, mode,
3099 simplify_gen_unary (NOT, mode,
3100 XEXP (op0, 1), mode),
3101 op1);
3103 if (GET_CODE (op0) == XOR
3104 && rtx_equal_p (XEXP (op0, 1), op1)
3105 && ! side_effects_p (op1))
3106 return simplify_gen_binary (AND, mode,
3107 simplify_gen_unary (NOT, mode,
3108 XEXP (op0, 0), mode),
3109 op1);
3111 /* Similarly for (~(A ^ B)) & A. */
3112 if (GET_CODE (op0) == NOT
3113 && GET_CODE (XEXP (op0, 0)) == XOR
3114 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
3115 && ! side_effects_p (op1))
3116 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
3118 if (GET_CODE (op0) == NOT
3119 && GET_CODE (XEXP (op0, 0)) == XOR
3120 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
3121 && ! side_effects_p (op1))
3122 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
3124 /* Convert (A | B) & A to A. */
3125 if (GET_CODE (op0) == IOR
3126 && (rtx_equal_p (XEXP (op0, 0), op1)
3127 || rtx_equal_p (XEXP (op0, 1), op1))
3128 && ! side_effects_p (XEXP (op0, 0))
3129 && ! side_effects_p (XEXP (op0, 1)))
3130 return op1;
3132 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
3133 ((A & N) + B) & M -> (A + B) & M
3134 Similarly if (N & M) == 0,
3135 ((A | N) + B) & M -> (A + B) & M
3136 and for - instead of + and/or ^ instead of |.
3137 Also, if (N & M) == 0, then
3138 (A +- N) & M -> A & M. */
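/* For example, with M == 0xff and N == 0xffff, (N & M) == M holds, so
((A & 0xffff) + B) & 0xff becomes (A + B) & 0xff: the inner AND only
clears bits above M, and those bits cannot influence the low bits of
the sum because carries propagate upward only. */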
3139 if (CONST_INT_P (trueop1)
3140 && HWI_COMPUTABLE_MODE_P (mode)
3141 && ~UINTVAL (trueop1)
3142 && (UINTVAL (trueop1) & (UINTVAL (trueop1) + 1)) == 0
3143 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
3145 rtx pmop[2];
3146 int which;
3148 pmop[0] = XEXP (op0, 0);
3149 pmop[1] = XEXP (op0, 1);
3151 if (CONST_INT_P (pmop[1])
3152 && (UINTVAL (pmop[1]) & UINTVAL (trueop1)) == 0)
3153 return simplify_gen_binary (AND, mode, pmop[0], op1);
3155 for (which = 0; which < 2; which++)
3157 tem = pmop[which];
3158 switch (GET_CODE (tem))
3160 case AND:
3161 if (CONST_INT_P (XEXP (tem, 1))
3162 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1))
3163 == UINTVAL (trueop1))
3164 pmop[which] = XEXP (tem, 0);
3165 break;
3166 case IOR:
3167 case XOR:
3168 if (CONST_INT_P (XEXP (tem, 1))
3169 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1)) == 0)
3170 pmop[which] = XEXP (tem, 0);
3171 break;
3172 default:
3173 break;
3177 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
3179 tem = simplify_gen_binary (GET_CODE (op0), mode,
3180 pmop[0], pmop[1]);
3181 return simplify_gen_binary (code, mode, tem, op1);
3185 /* (and X (ior (not X) Y)) -> (and X Y) */
3186 if (GET_CODE (op1) == IOR
3187 && GET_CODE (XEXP (op1, 0)) == NOT
3188 && op0 == XEXP (XEXP (op1, 0), 0))
3189 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));
3191 /* (and (ior (not X) Y) X) -> (and X Y) */
3192 if (GET_CODE (op0) == IOR
3193 && GET_CODE (XEXP (op0, 0)) == NOT
3194 && op1 == XEXP (XEXP (op0, 0), 0))
3195 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));
3197 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
3198 if (tem)
3199 return tem;
3201 tem = simplify_associative_operation (code, mode, op0, op1);
3202 if (tem)
3203 return tem;
3204 break;
3206 case UDIV:
3207 /* 0/x is 0 (or x&0 if x has side-effects). */
3208 if (trueop0 == CONST0_RTX (mode))
3210 if (side_effects_p (op1))
3211 return simplify_gen_binary (AND, mode, op1, trueop0);
3212 return trueop0;
3214 /* x/1 is x. */
3215 if (trueop1 == CONST1_RTX (mode))
3217 tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3218 if (tem)
3219 return tem;
3221 /* Convert divide by power of two into shift. */
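/* E.g. (udiv X 8) becomes (lshiftrt X 3). */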
3222 if (CONST_INT_P (trueop1)
3223 && (val = exact_log2 (UINTVAL (trueop1))) > 0)
3224 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
3225 break;
3227 case DIV:
3228 /* Handle floating point and integers separately. */
3229 if (SCALAR_FLOAT_MODE_P (mode))
3231 /* Maybe change 0.0 / x to 0.0. This transformation isn't
3232 safe for modes with NaNs, since 0.0 / 0.0 will then be
3233 NaN rather than 0.0. Nor is it safe for modes with signed
3234 zeros, since dividing 0 by a negative number gives -0.0 */
3235 if (trueop0 == CONST0_RTX (mode)
3236 && !HONOR_NANS (mode)
3237 && !HONOR_SIGNED_ZEROS (mode)
3238 && ! side_effects_p (op1))
3239 return op0;
3240 /* x/1.0 is x. */
3241 if (trueop1 == CONST1_RTX (mode)
3242 && !HONOR_SNANS (mode))
3243 return op0;
3245 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
3246 && trueop1 != CONST0_RTX (mode))
3248 REAL_VALUE_TYPE d;
3249 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
3251 /* x/-1.0 is -x. */
3252 if (REAL_VALUES_EQUAL (d, dconstm1)
3253 && !HONOR_SNANS (mode))
3254 return simplify_gen_unary (NEG, mode, op0, mode);
3256 /* Change FP division by a constant into multiplication.
3257 Only do this with -freciprocal-math. */
3258 if (flag_reciprocal_math
3259 && !REAL_VALUES_EQUAL (d, dconst0))
3261 REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
3262 tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
3263 return simplify_gen_binary (MULT, mode, op0, tem);
3267 else if (SCALAR_INT_MODE_P (mode))
3269 /* 0/x is 0 (or x&0 if x has side-effects). */
3270 if (trueop0 == CONST0_RTX (mode)
3271 && !cfun->can_throw_non_call_exceptions)
3273 if (side_effects_p (op1))
3274 return simplify_gen_binary (AND, mode, op1, trueop0);
3275 return trueop0;
3277 /* x/1 is x. */
3278 if (trueop1 == CONST1_RTX (mode))
3280 tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3281 if (tem)
3282 return tem;
3284 /* x/-1 is -x. */
3285 if (trueop1 == constm1_rtx)
3287 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3288 if (x)
3289 return simplify_gen_unary (NEG, mode, x, mode);
3292 break;
3294 case UMOD:
3295 /* 0%x is 0 (or x&0 if x has side-effects). */
3296 if (trueop0 == CONST0_RTX (mode))
3298 if (side_effects_p (op1))
3299 return simplify_gen_binary (AND, mode, op1, trueop0);
3300 return trueop0;
3302 /* x%1 is 0 (or x&0 if x has side-effects). */
3303 if (trueop1 == CONST1_RTX (mode))
3305 if (side_effects_p (op0))
3306 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3307 return CONST0_RTX (mode);
3309 /* Implement modulus by power of two as AND. */
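/* E.g. (umod X 8) becomes (and X 7). */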
3310 if (CONST_INT_P (trueop1)
3311 && exact_log2 (UINTVAL (trueop1)) > 0)
3312 return simplify_gen_binary (AND, mode, op0,
3313 gen_int_mode (INTVAL (op1) - 1, mode));
3314 break;
3316 case MOD:
3317 /* 0%x is 0 (or x&0 if x has side-effects). */
3318 if (trueop0 == CONST0_RTX (mode))
3320 if (side_effects_p (op1))
3321 return simplify_gen_binary (AND, mode, op1, trueop0);
3322 return trueop0;
3324 /* x%1 and x%-1 are 0 (or x&0 if x has side-effects). */
3325 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
3327 if (side_effects_p (op0))
3328 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3329 return CONST0_RTX (mode);
3331 break;
3333 case ROTATERT:
3334 case ROTATE:
3335 /* Canonicalize rotates by constant amount. If op1 is bitsize / 2,
3336 prefer left rotation; if op1 is from bitsize / 2 + 1 to
3337 bitsize - 1, use the other rotate direction with an amount of
3338 1 .. bitsize / 2 - 1 instead. */
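/* E.g. in SImode, (rotate X 24) is canonicalized to (rotatert X 8)
and (rotatert X 24) to (rotate X 8). */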
3339 #if defined(HAVE_rotate) && defined(HAVE_rotatert)
3340 if (CONST_INT_P (trueop1)
3341 && IN_RANGE (INTVAL (trueop1),
3342 GET_MODE_BITSIZE (mode) / 2 + (code == ROTATE),
3343 GET_MODE_BITSIZE (mode) - 1))
3344 return simplify_gen_binary (code == ROTATE ? ROTATERT : ROTATE,
3345 mode, op0, GEN_INT (GET_MODE_BITSIZE (mode)
3346 - INTVAL (trueop1)));
3347 #endif
3348 /* FALLTHRU */
3349 case ASHIFTRT:
3350 if (trueop1 == CONST0_RTX (mode))
3351 return op0;
3352 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3353 return op0;
3354 /* Rotating ~0 always results in ~0. */
3355 if (CONST_INT_P (trueop0) && width <= HOST_BITS_PER_WIDE_INT
3356 && UINTVAL (trueop0) == GET_MODE_MASK (mode)
3357 && ! side_effects_p (op1))
3358 return op0;
3359 canonicalize_shift:
3360 if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
3362 val = INTVAL (op1) & (GET_MODE_BITSIZE (mode) - 1);
3363 if (val != INTVAL (op1))
3364 return simplify_gen_binary (code, mode, op0, GEN_INT (val));
3366 break;
3368 case ASHIFT:
3369 case SS_ASHIFT:
3370 case US_ASHIFT:
3371 if (trueop1 == CONST0_RTX (mode))
3372 return op0;
3373 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3374 return op0;
3375 goto canonicalize_shift;
3377 case LSHIFTRT:
3378 if (trueop1 == CONST0_RTX (mode))
3379 return op0;
3380 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3381 return op0;
3382 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
3383 if (GET_CODE (op0) == CLZ
3384 && CONST_INT_P (trueop1)
3385 && STORE_FLAG_VALUE == 1
3386 && INTVAL (trueop1) < (HOST_WIDE_INT)width)
3388 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
3389 unsigned HOST_WIDE_INT zero_val = 0;
3391 if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
3392 && zero_val == GET_MODE_PRECISION (imode)
3393 && INTVAL (trueop1) == exact_log2 (zero_val))
3394 return simplify_gen_relational (EQ, mode, imode,
3395 XEXP (op0, 0), const0_rtx);
3397 goto canonicalize_shift;
3399 case SMIN:
3400 if (width <= HOST_BITS_PER_WIDE_INT
3401 && mode_signbit_p (mode, trueop1)
3402 && ! side_effects_p (op0))
3403 return op1;
3404 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3405 return op0;
3406 tem = simplify_associative_operation (code, mode, op0, op1);
3407 if (tem)
3408 return tem;
3409 break;
3411 case SMAX:
3412 if (width <= HOST_BITS_PER_WIDE_INT
3413 && CONST_INT_P (trueop1)
3414 && (UINTVAL (trueop1) == GET_MODE_MASK (mode) >> 1)
3415 && ! side_effects_p (op0))
3416 return op1;
3417 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3418 return op0;
3419 tem = simplify_associative_operation (code, mode, op0, op1);
3420 if (tem)
3421 return tem;
3422 break;
3424 case UMIN:
3425 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
3426 return op1;
3427 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3428 return op0;
3429 tem = simplify_associative_operation (code, mode, op0, op1);
3430 if (tem)
3431 return tem;
3432 break;
3434 case UMAX:
3435 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
3436 return op1;
3437 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3438 return op0;
3439 tem = simplify_associative_operation (code, mode, op0, op1);
3440 if (tem)
3441 return tem;
3442 break;
3444 case SS_PLUS:
3445 case US_PLUS:
3446 case SS_MINUS:
3447 case US_MINUS:
3448 case SS_MULT:
3449 case US_MULT:
3450 case SS_DIV:
3451 case US_DIV:
3452 /* ??? There are simplifications that can be done. */
3453 return 0;
3455 case VEC_SELECT:
3456 if (!VECTOR_MODE_P (mode))
3458 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3459 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
3460 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3461 gcc_assert (XVECLEN (trueop1, 0) == 1);
3462 gcc_assert (CONST_INT_P (XVECEXP (trueop1, 0, 0)));
3464 if (GET_CODE (trueop0) == CONST_VECTOR)
3465 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
3466 (trueop1, 0, 0)));
3468 /* Extract a scalar element from a nested VEC_SELECT expression
3469 (with optional nested VEC_CONCAT expression). Some targets
3470 (i386) extract a scalar element from a vector using a chain of
3471 nested VEC_SELECT expressions. When the input operand is a memory
3472 operand, this operation can be simplified to a simple scalar
3473 load from an offset memory address. */
3474 if (GET_CODE (trueop0) == VEC_SELECT)
3476 rtx op0 = XEXP (trueop0, 0);
3477 rtx op1 = XEXP (trueop0, 1);
3479 enum machine_mode opmode = GET_MODE (op0);
3480 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
3481 int n_elts = GET_MODE_SIZE (opmode) / elt_size;
3483 int i = INTVAL (XVECEXP (trueop1, 0, 0));
3484 int elem;
3486 rtvec vec;
3487 rtx tmp_op, tmp;
3489 gcc_assert (GET_CODE (op1) == PARALLEL);
3490 gcc_assert (i < n_elts);
3492 /* Select the element pointed to by the nested selector. */
3493 elem = INTVAL (XVECEXP (op1, 0, i));
3495 /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */
3496 if (GET_CODE (op0) == VEC_CONCAT)
3498 rtx op00 = XEXP (op0, 0);
3499 rtx op01 = XEXP (op0, 1);
3501 enum machine_mode mode00, mode01;
3502 int n_elts00, n_elts01;
3504 mode00 = GET_MODE (op00);
3505 mode01 = GET_MODE (op01);
3507 /* Find out number of elements of each operand. */
3508 if (VECTOR_MODE_P (mode00))
3510 elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode00));
3511 n_elts00 = GET_MODE_SIZE (mode00) / elt_size;
3513 else
3514 n_elts00 = 1;
3516 if (VECTOR_MODE_P (mode01))
3518 elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode01));
3519 n_elts01 = GET_MODE_SIZE (mode01) / elt_size;
3521 else
3522 n_elts01 = 1;
3524 gcc_assert (n_elts == n_elts00 + n_elts01);
3526 /* Select correct operand of VEC_CONCAT
3527 and adjust selector. */
3528 if (elem < n_elts01)
3529 tmp_op = op00;
3530 else
3532 tmp_op = op01;
3533 elem -= n_elts00;
3536 else
3537 tmp_op = op0;
3539 vec = rtvec_alloc (1);
3540 RTVEC_ELT (vec, 0) = GEN_INT (elem);
3542 tmp = gen_rtx_fmt_ee (code, mode,
3543 tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
3544 return tmp;
3546 if (GET_CODE (trueop0) == VEC_DUPLICATE
3547 && GET_MODE (XEXP (trueop0, 0)) == mode)
3548 return XEXP (trueop0, 0);
3550 else
3552 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3553 gcc_assert (GET_MODE_INNER (mode)
3554 == GET_MODE_INNER (GET_MODE (trueop0)));
3555 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3557 if (GET_CODE (trueop0) == CONST_VECTOR)
3559 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3560 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3561 rtvec v = rtvec_alloc (n_elts);
3562 unsigned int i;
3564 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
3565 for (i = 0; i < n_elts; i++)
3567 rtx x = XVECEXP (trueop1, 0, i);
3569 gcc_assert (CONST_INT_P (x));
3570 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
3571 INTVAL (x));
3574 return gen_rtx_CONST_VECTOR (mode, v);
3577 /* Recognize the identity: selecting all elements in order is just TRUEOP0. */
3578 if (GET_MODE (trueop0) == mode)
3580 bool maybe_ident = true;
3581 for (int i = 0; i < XVECLEN (trueop1, 0); i++)
3583 rtx j = XVECEXP (trueop1, 0, i);
3584 if (!CONST_INT_P (j) || INTVAL (j) != i)
3586 maybe_ident = false;
3587 break;
3590 if (maybe_ident)
3591 return trueop0;
3594 /* If we build {a,b} then permute it, build the result directly. */
3595 if (XVECLEN (trueop1, 0) == 2
3596 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3597 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3598 && GET_CODE (trueop0) == VEC_CONCAT
3599 && GET_CODE (XEXP (trueop0, 0)) == VEC_CONCAT
3600 && GET_MODE (XEXP (trueop0, 0)) == mode
3601 && GET_CODE (XEXP (trueop0, 1)) == VEC_CONCAT
3602 && GET_MODE (XEXP (trueop0, 1)) == mode)
3604 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3605 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3606 rtx subop0, subop1;
3608 gcc_assert (i0 < 4 && i1 < 4);
3609 subop0 = XEXP (XEXP (trueop0, i0 / 2), i0 % 2);
3610 subop1 = XEXP (XEXP (trueop0, i1 / 2), i1 % 2);
3612 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3615 if (XVECLEN (trueop1, 0) == 2
3616 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3617 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3618 && GET_CODE (trueop0) == VEC_CONCAT
3619 && GET_MODE (trueop0) == mode)
3621 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3622 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3623 rtx subop0, subop1;
3625 gcc_assert (i0 < 2 && i1 < 2);
3626 subop0 = XEXP (trueop0, i0);
3627 subop1 = XEXP (trueop0, i1);
3629 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3633 if (XVECLEN (trueop1, 0) == 1
3634 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3635 && GET_CODE (trueop0) == VEC_CONCAT)
3637 rtx vec = trueop0;
3638 int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
3640 /* Try to find the element in the VEC_CONCAT. */
3641 while (GET_MODE (vec) != mode
3642 && GET_CODE (vec) == VEC_CONCAT)
3644 HOST_WIDE_INT vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
3645 if (offset < vec_size)
3646 vec = XEXP (vec, 0);
3647 else
3649 offset -= vec_size;
3650 vec = XEXP (vec, 1);
3652 vec = avoid_constant_pool_reference (vec);
3655 if (GET_MODE (vec) == mode)
3656 return vec;
3659 /* If we select elements in a vec_merge that all come from the same
3660 operand, select from that operand directly. */
3661 if (GET_CODE (op0) == VEC_MERGE)
3663 rtx trueop02 = avoid_constant_pool_reference (XEXP (op0, 2));
3664 if (CONST_INT_P (trueop02))
3666 unsigned HOST_WIDE_INT sel = UINTVAL (trueop02);
3667 bool all_operand0 = true;
3668 bool all_operand1 = true;
3669 for (int i = 0; i < XVECLEN (trueop1, 0); i++)
3671 rtx j = XVECEXP (trueop1, 0, i);
3672 if (sel & (1 << UINTVAL (j)))
3673 all_operand1 = false;
3674 else
3675 all_operand0 = false;
3677 if (all_operand0 && !side_effects_p (XEXP (op0, 1)))
3678 return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 0), op1);
3679 if (all_operand1 && !side_effects_p (XEXP (op0, 0)))
3680 return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 1), op1);
3684 return 0;
3685 case VEC_CONCAT:
3687 enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
3688 ? GET_MODE (trueop0)
3689 : GET_MODE_INNER (mode));
3690 enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
3691 ? GET_MODE (trueop1)
3692 : GET_MODE_INNER (mode));
3694 gcc_assert (VECTOR_MODE_P (mode));
3695 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
3696 == GET_MODE_SIZE (mode));
3698 if (VECTOR_MODE_P (op0_mode))
3699 gcc_assert (GET_MODE_INNER (mode)
3700 == GET_MODE_INNER (op0_mode));
3701 else
3702 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
3704 if (VECTOR_MODE_P (op1_mode))
3705 gcc_assert (GET_MODE_INNER (mode)
3706 == GET_MODE_INNER (op1_mode));
3707 else
3708 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
3710 if ((GET_CODE (trueop0) == CONST_VECTOR
3711 || CONST_SCALAR_INT_P (trueop0)
3712 || CONST_DOUBLE_AS_FLOAT_P (trueop0))
3713 && (GET_CODE (trueop1) == CONST_VECTOR
3714 || CONST_SCALAR_INT_P (trueop1)
3715 || CONST_DOUBLE_AS_FLOAT_P (trueop1)))
3717 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3718 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3719 rtvec v = rtvec_alloc (n_elts);
3720 unsigned int i;
3721 unsigned in_n_elts = 1;
3723 if (VECTOR_MODE_P (op0_mode))
3724 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
3725 for (i = 0; i < n_elts; i++)
3727 if (i < in_n_elts)
3729 if (!VECTOR_MODE_P (op0_mode))
3730 RTVEC_ELT (v, i) = trueop0;
3731 else
3732 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
3734 else
3736 if (!VECTOR_MODE_P (op1_mode))
3737 RTVEC_ELT (v, i) = trueop1;
3738 else
3739 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
3740 i - in_n_elts);
3744 return gen_rtx_CONST_VECTOR (mode, v);
3747 /* Try to merge two VEC_SELECTs from the same vector into a single one.
3748 Restrict the transformation to avoid generating a VEC_SELECT with a
3749 mode unrelated to its operand. */
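/* For example (hypothetical V4SI value X):
   (vec_concat:V4SI (vec_select:V2SI X (parallel [0 1]))
                    (vec_select:V2SI X (parallel [2 3])))
   becomes (vec_select:V4SI X (parallel [0 1 2 3])).  */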
3750 if (GET_CODE (trueop0) == VEC_SELECT
3751 && GET_CODE (trueop1) == VEC_SELECT
3752 && rtx_equal_p (XEXP (trueop0, 0), XEXP (trueop1, 0))
3753 && GET_MODE (XEXP (trueop0, 0)) == mode)
3755 rtx par0 = XEXP (trueop0, 1);
3756 rtx par1 = XEXP (trueop1, 1);
3757 int len0 = XVECLEN (par0, 0);
3758 int len1 = XVECLEN (par1, 0);
3759 rtvec vec = rtvec_alloc (len0 + len1);
3760 for (int i = 0; i < len0; i++)
3761 RTVEC_ELT (vec, i) = XVECEXP (par0, 0, i);
3762 for (int i = 0; i < len1; i++)
3763 RTVEC_ELT (vec, len0 + i) = XVECEXP (par1, 0, i);
3764 return simplify_gen_binary (VEC_SELECT, mode, XEXP (trueop0, 0),
3765 gen_rtx_PARALLEL (VOIDmode, vec));
3768 return 0;
3770 default:
3771 gcc_unreachable ();
3774 return 0;
rtx
3778 simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
3779 rtx op0, rtx op1)
3781 HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
3782 HOST_WIDE_INT val;
3783 unsigned int width = GET_MODE_PRECISION (mode);
3785 if (VECTOR_MODE_P (mode)
3786 && code != VEC_CONCAT
3787 && GET_CODE (op0) == CONST_VECTOR
3788 && GET_CODE (op1) == CONST_VECTOR)
3790 unsigned n_elts = GET_MODE_NUNITS (mode);
3791 enum machine_mode op0mode = GET_MODE (op0);
3792 unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
3793 enum machine_mode op1mode = GET_MODE (op1);
3794 unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
3795 rtvec v = rtvec_alloc (n_elts);
3796 unsigned int i;
3798 gcc_assert (op0_n_elts == n_elts);
3799 gcc_assert (op1_n_elts == n_elts);
3800 for (i = 0; i < n_elts; i++)
3802 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
3803 CONST_VECTOR_ELT (op0, i),
3804 CONST_VECTOR_ELT (op1, i));
3805 if (!x)
3806 return 0;
3807 RTVEC_ELT (v, i) = x;
3810 return gen_rtx_CONST_VECTOR (mode, v);
3813 if (VECTOR_MODE_P (mode)
3814 && code == VEC_CONCAT
3815 && (CONST_SCALAR_INT_P (op0)
3816 || GET_CODE (op0) == CONST_FIXED
3817 || CONST_DOUBLE_AS_FLOAT_P (op0))
3818 && (CONST_SCALAR_INT_P (op1)
3819 || CONST_DOUBLE_AS_FLOAT_P (op1)
3820 || GET_CODE (op1) == CONST_FIXED))
3822 unsigned n_elts = GET_MODE_NUNITS (mode);
3823 rtvec v = rtvec_alloc (n_elts);
3825 gcc_assert (n_elts >= 2);
3826 if (n_elts == 2)
3828 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
3829 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
3831 RTVEC_ELT (v, 0) = op0;
3832 RTVEC_ELT (v, 1) = op1;
3834 else
3836 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
3837 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
3838 unsigned i;
3840 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
3841 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
3842 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
3844 for (i = 0; i < op0_n_elts; ++i)
3845 RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
3846 for (i = 0; i < op1_n_elts; ++i)
3847 RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
3850 return gen_rtx_CONST_VECTOR (mode, v);
3853 if (SCALAR_FLOAT_MODE_P (mode)
3854 && CONST_DOUBLE_AS_FLOAT_P (op0)
3855 && CONST_DOUBLE_AS_FLOAT_P (op1)
3856 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
3858 if (code == AND
3859 || code == IOR
3860 || code == XOR)
3862 long tmp0[4];
3863 long tmp1[4];
3864 REAL_VALUE_TYPE r;
3865 int i;
3867 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
3868 GET_MODE (op0));
3869 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
3870 GET_MODE (op1));
3871 for (i = 0; i < 4; i++)
3873 switch (code)
3875 case AND:
3876 tmp0[i] &= tmp1[i];
3877 break;
3878 case IOR:
3879 tmp0[i] |= tmp1[i];
3880 break;
3881 case XOR:
3882 tmp0[i] ^= tmp1[i];
3883 break;
3884 default:
3885 gcc_unreachable ();
3888 real_from_target (&r, tmp0, mode);
3889 return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
3891 else
3893 REAL_VALUE_TYPE f0, f1, value, result;
3894 bool inexact;
3896 REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
3897 REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
3898 real_convert (&f0, mode, &f0);
3899 real_convert (&f1, mode, &f1);
3901 if (HONOR_SNANS (mode)
3902 && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
3903 return 0;
3905 if (code == DIV
3906 && REAL_VALUES_EQUAL (f1, dconst0)
3907 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
3908 return 0;
3910 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3911 && flag_trapping_math
3912 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
3914 int s0 = REAL_VALUE_NEGATIVE (f0);
3915 int s1 = REAL_VALUE_NEGATIVE (f1);
3917 switch (code)
3919 case PLUS:
3920 /* Inf + -Inf = NaN plus exception. */
3921 if (s0 != s1)
3922 return 0;
3923 break;
3924 case MINUS:
3925 /* Inf - Inf = NaN plus exception. */
3926 if (s0 == s1)
3927 return 0;
3928 break;
3929 case DIV:
3930 /* Inf / Inf = NaN plus exception. */
3931 return 0;
3932 default:
3933 break;
3937 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3938 && flag_trapping_math
3939 && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
3940 || (REAL_VALUE_ISINF (f1)
3941 && REAL_VALUES_EQUAL (f0, dconst0))))
3942 /* Inf * 0 = NaN plus exception. */
3943 return 0;
3945 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
3946 &f0, &f1);
3947 real_convert (&result, mode, &value);
3949 /* Don't constant fold this floating point operation if
3950 the result has overflowed and flag_trapping_math is set. */
3952 if (flag_trapping_math
3953 && MODE_HAS_INFINITIES (mode)
3954 && REAL_VALUE_ISINF (result)
3955 && !REAL_VALUE_ISINF (f0)
3956 && !REAL_VALUE_ISINF (f1))
3957 /* Overflow plus exception. */
3958 return 0;
3960 /* Don't constant fold this floating point operation if the
3961 result may depend upon the run-time rounding mode and
3962 flag_rounding_math is set, or if GCC's software emulation
3963 is unable to accurately represent the result. */
3965 if ((flag_rounding_math
3966 || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
3967 && (inexact || !real_identical (&result, &value)))
3968 return NULL_RTX;
3970 return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
3974 /* We can fold some multi-word operations. */
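/* A minimal illustration, assuming HOST_WIDE_INT is 32 bits wide:
   DImode then has exactly HOST_BITS_PER_DOUBLE_INT bits, so a DImode
   PLUS of two integer constants is folded right here with the
   double_int helpers instead of the single-word code further below.  */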
3975 if (GET_MODE_CLASS (mode) == MODE_INT
3976 && width == HOST_BITS_PER_DOUBLE_INT
3977 && (CONST_DOUBLE_AS_INT_P (op0) || CONST_INT_P (op0))
3978 && (CONST_DOUBLE_AS_INT_P (op1) || CONST_INT_P (op1)))
3980 double_int o0, o1, res, tmp;
3981 bool overflow;
3983 o0 = rtx_to_double_int (op0);
3984 o1 = rtx_to_double_int (op1);
3986 switch (code)
3988 case MINUS:
3989 /* A - B == A + (-B). */
3990 o1 = -o1;
3992 /* Fall through.... */
3994 case PLUS:
3995 res = o0 + o1;
3996 break;
3998 case MULT:
3999 res = o0 * o1;
4000 break;
4002 case DIV:
4003 res = o0.divmod_with_overflow (o1, false, TRUNC_DIV_EXPR,
4004 &tmp, &overflow);
4005 if (overflow)
4006 return 0;
4007 break;
4009 case MOD:
4010 tmp = o0.divmod_with_overflow (o1, false, TRUNC_DIV_EXPR,
4011 &res, &overflow);
4012 if (overflow)
4013 return 0;
4014 break;
4016 case UDIV:
4017 res = o0.divmod_with_overflow (o1, true, TRUNC_DIV_EXPR,
4018 &tmp, &overflow);
4019 if (overflow)
4020 return 0;
4021 break;
4023 case UMOD:
4024 tmp = o0.divmod_with_overflow (o1, true, TRUNC_DIV_EXPR,
4025 &res, &overflow);
4026 if (overflow)
4027 return 0;
4028 break;
4030 case AND:
4031 res = o0 & o1;
4032 break;
4034 case IOR:
4035 res = o0 | o1;
4036 break;
4038 case XOR:
4039 res = o0 ^ o1;
4040 break;
4042 case SMIN:
4043 res = o0.smin (o1);
4044 break;
4046 case SMAX:
4047 res = o0.smax (o1);
4048 break;
4050 case UMIN:
4051 res = o0.umin (o1);
4052 break;
4054 case UMAX:
4055 res = o0.umax (o1);
4056 break;
4058 case LSHIFTRT: case ASHIFTRT:
4059 case ASHIFT:
4060 case ROTATE: case ROTATERT:
4062 unsigned HOST_WIDE_INT cnt;
4064 if (SHIFT_COUNT_TRUNCATED)
4066 o1.high = 0;
4067 o1.low &= GET_MODE_PRECISION (mode) - 1;
4070 if (!o1.fits_uhwi ()
4071 || o1.to_uhwi () >= GET_MODE_PRECISION (mode))
4072 return 0;
4074 cnt = o1.to_uhwi ();
4075 unsigned short prec = GET_MODE_PRECISION (mode);
4077 if (code == LSHIFTRT || code == ASHIFTRT)
4078 res = o0.rshift (cnt, prec, code == ASHIFTRT);
4079 else if (code == ASHIFT)
4080 res = o0.alshift (cnt, prec);
4081 else if (code == ROTATE)
4082 res = o0.lrotate (cnt, prec);
4083 else /* code == ROTATERT */
4084 res = o0.rrotate (cnt, prec);
4086 break;
4088 default:
4089 return 0;
4092 return immed_double_int_const (res, mode);
4095 if (CONST_INT_P (op0) && CONST_INT_P (op1)
4096 && width <= HOST_BITS_PER_WIDE_INT && width != 0)
4098 /* Get the integer argument values in two forms:
4099 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
4101 arg0 = INTVAL (op0);
4102 arg1 = INTVAL (op1);
4104 if (width < HOST_BITS_PER_WIDE_INT)
4106 arg0 &= GET_MODE_MASK (mode);
4107 arg1 &= GET_MODE_MASK (mode);
4109 arg0s = arg0;
4110 if (val_signbit_known_set_p (mode, arg0s))
4111 arg0s |= ~GET_MODE_MASK (mode);
4113 arg1s = arg1;
4114 if (val_signbit_known_set_p (mode, arg1s))
4115 arg1s |= ~GET_MODE_MASK (mode);
4117 else
4119 arg0s = arg0;
4120 arg1s = arg1;
4123 /* Compute the value of the arithmetic. */
4125 switch (code)
4127 case PLUS:
4128 val = (unsigned HOST_WIDE_INT) arg0s + arg1s;
4129 break;
4131 case MINUS:
4132 val = (unsigned HOST_WIDE_INT) arg0s - arg1s;
4133 break;
4135 case MULT:
4136 val = (unsigned HOST_WIDE_INT) arg0s * arg1s;
4137 break;
4139 case DIV:
4140 if (arg1s == 0
4141 || ((unsigned HOST_WIDE_INT) arg0s
4142 == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
4143 && arg1s == -1))
4144 return 0;
4145 val = arg0s / arg1s;
4146 break;
4148 case MOD:
4149 if (arg1s == 0
4150 || ((unsigned HOST_WIDE_INT) arg0s
4151 == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
4152 && arg1s == -1))
4153 return 0;
4154 val = arg0s % arg1s;
4155 break;
4157 case UDIV:
4158 if (arg1 == 0
4159 || ((unsigned HOST_WIDE_INT) arg0s
4160 == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
4161 && arg1s == -1))
4162 return 0;
4163 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
4164 break;
4166 case UMOD:
4167 if (arg1 == 0
4168 || ((unsigned HOST_WIDE_INT) arg0s
4169 == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
4170 && arg1s == -1))
4171 return 0;
4172 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
4173 break;
4175 case AND:
4176 val = arg0 & arg1;
4177 break;
4179 case IOR:
4180 val = arg0 | arg1;
4181 break;
4183 case XOR:
4184 val = arg0 ^ arg1;
4185 break;
4187 case LSHIFTRT:
4188 case ASHIFT:
4189 case ASHIFTRT:
4190 /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure
4191 the value is in range. We can't return any old value for
4192 out-of-range arguments because either the middle-end (via
4193 shift_truncation_mask) or the back-end might be relying on
4194 target-specific knowledge. Nor can we rely on
4195 shift_truncation_mask, since the shift might not be part of an
4196 ashlM3, lshrM3 or ashrM3 instruction. */
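/* Illustrative case: folding (ashift:SI (const_int 1) (const_int 33))
   uses 33 % 32 == 1 as the shift count when SHIFT_COUNT_TRUNCATED is
   nonzero, and refuses to fold (returns 0) otherwise.  */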
4197 if (SHIFT_COUNT_TRUNCATED)
4198 arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
4199 else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
4200 return 0;
4202 val = (code == ASHIFT
4203 ? ((unsigned HOST_WIDE_INT) arg0) << arg1
4204 : ((unsigned HOST_WIDE_INT) arg0) >> arg1);
4206 /* Sign-extend the result for arithmetic right shifts. */
4207 if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
4208 val |= HOST_WIDE_INT_M1U << (width - arg1);
4209 break;
4211 case ROTATERT:
4212 if (arg1 < 0)
4213 return 0;
4215 arg1 %= width;
4216 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
4217 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
4218 break;
4220 case ROTATE:
4221 if (arg1 < 0)
4222 return 0;
4224 arg1 %= width;
4225 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
4226 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
4227 break;
4229 case COMPARE:
4230 /* Do nothing here. */
4231 return 0;
4233 case SMIN:
4234 val = arg0s <= arg1s ? arg0s : arg1s;
4235 break;
4237 case UMIN:
4238 val = ((unsigned HOST_WIDE_INT) arg0
4239 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
4240 break;
4242 case SMAX:
4243 val = arg0s > arg1s ? arg0s : arg1s;
4244 break;
4246 case UMAX:
4247 val = ((unsigned HOST_WIDE_INT) arg0
4248 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
4249 break;
4251 case SS_PLUS:
4252 case US_PLUS:
4253 case SS_MINUS:
4254 case US_MINUS:
4255 case SS_MULT:
4256 case US_MULT:
4257 case SS_DIV:
4258 case US_DIV:
4259 case SS_ASHIFT:
4260 case US_ASHIFT:
4261 /* ??? There are simplifications that can be done. */
4262 return 0;
4264 default:
4265 gcc_unreachable ();
4268 return gen_int_mode (val, mode);
4271 return NULL_RTX;
4276 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
4277 PLUS or MINUS.
4279 Rather than test for specific cases, we do this by a brute-force method
4280 and do all possible simplifications until no more changes occur. Then
4281 we rebuild the operation. */
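/* A worked example (A and B are hypothetical pseudo registers):
   simplifying (minus (plus A B) A) expands the operands into the
   multiset { A, B, -A }; the pairwise pass below cancels A against -A,
   and rebuilding what is left yields just B.  */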
4283 struct simplify_plus_minus_op_data
4285 rtx op;
4286 short neg;
4289 static bool
4290 simplify_plus_minus_op_data_cmp (rtx x, rtx y)
4292 int result;
4294 result = (commutative_operand_precedence (y)
4295 - commutative_operand_precedence (x));
4296 if (result)
4297 return result > 0;
4299 /* Group together equal REGs to do more simplification. */
4300 if (REG_P (x) && REG_P (y))
4301 return REGNO (x) > REGNO (y);
4302 else
4303 return false;
4306 static rtx
4307 simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
4308 rtx op1)
4310 struct simplify_plus_minus_op_data ops[8];
4311 rtx result, tem;
4312 int n_ops = 2, input_ops = 2;
4313 int changed, n_constants = 0, canonicalized = 0;
4314 int i, j;
4316 memset (ops, 0, sizeof ops);
4318 /* Set up the two operands and then expand them until nothing has been
4319 changed. If we run out of room in our array, give up; this should
4320 almost never happen. */
4322 ops[0].op = op0;
4323 ops[0].neg = 0;
4324 ops[1].op = op1;
4325 ops[1].neg = (code == MINUS);
4329 changed = 0;
4331 for (i = 0; i < n_ops; i++)
4333 rtx this_op = ops[i].op;
4334 int this_neg = ops[i].neg;
4335 enum rtx_code this_code = GET_CODE (this_op);
4337 switch (this_code)
4339 case PLUS:
4340 case MINUS:
4341 if (n_ops == 7)
4342 return NULL_RTX;
4344 ops[n_ops].op = XEXP (this_op, 1);
4345 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
4346 n_ops++;
4348 ops[i].op = XEXP (this_op, 0);
4349 input_ops++;
4350 changed = 1;
4351 canonicalized |= this_neg;
4352 break;
4354 case NEG:
4355 ops[i].op = XEXP (this_op, 0);
4356 ops[i].neg = ! this_neg;
4357 changed = 1;
4358 canonicalized = 1;
4359 break;
4361 case CONST:
4362 if (n_ops < 7
4363 && GET_CODE (XEXP (this_op, 0)) == PLUS
4364 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
4365 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
4367 ops[i].op = XEXP (XEXP (this_op, 0), 0);
4368 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
4369 ops[n_ops].neg = this_neg;
4370 n_ops++;
4371 changed = 1;
4372 canonicalized = 1;
4374 break;
4376 case NOT:
4377 /* ~a -> (-a - 1) */
4378 if (n_ops != 7)
4380 ops[n_ops].op = CONSTM1_RTX (mode);
4381 ops[n_ops++].neg = this_neg;
4382 ops[i].op = XEXP (this_op, 0);
4383 ops[i].neg = !this_neg;
4384 changed = 1;
4385 canonicalized = 1;
4387 break;
4389 case CONST_INT:
4390 n_constants++;
4391 if (this_neg)
4393 ops[i].op = neg_const_int (mode, this_op);
4394 ops[i].neg = 0;
4395 changed = 1;
4396 canonicalized = 1;
4398 break;
4400 default:
4401 break;
4405 while (changed);
4407 if (n_constants > 1)
4408 canonicalized = 1;
4410 gcc_assert (n_ops >= 2);
4412 /* If we only have two operands, we can avoid the loops. */
4413 if (n_ops == 2)
4415 enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
4416 rtx lhs, rhs;
4418 /* Get the two operands. Be careful with the order, especially for
4419 the cases where code == MINUS. */
4420 if (ops[0].neg && ops[1].neg)
4422 lhs = gen_rtx_NEG (mode, ops[0].op);
4423 rhs = ops[1].op;
4425 else if (ops[0].neg)
4427 lhs = ops[1].op;
4428 rhs = ops[0].op;
4430 else
4432 lhs = ops[0].op;
4433 rhs = ops[1].op;
4436 return simplify_const_binary_operation (code, mode, lhs, rhs);
4439 /* Now simplify each pair of operands until nothing changes. */
4442 /* Insertion sort is good enough for an eight-element array. */
4443 for (i = 1; i < n_ops; i++)
4445 struct simplify_plus_minus_op_data save;
4446 j = i - 1;
4447 if (!simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op))
4448 continue;
4450 canonicalized = 1;
4451 save = ops[i];
4453 ops[j + 1] = ops[j];
4454 while (j-- && simplify_plus_minus_op_data_cmp (ops[j].op, save.op));
4455 ops[j + 1] = save;
4458 changed = 0;
4459 for (i = n_ops - 1; i > 0; i--)
4460 for (j = i - 1; j >= 0; j--)
4462 rtx lhs = ops[j].op, rhs = ops[i].op;
4463 int lneg = ops[j].neg, rneg = ops[i].neg;
4465 if (lhs != 0 && rhs != 0)
4467 enum rtx_code ncode = PLUS;
4469 if (lneg != rneg)
4471 ncode = MINUS;
4472 if (lneg)
4473 tem = lhs, lhs = rhs, rhs = tem;
4475 else if (swap_commutative_operands_p (lhs, rhs))
4476 tem = lhs, lhs = rhs, rhs = tem;
4478 if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
4479 && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
4481 rtx tem_lhs, tem_rhs;
4483 tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
4484 tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
4485 tem = simplify_binary_operation (ncode, mode, tem_lhs, tem_rhs);
4487 if (tem && !CONSTANT_P (tem))
4488 tem = gen_rtx_CONST (GET_MODE (tem), tem);
4490 else
4491 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
4493 /* Reject "simplifications" that just wrap the two
4494 arguments in a CONST. Failure to do so can result
4495 in infinite recursion with simplify_binary_operation
4496 when it calls us to simplify CONST operations. */
4497 if (tem
4498 && ! (GET_CODE (tem) == CONST
4499 && GET_CODE (XEXP (tem, 0)) == ncode
4500 && XEXP (XEXP (tem, 0), 0) == lhs
4501 && XEXP (XEXP (tem, 0), 1) == rhs))
4503 lneg &= rneg;
4504 if (GET_CODE (tem) == NEG)
4505 tem = XEXP (tem, 0), lneg = !lneg;
4506 if (CONST_INT_P (tem) && lneg)
4507 tem = neg_const_int (mode, tem), lneg = 0;
4509 ops[i].op = tem;
4510 ops[i].neg = lneg;
4511 ops[j].op = NULL_RTX;
4512 changed = 1;
4513 canonicalized = 1;
4518 /* If nothing changed, fail. */
4519 if (!canonicalized)
4520 return NULL_RTX;
4522 /* Pack all the operands to the lower-numbered entries. */
4523 for (i = 0, j = 0; j < n_ops; j++)
4524 if (ops[j].op)
4526 ops[i] = ops[j];
4527 i++;
4529 n_ops = i;
4531 while (changed);
4533 /* Create (minus -C X) instead of (neg (const (plus X C))). */
4534 if (n_ops == 2
4535 && CONST_INT_P (ops[1].op)
4536 && CONSTANT_P (ops[0].op)
4537 && ops[0].neg)
4538 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
4540 /* We suppressed creation of trivial CONST expressions in the
4541 combination loop to avoid recursion. Create one manually now.
4542 The combination loop should have ensured that there is exactly
4543 one CONST_INT, and the sort will have ensured that it is last
4544 in the array and that any other constant will be next-to-last. */
4546 if (n_ops > 1
4547 && CONST_INT_P (ops[n_ops - 1].op)
4548 && CONSTANT_P (ops[n_ops - 2].op))
4550 rtx value = ops[n_ops - 1].op;
4551 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
4552 value = neg_const_int (mode, value);
4553 ops[n_ops - 2].op = plus_constant (mode, ops[n_ops - 2].op,
4554 INTVAL (value));
4555 n_ops--;
4558 /* Put a non-negated operand first, if possible. */
4560 for (i = 0; i < n_ops && ops[i].neg; i++)
4561 continue;
4562 if (i == n_ops)
4563 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
4564 else if (i != 0)
4566 tem = ops[0].op;
4567 ops[0] = ops[i];
4568 ops[i].op = tem;
4569 ops[i].neg = 1;
4572 /* Now make the result by performing the requested operations. */
4573 result = ops[0].op;
4574 for (i = 1; i < n_ops; i++)
4575 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
4576 mode, result, ops[i].op);
4578 return result;
4581 /* Check whether an operand is suitable for calling simplify_plus_minus. */
4582 static bool
4583 plus_minus_operand_p (const_rtx x)
4585 return GET_CODE (x) == PLUS
4586 || GET_CODE (x) == MINUS
4587 || (GET_CODE (x) == CONST
4588 && GET_CODE (XEXP (x, 0)) == PLUS
4589 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
4590 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
4593 /* Like simplify_binary_operation except used for relational operators.
4594 MODE is the mode of the result. If MODE is VOIDmode, the operands must
4595 not both be VOIDmode as well.
4597 CMP_MODE specifies the mode in which the comparison is done, so it is
4598 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
4599 the operands or, if both are VOIDmode, the operands are compared in
4600 "infinite precision". */
rtx
4602 simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
4603 enum machine_mode cmp_mode, rtx op0, rtx op1)
4605 rtx tem, trueop0, trueop1;
4607 if (cmp_mode == VOIDmode)
4608 cmp_mode = GET_MODE (op0);
4609 if (cmp_mode == VOIDmode)
4610 cmp_mode = GET_MODE (op1);
4612 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
4613 if (tem)
4615 if (SCALAR_FLOAT_MODE_P (mode))
4617 if (tem == const0_rtx)
4618 return CONST0_RTX (mode);
4619 #ifdef FLOAT_STORE_FLAG_VALUE
4621 REAL_VALUE_TYPE val;
4622 val = FLOAT_STORE_FLAG_VALUE (mode);
4623 return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
4625 #else
4626 return NULL_RTX;
4627 #endif
4629 if (VECTOR_MODE_P (mode))
4631 if (tem == const0_rtx)
4632 return CONST0_RTX (mode);
4633 #ifdef VECTOR_STORE_FLAG_VALUE
4635 int i, units;
4636 rtvec v;
4638 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
4639 if (val == NULL_RTX)
4640 return NULL_RTX;
4641 if (val == const1_rtx)
4642 return CONST1_RTX (mode);
4644 units = GET_MODE_NUNITS (mode);
4645 v = rtvec_alloc (units);
4646 for (i = 0; i < units; i++)
4647 RTVEC_ELT (v, i) = val;
4648 return gen_rtx_raw_CONST_VECTOR (mode, v);
4650 #else
4651 return NULL_RTX;
4652 #endif
4655 return tem;
4658 /* For the following tests, ensure const0_rtx is op1. */
4659 if (swap_commutative_operands_p (op0, op1)
4660 || (op0 == const0_rtx && op1 != const0_rtx))
4661 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
4663 /* If op0 is a compare, extract the comparison arguments from it. */
4664 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4665 return simplify_gen_relational (code, mode, VOIDmode,
4666 XEXP (op0, 0), XEXP (op0, 1));
4668 if (GET_MODE_CLASS (cmp_mode) == MODE_CC
4669 || CC0_P (op0))
4670 return NULL_RTX;
4672 trueop0 = avoid_constant_pool_reference (op0);
4673 trueop1 = avoid_constant_pool_reference (op1);
4674 return simplify_relational_operation_1 (code, mode, cmp_mode,
4675 trueop0, trueop1);
4678 /* This part of simplify_relational_operation is only used when CMP_MODE
4679 is not in class MODE_CC (i.e. it is a real comparison).
4681 MODE is the mode of the result, while CMP_MODE specifies the mode in
4682 which the comparison is done, so it is the mode of the operands. */
4684 static rtx
4685 simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
4686 enum machine_mode cmp_mode, rtx op0, rtx op1)
4688 enum rtx_code op0code = GET_CODE (op0);
4690 if (op1 == const0_rtx && COMPARISON_P (op0))
4692 /* If op0 is a comparison, extract the comparison arguments
4693 from it. */
4694 if (code == NE)
4696 if (GET_MODE (op0) == mode)
4697 return simplify_rtx (op0);
4698 else
4699 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
4700 XEXP (op0, 0), XEXP (op0, 1));
4702 else if (code == EQ)
4704 enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
4705 if (new_code != UNKNOWN)
4706 return simplify_gen_relational (new_code, mode, VOIDmode,
4707 XEXP (op0, 0), XEXP (op0, 1));
4711 /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
4712 (GEU/LTU a -C). Likewise for (LTU/GEU (PLUS a C) a). */
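/* For instance (illustrative constant): (ltu (plus X (const_int 5))
   (const_int 5)), the usual test for unsigned overflow of X + 5,
   becomes (geu X (const_int -5)).  */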
4713 if ((code == LTU || code == GEU)
4714 && GET_CODE (op0) == PLUS
4715 && CONST_INT_P (XEXP (op0, 1))
4716 && (rtx_equal_p (op1, XEXP (op0, 0))
4717 || rtx_equal_p (op1, XEXP (op0, 1)))
4718 /* (LTU/GEU (PLUS a 0) 0) is not the same as (GEU/LTU a 0). */
4719 && XEXP (op0, 1) != const0_rtx)
4721 rtx new_cmp
4722 = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
4723 return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
4724 cmp_mode, XEXP (op0, 0), new_cmp);
4727 /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a). */
4728 if ((code == LTU || code == GEU)
4729 && GET_CODE (op0) == PLUS
4730 && rtx_equal_p (op1, XEXP (op0, 1))
4731 /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b). */
4732 && !rtx_equal_p (op1, XEXP (op0, 0)))
4733 return simplify_gen_relational (code, mode, cmp_mode, op0,
4734 copy_rtx (XEXP (op0, 0)));
4736 if (op1 == const0_rtx)
4738 /* Canonicalize (GTU x 0) as (NE x 0). */
4739 if (code == GTU)
4740 return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
4741 /* Canonicalize (LEU x 0) as (EQ x 0). */
4742 if (code == LEU)
4743 return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
4745 else if (op1 == const1_rtx)
4747 switch (code)
4749 case GE:
4750 /* Canonicalize (GE x 1) as (GT x 0). */
4751 return simplify_gen_relational (GT, mode, cmp_mode,
4752 op0, const0_rtx);
4753 case GEU:
4754 /* Canonicalize (GEU x 1) as (NE x 0). */
4755 return simplify_gen_relational (NE, mode, cmp_mode,
4756 op0, const0_rtx);
4757 case LT:
4758 /* Canonicalize (LT x 1) as (LE x 0). */
4759 return simplify_gen_relational (LE, mode, cmp_mode,
4760 op0, const0_rtx);
4761 case LTU:
4762 /* Canonicalize (LTU x 1) as (EQ x 0). */
4763 return simplify_gen_relational (EQ, mode, cmp_mode,
4764 op0, const0_rtx);
4765 default:
4766 break;
4769 else if (op1 == constm1_rtx)
4771 /* Canonicalize (LE x -1) as (LT x 0). */
4772 if (code == LE)
4773 return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
4774 /* Canonicalize (GT x -1) as (GE x 0). */
4775 if (code == GT)
4776 return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
4779 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
4780 if ((code == EQ || code == NE)
4781 && (op0code == PLUS || op0code == MINUS)
4782 && CONSTANT_P (op1)
4783 && CONSTANT_P (XEXP (op0, 1))
4784 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
4786 rtx x = XEXP (op0, 0);
4787 rtx c = XEXP (op0, 1);
4788 enum rtx_code invcode = op0code == PLUS ? MINUS : PLUS;
4789 rtx tem = simplify_gen_binary (invcode, cmp_mode, op1, c);
4791 /* Detect an infinitely recursive condition, where this simplification
4792 would oscillate between:
4793 A + B == C <---> C - B == A,
4794 where A, B, and C are all constants with non-simplifiable expressions,
4795 usually SYMBOL_REFs. */
4796 if (GET_CODE (tem) == invcode
4797 && CONSTANT_P (x)
4798 && rtx_equal_p (c, XEXP (tem, 1)))
4799 return NULL_RTX;
4801 return simplify_gen_relational (code, mode, cmp_mode, x, tem);
4804 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0)) is
4805 the same as (zero_extract:SI FOO (const_int 1) BAR). */
4806 if (code == NE
4807 && op1 == const0_rtx
4808 && GET_MODE_CLASS (mode) == MODE_INT
4809 && cmp_mode != VOIDmode
4810 /* ??? Work-around BImode bugs in the ia64 backend. */
4811 && mode != BImode
4812 && cmp_mode != BImode
4813 && nonzero_bits (op0, cmp_mode) == 1
4814 && STORE_FLAG_VALUE == 1)
4815 return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
4816 ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
4817 : lowpart_subreg (mode, op0, cmp_mode);
4819 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
4820 if ((code == EQ || code == NE)
4821 && op1 == const0_rtx
4822 && op0code == XOR)
4823 return simplify_gen_relational (code, mode, cmp_mode,
4824 XEXP (op0, 0), XEXP (op0, 1));
4826 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
4827 if ((code == EQ || code == NE)
4828 && op0code == XOR
4829 && rtx_equal_p (XEXP (op0, 0), op1)
4830 && !side_effects_p (XEXP (op0, 0)))
4831 return simplify_gen_relational (code, mode, cmp_mode,
4832 XEXP (op0, 1), const0_rtx);
4834 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
4835 if ((code == EQ || code == NE)
4836 && op0code == XOR
4837 && rtx_equal_p (XEXP (op0, 1), op1)
4838 && !side_effects_p (XEXP (op0, 1)))
4839 return simplify_gen_relational (code, mode, cmp_mode,
4840 XEXP (op0, 0), const0_rtx);
4842 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
4843 if ((code == EQ || code == NE)
4844 && op0code == XOR
4845 && CONST_SCALAR_INT_P (op1)
4846 && CONST_SCALAR_INT_P (XEXP (op0, 1)))
4847 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4848 simplify_gen_binary (XOR, cmp_mode,
4849 XEXP (op0, 1), op1));
4851 /* (eq/ne (bswap x) C1) simplifies to (eq/ne x C2) with C2 swapped. */
4852 if ((code == EQ || code == NE)
4853 && GET_CODE (op0) == BSWAP
4854 && CONST_SCALAR_INT_P (op1))
4855 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4856 simplify_gen_unary (BSWAP, cmp_mode,
4857 op1, cmp_mode));
4859 /* (eq/ne (bswap x) (bswap y)) simplifies to (eq/ne x y). */
4860 if ((code == EQ || code == NE)
4861 && GET_CODE (op0) == BSWAP
4862 && GET_CODE (op1) == BSWAP)
4863 return simplify_gen_relational (code, mode, cmp_mode,
4864 XEXP (op0, 0), XEXP (op1, 0));
4866 if (op0code == POPCOUNT && op1 == const0_rtx)
4867 switch (code)
4869 case EQ:
4870 case LE:
4871 case LEU:
4872 /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */
4873 return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
4874 XEXP (op0, 0), const0_rtx);
4876 case NE:
4877 case GT:
4878 case GTU:
4879 /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */
4880 return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
4881 XEXP (op0, 0), const0_rtx);
4883 default:
4884 break;
4887 return NULL_RTX;
4890 enum
4892 CMP_EQ = 1,
4893 CMP_LT = 2,
4894 CMP_GT = 4,
4895 CMP_LTU = 8,
4896 CMP_GTU = 16
4900 /* Convert the known results for EQ, LT, GT, LTU, GTU contained in
4901 KNOWN_RESULT to a CONST_INT, based on the requested comparison CODE.
4902 For KNOWN_RESULT to make sense it should be either CMP_EQ, or the
4903 logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
4904 For floating-point comparisons, assume that the operands were ordered. */
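/* For example, if KNOWN_RESULT is just CMP_EQ (the operands are known
   to compare equal), then EQ, GE, LE, GEU and LEU all yield
   const_true_rtx below, while NE, GT, LT, GTU and LTU yield
   const0_rtx.  */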
4906 static rtx
4907 comparison_result (enum rtx_code code, int known_results)
4909 switch (code)
4911 case EQ:
4912 case UNEQ:
4913 return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
4914 case NE:
4915 case LTGT:
4916 return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;
4918 case LT:
4919 case UNLT:
4920 return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
4921 case GE:
4922 case UNGE:
4923 return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;
4925 case GT:
4926 case UNGT:
4927 return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
4928 case LE:
4929 case UNLE:
4930 return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;
4932 case LTU:
4933 return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
4934 case GEU:
4935 return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;
4937 case GTU:
4938 return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
4939 case LEU:
4940 return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;
4942 case ORDERED:
4943 return const_true_rtx;
4944 case UNORDERED:
4945 return const0_rtx;
4946 default:
4947 gcc_unreachable ();
4951 /* Check if the given comparison (done in the given MODE) is actually a
4952 tautology or a contradiction.
4953 If no simplification is possible, this function returns zero.
4954 Otherwise, it returns either const_true_rtx or const0_rtx. */
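/* E.g. (gtu (const_int 7) (const_int 3)) compared in SImode folds to
   const_true_rtx through the integer path below, whereas a comparison
   of two distinct pseudo registers generally cannot be decided at
   compile time and the function returns 0.  (Illustrative operands,
   not from the original source.)  */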
rtx
4957 simplify_const_relational_operation (enum rtx_code code,
4958 enum machine_mode mode,
4959 rtx op0, rtx op1)
4961 rtx tem;
4962 rtx trueop0;
4963 rtx trueop1;
4965 gcc_assert (mode != VOIDmode
4966 || (GET_MODE (op0) == VOIDmode
4967 && GET_MODE (op1) == VOIDmode));
4969 /* If op0 is a compare, extract the comparison arguments from it. */
4970 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4972 op1 = XEXP (op0, 1);
4973 op0 = XEXP (op0, 0);
4975 if (GET_MODE (op0) != VOIDmode)
4976 mode = GET_MODE (op0);
4977 else if (GET_MODE (op1) != VOIDmode)
4978 mode = GET_MODE (op1);
4979 else
4980 return 0;
4983 /* We can't simplify MODE_CC values since we don't know what the
4984 actual comparison is. */
4985 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
4986 return 0;
4988 /* Make sure the constant is second. */
4989 if (swap_commutative_operands_p (op0, op1))
4991 tem = op0, op0 = op1, op1 = tem;
4992 code = swap_condition (code);
4995 trueop0 = avoid_constant_pool_reference (op0);
4996 trueop1 = avoid_constant_pool_reference (op1);
4998 /* For integer comparisons of A and B maybe we can simplify A - B and can
4999 then simplify a comparison of that with zero. If A and B are both either
5000 a register or a CONST_INT, this can't help; testing for these cases will
5001 prevent infinite recursion here and speed things up.
5003 We can only do this for EQ and NE comparisons as otherwise we may
5004 lose or introduce overflow, which we cannot disregard as undefined
5005 because we do not know the signedness of the operation on either the
5006 left or the right-hand side of the comparison. */
5008 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
5009 && (code == EQ || code == NE)
5010 && ! ((REG_P (op0) || CONST_INT_P (trueop0))
5011 && (REG_P (op1) || CONST_INT_P (trueop1)))
5012 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
5013 /* We cannot do this if tem is a nonzero address. */
5014 && ! nonzero_address_p (tem))
5015 return simplify_const_relational_operation (signed_condition (code),
5016 mode, tem, const0_rtx);
5018 if (! HONOR_NANS (mode) && code == ORDERED)
5019 return const_true_rtx;
5021 if (! HONOR_NANS (mode) && code == UNORDERED)
5022 return const0_rtx;
5024 /* For modes without NaNs, if the two operands are equal, we know the
5025 result except if they have side-effects. Even with NaNs we know
5026 the result of unordered comparisons and, if signaling NaNs are
5027 irrelevant, also the result of LT/GT/LTGT. */
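/* So, for instance, (eq X X) for a pseudo register X folds to
   const_true_rtx and (ne X X) to const0_rtx, provided X has no side
   effects and the mode does not honor NaNs.  */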
5028 if ((! HONOR_NANS (GET_MODE (trueop0))
5029 || code == UNEQ || code == UNLE || code == UNGE
5030 || ((code == LT || code == GT || code == LTGT)
5031 && ! HONOR_SNANS (GET_MODE (trueop0))))
5032 && rtx_equal_p (trueop0, trueop1)
5033 && ! side_effects_p (trueop0))
5034 return comparison_result (code, CMP_EQ);
5036 /* If the operands are floating-point constants, see if we can fold
5037 the result. */
5038 if (CONST_DOUBLE_AS_FLOAT_P (trueop0)
5039 && CONST_DOUBLE_AS_FLOAT_P (trueop1)
5040 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
5042 REAL_VALUE_TYPE d0, d1;
5044 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
5045 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
5047 /* Comparisons are unordered iff at least one of the values is NaN. */
5048 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
5049 switch (code)
5051 case UNEQ:
5052 case UNLT:
5053 case UNGT:
5054 case UNLE:
5055 case UNGE:
5056 case NE:
5057 case UNORDERED:
5058 return const_true_rtx;
5059 case EQ:
5060 case LT:
5061 case GT:
5062 case LE:
5063 case GE:
5064 case LTGT:
5065 case ORDERED:
5066 return const0_rtx;
5067 default:
5068 return 0;
5071 return comparison_result (code,
5072 (REAL_VALUES_EQUAL (d0, d1) ? CMP_EQ :
5073 REAL_VALUES_LESS (d0, d1) ? CMP_LT : CMP_GT));
5076 /* Otherwise, see if the operands are both integers. */
5077 if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
5078 && (CONST_DOUBLE_AS_INT_P (trueop0) || CONST_INT_P (trueop0))
5079 && (CONST_DOUBLE_AS_INT_P (trueop1) || CONST_INT_P (trueop1)))
5081 int width = GET_MODE_PRECISION (mode);
5082 HOST_WIDE_INT l0s, h0s, l1s, h1s;
5083 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
5085 /* Get the two words comprising each integer constant. */
5086 if (CONST_DOUBLE_AS_INT_P (trueop0))
5088 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
5089 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
5091 else
5093 l0u = l0s = INTVAL (trueop0);
5094 h0u = h0s = HWI_SIGN_EXTEND (l0s);
5097 if (CONST_DOUBLE_AS_INT_P (trueop1))
5099 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
5100 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
5102 else
5104 l1u = l1s = INTVAL (trueop1);
5105 h1u = h1s = HWI_SIGN_EXTEND (l1s);
5108 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
5109 we have to sign or zero-extend the values. */
5110 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
5112 l0u &= GET_MODE_MASK (mode);
5113 l1u &= GET_MODE_MASK (mode);
5115 if (val_signbit_known_set_p (mode, l0s))
5116 l0s |= ~GET_MODE_MASK (mode);
5118 if (val_signbit_known_set_p (mode, l1s))
5119 l1s |= ~GET_MODE_MASK (mode);
5121 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
5122 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
5124 if (h0u == h1u && l0u == l1u)
5125 return comparison_result (code, CMP_EQ);
5126 else
5128 int cr;
5129 cr = (h0s < h1s || (h0s == h1s && l0u < l1u)) ? CMP_LT : CMP_GT;
5130 cr |= (h0u < h1u || (h0u == h1u && l0u < l1u)) ? CMP_LTU : CMP_GTU;
5131 return comparison_result (code, cr);
5135 /* Optimize comparisons with upper and lower bounds. */
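/* Illustrative case: if op0 is (and:SI X (const_int 15)), nonzero_bits
   gives 15, so mmin = 0 and mmax = 15; then (gtu op0 (const_int 15))
   folds to const0_rtx and (leu op0 (const_int 15)) to const_true_rtx.  */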
5136 if (HWI_COMPUTABLE_MODE_P (mode)
5137 && CONST_INT_P (trueop1))
5139 int sign;
5140 unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, mode);
5141 HOST_WIDE_INT val = INTVAL (trueop1);
5142 HOST_WIDE_INT mmin, mmax;
5144 if (code == GEU
5145 || code == LEU
5146 || code == GTU
5147 || code == LTU)
5148 sign = 0;
5149 else
5150 sign = 1;
5152 /* Get a reduced range if the sign bit is zero. */
5153 if (nonzero <= (GET_MODE_MASK (mode) >> 1))
5155 mmin = 0;
5156 mmax = nonzero;
5158 else
5160 rtx mmin_rtx, mmax_rtx;
5161 get_mode_bounds (mode, sign, mode, &mmin_rtx, &mmax_rtx);
5163 mmin = INTVAL (mmin_rtx);
5164 mmax = INTVAL (mmax_rtx);
5165 if (sign)
5167 unsigned int sign_copies = num_sign_bit_copies (trueop0, mode);
5169 mmin >>= (sign_copies - 1);
5170 mmax >>= (sign_copies - 1);
5174 switch (code)
5176 /* x >= y is always true for y <= mmin, always false for y > mmax. */
5177 case GEU:
5178 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
5179 return const_true_rtx;
5180 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
5181 return const0_rtx;
5182 break;
5183 case GE:
5184 if (val <= mmin)
5185 return const_true_rtx;
5186 if (val > mmax)
5187 return const0_rtx;
5188 break;
5190 /* x <= y is always true for y >= mmax, always false for y < mmin. */
5191 case LEU:
5192 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
5193 return const_true_rtx;
5194 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
5195 return const0_rtx;
5196 break;
5197 case LE:
5198 if (val >= mmax)
5199 return const_true_rtx;
5200 if (val < mmin)
5201 return const0_rtx;
5202 break;
5204 case EQ:
5205 /* x == y is always false for y out of range. */
5206 if (val < mmin || val > mmax)
5207 return const0_rtx;
5208 break;
5210 /* x > y is always false for y >= mmax, always true for y < mmin. */
5211 case GTU:
5212 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
5213 return const0_rtx;
5214 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
5215 return const_true_rtx;
5216 break;
5217 case GT:
5218 if (val >= mmax)
5219 return const0_rtx;
5220 if (val < mmin)
5221 return const_true_rtx;
5222 break;
5224 /* x < y is always false for y <= mmin, always true for y > mmax. */
5225 case LTU:
5226 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
5227 return const0_rtx;
5228 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
5229 return const_true_rtx;
5230 break;
5231 case LT:
5232 if (val <= mmin)
5233 return const0_rtx;
5234 if (val > mmax)
5235 return const_true_rtx;
5236 break;
5238 case NE:
5239 /* x != y is always true for y out of range. */
5240 if (val < mmin || val > mmax)
5241 return const_true_rtx;
5242 break;
5244 default:
5245 break;
5249 /* Optimize integer comparisons with zero. */
5250 if (trueop1 == const0_rtx)
5252 /* Some addresses are known to be nonzero. We don't know
5253 their sign, but equality comparisons are known. */
5254 if (nonzero_address_p (trueop0))
5256 if (code == EQ || code == LEU)
5257 return const0_rtx;
5258 if (code == NE || code == GTU)
5259 return const_true_rtx;
5262 /* See if the first operand is an IOR with a constant. If so, we
5263 may be able to determine the result of this comparison. */
5264 if (GET_CODE (op0) == IOR)
5266 rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
5267 if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
5269 int sign_bitnum = GET_MODE_PRECISION (mode) - 1;
5270 int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
5271 && (UINTVAL (inner_const)
5272 & ((unsigned HOST_WIDE_INT) 1
5273 << sign_bitnum)));
5275 switch (code)
5277 case EQ:
5278 case LEU:
5279 return const0_rtx;
5280 case NE:
5281 case GTU:
5282 return const_true_rtx;
5283 case LT:
5284 case LE:
5285 if (has_sign)
5286 return const_true_rtx;
5287 break;
5288 case GT:
5289 case GE:
5290 if (has_sign)
5291 return const0_rtx;
5292 break;
5293 default:
5294 break;
5300 /* Optimize comparison of ABS with zero. */
5301 if (trueop1 == CONST0_RTX (mode)
5302 && (GET_CODE (trueop0) == ABS
5303 || (GET_CODE (trueop0) == FLOAT_EXTEND
5304 && GET_CODE (XEXP (trueop0, 0)) == ABS)))
5306 switch (code)
5308 case LT:
5309 /* Optimize abs(x) < 0.0. */
5310 if (!HONOR_SNANS (mode)
5311 && (!INTEGRAL_MODE_P (mode)
5312 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
5314 if (INTEGRAL_MODE_P (mode)
5315 && (issue_strict_overflow_warning
5316 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
5317 warning (OPT_Wstrict_overflow,
5318 ("assuming signed overflow does not occur when "
5319 "assuming abs (x) < 0 is false"));
5320 return const0_rtx;
5322 break;
5324 case GE:
5325 /* Optimize abs(x) >= 0.0. */
5326 if (!HONOR_NANS (mode)
5327 && (!INTEGRAL_MODE_P (mode)
5328 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
5330 if (INTEGRAL_MODE_P (mode)
5331 && (issue_strict_overflow_warning
5332 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
5333 warning (OPT_Wstrict_overflow,
5334 ("assuming signed overflow does not occur when "
5335 "assuming abs (x) >= 0 is true"));
5336 return const_true_rtx;
5338 break;
5340 case UNGE:
5341 /* Optimize ! (abs(x) < 0.0). */
5342 return const_true_rtx;
5344 default:
5345 break;
5349 return 0;
5352 /* Simplify CODE, an operation with result mode MODE and three operands,
5353 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
5354 a constant. Return 0 if no simplification is possible. */
rtx
5357 simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
5358 enum machine_mode op0_mode, rtx op0, rtx op1,
5359 rtx op2)
5361 unsigned int width = GET_MODE_PRECISION (mode);
5362 bool any_change = false;
5363 rtx tem, trueop2;
5365 /* VOIDmode means "infinite" precision. */
5366 if (width == 0)
5367 width = HOST_BITS_PER_WIDE_INT;
5369 switch (code)
5371 case FMA:
5372 /* Simplify negations around the multiplication. */
5373 /* -a * -b + c => a * b + c. */
5374 if (GET_CODE (op0) == NEG)
5376 tem = simplify_unary_operation (NEG, mode, op1, mode);
5377 if (tem)
5378 op1 = tem, op0 = XEXP (op0, 0), any_change = true;
5380 else if (GET_CODE (op1) == NEG)
5382 tem = simplify_unary_operation (NEG, mode, op0, mode);
5383 if (tem)
5384 op0 = tem, op1 = XEXP (op1, 0), any_change = true;
5387 /* Canonicalize the two multiplication operands. */
5388 /* a * -b + c => -b * a + c. */
5389 if (swap_commutative_operands_p (op0, op1))
5390 tem = op0, op0 = op1, op1 = tem, any_change = true;
5392 if (any_change)
5393 return gen_rtx_FMA (mode, op0, op1, op2);
5394 return NULL_RTX;
5396 case SIGN_EXTRACT:
5397 case ZERO_EXTRACT:
5398 if (CONST_INT_P (op0)
5399 && CONST_INT_P (op1)
5400 && CONST_INT_P (op2)
5401 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
5402 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
5404 /* Extracting a bit-field from a constant */
5405 unsigned HOST_WIDE_INT val = UINTVAL (op0);
5406 HOST_WIDE_INT op1val = INTVAL (op1);
5407 HOST_WIDE_INT op2val = INTVAL (op2);
5408 if (BITS_BIG_ENDIAN)
5409 val >>= GET_MODE_PRECISION (op0_mode) - op2val - op1val;
5410 else
5411 val >>= op2val;
5413 if (HOST_BITS_PER_WIDE_INT != op1val)
5415 /* First zero-extend. */
5416 val &= ((unsigned HOST_WIDE_INT) 1 << op1val) - 1;
5417 /* If desired, propagate sign bit. */
5418 if (code == SIGN_EXTRACT
5419 && (val & ((unsigned HOST_WIDE_INT) 1 << (op1val - 1)))
5420 != 0)
5421 val |= ~ (((unsigned HOST_WIDE_INT) 1 << op1val) - 1);
5424 return gen_int_mode (val, mode);
5426 break;
5428 case IF_THEN_ELSE:
5429 if (CONST_INT_P (op0))
5430 return op0 != const0_rtx ? op1 : op2;
5432 /* Convert c ? a : a into "a". */
5433 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
5434 return op1;
5436 /* Convert a != b ? a : b into "a". */
5437 if (GET_CODE (op0) == NE
5438 && ! side_effects_p (op0)
5439 && ! HONOR_NANS (mode)
5440 && ! HONOR_SIGNED_ZEROS (mode)
5441 && ((rtx_equal_p (XEXP (op0, 0), op1)
5442 && rtx_equal_p (XEXP (op0, 1), op2))
5443 || (rtx_equal_p (XEXP (op0, 0), op2)
5444 && rtx_equal_p (XEXP (op0, 1), op1))))
5445 return op1;
5447 /* Convert a == b ? a : b into "b". */
5448 if (GET_CODE (op0) == EQ
5449 && ! side_effects_p (op0)
5450 && ! HONOR_NANS (mode)
5451 && ! HONOR_SIGNED_ZEROS (mode)
5452 && ((rtx_equal_p (XEXP (op0, 0), op1)
5453 && rtx_equal_p (XEXP (op0, 1), op2))
5454 || (rtx_equal_p (XEXP (op0, 0), op2)
5455 && rtx_equal_p (XEXP (op0, 1), op1))))
5456 return op2;
5458 if (COMPARISON_P (op0) && ! side_effects_p (op0))
5460 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
5461 ? GET_MODE (XEXP (op0, 1))
5462 : GET_MODE (XEXP (op0, 0)));
5463 rtx temp;
5465 /* Look for happy constants in op1 and op2. */
5466 if (CONST_INT_P (op1) && CONST_INT_P (op2))
5468 HOST_WIDE_INT t = INTVAL (op1);
5469 HOST_WIDE_INT f = INTVAL (op2);
5471 if (t == STORE_FLAG_VALUE && f == 0)
5472 code = GET_CODE (op0);
5473 else if (t == 0 && f == STORE_FLAG_VALUE)
5475 enum rtx_code tmp;
5476 tmp = reversed_comparison_code (op0, NULL_RTX);
5477 if (tmp == UNKNOWN)
5478 break;
5479 code = tmp;
5481 else
5482 break;
5484 return simplify_gen_relational (code, mode, cmp_mode,
5485 XEXP (op0, 0), XEXP (op0, 1));
5488 if (cmp_mode == VOIDmode)
5489 cmp_mode = op0_mode;
5490 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
5491 cmp_mode, XEXP (op0, 0),
5492 XEXP (op0, 1));
5494 /* See if any simplifications were possible. */
5495 if (temp)
5497 if (CONST_INT_P (temp))
5498 return temp == const0_rtx ? op2 : op1;
5499 else if (temp)
5500 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
5503 break;
5505 case VEC_MERGE:
5506 gcc_assert (GET_MODE (op0) == mode);
5507 gcc_assert (GET_MODE (op1) == mode);
5508 gcc_assert (VECTOR_MODE_P (mode));
5509 trueop2 = avoid_constant_pool_reference (op2);
5510 if (CONST_INT_P (trueop2))
5512 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
5513 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
5514 unsigned HOST_WIDE_INT sel = UINTVAL (trueop2);
5515 unsigned HOST_WIDE_INT mask;
5516 if (n_elts == HOST_BITS_PER_WIDE_INT)
5517 mask = -1;
5518 else
5519 mask = ((unsigned HOST_WIDE_INT) 1 << n_elts) - 1;
5521 if (!(sel & mask) && !side_effects_p (op0))
5522 return op1;
5523 if ((sel & mask) == mask && !side_effects_p (op1))
5524 return op0;
5526 rtx trueop0 = avoid_constant_pool_reference (op0);
5527 rtx trueop1 = avoid_constant_pool_reference (op1);
5528 if (GET_CODE (trueop0) == CONST_VECTOR
5529 && GET_CODE (trueop1) == CONST_VECTOR)
5531 rtvec v = rtvec_alloc (n_elts);
5532 unsigned int i;
5534 for (i = 0; i < n_elts; i++)
5535 RTVEC_ELT (v, i) = ((sel & ((unsigned HOST_WIDE_INT) 1 << i))
5536 ? CONST_VECTOR_ELT (trueop0, i)
5537 : CONST_VECTOR_ELT (trueop1, i));
5538 return gen_rtx_CONST_VECTOR (mode, v);
5541 /* Replace (vec_merge (vec_merge a b m) c n) with (vec_merge b c n)
5542 if no element from a appears in the result. */
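/* Illustrative 4-element case: in (vec_merge (vec_merge A B (const_int 1))
   C (const_int 14)) the outer mask 14 (0b1110) never selects element 0,
   the only element taken from A, so the result is
   (vec_merge B C (const_int 14)).  */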
5543 if (GET_CODE (op0) == VEC_MERGE)
5545 tem = avoid_constant_pool_reference (XEXP (op0, 2));
5546 if (CONST_INT_P (tem))
5548 unsigned HOST_WIDE_INT sel0 = UINTVAL (tem);
5549 if (!(sel & sel0 & mask) && !side_effects_p (XEXP (op0, 0)))
5550 return simplify_gen_ternary (code, mode, mode,
5551 XEXP (op0, 1), op1, op2);
5552 if (!(sel & ~sel0 & mask) && !side_effects_p (XEXP (op0, 1)))
5553 return simplify_gen_ternary (code, mode, mode,
5554 XEXP (op0, 0), op1, op2);
5557 if (GET_CODE (op1) == VEC_MERGE)
5559 tem = avoid_constant_pool_reference (XEXP (op1, 2));
5560 if (CONST_INT_P (tem))
5562 unsigned HOST_WIDE_INT sel1 = UINTVAL (tem);
5563 if (!(~sel & sel1 & mask) && !side_effects_p (XEXP (op1, 0)))
5564 return simplify_gen_ternary (code, mode, mode,
5565 op0, XEXP (op1, 1), op2);
5566 if (!(~sel & ~sel1 & mask) && !side_effects_p (XEXP (op1, 1)))
5567 return simplify_gen_ternary (code, mode, mode,
5568 op0, XEXP (op1, 0), op2);
5573 if (rtx_equal_p (op0, op1)
5574 && !side_effects_p (op2) && !side_effects_p (op1))
5575 return op0;
5577 break;
5579 default:
5580 gcc_unreachable ();
5583 return 0;
5586 /* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_FIXED
5587 or CONST_VECTOR,
5588 returning another CONST_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.
5590 Works by unpacking OP into a collection of 8-bit values
5591 represented as a little-endian array of 'unsigned char', selecting by BYTE,
5592 and then repacking them again for OUTERMODE. */
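/* Sketch of the idea (illustrative, assuming a little-endian target and
   a 64-bit host): the DImode constant 0x0102030405060708 unpacks into
   the byte array { 08 07 06 05 04 03 02 01 }; an SImode subreg at
   BYTE 0 repacks bytes 0-3 and yields (const_int 0x05060708).  */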
5594 static rtx
5595 simplify_immed_subreg (enum machine_mode outermode, rtx op,
5596 enum machine_mode innermode, unsigned int byte)
5598 /* We support up to 512-bit values (for V8DFmode). */
5599 enum {
5600 max_bitsize = 512,
5601 value_bit = 8,
5602 value_mask = (1 << value_bit) - 1
5604 unsigned char value[max_bitsize / value_bit];
5605 int value_start;
5606 int i;
5607 int elem;
5609 int num_elem;
5610 rtx * elems;
5611 int elem_bitsize;
5612 rtx result_s;
5613 rtvec result_v = NULL;
5614 enum mode_class outer_class;
5615 enum machine_mode outer_submode;
5617 /* Some ports misuse CCmode. */
5618 if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
5619 return op;
5621 /* We have no way to represent a complex constant at the rtl level. */
5622 if (COMPLEX_MODE_P (outermode))
5623 return NULL_RTX;
5625 /* Unpack the value. */
5627 if (GET_CODE (op) == CONST_VECTOR)
5629 num_elem = CONST_VECTOR_NUNITS (op);
5630 elems = &CONST_VECTOR_ELT (op, 0);
5631 elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
5633 else
5635 num_elem = 1;
5636 elems = &op;
5637 elem_bitsize = max_bitsize;
5639 /* If this asserts, it is too complicated; reducing value_bit may help. */
5640 gcc_assert (BITS_PER_UNIT % value_bit == 0);
5641 /* I don't know how to handle endianness of sub-units. */
5642 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
5644 for (elem = 0; elem < num_elem; elem++)
5646 unsigned char * vp;
5647 rtx el = elems[elem];
5649 /* Vectors are kept in target memory order. (This is probably
5650 a mistake.) */
5652 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5653 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5654 / BITS_PER_UNIT);
5655 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5656 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5657 unsigned bytele = (subword_byte % UNITS_PER_WORD
5658 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5659 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
5662 switch (GET_CODE (el))
5664 case CONST_INT:
5665 for (i = 0;
5666 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5667 i += value_bit)
5668 *vp++ = INTVAL (el) >> i;
5669 /* CONST_INTs are always logically sign-extended. */
5670 for (; i < elem_bitsize; i += value_bit)
5671 *vp++ = INTVAL (el) < 0 ? -1 : 0;
5672 break;
5674 case CONST_DOUBLE:
5675 if (GET_MODE (el) == VOIDmode)
5677 unsigned char extend = 0;
5678 /* If this triggers, someone should have generated a
5679 CONST_INT instead. */
5680 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
5682 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5683 *vp++ = CONST_DOUBLE_LOW (el) >> i;
5684 while (i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize)
5686 *vp++
5687 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
5688 i += value_bit;
5691 if (CONST_DOUBLE_HIGH (el) >> (HOST_BITS_PER_WIDE_INT - 1))
5692 extend = -1;
5693 for (; i < elem_bitsize; i += value_bit)
5694 *vp++ = extend;
5696 else
5698 long tmp[max_bitsize / 32];
5699 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
5701 gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
5702 gcc_assert (bitsize <= elem_bitsize);
5703 gcc_assert (bitsize % value_bit == 0);
5705 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
5706 GET_MODE (el));
5708 /* real_to_target produces its result in words affected by
5709 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5710 and use WORDS_BIG_ENDIAN instead; see the documentation
5711 of SUBREG in rtl.texi. */
5712 for (i = 0; i < bitsize; i += value_bit)
5714 int ibase;
5715 if (WORDS_BIG_ENDIAN)
5716 ibase = bitsize - 1 - i;
5717 else
5718 ibase = i;
5719 *vp++ = tmp[ibase / 32] >> i % 32;
5722 /* It shouldn't matter what's done here, so fill it with
5723 zero. */
5724 for (; i < elem_bitsize; i += value_bit)
5725 *vp++ = 0;
5727 break;
5729 case CONST_FIXED:
5730 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
5732 for (i = 0; i < elem_bitsize; i += value_bit)
5733 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5735 else
5737 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5738 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5739 for (; i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize;
5740 i += value_bit)
5741 *vp++ = CONST_FIXED_VALUE_HIGH (el)
5742 >> (i - HOST_BITS_PER_WIDE_INT);
5743 for (; i < elem_bitsize; i += value_bit)
5744 *vp++ = 0;
5746 break;
5748 default:
5749 gcc_unreachable ();
5753 /* Now, pick the right byte to start with. */
5754 /* Renumber BYTE so that the least-significant byte is byte 0. A special
5755 case is paradoxical SUBREGs, which shouldn't be adjusted since they
5756 will already have offset 0. */
5757 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
5759 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
5760 - byte);
5761 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5762 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5763 byte = (subword_byte % UNITS_PER_WORD
5764 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5767 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
5768 so if it's become negative it will instead be very large.) */
5769 gcc_assert (byte < GET_MODE_SIZE (innermode));
5771 /* Convert from bytes to chunks of size value_bit. */
5772 value_start = byte * (BITS_PER_UNIT / value_bit);
5774 /* Re-pack the value. */
5776 if (VECTOR_MODE_P (outermode))
5778 num_elem = GET_MODE_NUNITS (outermode);
5779 result_v = rtvec_alloc (num_elem);
5780 elems = &RTVEC_ELT (result_v, 0);
5781 outer_submode = GET_MODE_INNER (outermode);
5783 else
5785 num_elem = 1;
5786 elems = &result_s;
5787 outer_submode = outermode;
5790 outer_class = GET_MODE_CLASS (outer_submode);
5791 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
5793 gcc_assert (elem_bitsize % value_bit == 0);
5794 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
5796 for (elem = 0; elem < num_elem; elem++)
5798 unsigned char *vp;
5800 /* Vectors are stored in target memory order. (This is probably
5801 a mistake.) */
5803 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5804 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5805 / BITS_PER_UNIT);
5806 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5807 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5808 unsigned bytele = (subword_byte % UNITS_PER_WORD
5809 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5810 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
5813 switch (outer_class)
5815 case MODE_INT:
5816 case MODE_PARTIAL_INT:
5818 unsigned HOST_WIDE_INT hi = 0, lo = 0;
5820 for (i = 0;
5821 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5822 i += value_bit)
5823 lo |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5824 for (; i < elem_bitsize; i += value_bit)
5825 hi |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask)
5826 << (i - HOST_BITS_PER_WIDE_INT);
5828 /* immed_double_const doesn't call trunc_int_for_mode. I don't
5829 know why. */
5830 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
5831 elems[elem] = gen_int_mode (lo, outer_submode);
5832 else if (elem_bitsize <= HOST_BITS_PER_DOUBLE_INT)
5833 elems[elem] = immed_double_const (lo, hi, outer_submode);
5834 else
5835 return NULL_RTX;
5837 break;
5839 case MODE_FLOAT:
5840 case MODE_DECIMAL_FLOAT:
5842 REAL_VALUE_TYPE r;
5843 long tmp[max_bitsize / 32];
5845 /* real_from_target wants its input in words affected by
5846 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5847 and use WORDS_BIG_ENDIAN instead; see the documentation
5848 of SUBREG in rtl.texi. */
5849 for (i = 0; i < max_bitsize / 32; i++)
5850 tmp[i] = 0;
5851 for (i = 0; i < elem_bitsize; i += value_bit)
5853 int ibase;
5854 if (WORDS_BIG_ENDIAN)
5855 ibase = elem_bitsize - 1 - i;
5856 else
5857 ibase = i;
5858 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
5861 real_from_target (&r, tmp, outer_submode);
5862 elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
5864 break;
5866 case MODE_FRACT:
5867 case MODE_UFRACT:
5868 case MODE_ACCUM:
5869 case MODE_UACCUM:
5871 FIXED_VALUE_TYPE f;
5872 f.data.low = 0;
5873 f.data.high = 0;
5874 f.mode = outer_submode;
5876 for (i = 0;
5877 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5878 i += value_bit)
5879 f.data.low |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5880 for (; i < elem_bitsize; i += value_bit)
5881 f.data.high |= ((unsigned HOST_WIDE_INT)(*vp++ & value_mask)
5882 << (i - HOST_BITS_PER_WIDE_INT));
5884 elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
5886 break;
5888 default:
5889 gcc_unreachable ();
5892 if (VECTOR_MODE_P (outermode))
5893 return gen_rtx_CONST_VECTOR (outermode, result_v);
5894 else
5895 return result_s;
5898 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
5899 Return 0 if no simplifications are possible. */
5901 simplify_subreg (enum machine_mode outermode, rtx op,
5902 enum machine_mode innermode, unsigned int byte)
5904 /* Little bit of sanity checking. */
5905 gcc_assert (innermode != VOIDmode);
5906 gcc_assert (outermode != VOIDmode);
5907 gcc_assert (innermode != BLKmode);
5908 gcc_assert (outermode != BLKmode);
5910 gcc_assert (GET_MODE (op) == innermode
5911 || GET_MODE (op) == VOIDmode);
5913 if ((byte % GET_MODE_SIZE (outermode)) != 0)
5914 return NULL_RTX;
5916 if (byte >= GET_MODE_SIZE (innermode))
5917 return NULL_RTX;
5919 if (outermode == innermode && !byte)
5920 return op;
5922 if (CONST_SCALAR_INT_P (op)
5923 || CONST_DOUBLE_AS_FLOAT_P (op)
5924 || GET_CODE (op) == CONST_FIXED
5925 || GET_CODE (op) == CONST_VECTOR)
5926 return simplify_immed_subreg (outermode, op, innermode, byte);
5928 /* Changing mode twice with SUBREG => just change it once,
5929 or not at all if changing back op starting mode. */
5930 if (GET_CODE (op) == SUBREG)
5932 enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
5933 int final_offset = byte + SUBREG_BYTE (op);
5934 rtx newx;
5936 if (outermode == innermostmode
5937 && byte == 0 && SUBREG_BYTE (op) == 0)
5938 return SUBREG_REG (op);
5940 /* The SUBREG_BYTE represents offset, as if the value were stored
5941 in memory. Irritating exception is paradoxical subreg, where
5942 we define SUBREG_BYTE to be 0. On big endian machines, this
5943 value should be negative. For a moment, undo this exception. */
5944 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5946 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
5947 if (WORDS_BIG_ENDIAN)
5948 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5949 if (BYTES_BIG_ENDIAN)
5950 final_offset += difference % UNITS_PER_WORD;
5952 if (SUBREG_BYTE (op) == 0
5953 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
5955 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
5956 if (WORDS_BIG_ENDIAN)
5957 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5958 if (BYTES_BIG_ENDIAN)
5959 final_offset += difference % UNITS_PER_WORD;
5962 /* See whether resulting subreg will be paradoxical. */
5963 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
5965 /* In nonparadoxical subregs we can't handle negative offsets. */
5966 if (final_offset < 0)
5967 return NULL_RTX;
5968 /* Bail out in case resulting subreg would be incorrect. */
5969 if (final_offset % GET_MODE_SIZE (outermode)
5970 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
5971 return NULL_RTX;
5973 else
5975 int offset = 0;
5976 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
5978 /* In paradoxical subreg, see if we are still looking on lower part.
5979 If so, our SUBREG_BYTE will be 0. */
5980 if (WORDS_BIG_ENDIAN)
5981 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5982 if (BYTES_BIG_ENDIAN)
5983 offset += difference % UNITS_PER_WORD;
5984 if (offset == final_offset)
5985 final_offset = 0;
5986 else
5987 return NULL_RTX;
5990 /* Recurse for further possible simplifications. */
5991 newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
5992 final_offset);
5993 if (newx)
5994 return newx;
5995 if (validate_subreg (outermode, innermostmode,
5996 SUBREG_REG (op), final_offset))
5998 newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
5999 if (SUBREG_PROMOTED_VAR_P (op)
6000 && SUBREG_PROMOTED_UNSIGNED_P (op) >= 0
6001 && GET_MODE_CLASS (outermode) == MODE_INT
6002 && IN_RANGE (GET_MODE_SIZE (outermode),
6003 GET_MODE_SIZE (innermode),
6004 GET_MODE_SIZE (innermostmode))
6005 && subreg_lowpart_p (newx))
6007 SUBREG_PROMOTED_VAR_P (newx) = 1;
6008 SUBREG_PROMOTED_UNSIGNED_SET
6009 (newx, SUBREG_PROMOTED_UNSIGNED_P (op));
6011 return newx;
6013 return NULL_RTX;
6016 /* SUBREG of a hard register => just change the register number
6017 and/or mode. If the hard register is not valid in that mode,
6018 suppress this simplification. If the hard register is the stack,
6019 frame, or argument pointer, leave this as a SUBREG. */
6021 if (REG_P (op) && HARD_REGISTER_P (op))
6023 unsigned int regno, final_regno;
6025 regno = REGNO (op);
6026 final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
6027 if (HARD_REGISTER_NUM_P (final_regno))
6029 rtx x;
6030 int final_offset = byte;
6032 /* Adjust offset for paradoxical subregs. */
6033 if (byte == 0
6034 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
6036 int difference = (GET_MODE_SIZE (innermode)
6037 - GET_MODE_SIZE (outermode));
6038 if (WORDS_BIG_ENDIAN)
6039 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
6040 if (BYTES_BIG_ENDIAN)
6041 final_offset += difference % UNITS_PER_WORD;
6044 x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);
6046 /* Propagate original regno. We don't have any way to specify
6047 the offset inside original regno, so do so only for lowpart.
6048 The information is used only by alias analysis that can not
6049 grog partial register anyway. */
6051 if (subreg_lowpart_offset (outermode, innermode) == byte)
6052 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
6053 return x;
6057 /* If we have a SUBREG of a register that we are replacing and we are
6058 replacing it with a MEM, make a new MEM and try replacing the
6059 SUBREG with it. Don't do this if the MEM has a mode-dependent address
6060 or if we would be widening it. */
6062 if (MEM_P (op)
6063 && ! mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op))
6064 /* Allow splitting of volatile memory references in case we don't
6065 have instruction to move the whole thing. */
6066 && (! MEM_VOLATILE_P (op)
6067 || ! have_insn_for (SET, innermode))
6068 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
6069 return adjust_address_nv (op, outermode, byte);
6071 /* Handle complex values represented as CONCAT
6072 of real and imaginary part. */
6073 if (GET_CODE (op) == CONCAT)
6075 unsigned int part_size, final_offset;
6076 rtx part, res;
6078 part_size = GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)));
6079 if (byte < part_size)
6081 part = XEXP (op, 0);
6082 final_offset = byte;
6084 else
6086 part = XEXP (op, 1);
6087 final_offset = byte - part_size;
6090 if (final_offset + GET_MODE_SIZE (outermode) > part_size)
6091 return NULL_RTX;
6093 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
6094 if (res)
6095 return res;
6096 if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
6097 return gen_rtx_SUBREG (outermode, part, final_offset);
6098 return NULL_RTX;
6101 /* A SUBREG resulting from a zero extension may fold to zero if
6102 it extracts higher bits that the ZERO_EXTEND's source bits. */
6103 if (GET_CODE (op) == ZERO_EXTEND && SCALAR_INT_MODE_P (innermode))
6105 unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
6106 if (bitpos >= GET_MODE_PRECISION (GET_MODE (XEXP (op, 0))))
6107 return CONST0_RTX (outermode);
6110 if (SCALAR_INT_MODE_P (outermode)
6111 && SCALAR_INT_MODE_P (innermode)
6112 && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode)
6113 && byte == subreg_lowpart_offset (outermode, innermode))
6115 rtx tem = simplify_truncation (outermode, op, innermode);
6116 if (tem)
6117 return tem;
6120 return NULL_RTX;
6123 /* Make a SUBREG operation or equivalent if it folds. */
6126 simplify_gen_subreg (enum machine_mode outermode, rtx op,
6127 enum machine_mode innermode, unsigned int byte)
6129 rtx newx;
6131 newx = simplify_subreg (outermode, op, innermode, byte);
6132 if (newx)
6133 return newx;
6135 if (GET_CODE (op) == SUBREG
6136 || GET_CODE (op) == CONCAT
6137 || GET_MODE (op) == VOIDmode)
6138 return NULL_RTX;
6140 if (validate_subreg (outermode, innermode, op, byte))
6141 return gen_rtx_SUBREG (outermode, op, byte);
6143 return NULL_RTX;
6146 /* Simplify X, an rtx expression.
6148 Return the simplified expression or NULL if no simplifications
6149 were possible.
6151 This is the preferred entry point into the simplification routines;
6152 however, we still allow passes to call the more specific routines.
6154 Right now GCC has three (yes, three) major bodies of RTL simplification
6155 code that need to be unified.
6157 1. fold_rtx in cse.c. This code uses various CSE specific
6158 information to aid in RTL simplification.
6160 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
6161 it uses combine specific information to aid in RTL
6162 simplification.
6164 3. The routines in this file.
6167 Long term we want to only have one body of simplification code; to
6168 get to that state I recommend the following steps:
6170 1. Pour over fold_rtx & simplify_rtx and move any simplifications
6171 which are not pass dependent state into these routines.
6173 2. As code is moved by #1, change fold_rtx & simplify_rtx to
6174 use this routine whenever possible.
6176 3. Allow for pass dependent state to be provided to these
6177 routines and add simplifications based on the pass dependent
6178 state. Remove code from cse.c & combine.c that becomes
6179 redundant/dead.
6181 It will take time, but ultimately the compiler will be easier to
6182 maintain and improve. It's totally silly that when we add a
6183 simplification that it needs to be added to 4 places (3 for RTL
6184 simplification and 1 for tree simplification. */
6187 simplify_rtx (const_rtx x)
6189 const enum rtx_code code = GET_CODE (x);
6190 const enum machine_mode mode = GET_MODE (x);
6192 switch (GET_RTX_CLASS (code))
6194 case RTX_UNARY:
6195 return simplify_unary_operation (code, mode,
6196 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
6197 case RTX_COMM_ARITH:
6198 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
6199 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
6201 /* Fall through.... */
6203 case RTX_BIN_ARITH:
6204 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
6206 case RTX_TERNARY:
6207 case RTX_BITFIELD_OPS:
6208 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
6209 XEXP (x, 0), XEXP (x, 1),
6210 XEXP (x, 2));
6212 case RTX_COMPARE:
6213 case RTX_COMM_COMPARE:
6214 return simplify_relational_operation (code, mode,
6215 ((GET_MODE (XEXP (x, 0))
6216 != VOIDmode)
6217 ? GET_MODE (XEXP (x, 0))
6218 : GET_MODE (XEXP (x, 1))),
6219 XEXP (x, 0),
6220 XEXP (x, 1));
6222 case RTX_EXTRA:
6223 if (code == SUBREG)
6224 return simplify_subreg (mode, SUBREG_REG (x),
6225 GET_MODE (SUBREG_REG (x)),
6226 SUBREG_BYTE (x));
6227 break;
6229 case RTX_OBJ:
6230 if (code == LO_SUM)
6232 /* Convert (lo_sum (high FOO) FOO) to FOO. */
6233 if (GET_CODE (XEXP (x, 0)) == HIGH
6234 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
6235 return XEXP (x, 1);
6237 break;
6239 default:
6240 break;
6242 return NULL;