gcc/simplify-rtx.c
1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
4 2011, 2012 Free Software Foundation, Inc.
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "rtl.h"
28 #include "tree.h"
29 #include "tm_p.h"
30 #include "regs.h"
31 #include "hard-reg-set.h"
32 #include "flags.h"
33 #include "insn-config.h"
34 #include "recog.h"
35 #include "function.h"
36 #include "expr.h"
37 #include "diagnostic-core.h"
38 #include "ggc.h"
39 #include "target.h"
41 /* Simplification and canonicalization of RTL. */
43 /* Much code operates on (low, high) pairs; the low value is an
44 unsigned wide int, the high value a signed wide int. We
45 occasionally need to sign extend from low to high as if low were a
46 signed wide int. */
47 #define HWI_SIGN_EXTEND(low) \
48 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
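/* For example, HWI_SIGN_EXTEND (-2) is (HOST_WIDE_INT) -1 and
   HWI_SIGN_EXTEND (5) is 0: the result supplies the high half of a
   (low, high) pair whose low half is treated as signed.  */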
50 static rtx neg_const_int (enum machine_mode, const_rtx);
51 static bool plus_minus_operand_p (const_rtx);
52 static bool simplify_plus_minus_op_data_cmp (rtx, rtx);
53 static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx, rtx);
54 static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
55 unsigned int);
56 static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
57 rtx, rtx);
58 static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
59 enum machine_mode, rtx, rtx);
60 static rtx simplify_unary_operation_1 (enum rtx_code, enum machine_mode, rtx);
61 static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode,
62 rtx, rtx, rtx, rtx);
64 /* Negate a CONST_INT rtx, truncating (because a conversion from a
65 maximally negative number can overflow). */
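/* For example, in 8-bit QImode negating (const_int -128) overflows and
   wraps back to (const_int -128); doing the negation on the unsigned
   value and passing it through gen_int_mode makes that truncation
   explicit.  */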
66 static rtx
67 neg_const_int (enum machine_mode mode, const_rtx i)
69 return gen_int_mode (-(unsigned HOST_WIDE_INT) INTVAL (i), mode);
72 /* Test whether expression, X, is an immediate constant that represents
73 the most significant bit of machine mode MODE. */
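/* For instance, with a 64-bit HOST_WIDE_INT the SImode signbit constant
   is (const_int -2147483648): masking with the 32-bit mode mask leaves
   0x80000000, which equals (unsigned HOST_WIDE_INT) 1 << 31.  */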
75 bool
76 mode_signbit_p (enum machine_mode mode, const_rtx x)
78 unsigned HOST_WIDE_INT val;
79 unsigned int width;
81 if (GET_MODE_CLASS (mode) != MODE_INT)
82 return false;
84 width = GET_MODE_PRECISION (mode);
85 if (width == 0)
86 return false;
88 if (width <= HOST_BITS_PER_WIDE_INT
89 && CONST_INT_P (x))
90 val = INTVAL (x);
91 else if (width <= HOST_BITS_PER_DOUBLE_INT
92 && CONST_DOUBLE_AS_INT_P (x)
93 && CONST_DOUBLE_LOW (x) == 0)
95 val = CONST_DOUBLE_HIGH (x);
96 width -= HOST_BITS_PER_WIDE_INT;
98 else
99 /* FIXME: We don't yet have a representation for wider modes. */
100 return false;
102 if (width < HOST_BITS_PER_WIDE_INT)
103 val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
104 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
107 /* Test whether VAL is equal to the most significant bit of mode MODE
108 (after masking with the mode mask of MODE). Returns false if the
109 precision of MODE is too large to handle. */
111 bool
112 val_signbit_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
114 unsigned int width;
116 if (GET_MODE_CLASS (mode) != MODE_INT)
117 return false;
119 width = GET_MODE_PRECISION (mode);
120 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
121 return false;
123 val &= GET_MODE_MASK (mode);
124 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
127 /* Test whether the most significant bit of mode MODE is set in VAL.
128 Returns false if the precision of MODE is too large to handle. */
129 bool
130 val_signbit_known_set_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
132 unsigned int width;
134 if (GET_MODE_CLASS (mode) != MODE_INT)
135 return false;
137 width = GET_MODE_PRECISION (mode);
138 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
139 return false;
141 val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
142 return val != 0;
145 /* Test whether the most significant bit of mode MODE is clear in VAL.
146 Returns false if the precision of MODE is too large to handle. */
147 bool
148 val_signbit_known_clear_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
150 unsigned int width;
152 if (GET_MODE_CLASS (mode) != MODE_INT)
153 return false;
155 width = GET_MODE_PRECISION (mode);
156 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
157 return false;
159 val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
160 return val == 0;
163 /* Make a binary operation by properly ordering the operands and
164 seeing if the expression folds. */
rtx
167 simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
168 rtx op1)
170 rtx tem;
172 /* If this simplifies, do it. */
173 tem = simplify_binary_operation (code, mode, op0, op1);
174 if (tem)
175 return tem;
177 /* Put complex operands first and constants second if commutative. */
178 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
179 && swap_commutative_operands_p (op0, op1))
180 tem = op0, op0 = op1, op1 = tem;
182 return gen_rtx_fmt_ee (code, mode, op0, op1);
185 /* If X is a MEM referencing the constant pool, return the real value.
186 Otherwise return X. */
rtx
188 avoid_constant_pool_reference (rtx x)
190 rtx c, tmp, addr;
191 enum machine_mode cmode;
192 HOST_WIDE_INT offset = 0;
194 switch (GET_CODE (x))
196 case MEM:
197 break;
199 case FLOAT_EXTEND:
200 /* Handle float extensions of constant pool references. */
201 tmp = XEXP (x, 0);
202 c = avoid_constant_pool_reference (tmp);
203 if (c != tmp && CONST_DOUBLE_AS_FLOAT_P (c))
205 REAL_VALUE_TYPE d;
207 REAL_VALUE_FROM_CONST_DOUBLE (d, c);
208 return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
210 return x;
212 default:
213 return x;
216 if (GET_MODE (x) == BLKmode)
217 return x;
219 addr = XEXP (x, 0);
221 /* Call target hook to avoid the effects of -fpic etc.... */
222 addr = targetm.delegitimize_address (addr);
224 /* Split the address into a base and integer offset. */
225 if (GET_CODE (addr) == CONST
226 && GET_CODE (XEXP (addr, 0)) == PLUS
227 && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
229 offset = INTVAL (XEXP (XEXP (addr, 0), 1));
230 addr = XEXP (XEXP (addr, 0), 0);
233 if (GET_CODE (addr) == LO_SUM)
234 addr = XEXP (addr, 1);
236 /* If this is a constant pool reference, we can turn it into its
237 constant and hope that simplifications happen. */
238 if (GET_CODE (addr) == SYMBOL_REF
239 && CONSTANT_POOL_ADDRESS_P (addr))
241 c = get_pool_constant (addr);
242 cmode = get_pool_mode (addr);
244 /* If we're accessing the constant in a different mode than it was
245 originally stored, attempt to fix that up via subreg simplifications.
246 If that fails we have no choice but to return the original memory. */
247 if (offset != 0 || cmode != GET_MODE (x))
249 rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
250 if (tem && CONSTANT_P (tem))
251 return tem;
253 else
254 return c;
257 return x;
260 /* Simplify a MEM based on its attributes. This is the default
261 delegitimize_address target hook, and it's recommended that every
262 overrider call it. */
rtx
265 delegitimize_mem_from_attrs (rtx x)
267 /* MEMs without MEM_OFFSETs may have been offset, so we can't just
268 use their base addresses as equivalent. */
269 if (MEM_P (x)
270 && MEM_EXPR (x)
271 && MEM_OFFSET_KNOWN_P (x))
273 tree decl = MEM_EXPR (x);
274 enum machine_mode mode = GET_MODE (x);
275 HOST_WIDE_INT offset = 0;
277 switch (TREE_CODE (decl))
279 default:
280 decl = NULL;
281 break;
283 case VAR_DECL:
284 break;
286 case ARRAY_REF:
287 case ARRAY_RANGE_REF:
288 case COMPONENT_REF:
289 case BIT_FIELD_REF:
290 case REALPART_EXPR:
291 case IMAGPART_EXPR:
292 case VIEW_CONVERT_EXPR:
294 HOST_WIDE_INT bitsize, bitpos;
295 tree toffset;
296 int unsignedp, volatilep = 0;
298 decl = get_inner_reference (decl, &bitsize, &bitpos, &toffset,
299 &mode, &unsignedp, &volatilep, false);
300 if (bitsize != GET_MODE_BITSIZE (mode)
301 || (bitpos % BITS_PER_UNIT)
302 || (toffset && !host_integerp (toffset, 0)))
303 decl = NULL;
304 else
306 offset += bitpos / BITS_PER_UNIT;
307 if (toffset)
308 offset += TREE_INT_CST_LOW (toffset);
310 break;
314 if (decl
315 && mode == GET_MODE (x)
316 && TREE_CODE (decl) == VAR_DECL
317 && (TREE_STATIC (decl)
318 || DECL_THREAD_LOCAL_P (decl))
319 && DECL_RTL_SET_P (decl)
320 && MEM_P (DECL_RTL (decl)))
322 rtx newx;
324 offset += MEM_OFFSET (x);
326 newx = DECL_RTL (decl);
328 if (MEM_P (newx))
330 rtx n = XEXP (newx, 0), o = XEXP (x, 0);
332 /* Avoid creating a new MEM needlessly if we already had
333 the same address. We do if there's no OFFSET and the
334 old address X is identical to NEWX, or if X is of the
335 form (plus NEWX OFFSET), or the NEWX is of the form
336 (plus Y (const_int Z)) and X is that with the offset
337 added: (plus Y (const_int Z+OFFSET)). */
338 if (!((offset == 0
339 || (GET_CODE (o) == PLUS
340 && GET_CODE (XEXP (o, 1)) == CONST_INT
341 && (offset == INTVAL (XEXP (o, 1))
342 || (GET_CODE (n) == PLUS
343 && GET_CODE (XEXP (n, 1)) == CONST_INT
344 && (INTVAL (XEXP (n, 1)) + offset
345 == INTVAL (XEXP (o, 1)))
346 && (n = XEXP (n, 0))))
347 && (o = XEXP (o, 0))))
348 && rtx_equal_p (o, n)))
349 x = adjust_address_nv (newx, mode, offset);
351 else if (GET_MODE (x) == GET_MODE (newx)
352 && offset == 0)
353 x = newx;
357 return x;
360 /* Make a unary operation by first seeing if it folds and otherwise making
361 the specified operation. */
rtx
364 simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
365 enum machine_mode op_mode)
367 rtx tem;
369 /* If this simplifies, use it. */
370 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
371 return tem;
373 return gen_rtx_fmt_e (code, mode, op);
376 /* Likewise for ternary operations. */
rtx
379 simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
380 enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
382 rtx tem;
384 /* If this simplifies, use it. */
385 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
386 op0, op1, op2)))
387 return tem;
389 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
392 /* Likewise, for relational operations.
393 CMP_MODE specifies mode comparison is done in. */
rtx
396 simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
397 enum machine_mode cmp_mode, rtx op0, rtx op1)
399 rtx tem;
401 if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
402 op0, op1)))
403 return tem;
405 return gen_rtx_fmt_ee (code, mode, op0, op1);
408 /* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
409 and simplify the result. If FN is non-NULL, call this callback on each
410 X, if it returns non-NULL, replace X with its return value and simplify the
411 result. */
rtx
414 simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
415 rtx (*fn) (rtx, const_rtx, void *), void *data)
417 enum rtx_code code = GET_CODE (x);
418 enum machine_mode mode = GET_MODE (x);
419 enum machine_mode op_mode;
420 const char *fmt;
421 rtx op0, op1, op2, newx, op;
422 rtvec vec, newvec;
423 int i, j;
425 if (__builtin_expect (fn != NULL, 0))
427 newx = fn (x, old_rtx, data);
428 if (newx)
429 return newx;
431 else if (rtx_equal_p (x, old_rtx))
432 return copy_rtx ((rtx) data);
434 switch (GET_RTX_CLASS (code))
436 case RTX_UNARY:
437 op0 = XEXP (x, 0);
438 op_mode = GET_MODE (op0);
439 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
440 if (op0 == XEXP (x, 0))
441 return x;
442 return simplify_gen_unary (code, mode, op0, op_mode);
444 case RTX_BIN_ARITH:
445 case RTX_COMM_ARITH:
446 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
447 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
448 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
449 return x;
450 return simplify_gen_binary (code, mode, op0, op1);
452 case RTX_COMPARE:
453 case RTX_COMM_COMPARE:
454 op0 = XEXP (x, 0);
455 op1 = XEXP (x, 1);
456 op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
457 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
458 op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
459 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
460 return x;
461 return simplify_gen_relational (code, mode, op_mode, op0, op1);
463 case RTX_TERNARY:
464 case RTX_BITFIELD_OPS:
465 op0 = XEXP (x, 0);
466 op_mode = GET_MODE (op0);
467 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
468 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
469 op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
470 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
471 return x;
472 if (op_mode == VOIDmode)
473 op_mode = GET_MODE (op0);
474 return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
476 case RTX_EXTRA:
477 if (code == SUBREG)
479 op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
480 if (op0 == SUBREG_REG (x))
481 return x;
482 op0 = simplify_gen_subreg (GET_MODE (x), op0,
483 GET_MODE (SUBREG_REG (x)),
484 SUBREG_BYTE (x));
485 return op0 ? op0 : x;
487 break;
489 case RTX_OBJ:
490 if (code == MEM)
492 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
493 if (op0 == XEXP (x, 0))
494 return x;
495 return replace_equiv_address_nv (x, op0);
497 else if (code == LO_SUM)
499 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
500 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
502 /* (lo_sum (high x) x) -> x */
503 if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
504 return op1;
506 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
507 return x;
508 return gen_rtx_LO_SUM (mode, op0, op1);
510 break;
512 default:
513 break;
516 newx = x;
517 fmt = GET_RTX_FORMAT (code);
518 for (i = 0; fmt[i]; i++)
519 switch (fmt[i])
521 case 'E':
522 vec = XVEC (x, i);
523 newvec = XVEC (newx, i);
524 for (j = 0; j < GET_NUM_ELEM (vec); j++)
526 op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
527 old_rtx, fn, data);
528 if (op != RTVEC_ELT (vec, j))
530 if (newvec == vec)
532 newvec = shallow_copy_rtvec (vec);
533 if (x == newx)
534 newx = shallow_copy_rtx (x);
535 XVEC (newx, i) = newvec;
537 RTVEC_ELT (newvec, j) = op;
540 break;
542 case 'e':
543 if (XEXP (x, i))
545 op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
546 if (op != XEXP (x, i))
548 if (x == newx)
549 newx = shallow_copy_rtx (x);
550 XEXP (newx, i) = op;
553 break;
555 return newx;
558 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
559 resulting RTX. Return a new RTX which is as simplified as possible. */
rtx
562 simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
564 return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
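/* For example (an illustrative use): replacing (reg:SI 1) with
   (const_int 8) in (plus:SI (reg:SI 1) (const_int 4)) yields
   (const_int 12), because the rebuilt PLUS is folded by
   simplify_gen_binary.  */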
567 /* Try to simplify a MODE truncation of OP, which has OP_MODE.
568 Only handle cases where the truncated value is inherently an rvalue.
570 RTL provides two ways of truncating a value:
572 1. a lowpart subreg. This form is only a truncation when both
573 the outer and inner modes (here MODE and OP_MODE respectively)
574 are scalar integers, and only then when the subreg is used as
575 an rvalue.
577 It is only valid to form such truncating subregs if the
578 truncation requires no action by the target. The onus for
579 proving this is on the creator of the subreg -- e.g. the
580 caller to simplify_subreg or simplify_gen_subreg -- and typically
581 involves either TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode.
583 2. a TRUNCATE. This form handles both scalar and compound integers.
585 The first form is preferred where valid. However, the TRUNCATE
586 handling in simplify_unary_operation turns the second form into the
587 first form when TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode allow,
588 so it is generally safe to form rvalue truncations using:
590 simplify_gen_unary (TRUNCATE, ...)
592 and leave simplify_unary_operation to work out which representation
593 should be used.
595 Because of the proof requirements on (1), simplify_truncation must
596 also use simplify_gen_unary (TRUNCATE, ...) to truncate parts of OP,
597 regardless of whether the outer truncation came from a SUBREG or a
598 TRUNCATE. For example, if the caller has proven that an SImode
599 truncation of:
601 (and:DI X Y)
603 is a no-op and can be represented as a subreg, it does not follow
604 that SImode truncations of X and Y are also no-ops. On a target
605 like 64-bit MIPS that requires SImode values to be stored in
606 sign-extended form, an SImode truncation of:
608 (and:DI (reg:DI X) (const_int 63))
610 is trivially a no-op because only the lower 6 bits can be set.
611 However, X is still an arbitrary 64-bit number and so we cannot
612 assume that truncating it too is a no-op. */
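/* For example (illustrative): a request to truncate
   (plus:DI (reg:DI X) (reg:DI Y)) to SImode is rewritten below as
   (plus:SI (truncate:SI (reg:DI X)) (truncate:SI (reg:DI Y))), using
   simplify_gen_unary (TRUNCATE, ...) for each operand so that
   simplify_unary_operation can decide, per operand, whether the inner
   TRUNCATE may become a lowpart subreg.  */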
614 static rtx
615 simplify_truncation (enum machine_mode mode, rtx op,
616 enum machine_mode op_mode)
618 unsigned int precision = GET_MODE_UNIT_PRECISION (mode);
619 unsigned int op_precision = GET_MODE_UNIT_PRECISION (op_mode);
620 gcc_assert (precision <= op_precision);
622 /* Optimize truncations of zero and sign extended values. */
623 if (GET_CODE (op) == ZERO_EXTEND
624 || GET_CODE (op) == SIGN_EXTEND)
626 /* There are three possibilities. If MODE is the same as the
627 origmode, we can omit both the extension and the subreg.
628 If MODE is not larger than the origmode, we can apply the
629 truncation without the extension. Finally, if the outermode
630 is larger than the origmode, we can just extend to the appropriate
631 mode. */
632 enum machine_mode origmode = GET_MODE (XEXP (op, 0));
633 if (mode == origmode)
634 return XEXP (op, 0);
635 else if (precision <= GET_MODE_UNIT_PRECISION (origmode))
636 return simplify_gen_unary (TRUNCATE, mode,
637 XEXP (op, 0), origmode);
638 else
639 return simplify_gen_unary (GET_CODE (op), mode,
640 XEXP (op, 0), origmode);
643 /* Simplify (truncate:SI (op:DI (x:DI) (y:DI)))
644 to (op:SI (truncate:SI (x:DI)) (truncate:SI (y:DI))). */
645 if (GET_CODE (op) == PLUS
646 || GET_CODE (op) == MINUS
647 || GET_CODE (op) == MULT)
649 rtx op0 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0), op_mode);
650 if (op0)
652 rtx op1 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 1), op_mode);
653 if (op1)
654 return simplify_gen_binary (GET_CODE (op), mode, op0, op1);
658 /* Simplify (truncate:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C)) into
659 (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
660 the outer subreg is effectively a truncation to the original mode. */
661 if ((GET_CODE (op) == LSHIFTRT
662 || GET_CODE (op) == ASHIFTRT)
663 /* Ensure that OP_MODE is at least twice as wide as MODE
664 to avoid the possibility that an outer LSHIFTRT shifts by more
665 than the sign extension's sign_bit_copies and introduces zeros
666 into the high bits of the result. */
667 && 2 * precision <= op_precision
668 && CONST_INT_P (XEXP (op, 1))
669 && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
670 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
671 && UINTVAL (XEXP (op, 1)) < precision)
672 return simplify_gen_binary (ASHIFTRT, mode,
673 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
675 /* Likewise (truncate:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C)) into
676 (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
677 the outer subreg is effectively a truncation to the original mode. */
678 if ((GET_CODE (op) == LSHIFTRT
679 || GET_CODE (op) == ASHIFTRT)
680 && CONST_INT_P (XEXP (op, 1))
681 && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
682 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
683 && UINTVAL (XEXP (op, 1)) < precision)
684 return simplify_gen_binary (LSHIFTRT, mode,
685 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
687 /* Likewise (truncate:QI (ashift:SI (zero_extend:SI (x:QI)) C)) into
688 (ashift:QI (x:QI) C), where C is a suitable small constant and
689 the outer subreg is effectively a truncation to the original mode. */
690 if (GET_CODE (op) == ASHIFT
691 && CONST_INT_P (XEXP (op, 1))
692 && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
693 || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
694 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
695 && UINTVAL (XEXP (op, 1)) < precision)
696 return simplify_gen_binary (ASHIFT, mode,
697 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
699 /* Recognize a word extraction from a multi-word subreg. */
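/* For example, assuming a 32-bit little-endian target,
   (truncate:SI (lshiftrt:DI (reg:DI X) (const_int 32))) becomes
   (subreg:SI (reg:DI X) 4), i.e. a direct reference to the high
   word.  */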
700 if ((GET_CODE (op) == LSHIFTRT
701 || GET_CODE (op) == ASHIFTRT)
702 && SCALAR_INT_MODE_P (mode)
703 && SCALAR_INT_MODE_P (op_mode)
704 && precision >= BITS_PER_WORD
705 && 2 * precision <= op_precision
706 && CONST_INT_P (XEXP (op, 1))
707 && (INTVAL (XEXP (op, 1)) & (precision - 1)) == 0
708 && UINTVAL (XEXP (op, 1)) < op_precision)
710 int byte = subreg_lowpart_offset (mode, op_mode);
711 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
712 return simplify_gen_subreg (mode, XEXP (op, 0), op_mode,
713 (WORDS_BIG_ENDIAN
714 ? byte - shifted_bytes
715 : byte + shifted_bytes));
718 /* If we have a TRUNCATE of a right shift of MEM, make a new MEM
719 and try replacing the TRUNCATE and shift with it. Don't do this
720 if the MEM has a mode-dependent address. */
721 if ((GET_CODE (op) == LSHIFTRT
722 || GET_CODE (op) == ASHIFTRT)
723 && SCALAR_INT_MODE_P (op_mode)
724 && MEM_P (XEXP (op, 0))
725 && CONST_INT_P (XEXP (op, 1))
726 && (INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (mode)) == 0
727 && INTVAL (XEXP (op, 1)) > 0
728 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (op_mode)
729 && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0),
730 MEM_ADDR_SPACE (XEXP (op, 0)))
731 && ! MEM_VOLATILE_P (XEXP (op, 0))
732 && (GET_MODE_SIZE (mode) >= UNITS_PER_WORD
733 || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
735 int byte = subreg_lowpart_offset (mode, op_mode);
736 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
737 return adjust_address_nv (XEXP (op, 0), mode,
738 (WORDS_BIG_ENDIAN
739 ? byte - shifted_bytes
740 : byte + shifted_bytes));
743 /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
744 (OP:SI foo:SI) if OP is NEG or ABS. */
745 if ((GET_CODE (op) == ABS
746 || GET_CODE (op) == NEG)
747 && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
748 || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
749 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
750 return simplify_gen_unary (GET_CODE (op), mode,
751 XEXP (XEXP (op, 0), 0), mode);
753 /* (truncate:A (subreg:B (truncate:C X) 0)) is
754 (truncate:A X). */
755 if (GET_CODE (op) == SUBREG
756 && SCALAR_INT_MODE_P (mode)
757 && SCALAR_INT_MODE_P (op_mode)
758 && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (op)))
759 && GET_CODE (SUBREG_REG (op)) == TRUNCATE
760 && subreg_lowpart_p (op))
761 return simplify_gen_unary (TRUNCATE, mode, XEXP (SUBREG_REG (op), 0),
762 GET_MODE (XEXP (SUBREG_REG (op), 0)));
764 /* (truncate:A (truncate:B X)) is (truncate:A X). */
765 if (GET_CODE (op) == TRUNCATE)
766 return simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0),
767 GET_MODE (XEXP (op, 0)));
769 return NULL_RTX;
772 /* Try to simplify a unary operation CODE whose output mode is to be
773 MODE with input operand OP whose mode was originally OP_MODE.
774 Return zero if no simplification can be made. */
rtx
776 simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
777 rtx op, enum machine_mode op_mode)
779 rtx trueop, tem;
781 trueop = avoid_constant_pool_reference (op);
783 tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
784 if (tem)
785 return tem;
787 return simplify_unary_operation_1 (code, mode, op);
790 /* Perform some simplifications we can do even if the operands
791 aren't constant. */
792 static rtx
793 simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
795 enum rtx_code reversed;
796 rtx temp;
798 switch (code)
800 case NOT:
801 /* (not (not X)) == X. */
802 if (GET_CODE (op) == NOT)
803 return XEXP (op, 0);
805 /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
806 comparison is all ones. */
807 if (COMPARISON_P (op)
808 && (mode == BImode || STORE_FLAG_VALUE == -1)
809 && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
810 return simplify_gen_relational (reversed, mode, VOIDmode,
811 XEXP (op, 0), XEXP (op, 1));
813 /* (not (plus X -1)) can become (neg X). */
814 if (GET_CODE (op) == PLUS
815 && XEXP (op, 1) == constm1_rtx)
816 return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
818 /* Similarly, (not (neg X)) is (plus X -1). */
819 if (GET_CODE (op) == NEG)
820 return plus_constant (mode, XEXP (op, 0), -1);
822 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
823 if (GET_CODE (op) == XOR
824 && CONST_INT_P (XEXP (op, 1))
825 && (temp = simplify_unary_operation (NOT, mode,
826 XEXP (op, 1), mode)) != 0)
827 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
829 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
830 if (GET_CODE (op) == PLUS
831 && CONST_INT_P (XEXP (op, 1))
832 && mode_signbit_p (mode, XEXP (op, 1))
833 && (temp = simplify_unary_operation (NOT, mode,
834 XEXP (op, 1), mode)) != 0)
835 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
838 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
839 operands other than 1, but that is not valid. We could do a
840 similar simplification for (not (lshiftrt C X)) where C is
841 just the sign bit, but this doesn't seem common enough to
842 bother with. */
843 if (GET_CODE (op) == ASHIFT
844 && XEXP (op, 0) == const1_rtx)
846 temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
847 return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
850 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
851 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
852 so we can perform the above simplification. */
854 if (STORE_FLAG_VALUE == -1
855 && GET_CODE (op) == ASHIFTRT
856 && CONST_INT_P (XEXP (op, 1))
857 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
858 return simplify_gen_relational (GE, mode, VOIDmode,
859 XEXP (op, 0), const0_rtx);
862 if (GET_CODE (op) == SUBREG
863 && subreg_lowpart_p (op)
864 && (GET_MODE_SIZE (GET_MODE (op))
865 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
866 && GET_CODE (SUBREG_REG (op)) == ASHIFT
867 && XEXP (SUBREG_REG (op), 0) == const1_rtx)
869 enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
870 rtx x;
872 x = gen_rtx_ROTATE (inner_mode,
873 simplify_gen_unary (NOT, inner_mode, const1_rtx,
874 inner_mode),
875 XEXP (SUBREG_REG (op), 1));
876 temp = rtl_hooks.gen_lowpart_no_emit (mode, x);
877 if (temp)
878 return temp;
881 /* Apply De Morgan's laws to reduce number of patterns for machines
882 with negating logical insns (and-not, nand, etc.). If result has
883 only one NOT, put it first, since that is how the patterns are
884 coded. */
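/* For instance, (not (and X Y)) becomes (ior (not X) (not Y)) and
   (not (ior X Y)) becomes (and (not X) (not Y)).  */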
886 if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
888 rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
889 enum machine_mode op_mode;
891 op_mode = GET_MODE (in1);
892 in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);
894 op_mode = GET_MODE (in2);
895 if (op_mode == VOIDmode)
896 op_mode = mode;
897 in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);
899 if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
901 rtx tem = in2;
902 in2 = in1; in1 = tem;
905 return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
906 mode, in1, in2);
908 break;
910 case NEG:
911 /* (neg (neg X)) == X. */
912 if (GET_CODE (op) == NEG)
913 return XEXP (op, 0);
915 /* (neg (plus X 1)) can become (not X). */
916 if (GET_CODE (op) == PLUS
917 && XEXP (op, 1) == const1_rtx)
918 return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
920 /* Similarly, (neg (not X)) is (plus X 1). */
921 if (GET_CODE (op) == NOT)
922 return plus_constant (mode, XEXP (op, 0), 1);
924 /* (neg (minus X Y)) can become (minus Y X). This transformation
925 isn't safe for modes with signed zeros, since if X and Y are
926 both +0, (minus Y X) is the same as (minus X Y). If the
927 rounding mode is towards +infinity (or -infinity) then the two
928 expressions will be rounded differently. */
929 if (GET_CODE (op) == MINUS
930 && !HONOR_SIGNED_ZEROS (mode)
931 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
932 return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
934 if (GET_CODE (op) == PLUS
935 && !HONOR_SIGNED_ZEROS (mode)
936 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
938 /* (neg (plus A C)) is simplified to (minus -C A). */
939 if (CONST_SCALAR_INT_P (XEXP (op, 1))
940 || CONST_DOUBLE_AS_FLOAT_P (XEXP (op, 1)))
942 temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
943 if (temp)
944 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
947 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
948 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
949 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
952 /* (neg (mult A B)) becomes (mult A (neg B)).
953 This works even for floating-point values. */
954 if (GET_CODE (op) == MULT
955 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
957 temp = simplify_gen_unary (NEG, mode, XEXP (op, 1), mode);
958 return simplify_gen_binary (MULT, mode, XEXP (op, 0), temp);
961 /* NEG commutes with ASHIFT since it is multiplication. Only do
962 this if we can then eliminate the NEG (e.g., if the operand
963 is a constant). */
964 if (GET_CODE (op) == ASHIFT)
966 temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
967 if (temp)
968 return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
971 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
972 C is equal to the width of MODE minus 1. */
973 if (GET_CODE (op) == ASHIFTRT
974 && CONST_INT_P (XEXP (op, 1))
975 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
976 return simplify_gen_binary (LSHIFTRT, mode,
977 XEXP (op, 0), XEXP (op, 1));
979 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
980 C is equal to the width of MODE minus 1. */
981 if (GET_CODE (op) == LSHIFTRT
982 && CONST_INT_P (XEXP (op, 1))
983 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
984 return simplify_gen_binary (ASHIFTRT, mode,
985 XEXP (op, 0), XEXP (op, 1));
987 /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
988 if (GET_CODE (op) == XOR
989 && XEXP (op, 1) == const1_rtx
990 && nonzero_bits (XEXP (op, 0), mode) == 1)
991 return plus_constant (mode, XEXP (op, 0), -1);
993 /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1. */
994 /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1. */
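/* In both cases the comparison result is nonzero exactly when X is
   negative, so the negation is -1-or-0 when STORE_FLAG_VALUE is 1 and
   1-or-0 when it is -1; an arithmetic or logical right shift by the
   precision minus 1 reproduces those values directly.  */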
995 if (GET_CODE (op) == LT
996 && XEXP (op, 1) == const0_rtx
997 && SCALAR_INT_MODE_P (GET_MODE (XEXP (op, 0))))
999 enum machine_mode inner = GET_MODE (XEXP (op, 0));
1000 int isize = GET_MODE_PRECISION (inner);
1001 if (STORE_FLAG_VALUE == 1)
1003 temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
1004 GEN_INT (isize - 1));
1005 if (mode == inner)
1006 return temp;
1007 if (GET_MODE_PRECISION (mode) > isize)
1008 return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
1009 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
1011 else if (STORE_FLAG_VALUE == -1)
1013 temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
1014 GEN_INT (isize - 1));
1015 if (mode == inner)
1016 return temp;
1017 if (GET_MODE_PRECISION (mode) > isize)
1018 return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
1019 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
1022 break;
1024 case TRUNCATE:
1025 /* Don't optimize (lshiftrt (mult ...)) as it would interfere
1026 with the umulXi3_highpart patterns. */
1027 if (GET_CODE (op) == LSHIFTRT
1028 && GET_CODE (XEXP (op, 0)) == MULT)
1029 break;
1031 if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
1033 if (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op)))
1035 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1036 if (temp)
1037 return temp;
1039 /* We can't handle truncation to a partial integer mode here
1040 because we don't know the real bitsize of the partial
1041 integer mode. */
1042 break;
1045 if (GET_MODE (op) != VOIDmode)
1047 temp = simplify_truncation (mode, op, GET_MODE (op));
1048 if (temp)
1049 return temp;
1052 /* If we know that the value is already truncated, we can
1053 replace the TRUNCATE with a SUBREG. */
1054 if (GET_MODE_NUNITS (mode) == 1
1055 && (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op))
1056 || truncated_to_mode (mode, op)))
1058 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1059 if (temp)
1060 return temp;
1063 /* A truncate of a comparison can be replaced with a subreg if
1064 STORE_FLAG_VALUE permits. This is like the previous test,
1065 but it works even if the comparison is done in a mode larger
1066 than HOST_BITS_PER_WIDE_INT. */
1067 if (HWI_COMPUTABLE_MODE_P (mode)
1068 && COMPARISON_P (op)
1069 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
1071 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1072 if (temp)
1073 return temp;
1076 /* A truncate of a memory is just loading the low part of the memory
1077 if we are not changing the meaning of the address. */
1078 if (GET_CODE (op) == MEM
1079 && !VECTOR_MODE_P (mode)
1080 && !MEM_VOLATILE_P (op)
1081 && !mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op)))
1083 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1084 if (temp)
1085 return temp;
1088 break;
1090 case FLOAT_TRUNCATE:
1091 if (DECIMAL_FLOAT_MODE_P (mode))
1092 break;
1094 /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */
1095 if (GET_CODE (op) == FLOAT_EXTEND
1096 && GET_MODE (XEXP (op, 0)) == mode)
1097 return XEXP (op, 0);
1099 /* (float_truncate:SF (float_truncate:DF foo:XF))
1100 = (float_truncate:SF foo:XF).
1101 This may eliminate double rounding, so it is unsafe.
1103 (float_truncate:SF (float_extend:XF foo:DF))
1104 = (float_truncate:SF foo:DF).
1106 (float_truncate:DF (float_extend:XF foo:SF))
1107 = (float_extend:DF foo:SF). */
1108 if ((GET_CODE (op) == FLOAT_TRUNCATE
1109 && flag_unsafe_math_optimizations)
1110 || GET_CODE (op) == FLOAT_EXTEND)
1111 return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op,
1112 0)))
1113 > GET_MODE_SIZE (mode)
1114 ? FLOAT_TRUNCATE : FLOAT_EXTEND,
1115 mode,
1116 XEXP (op, 0), mode);
1118 /* (float_truncate (float x)) is (float x) */
1119 if (GET_CODE (op) == FLOAT
1120 && (flag_unsafe_math_optimizations
1121 || (SCALAR_FLOAT_MODE_P (GET_MODE (op))
1122 && ((unsigned)significand_size (GET_MODE (op))
1123 >= (GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))
1124 - num_sign_bit_copies (XEXP (op, 0),
1125 GET_MODE (XEXP (op, 0))))))))
1126 return simplify_gen_unary (FLOAT, mode,
1127 XEXP (op, 0),
1128 GET_MODE (XEXP (op, 0)));
1130 /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
1131 (OP:SF foo:SF) if OP is NEG or ABS. */
1132 if ((GET_CODE (op) == ABS
1133 || GET_CODE (op) == NEG)
1134 && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
1135 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
1136 return simplify_gen_unary (GET_CODE (op), mode,
1137 XEXP (XEXP (op, 0), 0), mode);
1139 /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
1140 is (float_truncate:SF x). */
1141 if (GET_CODE (op) == SUBREG
1142 && subreg_lowpart_p (op)
1143 && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
1144 return SUBREG_REG (op);
1145 break;
1147 case FLOAT_EXTEND:
1148 if (DECIMAL_FLOAT_MODE_P (mode))
1149 break;
1151 /* (float_extend (float_extend x)) is (float_extend x)
1153 (float_extend (float x)) is (float x) assuming that double
1154 rounding can't happen. */
1156 if (GET_CODE (op) == FLOAT_EXTEND
1157 || (GET_CODE (op) == FLOAT
1158 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1159 && ((unsigned)significand_size (GET_MODE (op))
1160 >= (GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))
1161 - num_sign_bit_copies (XEXP (op, 0),
1162 GET_MODE (XEXP (op, 0)))))))
1163 return simplify_gen_unary (GET_CODE (op), mode,
1164 XEXP (op, 0),
1165 GET_MODE (XEXP (op, 0)));
1167 break;
1169 case ABS:
1170 /* (abs (neg <foo>)) -> (abs <foo>) */
1171 if (GET_CODE (op) == NEG)
1172 return simplify_gen_unary (ABS, mode, XEXP (op, 0),
1173 GET_MODE (XEXP (op, 0)));
1175 /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
1176 do nothing. */
1177 if (GET_MODE (op) == VOIDmode)
1178 break;
1180 /* If operand is something known to be positive, ignore the ABS. */
1181 if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
1182 || val_signbit_known_clear_p (GET_MODE (op),
1183 nonzero_bits (op, GET_MODE (op))))
1184 return op;
1186 /* If operand is known to be only -1 or 0, convert ABS to NEG. */
1187 if (num_sign_bit_copies (op, mode) == GET_MODE_PRECISION (mode))
1188 return gen_rtx_NEG (mode, op);
1190 break;
1192 case FFS:
1193 /* (ffs (*_extend <X>)) = (ffs <X>) */
1194 if (GET_CODE (op) == SIGN_EXTEND
1195 || GET_CODE (op) == ZERO_EXTEND)
1196 return simplify_gen_unary (FFS, mode, XEXP (op, 0),
1197 GET_MODE (XEXP (op, 0)));
1198 break;
1200 case POPCOUNT:
1201 switch (GET_CODE (op))
1203 case BSWAP:
1204 case ZERO_EXTEND:
1205 /* (popcount (zero_extend <X>)) = (popcount <X>) */
1206 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1207 GET_MODE (XEXP (op, 0)));
1209 case ROTATE:
1210 case ROTATERT:
1211 /* Rotations don't affect popcount. */
1212 if (!side_effects_p (XEXP (op, 1)))
1213 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1214 GET_MODE (XEXP (op, 0)));
1215 break;
1217 default:
1218 break;
1220 break;
1222 case PARITY:
1223 switch (GET_CODE (op))
1225 case NOT:
1226 case BSWAP:
1227 case ZERO_EXTEND:
1228 case SIGN_EXTEND:
1229 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1230 GET_MODE (XEXP (op, 0)));
1232 case ROTATE:
1233 case ROTATERT:
1234 /* Rotations don't affect parity. */
1235 if (!side_effects_p (XEXP (op, 1)))
1236 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1237 GET_MODE (XEXP (op, 0)));
1238 break;
1240 default:
1241 break;
1243 break;
1245 case BSWAP:
1246 /* (bswap (bswap x)) -> x. */
1247 if (GET_CODE (op) == BSWAP)
1248 return XEXP (op, 0);
1249 break;
1251 case FLOAT:
1252 /* (float (sign_extend <X>)) = (float <X>). */
1253 if (GET_CODE (op) == SIGN_EXTEND)
1254 return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
1255 GET_MODE (XEXP (op, 0)));
1256 break;
1258 case SIGN_EXTEND:
1259 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
1260 becomes just the MINUS if its mode is MODE. This allows
1261 folding switch statements on machines using casesi (such as
1262 the VAX). */
1263 if (GET_CODE (op) == TRUNCATE
1264 && GET_MODE (XEXP (op, 0)) == mode
1265 && GET_CODE (XEXP (op, 0)) == MINUS
1266 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
1267 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
1268 return XEXP (op, 0);
1270 /* Extending a widening multiplication should be canonicalized to
1271 a wider widening multiplication. */
1272 if (GET_CODE (op) == MULT)
1274 rtx lhs = XEXP (op, 0);
1275 rtx rhs = XEXP (op, 1);
1276 enum rtx_code lcode = GET_CODE (lhs);
1277 enum rtx_code rcode = GET_CODE (rhs);
1279 /* Widening multiplies usually extend both operands, but sometimes
1280 they use a shift to extract a portion of a register. */
1281 if ((lcode == SIGN_EXTEND
1282 || (lcode == ASHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1283 && (rcode == SIGN_EXTEND
1284 || (rcode == ASHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1286 enum machine_mode lmode = GET_MODE (lhs);
1287 enum machine_mode rmode = GET_MODE (rhs);
1288 int bits;
1290 if (lcode == ASHIFTRT)
1291 /* Number of bits not shifted off the end. */
1292 bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
1293 else /* lcode == SIGN_EXTEND */
1294 /* Size of inner mode. */
1295 bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
1297 if (rcode == ASHIFTRT)
1298 bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
1299 else /* rcode == SIGN_EXTEND */
1300 bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
1302 /* We can only widen multiplies if the result is mathematically
1303 equivalent. I.e. if overflow was impossible. */
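/* For example, assuming 8-bit QImode and 16-bit HImode,
   (sign_extend:SI (mult:HI (sign_extend:HI x:QI) (sign_extend:HI y:QI)))
   has bits = 8 + 8 = 16, which does not exceed the HImode precision,
   so it becomes (mult:SI (sign_extend:SI x:QI) (sign_extend:SI y:QI)).  */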
1304 if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
1305 return simplify_gen_binary
1306 (MULT, mode,
1307 simplify_gen_unary (SIGN_EXTEND, mode, lhs, lmode),
1308 simplify_gen_unary (SIGN_EXTEND, mode, rhs, rmode));
1312 /* Check for a sign extension of a subreg of a promoted
1313 variable, where the promotion is sign-extended, and the
1314 target mode is the same as the variable's promotion. */
1315 if (GET_CODE (op) == SUBREG
1316 && SUBREG_PROMOTED_VAR_P (op)
1317 && ! SUBREG_PROMOTED_UNSIGNED_P (op)
1318 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
1320 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1321 if (temp)
1322 return temp;
1325 /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
1326 (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1327 if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
1329 gcc_assert (GET_MODE_BITSIZE (mode)
1330 > GET_MODE_BITSIZE (GET_MODE (op)));
1331 return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
1332 GET_MODE (XEXP (op, 0)));
1335 /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
1336 is (sign_extend:M (subreg:O <X>)) if there is a mode with
1337 GET_MODE_BITSIZE (N) - I bits.
1338 (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1339 is similarly (zero_extend:M (subreg:O <X>)). */
1340 if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT)
1341 && GET_CODE (XEXP (op, 0)) == ASHIFT
1342 && CONST_INT_P (XEXP (op, 1))
1343 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1344 && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
1346 enum machine_mode tmode
1347 = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
1348 - INTVAL (XEXP (op, 1)), MODE_INT, 1);
1349 gcc_assert (GET_MODE_BITSIZE (mode)
1350 > GET_MODE_BITSIZE (GET_MODE (op)));
1351 if (tmode != BLKmode)
1353 rtx inner =
1354 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1355 if (inner)
1356 return simplify_gen_unary (GET_CODE (op) == ASHIFTRT
1357 ? SIGN_EXTEND : ZERO_EXTEND,
1358 mode, inner, tmode);
1362 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1363 /* As we do not know which address space the pointer is referring to,
1364 we can do this only if the target does not support different pointer
1365 or address modes depending on the address space. */
1366 if (target_default_pointer_address_modes_p ()
1367 && ! POINTERS_EXTEND_UNSIGNED
1368 && mode == Pmode && GET_MODE (op) == ptr_mode
1369 && (CONSTANT_P (op)
1370 || (GET_CODE (op) == SUBREG
1371 && REG_P (SUBREG_REG (op))
1372 && REG_POINTER (SUBREG_REG (op))
1373 && GET_MODE (SUBREG_REG (op)) == Pmode)))
1374 return convert_memory_address (Pmode, op);
1375 #endif
1376 break;
1378 case ZERO_EXTEND:
1379 /* Check for a zero extension of a subreg of a promoted
1380 variable, where the promotion is zero-extended, and the
1381 target mode is the same as the variable's promotion. */
1382 if (GET_CODE (op) == SUBREG
1383 && SUBREG_PROMOTED_VAR_P (op)
1384 && SUBREG_PROMOTED_UNSIGNED_P (op) > 0
1385 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
1387 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1388 if (temp)
1389 return temp;
1392 /* Extending a widening multiplication should be canonicalized to
1393 a wider widening multiplication. */
1394 if (GET_CODE (op) == MULT)
1396 rtx lhs = XEXP (op, 0);
1397 rtx rhs = XEXP (op, 1);
1398 enum rtx_code lcode = GET_CODE (lhs);
1399 enum rtx_code rcode = GET_CODE (rhs);
1401 /* Widening multiplies usually extend both operands, but sometimes
1402 they use a shift to extract a portion of a register. */
1403 if ((lcode == ZERO_EXTEND
1404 || (lcode == LSHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1405 && (rcode == ZERO_EXTEND
1406 || (rcode == LSHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1408 enum machine_mode lmode = GET_MODE (lhs);
1409 enum machine_mode rmode = GET_MODE (rhs);
1410 int bits;
1412 if (lcode == LSHIFTRT)
1413 /* Number of bits not shifted off the end. */
1414 bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
1415 else /* lcode == ZERO_EXTEND */
1416 /* Size of inner mode. */
1417 bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
1419 if (rcode == LSHIFTRT)
1420 bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
1421 else /* rcode == ZERO_EXTEND */
1422 bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
1424 /* We can only widen multiplies if the result is mathematically
1425 equivalent. I.e. if overflow was impossible. */
1426 if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
1427 return simplify_gen_binary
1428 (MULT, mode,
1429 simplify_gen_unary (ZERO_EXTEND, mode, lhs, lmode),
1430 simplify_gen_unary (ZERO_EXTEND, mode, rhs, rmode));
1434 /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1435 if (GET_CODE (op) == ZERO_EXTEND)
1436 return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0),
1437 GET_MODE (XEXP (op, 0)));
1439 /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1440 is (zero_extend:M (subreg:O <X>)) if there is a mode with
1441 GET_MODE_BITSIZE (N) - I bits. */
1442 if (GET_CODE (op) == LSHIFTRT
1443 && GET_CODE (XEXP (op, 0)) == ASHIFT
1444 && CONST_INT_P (XEXP (op, 1))
1445 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1446 && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
1448 enum machine_mode tmode
1449 = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
1450 - INTVAL (XEXP (op, 1)), MODE_INT, 1);
1451 if (tmode != BLKmode)
1453 rtx inner =
1454 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1455 if (inner)
1456 return simplify_gen_unary (ZERO_EXTEND, mode, inner, tmode);
1460 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1461 /* As we do not know which address space the pointer is referring to,
1462 we can do this only if the target does not support different pointer
1463 or address modes depending on the address space. */
1464 if (target_default_pointer_address_modes_p ()
1465 && POINTERS_EXTEND_UNSIGNED > 0
1466 && mode == Pmode && GET_MODE (op) == ptr_mode
1467 && (CONSTANT_P (op)
1468 || (GET_CODE (op) == SUBREG
1469 && REG_P (SUBREG_REG (op))
1470 && REG_POINTER (SUBREG_REG (op))
1471 && GET_MODE (SUBREG_REG (op)) == Pmode)))
1472 return convert_memory_address (Pmode, op);
1473 #endif
1474 break;
1476 default:
1477 break;
1480 return 0;
1483 /* Try to compute the value of a unary operation CODE whose output mode is to
1484 be MODE with input operand OP whose mode was originally OP_MODE.
1485 Return zero if the value cannot be computed. */
rtx
1487 simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
1488 rtx op, enum machine_mode op_mode)
1490 unsigned int width = GET_MODE_PRECISION (mode);
1491 unsigned int op_width = GET_MODE_PRECISION (op_mode);
1493 if (code == VEC_DUPLICATE)
1495 gcc_assert (VECTOR_MODE_P (mode));
1496 if (GET_MODE (op) != VOIDmode)
1498 if (!VECTOR_MODE_P (GET_MODE (op)))
1499 gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
1500 else
1501 gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
1502 (GET_MODE (op)));
1504 if (CONST_SCALAR_INT_P (op) || CONST_DOUBLE_AS_FLOAT_P (op)
1505 || GET_CODE (op) == CONST_VECTOR)
1507 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1508 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1509 rtvec v = rtvec_alloc (n_elts);
1510 unsigned int i;
1512 if (GET_CODE (op) != CONST_VECTOR)
1513 for (i = 0; i < n_elts; i++)
1514 RTVEC_ELT (v, i) = op;
1515 else
1517 enum machine_mode inmode = GET_MODE (op);
1518 int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
1519 unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
1521 gcc_assert (in_n_elts < n_elts);
1522 gcc_assert ((n_elts % in_n_elts) == 0);
1523 for (i = 0; i < n_elts; i++)
1524 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
1526 return gen_rtx_CONST_VECTOR (mode, v);
1530 if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
1532 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1533 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1534 enum machine_mode opmode = GET_MODE (op);
1535 int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
1536 unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
1537 rtvec v = rtvec_alloc (n_elts);
1538 unsigned int i;
1540 gcc_assert (op_n_elts == n_elts);
1541 for (i = 0; i < n_elts; i++)
1543 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
1544 CONST_VECTOR_ELT (op, i),
1545 GET_MODE_INNER (opmode));
1546 if (!x)
1547 return 0;
1548 RTVEC_ELT (v, i) = x;
1550 return gen_rtx_CONST_VECTOR (mode, v);
1553 /* The order of these tests is critical so that, for example, we don't
1554 check the wrong mode (input vs. output) for a conversion operation,
1555 such as FIX. At some point, this should be simplified. */
1557 if (code == FLOAT && CONST_SCALAR_INT_P (op))
1559 HOST_WIDE_INT hv, lv;
1560 REAL_VALUE_TYPE d;
1562 if (CONST_INT_P (op))
1563 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
1564 else
1565 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
1567 REAL_VALUE_FROM_INT (d, lv, hv, mode);
1568 d = real_value_truncate (mode, d);
1569 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1571 else if (code == UNSIGNED_FLOAT && CONST_SCALAR_INT_P (op))
1573 HOST_WIDE_INT hv, lv;
1574 REAL_VALUE_TYPE d;
1576 if (CONST_INT_P (op))
1577 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
1578 else
1579 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
1581 if (op_mode == VOIDmode
1582 || GET_MODE_PRECISION (op_mode) > HOST_BITS_PER_DOUBLE_INT)
1583 /* We should never get a negative number. */
1584 gcc_assert (hv >= 0);
1585 else if (GET_MODE_PRECISION (op_mode) <= HOST_BITS_PER_WIDE_INT)
1586 hv = 0, lv &= GET_MODE_MASK (op_mode);
1588 REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
1589 d = real_value_truncate (mode, d);
1590 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1593 if (CONST_INT_P (op)
1594 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
1596 HOST_WIDE_INT arg0 = INTVAL (op);
1597 HOST_WIDE_INT val;
1599 switch (code)
1601 case NOT:
1602 val = ~ arg0;
1603 break;
1605 case NEG:
1606 val = - arg0;
1607 break;
1609 case ABS:
1610 val = (arg0 >= 0 ? arg0 : - arg0);
1611 break;
1613 case FFS:
1614 arg0 &= GET_MODE_MASK (mode);
1615 val = ffs_hwi (arg0);
1616 break;
1618 case CLZ:
1619 arg0 &= GET_MODE_MASK (mode);
1620 if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
1622 else
1623 val = GET_MODE_PRECISION (mode) - floor_log2 (arg0) - 1;
1624 break;
1626 case CLRSB:
1627 arg0 &= GET_MODE_MASK (mode);
1628 if (arg0 == 0)
1629 val = GET_MODE_PRECISION (mode) - 1;
1630 else if (arg0 >= 0)
1631 val = GET_MODE_PRECISION (mode) - floor_log2 (arg0) - 2;
1632 else if (arg0 < 0)
1633 val = GET_MODE_PRECISION (mode) - floor_log2 (~arg0) - 2;
1634 break;
1636 case CTZ:
1637 arg0 &= GET_MODE_MASK (mode);
1638 if (arg0 == 0)
1640 /* Even if the value at zero is undefined, we have to come
1641 up with some replacement. Seems good enough. */
1642 if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
1643 val = GET_MODE_PRECISION (mode);
1645 else
1646 val = ctz_hwi (arg0);
1647 break;
1649 case POPCOUNT:
1650 arg0 &= GET_MODE_MASK (mode);
1651 val = 0;
1652 while (arg0)
1653 val++, arg0 &= arg0 - 1;
1654 break;
1656 case PARITY:
1657 arg0 &= GET_MODE_MASK (mode);
1658 val = 0;
1659 while (arg0)
1660 val++, arg0 &= arg0 - 1;
1661 val &= 1;
1662 break;
1664 case BSWAP:
1666 unsigned int s;
1668 val = 0;
1669 for (s = 0; s < width; s += 8)
1671 unsigned int d = width - s - 8;
1672 unsigned HOST_WIDE_INT byte;
1673 byte = (arg0 >> s) & 0xff;
1674 val |= byte << d;
1677 break;
1679 case TRUNCATE:
1680 val = arg0;
1681 break;
1683 case ZERO_EXTEND:
1684 /* When zero-extending a CONST_INT, we need to know its
1685 original mode. */
1686 gcc_assert (op_mode != VOIDmode);
1687 if (op_width == HOST_BITS_PER_WIDE_INT)
1689 /* If we were really extending the mode,
1690 we would have to distinguish between zero-extension
1691 and sign-extension. */
1692 gcc_assert (width == op_width);
1693 val = arg0;
1695 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
1696 val = arg0 & GET_MODE_MASK (op_mode);
1697 else
1698 return 0;
1699 break;
1701 case SIGN_EXTEND:
1702 if (op_mode == VOIDmode)
1703 op_mode = mode;
1704 op_width = GET_MODE_PRECISION (op_mode);
1705 if (op_width == HOST_BITS_PER_WIDE_INT)
1707 /* If we were really extending the mode,
1708 we would have to distinguish between zero-extension
1709 and sign-extension. */
1710 gcc_assert (width == op_width);
1711 val = arg0;
1713 else if (op_width < HOST_BITS_PER_WIDE_INT)
1715 val = arg0 & GET_MODE_MASK (op_mode);
1716 if (val_signbit_known_set_p (op_mode, val))
1717 val |= ~GET_MODE_MASK (op_mode);
1719 else
1720 return 0;
1721 break;
1723 case SQRT:
1724 case FLOAT_EXTEND:
1725 case FLOAT_TRUNCATE:
1726 case SS_TRUNCATE:
1727 case US_TRUNCATE:
1728 case SS_NEG:
1729 case US_NEG:
1730 case SS_ABS:
1731 return 0;
1733 default:
1734 gcc_unreachable ();
1737 return gen_int_mode (val, mode);
1740 /* We can do some operations on integer CONST_DOUBLEs. Also allow
1741 for a DImode operation on a CONST_INT. */
1742 else if (width <= HOST_BITS_PER_DOUBLE_INT
1743 && (CONST_DOUBLE_AS_INT_P (op) || CONST_INT_P (op)))
1745 double_int first, value;
1747 if (CONST_DOUBLE_AS_INT_P (op))
1748 first = double_int::from_pair (CONST_DOUBLE_HIGH (op),
1749 CONST_DOUBLE_LOW (op));
1750 else
1751 first = double_int::from_shwi (INTVAL (op));
1753 switch (code)
1755 case NOT:
1756 value = ~first;
1757 break;
1759 case NEG:
1760 value = -first;
1761 break;
1763 case ABS:
1764 if (first.is_negative ())
1765 value = -first;
1766 else
1767 value = first;
1768 break;
1770 case FFS:
1771 value.high = 0;
1772 if (first.low != 0)
1773 value.low = ffs_hwi (first.low);
1774 else if (first.high != 0)
1775 value.low = HOST_BITS_PER_WIDE_INT + ffs_hwi (first.high);
1776 else
1777 value.low = 0;
1778 break;
1780 case CLZ:
1781 value.high = 0;
1782 if (first.high != 0)
1783 value.low = GET_MODE_PRECISION (mode) - floor_log2 (first.high) - 1
1784 - HOST_BITS_PER_WIDE_INT;
1785 else if (first.low != 0)
1786 value.low = GET_MODE_PRECISION (mode) - floor_log2 (first.low) - 1;
1787 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, value.low))
1788 value.low = GET_MODE_PRECISION (mode);
1789 break;
1791 case CTZ:
1792 value.high = 0;
1793 if (first.low != 0)
1794 value.low = ctz_hwi (first.low);
1795 else if (first.high != 0)
1796 value.low = HOST_BITS_PER_WIDE_INT + ctz_hwi (first.high);
1797 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, value.low))
1798 value.low = GET_MODE_PRECISION (mode);
1799 break;
1801 case POPCOUNT:
1802 value = double_int_zero;
1803 while (first.low)
1805 value.low++;
1806 first.low &= first.low - 1;
1808 while (first.high)
1810 value.low++;
1811 first.high &= first.high - 1;
1813 break;
1815 case PARITY:
1816 value = double_int_zero;
1817 while (first.low)
1819 value.low++;
1820 first.low &= first.low - 1;
1822 while (first.high)
1824 value.low++;
1825 first.high &= first.high - 1;
1827 value.low &= 1;
1828 break;
1830 case BSWAP:
1832 unsigned int s;
1834 value = double_int_zero;
1835 for (s = 0; s < width; s += 8)
1837 unsigned int d = width - s - 8;
1838 unsigned HOST_WIDE_INT byte;
1840 if (s < HOST_BITS_PER_WIDE_INT)
1841 byte = (first.low >> s) & 0xff;
1842 else
1843 byte = (first.high >> (s - HOST_BITS_PER_WIDE_INT)) & 0xff;
1845 if (d < HOST_BITS_PER_WIDE_INT)
1846 value.low |= byte << d;
1847 else
1848 value.high |= byte << (d - HOST_BITS_PER_WIDE_INT);
1851 break;
1853 case TRUNCATE:
1854 /* This is just a change-of-mode, so do nothing. */
1855 value = first;
1856 break;
1858 case ZERO_EXTEND:
1859 gcc_assert (op_mode != VOIDmode);
1861 if (op_width > HOST_BITS_PER_WIDE_INT)
1862 return 0;
1864 value = double_int::from_uhwi (first.low & GET_MODE_MASK (op_mode));
1865 break;
1867 case SIGN_EXTEND:
1868 if (op_mode == VOIDmode
1869 || op_width > HOST_BITS_PER_WIDE_INT)
1870 return 0;
1871 else
1873 value.low = first.low & GET_MODE_MASK (op_mode);
1874 if (val_signbit_known_set_p (op_mode, value.low))
1875 value.low |= ~GET_MODE_MASK (op_mode);
1877 value.high = HWI_SIGN_EXTEND (value.low);
1879 break;
1881 case SQRT:
1882 return 0;
1884 default:
1885 return 0;
1888 return immed_double_int_const (value, mode);
1891 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1892 && SCALAR_FLOAT_MODE_P (mode)
1893 && SCALAR_FLOAT_MODE_P (GET_MODE (op)))
1895 REAL_VALUE_TYPE d, t;
1896 REAL_VALUE_FROM_CONST_DOUBLE (d, op);
1898 switch (code)
1900 case SQRT:
1901 if (HONOR_SNANS (mode) && real_isnan (&d))
1902 return 0;
1903 real_sqrt (&t, mode, &d);
1904 d = t;
1905 break;
1906 case ABS:
1907 d = real_value_abs (&d);
1908 break;
1909 case NEG:
1910 d = real_value_negate (&d);
1911 break;
1912 case FLOAT_TRUNCATE:
1913 d = real_value_truncate (mode, d);
1914 break;
1915 case FLOAT_EXTEND:
1916 /* All this does is change the mode, unless the
1917 mode class changes. */
1918 if (GET_MODE_CLASS (mode) != GET_MODE_CLASS (GET_MODE (op)))
1919 real_convert (&d, mode, &d);
1920 break;
1921 case FIX:
1922 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
1923 break;
1924 case NOT:
1926 long tmp[4];
1927 int i;
1929 real_to_target (tmp, &d, GET_MODE (op));
1930 for (i = 0; i < 4; i++)
1931 tmp[i] = ~tmp[i];
1932 real_from_target (&d, tmp, mode);
1933 break;
1935 default:
1936 gcc_unreachable ();
1938 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1941 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1942 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1943 && GET_MODE_CLASS (mode) == MODE_INT
1944 && width <= HOST_BITS_PER_DOUBLE_INT && width > 0)
1946 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1947 operators are intentionally left unspecified (to ease implementation
1948 by target backends), for consistency, this routine implements the
1949 same semantics for constant folding as used by the middle-end. */
1951 /* This was formerly used only for non-IEEE float.
1952 eggert@twinsun.com says it is safe for IEEE also. */
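/* For instance (illustrative, assuming 32-bit SImode): folding
   (fix:SI (const_double 1.0e10)) clamps the out-of-range value to the
   signed maximum 0x7fffffff, and (unsigned_fix:SI (const_double -1.0))
   folds to 0, matching the bound checks below. */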
1953 HOST_WIDE_INT xh, xl, th, tl;
1954 REAL_VALUE_TYPE x, t;
1955 REAL_VALUE_FROM_CONST_DOUBLE (x, op);
1956 switch (code)
1958 case FIX:
1959 if (REAL_VALUE_ISNAN (x))
1960 return const0_rtx;
1962 /* Test against the signed upper bound. */
1963 if (width > HOST_BITS_PER_WIDE_INT)
1965 th = ((unsigned HOST_WIDE_INT) 1
1966 << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
1967 tl = -1;
1969 else
1971 th = 0;
1972 tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
1974 real_from_integer (&t, VOIDmode, tl, th, 0);
1975 if (REAL_VALUES_LESS (t, x))
1977 xh = th;
1978 xl = tl;
1979 break;
1982 /* Test against the signed lower bound. */
1983 if (width > HOST_BITS_PER_WIDE_INT)
1985 th = (unsigned HOST_WIDE_INT) (-1)
1986 << (width - HOST_BITS_PER_WIDE_INT - 1);
1987 tl = 0;
1989 else
1991 th = -1;
1992 tl = (unsigned HOST_WIDE_INT) (-1) << (width - 1);
1994 real_from_integer (&t, VOIDmode, tl, th, 0);
1995 if (REAL_VALUES_LESS (x, t))
1997 xh = th;
1998 xl = tl;
1999 break;
2001 REAL_VALUE_TO_INT (&xl, &xh, x);
2002 break;
2004 case UNSIGNED_FIX:
2005 if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
2006 return const0_rtx;
2008 /* Test against the unsigned upper bound. */
2009 if (width == HOST_BITS_PER_DOUBLE_INT)
2011 th = -1;
2012 tl = -1;
2014 else if (width >= HOST_BITS_PER_WIDE_INT)
2016 th = ((unsigned HOST_WIDE_INT) 1
2017 << (width - HOST_BITS_PER_WIDE_INT)) - 1;
2018 tl = -1;
2020 else
2022 th = 0;
2023 tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
2025 real_from_integer (&t, VOIDmode, tl, th, 1);
2026 if (REAL_VALUES_LESS (t, x))
2028 xh = th;
2029 xl = tl;
2030 break;
2033 REAL_VALUE_TO_INT (&xl, &xh, x);
2034 break;
2036 default:
2037 gcc_unreachable ();
2039 return immed_double_const (xl, xh, mode);
2042 return NULL_RTX;
2045 /* Subroutine of simplify_binary_operation to simplify a commutative,
2046 associative binary operation CODE with result mode MODE, operating
2047 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
2048 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
2049 canonicalization is possible. */
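/* For example (illustrative): with CODE == PLUS this canonicalizes
   (plus (plus x (const_int 4)) y) into (plus (plus x y) (const_int 4)),
   moving the constant outermost so that later folding can combine it
   with other constants. */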
2051 static rtx
2052 simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
2053 rtx op0, rtx op1)
2055 rtx tem;
2057 /* Linearize the operator to the left. */
2058 if (GET_CODE (op1) == code)
2060 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)". */
2061 if (GET_CODE (op0) == code)
2063 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
2064 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
2067 /* "a op (b op c)" becomes "(b op c) op a". */
2068 if (! swap_commutative_operands_p (op1, op0))
2069 return simplify_gen_binary (code, mode, op1, op0);
2071 tem = op0;
2072 op0 = op1;
2073 op1 = tem;
2076 if (GET_CODE (op0) == code)
2078 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
2079 if (swap_commutative_operands_p (XEXP (op0, 1), op1))
2081 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
2082 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
2085 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
2086 tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
2087 if (tem != 0)
2088 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
2090 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
2091 tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
2092 if (tem != 0)
2093 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
2096 return 0;
2100 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
2101 and OP1. Return 0 if no simplification is possible.
2103 Don't use this for relational operations such as EQ or LT.
2104 Use simplify_relational_operation instead. */
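/* For example (illustrative): simplify_binary_operation (PLUS, SImode,
   x, const0_rtx) returns x, while a PLUS of two CONST_INTs is folded
   to a single CONST_INT by simplify_const_binary_operation below. */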
2105 rtx
2106 simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
2107 rtx op0, rtx op1)
2109 rtx trueop0, trueop1;
2110 rtx tem;
2112 /* Relational operations don't work here. We must know the mode
2113 of the operands in order to do the comparison correctly.
2114 Assuming a full word can give incorrect results.
2115 Consider comparing 128 with -128 in QImode. */
2116 gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
2117 gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
2119 /* Make sure the constant is second. */
2120 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
2121 && swap_commutative_operands_p (op0, op1))
2123 tem = op0, op0 = op1, op1 = tem;
2126 trueop0 = avoid_constant_pool_reference (op0);
2127 trueop1 = avoid_constant_pool_reference (op1);
2129 tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
2130 if (tem)
2131 return tem;
2132 return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
2135 /* Subroutine of simplify_binary_operation. Simplify a binary operation
2136 CODE with result mode MODE, operating on OP0 and OP1. If OP0 and/or
2137 OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
2138 actual constants. */
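/* Illustrative note: if OP1 is a MEM that refers to a constant pool
   entry, TRUEOP1 is the constant it contains, so the tests below can
   inspect the constant value while any returned expression keeps
   using OP1 itself. */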
2140 static rtx
2141 simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
2142 rtx op0, rtx op1, rtx trueop0, rtx trueop1)
2144 rtx tem, reversed, opleft, opright;
2145 HOST_WIDE_INT val;
2146 unsigned int width = GET_MODE_PRECISION (mode);
2148 /* Even if we can't compute a constant result,
2149 there are some cases worth simplifying. */
2151 switch (code)
2153 case PLUS:
2154 /* Maybe simplify x + 0 to x. The two expressions are equivalent
2155 when x is NaN, infinite, or finite and nonzero. They aren't
2156 when x is -0 and the rounding mode is not towards -infinity,
2157 since (-0) + 0 is then 0. */
2158 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
2159 return op0;
2161 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
2162 transformations are safe even for IEEE. */
2163 if (GET_CODE (op0) == NEG)
2164 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
2165 else if (GET_CODE (op1) == NEG)
2166 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
2168 /* (~a) + 1 -> -a */
2169 if (INTEGRAL_MODE_P (mode)
2170 && GET_CODE (op0) == NOT
2171 && trueop1 == const1_rtx)
2172 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
2174 /* Handle both-operands-constant cases. We can only add
2175 CONST_INTs to constants since the sum of relocatable symbols
2176 can't be handled by most assemblers. Don't add CONST_INT
2177 to CONST_INT since overflow won't be computed properly if wider
2178 than HOST_BITS_PER_WIDE_INT. */
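/* E.g. (plus (symbol_ref ("foo")) (const_int 8)) becomes
   (const (plus (symbol_ref ("foo")) (const_int 8))), which assemblers
   can emit as a relocation with an addend (illustrative). */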
2180 if ((GET_CODE (op0) == CONST
2181 || GET_CODE (op0) == SYMBOL_REF
2182 || GET_CODE (op0) == LABEL_REF)
2183 && CONST_INT_P (op1))
2184 return plus_constant (mode, op0, INTVAL (op1));
2185 else if ((GET_CODE (op1) == CONST
2186 || GET_CODE (op1) == SYMBOL_REF
2187 || GET_CODE (op1) == LABEL_REF)
2188 && CONST_INT_P (op0))
2189 return plus_constant (mode, op1, INTVAL (op0));
2191 /* See if this is something like X * C - X or vice versa or
2192 if the multiplication is written as a shift. If so, we can
2193 distribute and make a new multiply, shift, or maybe just
2194 have X (if C is 2 in the example above). But don't make
2195 something more expensive than we had before. */
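/* For instance (illustrative): (plus (mult x 3) x) has coefficients 3
   and 1 on the same operand and folds to (mult x 4), and
   (plus (ashift x 2) x) folds the same way to (mult x 5). The
   set_src_cost comparison below keeps the cheaper form. */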
2197 if (SCALAR_INT_MODE_P (mode))
2199 double_int coeff0, coeff1;
2200 rtx lhs = op0, rhs = op1;
2202 coeff0 = double_int_one;
2203 coeff1 = double_int_one;
2205 if (GET_CODE (lhs) == NEG)
2207 coeff0 = double_int_minus_one;
2208 lhs = XEXP (lhs, 0);
2210 else if (GET_CODE (lhs) == MULT
2211 && CONST_INT_P (XEXP (lhs, 1)))
2213 coeff0 = double_int::from_shwi (INTVAL (XEXP (lhs, 1)));
2214 lhs = XEXP (lhs, 0);
2216 else if (GET_CODE (lhs) == ASHIFT
2217 && CONST_INT_P (XEXP (lhs, 1))
2218 && INTVAL (XEXP (lhs, 1)) >= 0
2219 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
2221 coeff0 = double_int_zero.set_bit (INTVAL (XEXP (lhs, 1)));
2222 lhs = XEXP (lhs, 0);
2225 if (GET_CODE (rhs) == NEG)
2227 coeff1 = double_int_minus_one;
2228 rhs = XEXP (rhs, 0);
2230 else if (GET_CODE (rhs) == MULT
2231 && CONST_INT_P (XEXP (rhs, 1)))
2233 coeff1 = double_int::from_shwi (INTVAL (XEXP (rhs, 1)));
2234 rhs = XEXP (rhs, 0);
2236 else if (GET_CODE (rhs) == ASHIFT
2237 && CONST_INT_P (XEXP (rhs, 1))
2238 && INTVAL (XEXP (rhs, 1)) >= 0
2239 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
2241 coeff1 = double_int_zero.set_bit (INTVAL (XEXP (rhs, 1)));
2242 rhs = XEXP (rhs, 0);
2245 if (rtx_equal_p (lhs, rhs))
2247 rtx orig = gen_rtx_PLUS (mode, op0, op1);
2248 rtx coeff;
2249 double_int val;
2250 bool speed = optimize_function_for_speed_p (cfun);
2252 val = coeff0 + coeff1;
2253 coeff = immed_double_int_const (val, mode);
2255 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2256 return set_src_cost (tem, speed) <= set_src_cost (orig, speed)
2257 ? tem : 0;
2261 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
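/* E.g. in QImode (illustrative): (plus (xor x 0x40) 0x80) becomes
   (xor x 0xc0), since adding the sign-bit constant modulo 2**8 is the
   same as XORing it in, so the two masks combine. */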
2262 if (CONST_SCALAR_INT_P (op1)
2263 && GET_CODE (op0) == XOR
2264 && CONST_SCALAR_INT_P (XEXP (op0, 1))
2265 && mode_signbit_p (mode, op1))
2266 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2267 simplify_gen_binary (XOR, mode, op1,
2268 XEXP (op0, 1)));
2270 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
2271 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2272 && GET_CODE (op0) == MULT
2273 && GET_CODE (XEXP (op0, 0)) == NEG)
2275 rtx in1, in2;
2277 in1 = XEXP (XEXP (op0, 0), 0);
2278 in2 = XEXP (op0, 1);
2279 return simplify_gen_binary (MINUS, mode, op1,
2280 simplify_gen_binary (MULT, mode,
2281 in1, in2));
2284 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
2285 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
2286 is 1. */
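/* E.g. when STORE_FLAG_VALUE is 1 (illustrative):
   (plus (eq a b) (const_int -1)) folds to (neg (ne a b)), since EQ
   yields 1 or 0 and subtracting 1 gives 0 or -1, the negation of the
   reversed comparison. */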
2287 if (COMPARISON_P (op0)
2288 && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
2289 || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
2290 && (reversed = reversed_comparison (op0, mode)))
2291 return
2292 simplify_gen_unary (NEG, mode, reversed, mode);
2294 /* If one of the operands is a PLUS or a MINUS, see if we can
2295 simplify this by the associative law.
2296 Don't use the associative law for floating point.
2297 The inaccuracy makes it nonassociative,
2298 and subtle programs can break if operations are associated. */
2300 if (INTEGRAL_MODE_P (mode)
2301 && (plus_minus_operand_p (op0)
2302 || plus_minus_operand_p (op1))
2303 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2304 return tem;
2306 /* Reassociate floating point addition only when the user
2307 specifies associative math operations. */
2308 if (FLOAT_MODE_P (mode)
2309 && flag_associative_math)
2311 tem = simplify_associative_operation (code, mode, op0, op1);
2312 if (tem)
2313 return tem;
2315 break;
2317 case COMPARE:
2318 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
2319 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
2320 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
2321 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
2323 rtx xop00 = XEXP (op0, 0);
2324 rtx xop10 = XEXP (op1, 0);
2326 #ifdef HAVE_cc0
2327 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
2328 #else
2329 if (REG_P (xop00) && REG_P (xop10)
2330 && GET_MODE (xop00) == GET_MODE (xop10)
2331 && REGNO (xop00) == REGNO (xop10)
2332 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
2333 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
2334 #endif
2335 return xop00;
2337 break;
2339 case MINUS:
2340 /* We can't assume x-x is 0 even with non-IEEE floating point,
2341 but since it is zero except in very strange circumstances, we
2342 will treat it as zero with -ffinite-math-only. */
2343 if (rtx_equal_p (trueop0, trueop1)
2344 && ! side_effects_p (op0)
2345 && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
2346 return CONST0_RTX (mode);
2348 /* Change subtraction from zero into negation. (0 - x) is the
2349 same as -x when x is NaN, infinite, or finite and nonzero.
2350 But if the mode has signed zeros, and does not round towards
2351 -infinity, then 0 - 0 is 0, not -0. */
2352 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
2353 return simplify_gen_unary (NEG, mode, op1, mode);
2355 /* (-1 - a) is ~a. */
2356 if (trueop0 == constm1_rtx)
2357 return simplify_gen_unary (NOT, mode, op1, mode);
2359 /* Subtracting 0 has no effect unless the mode has signed zeros
2360 and supports rounding towards -infinity. In such a case,
2361 0 - 0 is -0. */
2362 if (!(HONOR_SIGNED_ZEROS (mode)
2363 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
2364 && trueop1 == CONST0_RTX (mode))
2365 return op0;
2367 /* See if this is something like X * C - X or vice versa or
2368 if the multiplication is written as a shift. If so, we can
2369 distribute and make a new multiply, shift, or maybe just
2370 have X (if C is 2 in the example above). But don't make
2371 something more expensive than we had before. */
2373 if (SCALAR_INT_MODE_P (mode))
2375 double_int coeff0, negcoeff1;
2376 rtx lhs = op0, rhs = op1;
2378 coeff0 = double_int_one;
2379 negcoeff1 = double_int_minus_one;
2381 if (GET_CODE (lhs) == NEG)
2383 coeff0 = double_int_minus_one;
2384 lhs = XEXP (lhs, 0);
2386 else if (GET_CODE (lhs) == MULT
2387 && CONST_INT_P (XEXP (lhs, 1)))
2389 coeff0 = double_int::from_shwi (INTVAL (XEXP (lhs, 1)));
2390 lhs = XEXP (lhs, 0);
2392 else if (GET_CODE (lhs) == ASHIFT
2393 && CONST_INT_P (XEXP (lhs, 1))
2394 && INTVAL (XEXP (lhs, 1)) >= 0
2395 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
2397 coeff0 = double_int_zero.set_bit (INTVAL (XEXP (lhs, 1)));
2398 lhs = XEXP (lhs, 0);
2401 if (GET_CODE (rhs) == NEG)
2403 negcoeff1 = double_int_one;
2404 rhs = XEXP (rhs, 0);
2406 else if (GET_CODE (rhs) == MULT
2407 && CONST_INT_P (XEXP (rhs, 1)))
2409 negcoeff1 = double_int::from_shwi (-INTVAL (XEXP (rhs, 1)));
2410 rhs = XEXP (rhs, 0);
2412 else if (GET_CODE (rhs) == ASHIFT
2413 && CONST_INT_P (XEXP (rhs, 1))
2414 && INTVAL (XEXP (rhs, 1)) >= 0
2415 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
2417 negcoeff1 = double_int_zero.set_bit (INTVAL (XEXP (rhs, 1)));
2418 negcoeff1 = -negcoeff1;
2419 rhs = XEXP (rhs, 0);
2422 if (rtx_equal_p (lhs, rhs))
2424 rtx orig = gen_rtx_MINUS (mode, op0, op1);
2425 rtx coeff;
2426 double_int val;
2427 bool speed = optimize_function_for_speed_p (cfun);
2429 val = coeff0 + negcoeff1;
2430 coeff = immed_double_int_const (val, mode);
2432 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2433 return set_src_cost (tem, speed) <= set_src_cost (orig, speed)
2434 ? tem : 0;
2438 /* (a - (-b)) -> (a + b). True even for IEEE. */
2439 if (GET_CODE (op1) == NEG)
2440 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
2442 /* (-x - c) may be simplified as (-c - x). */
2443 if (GET_CODE (op0) == NEG
2444 && (CONST_SCALAR_INT_P (op1) || CONST_DOUBLE_AS_FLOAT_P (op1)))
2446 tem = simplify_unary_operation (NEG, mode, op1, mode);
2447 if (tem)
2448 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
2451 /* Don't let a relocatable value get a negative coeff. */
2452 if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode)
2453 return simplify_gen_binary (PLUS, mode,
2454 op0,
2455 neg_const_int (mode, op1));
2457 /* (x - (x & y)) -> (x & ~y) */
2458 if (INTEGRAL_MODE_P (mode) && GET_CODE (op1) == AND)
2460 if (rtx_equal_p (op0, XEXP (op1, 0)))
2462 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
2463 GET_MODE (XEXP (op1, 1)));
2464 return simplify_gen_binary (AND, mode, op0, tem);
2466 if (rtx_equal_p (op0, XEXP (op1, 1)))
2468 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
2469 GET_MODE (XEXP (op1, 0)));
2470 return simplify_gen_binary (AND, mode, op0, tem);
2474 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
2475 by reversing the comparison code if valid. */
2476 if (STORE_FLAG_VALUE == 1
2477 && trueop0 == const1_rtx
2478 && COMPARISON_P (op1)
2479 && (reversed = reversed_comparison (op1, mode)))
2480 return reversed;
2482 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
2483 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2484 && GET_CODE (op1) == MULT
2485 && GET_CODE (XEXP (op1, 0)) == NEG)
2487 rtx in1, in2;
2489 in1 = XEXP (XEXP (op1, 0), 0);
2490 in2 = XEXP (op1, 1);
2491 return simplify_gen_binary (PLUS, mode,
2492 simplify_gen_binary (MULT, mode,
2493 in1, in2),
2494 op0);
2497 /* Canonicalize (minus (neg A) (mult B C)) to
2498 (minus (mult (neg B) C) A). */
2499 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2500 && GET_CODE (op1) == MULT
2501 && GET_CODE (op0) == NEG)
2503 rtx in1, in2;
2505 in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
2506 in2 = XEXP (op1, 1);
2507 return simplify_gen_binary (MINUS, mode,
2508 simplify_gen_binary (MULT, mode,
2509 in1, in2),
2510 XEXP (op0, 0));
2513 /* If one of the operands is a PLUS or a MINUS, see if we can
2514 simplify this by the associative law. This will, for example,
2515 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
2516 Don't use the associative law for floating point.
2517 The inaccuracy makes it nonassociative,
2518 and subtle programs can break if operations are associated. */
2520 if (INTEGRAL_MODE_P (mode)
2521 && (plus_minus_operand_p (op0)
2522 || plus_minus_operand_p (op1))
2523 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2524 return tem;
2525 break;
2527 case MULT:
2528 if (trueop1 == constm1_rtx)
2529 return simplify_gen_unary (NEG, mode, op0, mode);
2531 if (GET_CODE (op0) == NEG)
2533 rtx temp = simplify_unary_operation (NEG, mode, op1, mode);
2534 /* If op1 is a MULT as well and simplify_unary_operation
2535 just moved the NEG to the second operand, simplify_gen_binary
2536 below could, through simplify_associative_operation, move
2537 the NEG around again and recurse endlessly. */
2538 if (temp
2539 && GET_CODE (op1) == MULT
2540 && GET_CODE (temp) == MULT
2541 && XEXP (op1, 0) == XEXP (temp, 0)
2542 && GET_CODE (XEXP (temp, 1)) == NEG
2543 && XEXP (op1, 1) == XEXP (XEXP (temp, 1), 0))
2544 temp = NULL_RTX;
2545 if (temp)
2546 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), temp);
2548 if (GET_CODE (op1) == NEG)
2550 rtx temp = simplify_unary_operation (NEG, mode, op0, mode);
2551 /* If op0 is a MULT as well and simplify_unary_operation
2552 just moved the NEG to the second operand, simplify_gen_binary
2553 below could, through simplify_associative_operation, move
2554 the NEG around again and recurse endlessly. */
2555 if (temp
2556 && GET_CODE (op0) == MULT
2557 && GET_CODE (temp) == MULT
2558 && XEXP (op0, 0) == XEXP (temp, 0)
2559 && GET_CODE (XEXP (temp, 1)) == NEG
2560 && XEXP (op0, 1) == XEXP (XEXP (temp, 1), 0))
2561 temp = NULL_RTX;
2562 if (temp)
2563 return simplify_gen_binary (MULT, mode, temp, XEXP (op1, 0));
2566 /* Maybe simplify x * 0 to 0. The reduction is not valid if
2567 x is NaN, since x * 0 is then also NaN. Nor is it valid
2568 when the mode has signed zeros, since multiplying a negative
2569 number by 0 will give -0, not 0. */
2570 if (!HONOR_NANS (mode)
2571 && !HONOR_SIGNED_ZEROS (mode)
2572 && trueop1 == CONST0_RTX (mode)
2573 && ! side_effects_p (op0))
2574 return op1;
2576 /* In IEEE floating point, x*1 is not equivalent to x for
2577 signalling NaNs. */
2578 if (!HONOR_SNANS (mode)
2579 && trueop1 == CONST1_RTX (mode))
2580 return op0;
2582 /* Convert multiply by constant power of two into shift unless
2583 we are still generating RTL. This test is a kludge. */
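/* E.g. (mult x (const_int 8)) becomes (ashift x (const_int 3))
   (illustrative). */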
2584 if (CONST_INT_P (trueop1)
2585 && (val = exact_log2 (UINTVAL (trueop1))) >= 0
2586 /* If the mode is larger than the host word size, and the
2587 uppermost bit is set, then this isn't a power of two due
2588 to implicit sign extension. */
2589 && (width <= HOST_BITS_PER_WIDE_INT
2590 || val != HOST_BITS_PER_WIDE_INT - 1))
2591 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
2593 /* Likewise for multipliers wider than a word. */
2594 if (CONST_DOUBLE_AS_INT_P (trueop1)
2595 && GET_MODE (op0) == mode
2596 && CONST_DOUBLE_LOW (trueop1) == 0
2597 && (val = exact_log2 (CONST_DOUBLE_HIGH (trueop1))) >= 0
2598 && (val < HOST_BITS_PER_DOUBLE_INT - 1
2599 || GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_DOUBLE_INT))
2600 return simplify_gen_binary (ASHIFT, mode, op0,
2601 GEN_INT (val + HOST_BITS_PER_WIDE_INT));
2603 /* x*2 is x+x and x*(-1) is -x */
2604 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
2605 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
2606 && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
2607 && GET_MODE (op0) == mode)
2609 REAL_VALUE_TYPE d;
2610 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2612 if (REAL_VALUES_EQUAL (d, dconst2))
2613 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
2615 if (!HONOR_SNANS (mode)
2616 && REAL_VALUES_EQUAL (d, dconstm1))
2617 return simplify_gen_unary (NEG, mode, op0, mode);
2620 /* Optimize -x * -x as x * x. */
2621 if (FLOAT_MODE_P (mode)
2622 && GET_CODE (op0) == NEG
2623 && GET_CODE (op1) == NEG
2624 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2625 && !side_effects_p (XEXP (op0, 0)))
2626 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2628 /* Likewise, optimize abs(x) * abs(x) as x * x. */
2629 if (SCALAR_FLOAT_MODE_P (mode)
2630 && GET_CODE (op0) == ABS
2631 && GET_CODE (op1) == ABS
2632 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2633 && !side_effects_p (XEXP (op0, 0)))
2634 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2636 /* Reassociate multiplication, but for floating point MULTs
2637 only when the user specifies unsafe math optimizations. */
2638 if (! FLOAT_MODE_P (mode)
2639 || flag_unsafe_math_optimizations)
2641 tem = simplify_associative_operation (code, mode, op0, op1);
2642 if (tem)
2643 return tem;
2645 break;
2647 case IOR:
2648 if (trueop1 == CONST0_RTX (mode))
2649 return op0;
2650 if (INTEGRAL_MODE_P (mode)
2651 && trueop1 == CONSTM1_RTX (mode)
2652 && !side_effects_p (op0))
2653 return op1;
2654 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2655 return op0;
2656 /* A | (~A) -> -1 */
2657 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2658 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2659 && ! side_effects_p (op0)
2660 && SCALAR_INT_MODE_P (mode))
2661 return constm1_rtx;
2663 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
2664 if (CONST_INT_P (op1)
2665 && HWI_COMPUTABLE_MODE_P (mode)
2666 && (nonzero_bits (op0, mode) & ~UINTVAL (op1)) == 0
2667 && !side_effects_p (op0))
2668 return op1;
2670 /* Canonicalize (X & C1) | C2. */
2671 if (GET_CODE (op0) == AND
2672 && CONST_INT_P (trueop1)
2673 && CONST_INT_P (XEXP (op0, 1)))
2675 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
2676 HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
2677 HOST_WIDE_INT c2 = INTVAL (trueop1);
2679 /* If (C1&C2) == C1, then (X&C1)|C2 becomes X. */
2680 if ((c1 & c2) == c1
2681 && !side_effects_p (XEXP (op0, 0)))
2682 return trueop1;
2684 /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
2685 if (((c1|c2) & mask) == mask)
2686 return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);
2688 /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2. */
2689 if (((c1 & ~c2) & mask) != (c1 & mask))
2691 tem = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2692 gen_int_mode (c1 & ~c2, mode));
2693 return simplify_gen_binary (IOR, mode, tem, op1);
2697 /* Convert (A & B) | A to A. */
2698 if (GET_CODE (op0) == AND
2699 && (rtx_equal_p (XEXP (op0, 0), op1)
2700 || rtx_equal_p (XEXP (op0, 1), op1))
2701 && ! side_effects_p (XEXP (op0, 0))
2702 && ! side_effects_p (XEXP (op0, 1)))
2703 return op1;
2705 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2706 mode size to (rotate A CX). */
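/* E.g. in SImode (illustrative): (ior (ashift x 3) (lshiftrt x 29))
   is recognized as (rotate x 3), because the two shift counts sum to
   the 32-bit precision. */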
2708 if (GET_CODE (op1) == ASHIFT
2709 || GET_CODE (op1) == SUBREG)
2711 opleft = op1;
2712 opright = op0;
2714 else
2716 opright = op1;
2717 opleft = op0;
2720 if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
2721 && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
2722 && CONST_INT_P (XEXP (opleft, 1))
2723 && CONST_INT_P (XEXP (opright, 1))
2724 && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
2725 == GET_MODE_PRECISION (mode)))
2726 return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
2728 /* Same, but for ashift that has been "simplified" to a wider mode
2729 by simplify_shift_const. */
2731 if (GET_CODE (opleft) == SUBREG
2732 && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
2733 && GET_CODE (opright) == LSHIFTRT
2734 && GET_CODE (XEXP (opright, 0)) == SUBREG
2735 && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
2736 && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
2737 && (GET_MODE_SIZE (GET_MODE (opleft))
2738 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
2739 && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
2740 SUBREG_REG (XEXP (opright, 0)))
2741 && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
2742 && CONST_INT_P (XEXP (opright, 1))
2743 && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
2744 == GET_MODE_PRECISION (mode)))
2745 return gen_rtx_ROTATE (mode, XEXP (opright, 0),
2746 XEXP (SUBREG_REG (opleft), 1));
2748 /* If we have (ior (and X C1) C2), simplify this by making
2749 C1 as small as possible if C1 actually changes. */
2750 if (CONST_INT_P (op1)
2751 && (HWI_COMPUTABLE_MODE_P (mode)
2752 || INTVAL (op1) > 0)
2753 && GET_CODE (op0) == AND
2754 && CONST_INT_P (XEXP (op0, 1))
2755 && CONST_INT_P (op1)
2756 && (UINTVAL (XEXP (op0, 1)) & UINTVAL (op1)) != 0)
2757 return simplify_gen_binary (IOR, mode,
2758 simplify_gen_binary
2759 (AND, mode, XEXP (op0, 0),
2760 GEN_INT (UINTVAL (XEXP (op0, 1))
2761 & ~UINTVAL (op1))),
2762 op1);
2764 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2765 a (sign_extend (plus ...)). Then check if OP1 is a CONST_INT and
2766 the PLUS does not affect any of the bits in OP1: then we can do
2767 the IOR as a PLUS and we can associate. This is valid if OP1
2768 can be safely shifted left C bits. */
2769 if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
2770 && GET_CODE (XEXP (op0, 0)) == PLUS
2771 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
2772 && CONST_INT_P (XEXP (op0, 1))
2773 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
2775 int count = INTVAL (XEXP (op0, 1));
2776 HOST_WIDE_INT mask = INTVAL (trueop1) << count;
2778 if (mask >> count == INTVAL (trueop1)
2779 && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
2780 return simplify_gen_binary (ASHIFTRT, mode,
2781 plus_constant (mode, XEXP (op0, 0),
2782 mask),
2783 XEXP (op0, 1));
2786 tem = simplify_associative_operation (code, mode, op0, op1);
2787 if (tem)
2788 return tem;
2789 break;
2791 case XOR:
2792 if (trueop1 == CONST0_RTX (mode))
2793 return op0;
2794 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2795 return simplify_gen_unary (NOT, mode, op0, mode);
2796 if (rtx_equal_p (trueop0, trueop1)
2797 && ! side_effects_p (op0)
2798 && GET_MODE_CLASS (mode) != MODE_CC)
2799 return CONST0_RTX (mode);
2801 /* Canonicalize XOR of the most significant bit to PLUS. */
2802 if (CONST_SCALAR_INT_P (op1)
2803 && mode_signbit_p (mode, op1))
2804 return simplify_gen_binary (PLUS, mode, op0, op1);
2805 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2806 if (CONST_SCALAR_INT_P (op1)
2807 && GET_CODE (op0) == PLUS
2808 && CONST_SCALAR_INT_P (XEXP (op0, 1))
2809 && mode_signbit_p (mode, XEXP (op0, 1)))
2810 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2811 simplify_gen_binary (XOR, mode, op1,
2812 XEXP (op0, 1)));
2814 /* If we are XORing two things that have no bits in common,
2815 convert them into an IOR. This helps to detect rotation encoded
2816 using those methods and possibly other simplifications. */
2818 if (HWI_COMPUTABLE_MODE_P (mode)
2819 && (nonzero_bits (op0, mode)
2820 & nonzero_bits (op1, mode)) == 0)
2821 return (simplify_gen_binary (IOR, mode, op0, op1));
2823 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2824 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2825 (NOT y). */
2827 int num_negated = 0;
2829 if (GET_CODE (op0) == NOT)
2830 num_negated++, op0 = XEXP (op0, 0);
2831 if (GET_CODE (op1) == NOT)
2832 num_negated++, op1 = XEXP (op1, 0);
2834 if (num_negated == 2)
2835 return simplify_gen_binary (XOR, mode, op0, op1);
2836 else if (num_negated == 1)
2837 return simplify_gen_unary (NOT, mode,
2838 simplify_gen_binary (XOR, mode, op0, op1),
2839 mode);
2842 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2843 correspond to a machine insn or result in further simplifications
2844 if B is a constant. */
2846 if (GET_CODE (op0) == AND
2847 && rtx_equal_p (XEXP (op0, 1), op1)
2848 && ! side_effects_p (op1))
2849 return simplify_gen_binary (AND, mode,
2850 simplify_gen_unary (NOT, mode,
2851 XEXP (op0, 0), mode),
2852 op1);
2854 else if (GET_CODE (op0) == AND
2855 && rtx_equal_p (XEXP (op0, 0), op1)
2856 && ! side_effects_p (op1))
2857 return simplify_gen_binary (AND, mode,
2858 simplify_gen_unary (NOT, mode,
2859 XEXP (op0, 1), mode),
2860 op1);
2862 /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
2863 we can transform like this:
2864 (A&B)^C == ~(A&B)&C | ~C&(A&B)
2865 == (~A|~B)&C | ~C&(A&B) * DeMorgan's Law
2866 == ~A&C | ~B&C | A&(~C&B) * Distribute and re-order
2867 Attempt a few simplifications when B and C are both constants. */
2868 if (GET_CODE (op0) == AND
2869 && CONST_INT_P (op1)
2870 && CONST_INT_P (XEXP (op0, 1)))
2872 rtx a = XEXP (op0, 0);
2873 rtx b = XEXP (op0, 1);
2874 rtx c = op1;
2875 HOST_WIDE_INT bval = INTVAL (b);
2876 HOST_WIDE_INT cval = INTVAL (c);
2878 rtx na_c
2879 = simplify_binary_operation (AND, mode,
2880 simplify_gen_unary (NOT, mode, a, mode),
2881 c);
2882 if ((~cval & bval) == 0)
2884 /* Try to simplify ~A&C | ~B&C. */
2885 if (na_c != NULL_RTX)
2886 return simplify_gen_binary (IOR, mode, na_c,
2887 GEN_INT (~bval & cval));
2889 else
2891 /* If ~A&C is zero, simplify A&(~C&B) | ~B&C. */
2892 if (na_c == const0_rtx)
2894 rtx a_nc_b = simplify_gen_binary (AND, mode, a,
2895 GEN_INT (~cval & bval));
2896 return simplify_gen_binary (IOR, mode, a_nc_b,
2897 GEN_INT (~bval & cval));
2902 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2903 comparison if STORE_FLAG_VALUE is 1. */
2904 if (STORE_FLAG_VALUE == 1
2905 && trueop1 == const1_rtx
2906 && COMPARISON_P (op0)
2907 && (reversed = reversed_comparison (op0, mode)))
2908 return reversed;
2910 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2911 is (lt foo (const_int 0)), so we can perform the above
2912 simplification if STORE_FLAG_VALUE is 1. */
2914 if (STORE_FLAG_VALUE == 1
2915 && trueop1 == const1_rtx
2916 && GET_CODE (op0) == LSHIFTRT
2917 && CONST_INT_P (XEXP (op0, 1))
2918 && INTVAL (XEXP (op0, 1)) == GET_MODE_PRECISION (mode) - 1)
2919 return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);
2921 /* (xor (comparison foo bar) (const_int sign-bit))
2922 when STORE_FLAG_VALUE is the sign bit. */
2923 if (val_signbit_p (mode, STORE_FLAG_VALUE)
2924 && trueop1 == const_true_rtx
2925 && COMPARISON_P (op0)
2926 && (reversed = reversed_comparison (op0, mode)))
2927 return reversed;
2929 tem = simplify_associative_operation (code, mode, op0, op1);
2930 if (tem)
2931 return tem;
2932 break;
2934 case AND:
2935 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2936 return trueop1;
2937 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2938 return op0;
2939 if (HWI_COMPUTABLE_MODE_P (mode))
2941 HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
2942 HOST_WIDE_INT nzop1;
2943 if (CONST_INT_P (trueop1))
2945 HOST_WIDE_INT val1 = INTVAL (trueop1);
2946 /* If we are turning off bits already known off in OP0, we need
2947 not do an AND. */
2948 if ((nzop0 & ~val1) == 0)
2949 return op0;
2951 nzop1 = nonzero_bits (trueop1, mode);
2952 /* If we are clearing all the nonzero bits, the result is zero. */
2953 if ((nzop1 & nzop0) == 0
2954 && !side_effects_p (op0) && !side_effects_p (op1))
2955 return CONST0_RTX (mode);
2957 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
2958 && GET_MODE_CLASS (mode) != MODE_CC)
2959 return op0;
2960 /* A & (~A) -> 0 */
2961 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2962 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2963 && ! side_effects_p (op0)
2964 && GET_MODE_CLASS (mode) != MODE_CC)
2965 return CONST0_RTX (mode);
2967 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
2968 there are no nonzero bits of C outside of X's mode. */
2969 if ((GET_CODE (op0) == SIGN_EXTEND
2970 || GET_CODE (op0) == ZERO_EXTEND)
2971 && CONST_INT_P (trueop1)
2972 && HWI_COMPUTABLE_MODE_P (mode)
2973 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
2974 & UINTVAL (trueop1)) == 0)
2976 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
2977 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
2978 gen_int_mode (INTVAL (trueop1),
2979 imode));
2980 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
2983 /* Transform (and (truncate X) C) into (truncate (and X C)). This way
2984 we might be able to further simplify the AND with X and potentially
2985 remove the truncation altogether. */
2986 if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
2988 rtx x = XEXP (op0, 0);
2989 enum machine_mode xmode = GET_MODE (x);
2990 tem = simplify_gen_binary (AND, xmode, x,
2991 gen_int_mode (INTVAL (trueop1), xmode));
2992 return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
2995 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
2996 if (GET_CODE (op0) == IOR
2997 && CONST_INT_P (trueop1)
2998 && CONST_INT_P (XEXP (op0, 1)))
3000 HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
3001 return simplify_gen_binary (IOR, mode,
3002 simplify_gen_binary (AND, mode,
3003 XEXP (op0, 0), op1),
3004 gen_int_mode (tmp, mode));
3007 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
3008 insn (and may simplify more). */
3009 if (GET_CODE (op0) == XOR
3010 && rtx_equal_p (XEXP (op0, 0), op1)
3011 && ! side_effects_p (op1))
3012 return simplify_gen_binary (AND, mode,
3013 simplify_gen_unary (NOT, mode,
3014 XEXP (op0, 1), mode),
3015 op1);
3017 if (GET_CODE (op0) == XOR
3018 && rtx_equal_p (XEXP (op0, 1), op1)
3019 && ! side_effects_p (op1))
3020 return simplify_gen_binary (AND, mode,
3021 simplify_gen_unary (NOT, mode,
3022 XEXP (op0, 0), mode),
3023 op1);
3025 /* Similarly for (~(A ^ B)) & A. */
3026 if (GET_CODE (op0) == NOT
3027 && GET_CODE (XEXP (op0, 0)) == XOR
3028 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
3029 && ! side_effects_p (op1))
3030 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
3032 if (GET_CODE (op0) == NOT
3033 && GET_CODE (XEXP (op0, 0)) == XOR
3034 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
3035 && ! side_effects_p (op1))
3036 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
3038 /* Convert (A | B) & A to A. */
3039 if (GET_CODE (op0) == IOR
3040 && (rtx_equal_p (XEXP (op0, 0), op1)
3041 || rtx_equal_p (XEXP (op0, 1), op1))
3042 && ! side_effects_p (XEXP (op0, 0))
3043 && ! side_effects_p (XEXP (op0, 1)))
3044 return op1;
3046 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
3047 ((A & N) + B) & M -> (A + B) & M
3048 Similarly if (N & M) == 0,
3049 ((A | N) + B) & M -> (A + B) & M
3050 and for - instead of + and/or ^ instead of |.
3051 Also, if (N & M) == 0, then
3052 (A +- N) & M -> A & M. */
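/* E.g. with M == 0xff (illustrative): ((A & 0x1ff) + B) & 0xff folds
   to (A + B) & 0xff, since bits of A above the mask cannot influence
   the low byte of the sum (carries only propagate upwards). */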
3053 if (CONST_INT_P (trueop1)
3054 && HWI_COMPUTABLE_MODE_P (mode)
3055 && ~UINTVAL (trueop1)
3056 && (UINTVAL (trueop1) & (UINTVAL (trueop1) + 1)) == 0
3057 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
3059 rtx pmop[2];
3060 int which;
3062 pmop[0] = XEXP (op0, 0);
3063 pmop[1] = XEXP (op0, 1);
3065 if (CONST_INT_P (pmop[1])
3066 && (UINTVAL (pmop[1]) & UINTVAL (trueop1)) == 0)
3067 return simplify_gen_binary (AND, mode, pmop[0], op1);
3069 for (which = 0; which < 2; which++)
3071 tem = pmop[which];
3072 switch (GET_CODE (tem))
3074 case AND:
3075 if (CONST_INT_P (XEXP (tem, 1))
3076 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1))
3077 == UINTVAL (trueop1))
3078 pmop[which] = XEXP (tem, 0);
3079 break;
3080 case IOR:
3081 case XOR:
3082 if (CONST_INT_P (XEXP (tem, 1))
3083 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1)) == 0)
3084 pmop[which] = XEXP (tem, 0);
3085 break;
3086 default:
3087 break;
3091 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
3093 tem = simplify_gen_binary (GET_CODE (op0), mode,
3094 pmop[0], pmop[1]);
3095 return simplify_gen_binary (code, mode, tem, op1);
3099 /* (and X (ior (not X) Y)) -> (and X Y) */
3100 if (GET_CODE (op1) == IOR
3101 && GET_CODE (XEXP (op1, 0)) == NOT
3102 && op0 == XEXP (XEXP (op1, 0), 0))
3103 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));
3105 /* (and (ior (not X) Y) X) -> (and X Y) */
3106 if (GET_CODE (op0) == IOR
3107 && GET_CODE (XEXP (op0, 0)) == NOT
3108 && op1 == XEXP (XEXP (op0, 0), 0))
3109 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));
3111 tem = simplify_associative_operation (code, mode, op0, op1);
3112 if (tem)
3113 return tem;
3114 break;
3116 case UDIV:
3117 /* 0/x is 0 (or x&0 if x has side-effects). */
3118 if (trueop0 == CONST0_RTX (mode))
3120 if (side_effects_p (op1))
3121 return simplify_gen_binary (AND, mode, op1, trueop0);
3122 return trueop0;
3124 /* x/1 is x. */
3125 if (trueop1 == CONST1_RTX (mode))
3127 tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3128 if (tem)
3129 return tem;
3131 /* Convert divide by power of two into shift. */
3132 if (CONST_INT_P (trueop1)
3133 && (val = exact_log2 (UINTVAL (trueop1))) > 0)
3134 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
3135 break;
3137 case DIV:
3138 /* Handle floating point and integers separately. */
3139 if (SCALAR_FLOAT_MODE_P (mode))
3141 /* Maybe change 0.0 / x to 0.0. This transformation isn't
3142 safe for modes with NaNs, since 0.0 / 0.0 will then be
3143 NaN rather than 0.0. Nor is it safe for modes with signed
3144 zeros, since dividing 0 by a negative number gives -0.0. */
3145 if (trueop0 == CONST0_RTX (mode)
3146 && !HONOR_NANS (mode)
3147 && !HONOR_SIGNED_ZEROS (mode)
3148 && ! side_effects_p (op1))
3149 return op0;
3150 /* x/1.0 is x. */
3151 if (trueop1 == CONST1_RTX (mode)
3152 && !HONOR_SNANS (mode))
3153 return op0;
3155 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
3156 && trueop1 != CONST0_RTX (mode))
3158 REAL_VALUE_TYPE d;
3159 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
3161 /* x/-1.0 is -x. */
3162 if (REAL_VALUES_EQUAL (d, dconstm1)
3163 && !HONOR_SNANS (mode))
3164 return simplify_gen_unary (NEG, mode, op0, mode);
3166 /* Change FP division by a constant into multiplication.
3167 Only do this with -freciprocal-math. */
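/* E.g. (div:DF x 4.0) becomes (mult:DF x 0.25) under
   -freciprocal-math (illustrative; the transformation is applied for
   any nonzero constant divisor here, not only exact reciprocals). */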
3168 if (flag_reciprocal_math
3169 && !REAL_VALUES_EQUAL (d, dconst0))
3171 REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
3172 tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
3173 return simplify_gen_binary (MULT, mode, op0, tem);
3177 else if (SCALAR_INT_MODE_P (mode))
3179 /* 0/x is 0 (or x&0 if x has side-effects). */
3180 if (trueop0 == CONST0_RTX (mode)
3181 && !cfun->can_throw_non_call_exceptions)
3183 if (side_effects_p (op1))
3184 return simplify_gen_binary (AND, mode, op1, trueop0);
3185 return trueop0;
3187 /* x/1 is x. */
3188 if (trueop1 == CONST1_RTX (mode))
3190 tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3191 if (tem)
3192 return tem;
3194 /* x/-1 is -x. */
3195 if (trueop1 == constm1_rtx)
3197 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3198 if (x)
3199 return simplify_gen_unary (NEG, mode, x, mode);
3202 break;
3204 case UMOD:
3205 /* 0%x is 0 (or x&0 if x has side-effects). */
3206 if (trueop0 == CONST0_RTX (mode))
3208 if (side_effects_p (op1))
3209 return simplify_gen_binary (AND, mode, op1, trueop0);
3210 return trueop0;
3212 /* x%1 is 0 (or x&0 if x has side-effects). */
3213 if (trueop1 == CONST1_RTX (mode))
3215 if (side_effects_p (op0))
3216 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3217 return CONST0_RTX (mode);
3219 /* Implement modulus by power of two as AND. */
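/* E.g. (umod x (const_int 16)) becomes (and x (const_int 15))
   (illustrative). */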
3220 if (CONST_INT_P (trueop1)
3221 && exact_log2 (UINTVAL (trueop1)) > 0)
3222 return simplify_gen_binary (AND, mode, op0,
3223 GEN_INT (INTVAL (op1) - 1));
3224 break;
3226 case MOD:
3227 /* 0%x is 0 (or x&0 if x has side-effects). */
3228 if (trueop0 == CONST0_RTX (mode))
3230 if (side_effects_p (op1))
3231 return simplify_gen_binary (AND, mode, op1, trueop0);
3232 return trueop0;
3234 /* x%1 and x%-1 are 0 (or x&0 if x has side-effects). */
3235 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
3237 if (side_effects_p (op0))
3238 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3239 return CONST0_RTX (mode);
3241 break;
3243 case ROTATERT:
3244 case ROTATE:
3245 case ASHIFTRT:
3246 if (trueop1 == CONST0_RTX (mode))
3247 return op0;
3248 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3249 return op0;
3250 /* Rotating ~0 always results in ~0. */
3251 if (CONST_INT_P (trueop0) && width <= HOST_BITS_PER_WIDE_INT
3252 && UINTVAL (trueop0) == GET_MODE_MASK (mode)
3253 && ! side_effects_p (op1))
3254 return op0;
3255 canonicalize_shift:
3256 if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
3258 val = INTVAL (op1) & (GET_MODE_BITSIZE (mode) - 1);
3259 if (val != INTVAL (op1))
3260 return simplify_gen_binary (code, mode, op0, GEN_INT (val));
3262 break;
3264 case ASHIFT:
3265 case SS_ASHIFT:
3266 case US_ASHIFT:
3267 if (trueop1 == CONST0_RTX (mode))
3268 return op0;
3269 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3270 return op0;
3271 goto canonicalize_shift;
3273 case LSHIFTRT:
3274 if (trueop1 == CONST0_RTX (mode))
3275 return op0;
3276 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3277 return op0;
3278 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
3279 if (GET_CODE (op0) == CLZ
3280 && CONST_INT_P (trueop1)
3281 && STORE_FLAG_VALUE == 1
3282 && INTVAL (trueop1) < (HOST_WIDE_INT)width)
3284 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
3285 unsigned HOST_WIDE_INT zero_val = 0;
3287 if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
3288 && zero_val == GET_MODE_PRECISION (imode)
3289 && INTVAL (trueop1) == exact_log2 (zero_val))
3290 return simplify_gen_relational (EQ, mode, imode,
3291 XEXP (op0, 0), const0_rtx);
3293 goto canonicalize_shift;
3295 case SMIN:
3296 if (width <= HOST_BITS_PER_WIDE_INT
3297 && mode_signbit_p (mode, trueop1)
3298 && ! side_effects_p (op0))
3299 return op1;
3300 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3301 return op0;
3302 tem = simplify_associative_operation (code, mode, op0, op1);
3303 if (tem)
3304 return tem;
3305 break;
3307 case SMAX:
3308 if (width <= HOST_BITS_PER_WIDE_INT
3309 && CONST_INT_P (trueop1)
3310 && (UINTVAL (trueop1) == GET_MODE_MASK (mode) >> 1)
3311 && ! side_effects_p (op0))
3312 return op1;
3313 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3314 return op0;
3315 tem = simplify_associative_operation (code, mode, op0, op1);
3316 if (tem)
3317 return tem;
3318 break;
3320 case UMIN:
3321 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
3322 return op1;
3323 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3324 return op0;
3325 tem = simplify_associative_operation (code, mode, op0, op1);
3326 if (tem)
3327 return tem;
3328 break;
3330 case UMAX:
3331 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
3332 return op1;
3333 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3334 return op0;
3335 tem = simplify_associative_operation (code, mode, op0, op1);
3336 if (tem)
3337 return tem;
3338 break;
3340 case SS_PLUS:
3341 case US_PLUS:
3342 case SS_MINUS:
3343 case US_MINUS:
3344 case SS_MULT:
3345 case US_MULT:
3346 case SS_DIV:
3347 case US_DIV:
3348 /* ??? There are simplifications that can be done. */
3349 return 0;
3351 case VEC_SELECT:
3352 if (!VECTOR_MODE_P (mode))
3354 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3355 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
3356 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3357 gcc_assert (XVECLEN (trueop1, 0) == 1);
3358 gcc_assert (CONST_INT_P (XVECEXP (trueop1, 0, 0)));
3360 if (GET_CODE (trueop0) == CONST_VECTOR)
3361 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
3362 (trueop1, 0, 0)));
3364 /* Extract a scalar element from a nested VEC_SELECT expression
3365 (with optional nested VEC_CONCAT expression). Some targets
3366 (i386) extract a scalar element from a vector using a chain of
3367 nested VEC_SELECT expressions. When the input operand is a memory
3368 operand, this operation can be simplified to a simple scalar
3369 load from an offsetted memory address. */
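/* For instance (illustrative):
   (vec_select:SF (vec_select:V2SF X (parallel [2 3])) (parallel [1]))
   selects element 3 of X, so the chain below is rebuilt as
   (vec_select:SF X (parallel [3])). */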
3370 if (GET_CODE (trueop0) == VEC_SELECT)
3372 rtx op0 = XEXP (trueop0, 0);
3373 rtx op1 = XEXP (trueop0, 1);
3375 enum machine_mode opmode = GET_MODE (op0);
3376 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
3377 int n_elts = GET_MODE_SIZE (opmode) / elt_size;
3379 int i = INTVAL (XVECEXP (trueop1, 0, 0));
3380 int elem;
3382 rtvec vec;
3383 rtx tmp_op, tmp;
3385 gcc_assert (GET_CODE (op1) == PARALLEL);
3386 gcc_assert (i < n_elts);
3388 /* Select the element pointed to by the nested selector. */
3389 elem = INTVAL (XVECEXP (op1, 0, i));
3391 /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */
3392 if (GET_CODE (op0) == VEC_CONCAT)
3394 rtx op00 = XEXP (op0, 0);
3395 rtx op01 = XEXP (op0, 1);
3397 enum machine_mode mode00, mode01;
3398 int n_elts00, n_elts01;
3400 mode00 = GET_MODE (op00);
3401 mode01 = GET_MODE (op01);
3403 /* Find out number of elements of each operand. */
3404 if (VECTOR_MODE_P (mode00))
3406 elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode00));
3407 n_elts00 = GET_MODE_SIZE (mode00) / elt_size;
3409 else
3410 n_elts00 = 1;
3412 if (VECTOR_MODE_P (mode01))
3414 elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode01));
3415 n_elts01 = GET_MODE_SIZE (mode01) / elt_size;
3417 else
3418 n_elts01 = 1;
3420 gcc_assert (n_elts == n_elts00 + n_elts01);
3422 /* Select correct operand of VEC_CONCAT
3423 and adjust selector. */
3424 if (elem < n_elts00)
3425 tmp_op = op00;
3426 else
3428 tmp_op = op01;
3429 elem -= n_elts00;
3432 else
3433 tmp_op = op0;
3435 vec = rtvec_alloc (1);
3436 RTVEC_ELT (vec, 0) = GEN_INT (elem);
3438 tmp = gen_rtx_fmt_ee (code, mode,
3439 tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
3440 return tmp;
3442 if (GET_CODE (trueop0) == VEC_DUPLICATE
3443 && GET_MODE (XEXP (trueop0, 0)) == mode)
3444 return XEXP (trueop0, 0);
3446 else
3448 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3449 gcc_assert (GET_MODE_INNER (mode)
3450 == GET_MODE_INNER (GET_MODE (trueop0)));
3451 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3453 if (GET_CODE (trueop0) == CONST_VECTOR)
3455 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3456 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3457 rtvec v = rtvec_alloc (n_elts);
3458 unsigned int i;
3460 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
3461 for (i = 0; i < n_elts; i++)
3463 rtx x = XVECEXP (trueop1, 0, i);
3465 gcc_assert (CONST_INT_P (x));
3466 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
3467 INTVAL (x));
3470 return gen_rtx_CONST_VECTOR (mode, v);
3473 /* Recognize the identity. */
3474 if (GET_MODE (trueop0) == mode)
3476 bool maybe_ident = true;
3477 for (int i = 0; i < XVECLEN (trueop1, 0); i++)
3479 rtx j = XVECEXP (trueop1, 0, i);
3480 if (!CONST_INT_P (j) || INTVAL (j) != i)
3482 maybe_ident = false;
3483 break;
3486 if (maybe_ident)
3487 return trueop0;
3490 /* If we build {a,b} then permute it, build the result directly. */
3491 if (XVECLEN (trueop1, 0) == 2
3492 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3493 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3494 && GET_CODE (trueop0) == VEC_CONCAT
3495 && GET_CODE (XEXP (trueop0, 0)) == VEC_CONCAT
3496 && GET_MODE (XEXP (trueop0, 0)) == mode
3497 && GET_CODE (XEXP (trueop0, 1)) == VEC_CONCAT
3498 && GET_MODE (XEXP (trueop0, 1)) == mode)
3500 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3501 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3502 rtx subop0, subop1;
3504 gcc_assert (i0 < 4 && i1 < 4);
3505 subop0 = XEXP (XEXP (trueop0, i0 / 2), i0 % 2);
3506 subop1 = XEXP (XEXP (trueop0, i1 / 2), i1 % 2);
3508 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3511 if (XVECLEN (trueop1, 0) == 2
3512 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3513 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3514 && GET_CODE (trueop0) == VEC_CONCAT
3515 && GET_MODE (trueop0) == mode)
3517 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3518 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3519 rtx subop0, subop1;
3521 gcc_assert (i0 < 2 && i1 < 2);
3522 subop0 = XEXP (trueop0, i0);
3523 subop1 = XEXP (trueop0, i1);
3525 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3529 if (XVECLEN (trueop1, 0) == 1
3530 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3531 && GET_CODE (trueop0) == VEC_CONCAT)
3533 rtx vec = trueop0;
3534 int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
3536 /* Try to find the element in the VEC_CONCAT. */
3537 while (GET_MODE (vec) != mode
3538 && GET_CODE (vec) == VEC_CONCAT)
3540 HOST_WIDE_INT vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
3541 if (offset < vec_size)
3542 vec = XEXP (vec, 0);
3543 else
3545 offset -= vec_size;
3546 vec = XEXP (vec, 1);
3548 vec = avoid_constant_pool_reference (vec);
3551 if (GET_MODE (vec) == mode)
3552 return vec;
3555 return 0;
3556 case VEC_CONCAT:
3558 enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
3559 ? GET_MODE (trueop0)
3560 : GET_MODE_INNER (mode));
3561 enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
3562 ? GET_MODE (trueop1)
3563 : GET_MODE_INNER (mode));
3565 gcc_assert (VECTOR_MODE_P (mode));
3566 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
3567 == GET_MODE_SIZE (mode));
3569 if (VECTOR_MODE_P (op0_mode))
3570 gcc_assert (GET_MODE_INNER (mode)
3571 == GET_MODE_INNER (op0_mode));
3572 else
3573 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
3575 if (VECTOR_MODE_P (op1_mode))
3576 gcc_assert (GET_MODE_INNER (mode)
3577 == GET_MODE_INNER (op1_mode));
3578 else
3579 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
3581 if ((GET_CODE (trueop0) == CONST_VECTOR
3582 || CONST_SCALAR_INT_P (trueop0)
3583 || CONST_DOUBLE_AS_FLOAT_P (trueop0))
3584 && (GET_CODE (trueop1) == CONST_VECTOR
3585 || CONST_SCALAR_INT_P (trueop1)
3586 || CONST_DOUBLE_AS_FLOAT_P (trueop1)))
3588 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3589 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3590 rtvec v = rtvec_alloc (n_elts);
3591 unsigned int i;
3592 unsigned in_n_elts = 1;
3594 if (VECTOR_MODE_P (op0_mode))
3595 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
3596 for (i = 0; i < n_elts; i++)
3598 if (i < in_n_elts)
3600 if (!VECTOR_MODE_P (op0_mode))
3601 RTVEC_ELT (v, i) = trueop0;
3602 else
3603 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
3605 else
3607 if (!VECTOR_MODE_P (op1_mode))
3608 RTVEC_ELT (v, i) = trueop1;
3609 else
3610 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
3611 i - in_n_elts);
3615 return gen_rtx_CONST_VECTOR (mode, v);
3618 /* Try to merge VEC_SELECTs from the same vector into a single one. */
3619 if (GET_CODE (trueop0) == VEC_SELECT
3620 && GET_CODE (trueop1) == VEC_SELECT
3621 && rtx_equal_p (XEXP (trueop0, 0), XEXP (trueop1, 0)))
3623 rtx par0 = XEXP (trueop0, 1);
3624 rtx par1 = XEXP (trueop1, 1);
3625 int len0 = XVECLEN (par0, 0);
3626 int len1 = XVECLEN (par1, 0);
3627 rtvec vec = rtvec_alloc (len0 + len1);
3628 for (int i = 0; i < len0; i++)
3629 RTVEC_ELT (vec, i) = XVECEXP (par0, 0, i);
3630 for (int i = 0; i < len1; i++)
3631 RTVEC_ELT (vec, len0 + i) = XVECEXP (par1, 0, i);
3632 return simplify_gen_binary (VEC_SELECT, mode, XEXP (trueop0, 0),
3633 gen_rtx_PARALLEL (VOIDmode, vec));
3636 return 0;
3638 default:
3639 gcc_unreachable ();
3642 return 0;
3645 rtx
3646 simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
3647 rtx op0, rtx op1)
3649 HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
3650 HOST_WIDE_INT val;
3651 unsigned int width = GET_MODE_PRECISION (mode);
3653 if (VECTOR_MODE_P (mode)
3654 && code != VEC_CONCAT
3655 && GET_CODE (op0) == CONST_VECTOR
3656 && GET_CODE (op1) == CONST_VECTOR)
3658 unsigned n_elts = GET_MODE_NUNITS (mode);
3659 enum machine_mode op0mode = GET_MODE (op0);
3660 unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
3661 enum machine_mode op1mode = GET_MODE (op1);
3662 unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
3663 rtvec v = rtvec_alloc (n_elts);
3664 unsigned int i;
3666 gcc_assert (op0_n_elts == n_elts);
3667 gcc_assert (op1_n_elts == n_elts);
3668 for (i = 0; i < n_elts; i++)
3670 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
3671 CONST_VECTOR_ELT (op0, i),
3672 CONST_VECTOR_ELT (op1, i));
3673 if (!x)
3674 return 0;
3675 RTVEC_ELT (v, i) = x;
3678 return gen_rtx_CONST_VECTOR (mode, v);
3681 if (VECTOR_MODE_P (mode)
3682 && code == VEC_CONCAT
3683 && (CONST_SCALAR_INT_P (op0)
3684 || GET_CODE (op0) == CONST_FIXED
3685 || CONST_DOUBLE_AS_FLOAT_P (op0))
3686 && (CONST_SCALAR_INT_P (op1)
3687 || CONST_DOUBLE_AS_FLOAT_P (op1)
3688 || GET_CODE (op1) == CONST_FIXED))
3690 unsigned n_elts = GET_MODE_NUNITS (mode);
3691 rtvec v = rtvec_alloc (n_elts);
3693 gcc_assert (n_elts >= 2);
3694 if (n_elts == 2)
3696 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
3697 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
3699 RTVEC_ELT (v, 0) = op0;
3700 RTVEC_ELT (v, 1) = op1;
3702 else
3704 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
3705 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
3706 unsigned i;
3708 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
3709 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
3710 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
3712 for (i = 0; i < op0_n_elts; ++i)
3713 RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
3714 for (i = 0; i < op1_n_elts; ++i)
3715 RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
3718 return gen_rtx_CONST_VECTOR (mode, v);
3721 if (SCALAR_FLOAT_MODE_P (mode)
3722 && CONST_DOUBLE_AS_FLOAT_P (op0)
3723 && CONST_DOUBLE_AS_FLOAT_P (op1)
3724 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
3726 if (code == AND
3727 || code == IOR
3728 || code == XOR)
3730 long tmp0[4];
3731 long tmp1[4];
3732 REAL_VALUE_TYPE r;
3733 int i;
3735 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
3736 GET_MODE (op0));
3737 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
3738 GET_MODE (op1));
3739 for (i = 0; i < 4; i++)
3741 switch (code)
3743 case AND:
3744 tmp0[i] &= tmp1[i];
3745 break;
3746 case IOR:
3747 tmp0[i] |= tmp1[i];
3748 break;
3749 case XOR:
3750 tmp0[i] ^= tmp1[i];
3751 break;
3752 default:
3753 gcc_unreachable ();
3756 real_from_target (&r, tmp0, mode);
3757 return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
3759 else
3761 REAL_VALUE_TYPE f0, f1, value, result;
3762 bool inexact;
3764 REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
3765 REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
3766 real_convert (&f0, mode, &f0);
3767 real_convert (&f1, mode, &f1);
3769 if (HONOR_SNANS (mode)
3770 && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
3771 return 0;
3773 if (code == DIV
3774 && REAL_VALUES_EQUAL (f1, dconst0)
3775 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
3776 return 0;
3778 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3779 && flag_trapping_math
3780 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
3782 int s0 = REAL_VALUE_NEGATIVE (f0);
3783 int s1 = REAL_VALUE_NEGATIVE (f1);
3785 switch (code)
3787 case PLUS:
3788 /* Inf + -Inf = NaN plus exception. */
3789 if (s0 != s1)
3790 return 0;
3791 break;
3792 case MINUS:
3793 /* Inf - Inf = NaN plus exception. */
3794 if (s0 == s1)
3795 return 0;
3796 break;
3797 case DIV:
3798 /* Inf / Inf = NaN plus exception. */
3799 return 0;
3800 default:
3801 break;
3805 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3806 && flag_trapping_math
3807 && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
3808 || (REAL_VALUE_ISINF (f1)
3809 && REAL_VALUES_EQUAL (f0, dconst0))))
3810 /* Inf * 0 = NaN plus exception. */
3811 return 0;
3813 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
3814 &f0, &f1);
3815 real_convert (&result, mode, &value);
3817 /* Don't constant fold this floating point operation if
3818 the result has overflowed and flag_trapping_math is set. */
3820 if (flag_trapping_math
3821 && MODE_HAS_INFINITIES (mode)
3822 && REAL_VALUE_ISINF (result)
3823 && !REAL_VALUE_ISINF (f0)
3824 && !REAL_VALUE_ISINF (f1))
3825 /* Overflow plus exception. */
3826 return 0;
3828 /* Don't constant fold this floating point operation if the
3829 result may depend upon the run-time rounding mode and
3830 flag_rounding_math is set, or if GCC's software emulation
3831 is unable to accurately represent the result. */
3833 if ((flag_rounding_math
3834 || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
3835 && (inexact || !real_identical (&result, &value)))
3836 return NULL_RTX;
3838 return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
3842 /* We can fold some multi-word operations. */
3843 if (GET_MODE_CLASS (mode) == MODE_INT
3844 && width == HOST_BITS_PER_DOUBLE_INT
3845 && (CONST_DOUBLE_AS_INT_P (op0) || CONST_INT_P (op0))
3846 && (CONST_DOUBLE_AS_INT_P (op1) || CONST_INT_P (op1)))
3848 double_int o0, o1, res, tmp;
3849 bool overflow;
3851 o0 = rtx_to_double_int (op0);
3852 o1 = rtx_to_double_int (op1);
3854 switch (code)
3856 case MINUS:
3857 /* A - B == A + (-B). */
3858 o1 = -o1;
3860 /* Fall through.... */
3862 case PLUS:
3863 res = o0 + o1;
3864 break;
3866 case MULT:
3867 res = o0 * o1;
3868 break;
3870 case DIV:
3871 res = o0.divmod_with_overflow (o1, false, TRUNC_DIV_EXPR,
3872 &tmp, &overflow);
3873 if (overflow)
3874 return 0;
3875 break;
3877 case MOD:
3878 tmp = o0.divmod_with_overflow (o1, false, TRUNC_DIV_EXPR,
3879 &res, &overflow);
3880 if (overflow)
3881 return 0;
3882 break;
3884 case UDIV:
3885 res = o0.divmod_with_overflow (o1, true, TRUNC_DIV_EXPR,
3886 &tmp, &overflow);
3887 if (overflow)
3888 return 0;
3889 break;
3891 case UMOD:
3892 tmp = o0.divmod_with_overflow (o1, true, TRUNC_DIV_EXPR,
3893 &res, &overflow);
3894 if (overflow)
3895 return 0;
3896 break;
3898 case AND:
3899 res = o0 & o1;
3900 break;
3902 case IOR:
3903 res = o0 | o1;
3904 break;
3906 case XOR:
3907 res = o0 ^ o1;
3908 break;
3910 case SMIN:
3911 res = o0.smin (o1);
3912 break;
3914 case SMAX:
3915 res = o0.smax (o1);
3916 break;
3918 case UMIN:
3919 res = o0.umin (o1);
3920 break;
3922 case UMAX:
3923 res = o0.umax (o1);
3924 break;
3926 case LSHIFTRT: case ASHIFTRT:
3927 case ASHIFT:
3928 case ROTATE: case ROTATERT:
3930 unsigned HOST_WIDE_INT cnt;
3932 if (SHIFT_COUNT_TRUNCATED)
3934 o1.high = 0;
3935 o1.low &= GET_MODE_PRECISION (mode) - 1;
3938 if (!o1.fits_uhwi ()
3939 || o1.to_uhwi () >= GET_MODE_PRECISION (mode))
3940 return 0;
3942 cnt = o1.to_uhwi ();
3943 unsigned short prec = GET_MODE_PRECISION (mode);
3945 if (code == LSHIFTRT || code == ASHIFTRT)
3946 res = o0.rshift (cnt, prec, code == ASHIFTRT);
3947 else if (code == ASHIFT)
3948 res = o0.alshift (cnt, prec);
3949 else if (code == ROTATE)
3950 res = o0.lrotate (cnt, prec);
3951 else /* code == ROTATERT */
3952 res = o0.rrotate (cnt, prec);
3954 break;
3956 default:
3957 return 0;
3960 return immed_double_int_const (res, mode);
3963 if (CONST_INT_P (op0) && CONST_INT_P (op1)
3964 && width <= HOST_BITS_PER_WIDE_INT && width != 0)
3966 /* Get the integer argument values in two forms:
3967 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
3969 arg0 = INTVAL (op0);
3970 arg1 = INTVAL (op1);
3972 if (width < HOST_BITS_PER_WIDE_INT)
3974 arg0 &= GET_MODE_MASK (mode);
3975 arg1 &= GET_MODE_MASK (mode);
3977 arg0s = arg0;
3978 if (val_signbit_known_set_p (mode, arg0s))
3979 arg0s |= ~GET_MODE_MASK (mode);
3981 arg1s = arg1;
3982 if (val_signbit_known_set_p (mode, arg1s))
3983 arg1s |= ~GET_MODE_MASK (mode);
3985 else
3987 arg0s = arg0;
3988 arg1s = arg1;
3991 /* Compute the value of the arithmetic. */
3993 switch (code)
3995 case PLUS:
3996 val = arg0s + arg1s;
3997 break;
3999 case MINUS:
4000 val = arg0s - arg1s;
4001 break;
4003 case MULT:
4004 val = arg0s * arg1s;
4005 break;
4007 case DIV:
4008 if (arg1s == 0
4009 || ((unsigned HOST_WIDE_INT) arg0s
4010 == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
4011 && arg1s == -1))
4012 return 0;
4013 val = arg0s / arg1s;
4014 break;
4016 case MOD:
4017 if (arg1s == 0
4018 || ((unsigned HOST_WIDE_INT) arg0s
4019 == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
4020 && arg1s == -1))
4021 return 0;
4022 val = arg0s % arg1s;
4023 break;
4025 case UDIV:
4026 if (arg1 == 0
4027 || ((unsigned HOST_WIDE_INT) arg0s
4028 == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
4029 && arg1s == -1))
4030 return 0;
4031 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
4032 break;
4034 case UMOD:
4035 if (arg1 == 0
4036 || ((unsigned HOST_WIDE_INT) arg0s
4037 == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
4038 && arg1s == -1))
4039 return 0;
4040 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
4041 break;
4043 case AND:
4044 val = arg0 & arg1;
4045 break;
4047 case IOR:
4048 val = arg0 | arg1;
4049 break;
4051 case XOR:
4052 val = arg0 ^ arg1;
4053 break;
4055 case LSHIFTRT:
4056 case ASHIFT:
4057 case ASHIFTRT:
4058 /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure
4059 the value is in range. We can't return any old value for
4060 out-of-range arguments because either the middle-end (via
4061 shift_truncation_mask) or the back-end might be relying on
4062 target-specific knowledge. Nor can we rely on
4063 shift_truncation_mask, since the shift might not be part of an
4064 ashlM3, lshrM3 or ashrM3 instruction. */
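/* For example, with SHIFT_COUNT_TRUNCATED a shift count of 33 in a
   32-bit mode is truncated to 1; without it, such an out-of-range
   count makes us give up and return 0.  */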
4065 if (SHIFT_COUNT_TRUNCATED)
4066 arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
4067 else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
4068 return 0;
4070 val = (code == ASHIFT
4071 ? ((unsigned HOST_WIDE_INT) arg0) << arg1
4072 : ((unsigned HOST_WIDE_INT) arg0) >> arg1);
4074 /* Sign-extend the result for arithmetic right shifts. */
4075 if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
4076 val |= ((unsigned HOST_WIDE_INT) (-1)) << (width - arg1);
4077 break;
4079 case ROTATERT:
4080 if (arg1 < 0)
4081 return 0;
4083 arg1 %= width;
4084 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
4085 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
4086 break;
4088 case ROTATE:
4089 if (arg1 < 0)
4090 return 0;
4092 arg1 %= width;
4093 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
4094 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
4095 break;
4097 case COMPARE:
4098 /* Do nothing here. */
4099 return 0;
4101 case SMIN:
4102 val = arg0s <= arg1s ? arg0s : arg1s;
4103 break;
4105 case UMIN:
4106 val = ((unsigned HOST_WIDE_INT) arg0
4107 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
4108 break;
4110 case SMAX:
4111 val = arg0s > arg1s ? arg0s : arg1s;
4112 break;
4114 case UMAX:
4115 val = ((unsigned HOST_WIDE_INT) arg0
4116 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
4117 break;
4119 case SS_PLUS:
4120 case US_PLUS:
4121 case SS_MINUS:
4122 case US_MINUS:
4123 case SS_MULT:
4124 case US_MULT:
4125 case SS_DIV:
4126 case US_DIV:
4127 case SS_ASHIFT:
4128 case US_ASHIFT:
4129 /* ??? There are simplifications that can be done. */
4130 return 0;
4132 default:
4133 gcc_unreachable ();
4136 return gen_int_mode (val, mode);
4139 return NULL_RTX;
4144 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
4145 PLUS or MINUS.
4147 Rather than test for specific cases, we do this by a brute-force method
4148 and do all possible simplifications until no more changes occur. Then
4149 we rebuild the operation. */
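/* For example, (plus (minus a b) b) is first flattened into the operand
   list {+a, +b, -b}; the pairwise combination step then cancels +b
   against -b, and rebuilding the remaining operands leaves just a.  */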
4151 struct simplify_plus_minus_op_data
4153 rtx op;
4154 short neg;
4157 static bool
4158 simplify_plus_minus_op_data_cmp (rtx x, rtx y)
4160 int result;
4162 result = (commutative_operand_precedence (y)
4163 - commutative_operand_precedence (x));
4164 if (result)
4165 return result > 0;
4167 /* Group together equal REGs to do more simplification. */
4168 if (REG_P (x) && REG_P (y))
4169 return REGNO (x) > REGNO (y);
4170 else
4171 return false;
4174 static rtx
4175 simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
4176 rtx op1)
4178 struct simplify_plus_minus_op_data ops[8];
4179 rtx result, tem;
4180 int n_ops = 2, input_ops = 2;
4181 int changed, n_constants = 0, canonicalized = 0;
4182 int i, j;
4184 memset (ops, 0, sizeof ops);
4186 /* Set up the two operands and then expand them until nothing has been
4187 changed. If we run out of room in our array, give up; this should
4188 almost never happen. */
4190 ops[0].op = op0;
4191 ops[0].neg = 0;
4192 ops[1].op = op1;
4193 ops[1].neg = (code == MINUS);
4197 changed = 0;
4199 for (i = 0; i < n_ops; i++)
4201 rtx this_op = ops[i].op;
4202 int this_neg = ops[i].neg;
4203 enum rtx_code this_code = GET_CODE (this_op);
4205 switch (this_code)
4207 case PLUS:
4208 case MINUS:
4209 if (n_ops == 7)
4210 return NULL_RTX;
4212 ops[n_ops].op = XEXP (this_op, 1);
4213 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
4214 n_ops++;
4216 ops[i].op = XEXP (this_op, 0);
4217 input_ops++;
4218 changed = 1;
4219 canonicalized |= this_neg;
4220 break;
4222 case NEG:
4223 ops[i].op = XEXP (this_op, 0);
4224 ops[i].neg = ! this_neg;
4225 changed = 1;
4226 canonicalized = 1;
4227 break;
4229 case CONST:
4230 if (n_ops < 7
4231 && GET_CODE (XEXP (this_op, 0)) == PLUS
4232 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
4233 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
4235 ops[i].op = XEXP (XEXP (this_op, 0), 0);
4236 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
4237 ops[n_ops].neg = this_neg;
4238 n_ops++;
4239 changed = 1;
4240 canonicalized = 1;
4242 break;
4244 case NOT:
4245 /* ~a -> (-a - 1) */
4246 if (n_ops != 7)
4248 ops[n_ops].op = CONSTM1_RTX (mode);
4249 ops[n_ops++].neg = this_neg;
4250 ops[i].op = XEXP (this_op, 0);
4251 ops[i].neg = !this_neg;
4252 changed = 1;
4253 canonicalized = 1;
4255 break;
4257 case CONST_INT:
4258 n_constants++;
4259 if (this_neg)
4261 ops[i].op = neg_const_int (mode, this_op);
4262 ops[i].neg = 0;
4263 changed = 1;
4264 canonicalized = 1;
4266 break;
4268 default:
4269 break;
4273 while (changed);
4275 if (n_constants > 1)
4276 canonicalized = 1;
4278 gcc_assert (n_ops >= 2);
4280 /* If we only have two operands, we can avoid the loops. */
4281 if (n_ops == 2)
4283 enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
4284 rtx lhs, rhs;
4286 /* Get the two operands. Be careful with the order, especially for
4287 the cases where code == MINUS. */
4288 if (ops[0].neg && ops[1].neg)
4290 lhs = gen_rtx_NEG (mode, ops[0].op);
4291 rhs = ops[1].op;
4293 else if (ops[0].neg)
4295 lhs = ops[1].op;
4296 rhs = ops[0].op;
4298 else
4300 lhs = ops[0].op;
4301 rhs = ops[1].op;
4304 return simplify_const_binary_operation (code, mode, lhs, rhs);
4307 /* Now simplify each pair of operands until nothing changes. */
4310 /* Insertion sort is good enough for an eight-element array. */
4311 for (i = 1; i < n_ops; i++)
4313 struct simplify_plus_minus_op_data save;
4314 j = i - 1;
4315 if (!simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op))
4316 continue;
4318 canonicalized = 1;
4319 save = ops[i];
4321 ops[j + 1] = ops[j];
4322 while (j-- && simplify_plus_minus_op_data_cmp (ops[j].op, save.op));
4323 ops[j + 1] = save;
4326 changed = 0;
4327 for (i = n_ops - 1; i > 0; i--)
4328 for (j = i - 1; j >= 0; j--)
4330 rtx lhs = ops[j].op, rhs = ops[i].op;
4331 int lneg = ops[j].neg, rneg = ops[i].neg;
4333 if (lhs != 0 && rhs != 0)
4335 enum rtx_code ncode = PLUS;
4337 if (lneg != rneg)
4339 ncode = MINUS;
4340 if (lneg)
4341 tem = lhs, lhs = rhs, rhs = tem;
4343 else if (swap_commutative_operands_p (lhs, rhs))
4344 tem = lhs, lhs = rhs, rhs = tem;
4346 if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
4347 && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
4349 rtx tem_lhs, tem_rhs;
4351 tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
4352 tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
4353 tem = simplify_binary_operation (ncode, mode, tem_lhs, tem_rhs);
4355 if (tem && !CONSTANT_P (tem))
4356 tem = gen_rtx_CONST (GET_MODE (tem), tem);
4358 else
4359 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
4361 /* Reject "simplifications" that just wrap the two
4362 arguments in a CONST. Failure to do so can result
4363 in infinite recursion with simplify_binary_operation
4364 when it calls us to simplify CONST operations. */
4365 if (tem
4366 && ! (GET_CODE (tem) == CONST
4367 && GET_CODE (XEXP (tem, 0)) == ncode
4368 && XEXP (XEXP (tem, 0), 0) == lhs
4369 && XEXP (XEXP (tem, 0), 1) == rhs))
4371 lneg &= rneg;
4372 if (GET_CODE (tem) == NEG)
4373 tem = XEXP (tem, 0), lneg = !lneg;
4374 if (CONST_INT_P (tem) && lneg)
4375 tem = neg_const_int (mode, tem), lneg = 0;
4377 ops[i].op = tem;
4378 ops[i].neg = lneg;
4379 ops[j].op = NULL_RTX;
4380 changed = 1;
4381 canonicalized = 1;
4386 /* If nothing changed, fail. */
4387 if (!canonicalized)
4388 return NULL_RTX;
4390 /* Pack all the operands to the lower-numbered entries. */
4391 for (i = 0, j = 0; j < n_ops; j++)
4392 if (ops[j].op)
4394 ops[i] = ops[j];
4395 i++;
4397 n_ops = i;
4399 while (changed);
4401 /* Create (minus -C X) instead of (neg (const (plus X C))). */
4402 if (n_ops == 2
4403 && CONST_INT_P (ops[1].op)
4404 && CONSTANT_P (ops[0].op)
4405 && ops[0].neg)
4406 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
4408 /* We suppressed creation of trivial CONST expressions in the
4409 combination loop to avoid recursion. Create one manually now.
4410 The combination loop should have ensured that there is exactly
4411 one CONST_INT, and the sort will have ensured that it is last
4412 in the array and that any other constant will be next-to-last. */
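/* For example, if the array ends with {(symbol_ref X), (const_int 4)},
   the two entries are merged by plus_constant into a single constant
   operand and n_ops drops by one.  */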
4414 if (n_ops > 1
4415 && CONST_INT_P (ops[n_ops - 1].op)
4416 && CONSTANT_P (ops[n_ops - 2].op))
4418 rtx value = ops[n_ops - 1].op;
4419 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
4420 value = neg_const_int (mode, value);
4421 ops[n_ops - 2].op = plus_constant (mode, ops[n_ops - 2].op,
4422 INTVAL (value));
4423 n_ops--;
4426 /* Put a non-negated operand first, if possible. */
4428 for (i = 0; i < n_ops && ops[i].neg; i++)
4429 continue;
4430 if (i == n_ops)
4431 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
4432 else if (i != 0)
4434 tem = ops[0].op;
4435 ops[0] = ops[i];
4436 ops[i].op = tem;
4437 ops[i].neg = 1;
4440 /* Now make the result by performing the requested operations. */
4441 result = ops[0].op;
4442 for (i = 1; i < n_ops; i++)
4443 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
4444 mode, result, ops[i].op);
4446 return result;
4449 /* Check whether an operand is suitable for calling simplify_plus_minus. */
4450 static bool
4451 plus_minus_operand_p (const_rtx x)
4453 return GET_CODE (x) == PLUS
4454 || GET_CODE (x) == MINUS
4455 || (GET_CODE (x) == CONST
4456 && GET_CODE (XEXP (x, 0)) == PLUS
4457 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
4458 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
4461 /* Like simplify_binary_operation except used for relational operators.
4462 MODE is the mode of the result. If MODE is VOIDmode, both operands must
4463 not also be VOIDmode.
4465 CMP_MODE specifies the mode in which the comparison is done, so it is
4466 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
4467 the operands or, if both are VOIDmode, the operands are compared in
4468 "infinite precision". */
4470 simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
4471 enum machine_mode cmp_mode, rtx op0, rtx op1)
4473 rtx tem, trueop0, trueop1;
4475 if (cmp_mode == VOIDmode)
4476 cmp_mode = GET_MODE (op0);
4477 if (cmp_mode == VOIDmode)
4478 cmp_mode = GET_MODE (op1);
4480 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
4481 if (tem)
4483 if (SCALAR_FLOAT_MODE_P (mode))
4485 if (tem == const0_rtx)
4486 return CONST0_RTX (mode);
4487 #ifdef FLOAT_STORE_FLAG_VALUE
4489 REAL_VALUE_TYPE val;
4490 val = FLOAT_STORE_FLAG_VALUE (mode);
4491 return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
4493 #else
4494 return NULL_RTX;
4495 #endif
4497 if (VECTOR_MODE_P (mode))
4499 if (tem == const0_rtx)
4500 return CONST0_RTX (mode);
4501 #ifdef VECTOR_STORE_FLAG_VALUE
4503 int i, units;
4504 rtvec v;
4506 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
4507 if (val == NULL_RTX)
4508 return NULL_RTX;
4509 if (val == const1_rtx)
4510 return CONST1_RTX (mode);
4512 units = GET_MODE_NUNITS (mode);
4513 v = rtvec_alloc (units);
4514 for (i = 0; i < units; i++)
4515 RTVEC_ELT (v, i) = val;
4516 return gen_rtx_raw_CONST_VECTOR (mode, v);
4518 #else
4519 return NULL_RTX;
4520 #endif
4523 return tem;
4526 /* For the following tests, ensure const0_rtx is op1. */
4527 if (swap_commutative_operands_p (op0, op1)
4528 || (op0 == const0_rtx && op1 != const0_rtx))
4529 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
4531 /* If op0 is a compare, extract the comparison arguments from it. */
4532 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4533 return simplify_gen_relational (code, mode, VOIDmode,
4534 XEXP (op0, 0), XEXP (op0, 1));
4536 if (GET_MODE_CLASS (cmp_mode) == MODE_CC
4537 || CC0_P (op0))
4538 return NULL_RTX;
4540 trueop0 = avoid_constant_pool_reference (op0);
4541 trueop1 = avoid_constant_pool_reference (op1);
4542 return simplify_relational_operation_1 (code, mode, cmp_mode,
4543 trueop0, trueop1);
4546 /* This part of simplify_relational_operation is only used when CMP_MODE
4547 is not in class MODE_CC (i.e. it is a real comparison).
4549 MODE is the mode of the result, while CMP_MODE specifies the mode
4550 in which the comparison is done, so it is the mode of the operands. */
4552 static rtx
4553 simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
4554 enum machine_mode cmp_mode, rtx op0, rtx op1)
4556 enum rtx_code op0code = GET_CODE (op0);
4558 if (op1 == const0_rtx && COMPARISON_P (op0))
4560 /* If op0 is a comparison, extract the comparison arguments
4561 from it. */
4562 if (code == NE)
4564 if (GET_MODE (op0) == mode)
4565 return simplify_rtx (op0);
4566 else
4567 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
4568 XEXP (op0, 0), XEXP (op0, 1));
4570 else if (code == EQ)
4572 enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
4573 if (new_code != UNKNOWN)
4574 return simplify_gen_relational (new_code, mode, VOIDmode,
4575 XEXP (op0, 0), XEXP (op0, 1));
4579 /* (LTU/GEU (PLUS a C) C), where C is a constant, can be simplified to
4580 (GEU/LTU a -C). Likewise for (LTU/GEU (PLUS a C) a). */
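/* For example, in QImode (ltu (plus a 1) 1) holds exactly when the
   addition wrapped around, i.e. when a is 255, so it becomes
   (geu a 255), the QImode negation of the constant 1.  */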
4581 if ((code == LTU || code == GEU)
4582 && GET_CODE (op0) == PLUS
4583 && CONST_INT_P (XEXP (op0, 1))
4584 && (rtx_equal_p (op1, XEXP (op0, 0))
4585 || rtx_equal_p (op1, XEXP (op0, 1)))
4586 /* (LTU/GEU (PLUS a 0) 0) is not the same as (GEU/LTU a 0). */
4587 && XEXP (op0, 1) != const0_rtx)
4589 rtx new_cmp
4590 = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
4591 return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
4592 cmp_mode, XEXP (op0, 0), new_cmp);
4595 /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a). */
4596 if ((code == LTU || code == GEU)
4597 && GET_CODE (op0) == PLUS
4598 && rtx_equal_p (op1, XEXP (op0, 1))
4599 /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b). */
4600 && !rtx_equal_p (op1, XEXP (op0, 0)))
4601 return simplify_gen_relational (code, mode, cmp_mode, op0,
4602 copy_rtx (XEXP (op0, 0)));
4604 if (op1 == const0_rtx)
4606 /* Canonicalize (GTU x 0) as (NE x 0). */
4607 if (code == GTU)
4608 return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
4609 /* Canonicalize (LEU x 0) as (EQ x 0). */
4610 if (code == LEU)
4611 return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
4613 else if (op1 == const1_rtx)
4615 switch (code)
4617 case GE:
4618 /* Canonicalize (GE x 1) as (GT x 0). */
4619 return simplify_gen_relational (GT, mode, cmp_mode,
4620 op0, const0_rtx);
4621 case GEU:
4622 /* Canonicalize (GEU x 1) as (NE x 0). */
4623 return simplify_gen_relational (NE, mode, cmp_mode,
4624 op0, const0_rtx);
4625 case LT:
4626 /* Canonicalize (LT x 1) as (LE x 0). */
4627 return simplify_gen_relational (LE, mode, cmp_mode,
4628 op0, const0_rtx);
4629 case LTU:
4630 /* Canonicalize (LTU x 1) as (EQ x 0). */
4631 return simplify_gen_relational (EQ, mode, cmp_mode,
4632 op0, const0_rtx);
4633 default:
4634 break;
4637 else if (op1 == constm1_rtx)
4639 /* Canonicalize (LE x -1) as (LT x 0). */
4640 if (code == LE)
4641 return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
4642 /* Canonicalize (GT x -1) as (GE x 0). */
4643 if (code == GT)
4644 return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
4647 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
4648 if ((code == EQ || code == NE)
4649 && (op0code == PLUS || op0code == MINUS)
4650 && CONSTANT_P (op1)
4651 && CONSTANT_P (XEXP (op0, 1))
4652 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
4654 rtx x = XEXP (op0, 0);
4655 rtx c = XEXP (op0, 1);
4656 enum rtx_code invcode = op0code == PLUS ? MINUS : PLUS;
4657 rtx tem = simplify_gen_binary (invcode, cmp_mode, op1, c);
4659 /* Detect an infinitely recursive condition, where we oscillate in this
4660 simplification between:
4661 A + B == C <---> C - B == A,
4662 where A, B, and C are all constants with non-simplifiable expressions,
4663 usually SYMBOL_REFs. */
4664 if (GET_CODE (tem) == invcode
4665 && CONSTANT_P (x)
4666 && rtx_equal_p (c, XEXP (tem, 1)))
4667 return NULL_RTX;
4669 return simplify_gen_relational (code, mode, cmp_mode, x, tem);
4672 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0)) is
4673 the same as (zero_extract:SI FOO (const_int 1) BAR). */
4674 if (code == NE
4675 && op1 == const0_rtx
4676 && GET_MODE_CLASS (mode) == MODE_INT
4677 && cmp_mode != VOIDmode
4678 /* ??? Work-around BImode bugs in the ia64 backend. */
4679 && mode != BImode
4680 && cmp_mode != BImode
4681 && nonzero_bits (op0, cmp_mode) == 1
4682 && STORE_FLAG_VALUE == 1)
4683 return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
4684 ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
4685 : lowpart_subreg (mode, op0, cmp_mode);
4687 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
4688 if ((code == EQ || code == NE)
4689 && op1 == const0_rtx
4690 && op0code == XOR)
4691 return simplify_gen_relational (code, mode, cmp_mode,
4692 XEXP (op0, 0), XEXP (op0, 1));
4694 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
4695 if ((code == EQ || code == NE)
4696 && op0code == XOR
4697 && rtx_equal_p (XEXP (op0, 0), op1)
4698 && !side_effects_p (XEXP (op0, 0)))
4699 return simplify_gen_relational (code, mode, cmp_mode,
4700 XEXP (op0, 1), const0_rtx);
4702 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
4703 if ((code == EQ || code == NE)
4704 && op0code == XOR
4705 && rtx_equal_p (XEXP (op0, 1), op1)
4706 && !side_effects_p (XEXP (op0, 1)))
4707 return simplify_gen_relational (code, mode, cmp_mode,
4708 XEXP (op0, 0), const0_rtx);
4710 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
4711 if ((code == EQ || code == NE)
4712 && op0code == XOR
4713 && CONST_SCALAR_INT_P (op1)
4714 && CONST_SCALAR_INT_P (XEXP (op0, 1)))
4715 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4716 simplify_gen_binary (XOR, cmp_mode,
4717 XEXP (op0, 1), op1));
4719 if (op0code == POPCOUNT && op1 == const0_rtx)
4720 switch (code)
4722 case EQ:
4723 case LE:
4724 case LEU:
4725 /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */
4726 return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
4727 XEXP (op0, 0), const0_rtx);
4729 case NE:
4730 case GT:
4731 case GTU:
4732 /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */
4733 return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
4734 XEXP (op0, 0), const0_rtx);
4736 default:
4737 break;
4740 return NULL_RTX;
4743 enum
4745 CMP_EQ = 1,
4746 CMP_LT = 2,
4747 CMP_GT = 4,
4748 CMP_LTU = 8,
4749 CMP_GTU = 16
4753 /* Convert the known results for EQ, LT, GT, LTU, GTU contained in
4754 KNOWN_RESULT to a CONST_INT, based on the requested comparison CODE.
4755 For KNOWN_RESULT to make sense it should be either CMP_EQ, or the
4756 logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
4757 For floating-point comparisons, assume that the operands were ordered. */
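/* For example, a KNOWN_RESULT of CMP_LT | CMP_GTU (signed less-than but
   unsigned greater-than, as when comparing -1 with 1) makes LE yield
   const_true_rtx while LEU yields const0_rtx.  */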
4759 static rtx
4760 comparison_result (enum rtx_code code, int known_results)
4762 switch (code)
4764 case EQ:
4765 case UNEQ:
4766 return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
4767 case NE:
4768 case LTGT:
4769 return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;
4771 case LT:
4772 case UNLT:
4773 return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
4774 case GE:
4775 case UNGE:
4776 return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;
4778 case GT:
4779 case UNGT:
4780 return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
4781 case LE:
4782 case UNLE:
4783 return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;
4785 case LTU:
4786 return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
4787 case GEU:
4788 return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;
4790 case GTU:
4791 return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
4792 case LEU:
4793 return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;
4795 case ORDERED:
4796 return const_true_rtx;
4797 case UNORDERED:
4798 return const0_rtx;
4799 default:
4800 gcc_unreachable ();
4804 /* Check if the given comparison (done in the given MODE) is actually a
4805 tautology or a contradiction.
4806 If no simplification is possible, this function returns zero.
4807 Otherwise, it returns either const_true_rtx or const0_rtx. */
4810 simplify_const_relational_operation (enum rtx_code code,
4811 enum machine_mode mode,
4812 rtx op0, rtx op1)
4814 rtx tem;
4815 rtx trueop0;
4816 rtx trueop1;
4818 gcc_assert (mode != VOIDmode
4819 || (GET_MODE (op0) == VOIDmode
4820 && GET_MODE (op1) == VOIDmode));
4822 /* If op0 is a compare, extract the comparison arguments from it. */
4823 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4825 op1 = XEXP (op0, 1);
4826 op0 = XEXP (op0, 0);
4828 if (GET_MODE (op0) != VOIDmode)
4829 mode = GET_MODE (op0);
4830 else if (GET_MODE (op1) != VOIDmode)
4831 mode = GET_MODE (op1);
4832 else
4833 return 0;
4836 /* We can't simplify MODE_CC values since we don't know what the
4837 actual comparison is. */
4838 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
4839 return 0;
4841 /* Make sure the constant is second. */
4842 if (swap_commutative_operands_p (op0, op1))
4844 tem = op0, op0 = op1, op1 = tem;
4845 code = swap_condition (code);
4848 trueop0 = avoid_constant_pool_reference (op0);
4849 trueop1 = avoid_constant_pool_reference (op1);
4851 /* For integer comparisons of A and B maybe we can simplify A - B and can
4852 then simplify a comparison of that with zero. If A and B are both either
4853 a register or a CONST_INT, this can't help; testing for these cases will
4854 prevent infinite recursion here and speed things up.
4856 We can only do this for EQ and NE comparisons as otherwise we may
4857 lose or introduce overflow which we cannot disregard as undefined as
4858 we do not know the signedness of the operation on either the left or
4859 the right hand side of the comparison. */
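/* For example, (eq (plus x 4) (plus x 5)) is handled here: the
   subtraction folds to (const_int -1), and comparing that against
   zero yields const0_rtx.  */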
4861 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
4862 && (code == EQ || code == NE)
4863 && ! ((REG_P (op0) || CONST_INT_P (trueop0))
4864 && (REG_P (op1) || CONST_INT_P (trueop1)))
4865 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
4866 /* We cannot do this if tem is a nonzero address. */
4867 && ! nonzero_address_p (tem))
4868 return simplify_const_relational_operation (signed_condition (code),
4869 mode, tem, const0_rtx);
4871 if (! HONOR_NANS (mode) && code == ORDERED)
4872 return const_true_rtx;
4874 if (! HONOR_NANS (mode) && code == UNORDERED)
4875 return const0_rtx;
4877 /* For modes without NaNs, if the two operands are equal, we know the
4878 result except if they have side-effects. Even with NaNs we know
4879 the result of unordered comparisons and, if signaling NaNs are
4880 irrelevant, also the result of LT/GT/LTGT. */
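/* For example, (eq x x) and (le x x) fold to const_true_rtx while
   (lt x x) folds to const0_rtx, provided x has no side effects and,
   for floating point, NaNs do not get in the way.  */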
4881 if ((! HONOR_NANS (GET_MODE (trueop0))
4882 || code == UNEQ || code == UNLE || code == UNGE
4883 || ((code == LT || code == GT || code == LTGT)
4884 && ! HONOR_SNANS (GET_MODE (trueop0))))
4885 && rtx_equal_p (trueop0, trueop1)
4886 && ! side_effects_p (trueop0))
4887 return comparison_result (code, CMP_EQ);
4889 /* If the operands are floating-point constants, see if we can fold
4890 the result. */
4891 if (CONST_DOUBLE_AS_FLOAT_P (trueop0)
4892 && CONST_DOUBLE_AS_FLOAT_P (trueop1)
4893 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
4895 REAL_VALUE_TYPE d0, d1;
4897 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
4898 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
4900 /* Comparisons are unordered iff at least one of the values is NaN. */
4901 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
4902 switch (code)
4904 case UNEQ:
4905 case UNLT:
4906 case UNGT:
4907 case UNLE:
4908 case UNGE:
4909 case NE:
4910 case UNORDERED:
4911 return const_true_rtx;
4912 case EQ:
4913 case LT:
4914 case GT:
4915 case LE:
4916 case GE:
4917 case LTGT:
4918 case ORDERED:
4919 return const0_rtx;
4920 default:
4921 return 0;
4924 return comparison_result (code,
4925 (REAL_VALUES_EQUAL (d0, d1) ? CMP_EQ :
4926 REAL_VALUES_LESS (d0, d1) ? CMP_LT : CMP_GT));
4929 /* Otherwise, see if the operands are both integers. */
4930 if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
4931 && (CONST_DOUBLE_AS_INT_P (trueop0) || CONST_INT_P (trueop0))
4932 && (CONST_DOUBLE_AS_INT_P (trueop1) || CONST_INT_P (trueop1)))
4934 int width = GET_MODE_PRECISION (mode);
4935 HOST_WIDE_INT l0s, h0s, l1s, h1s;
4936 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
4938 /* Get the two words comprising each integer constant. */
4939 if (CONST_DOUBLE_AS_INT_P (trueop0))
4941 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
4942 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
4944 else
4946 l0u = l0s = INTVAL (trueop0);
4947 h0u = h0s = HWI_SIGN_EXTEND (l0s);
4950 if (CONST_DOUBLE_AS_INT_P (trueop1))
4952 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
4953 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
4955 else
4957 l1u = l1s = INTVAL (trueop1);
4958 h1u = h1s = HWI_SIGN_EXTEND (l1s);
4961 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
4962 we have to sign or zero-extend the values. */
4963 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
4965 l0u &= GET_MODE_MASK (mode);
4966 l1u &= GET_MODE_MASK (mode);
4968 if (val_signbit_known_set_p (mode, l0s))
4969 l0s |= ~GET_MODE_MASK (mode);
4971 if (val_signbit_known_set_p (mode, l1s))
4972 l1s |= ~GET_MODE_MASK (mode);
4974 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
4975 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
4977 if (h0u == h1u && l0u == l1u)
4978 return comparison_result (code, CMP_EQ);
4979 else
4981 int cr;
4982 cr = (h0s < h1s || (h0s == h1s && l0u < l1u)) ? CMP_LT : CMP_GT;
4983 cr |= (h0u < h1u || (h0u == h1u && l0u < l1u)) ? CMP_LTU : CMP_GTU;
4984 return comparison_result (code, cr);
4988 /* Optimize comparisons with upper and lower bounds. */
4989 if (HWI_COMPUTABLE_MODE_P (mode)
4990 && CONST_INT_P (trueop1))
4992 int sign;
4993 unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, mode);
4994 HOST_WIDE_INT val = INTVAL (trueop1);
4995 HOST_WIDE_INT mmin, mmax;
4997 if (code == GEU
4998 || code == LEU
4999 || code == GTU
5000 || code == LTU)
5001 sign = 0;
5002 else
5003 sign = 1;
5005 /* Get a reduced range if the sign bit is zero. */
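/* For example, if nonzero_bits shows that only the low 8 bits of
   TRUEOP0 can be set, MMIN and MMAX become 0 and 255, and a comparison
   such as (gtu x (const_int 255)) folds to const0_rtx.  */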
5006 if (nonzero <= (GET_MODE_MASK (mode) >> 1))
5008 mmin = 0;
5009 mmax = nonzero;
5011 else
5013 rtx mmin_rtx, mmax_rtx;
5014 get_mode_bounds (mode, sign, mode, &mmin_rtx, &mmax_rtx);
5016 mmin = INTVAL (mmin_rtx);
5017 mmax = INTVAL (mmax_rtx);
5018 if (sign)
5020 unsigned int sign_copies = num_sign_bit_copies (trueop0, mode);
5022 mmin >>= (sign_copies - 1);
5023 mmax >>= (sign_copies - 1);
5027 switch (code)
5029 /* x >= y is always true for y <= mmin, always false for y > mmax. */
5030 case GEU:
5031 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
5032 return const_true_rtx;
5033 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
5034 return const0_rtx;
5035 break;
5036 case GE:
5037 if (val <= mmin)
5038 return const_true_rtx;
5039 if (val > mmax)
5040 return const0_rtx;
5041 break;
5043 /* x <= y is always true for y >= mmax, always false for y < mmin. */
5044 case LEU:
5045 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
5046 return const_true_rtx;
5047 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
5048 return const0_rtx;
5049 break;
5050 case LE:
5051 if (val >= mmax)
5052 return const_true_rtx;
5053 if (val < mmin)
5054 return const0_rtx;
5055 break;
5057 case EQ:
5058 /* x == y is always false for y out of range. */
5059 if (val < mmin || val > mmax)
5060 return const0_rtx;
5061 break;
5063 /* x > y is always false for y >= mmax, always true for y < mmin. */
5064 case GTU:
5065 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
5066 return const0_rtx;
5067 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
5068 return const_true_rtx;
5069 break;
5070 case GT:
5071 if (val >= mmax)
5072 return const0_rtx;
5073 if (val < mmin)
5074 return const_true_rtx;
5075 break;
5077 /* x < y is always false for y <= mmin, always true for y > mmax. */
5078 case LTU:
5079 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
5080 return const0_rtx;
5081 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
5082 return const_true_rtx;
5083 break;
5084 case LT:
5085 if (val <= mmin)
5086 return const0_rtx;
5087 if (val > mmax)
5088 return const_true_rtx;
5089 break;
5091 case NE:
5092 /* x != y is always true for y out of range. */
5093 if (val < mmin || val > mmax)
5094 return const_true_rtx;
5095 break;
5097 default:
5098 break;
5102 /* Optimize integer comparisons with zero. */
5103 if (trueop1 == const0_rtx)
5105 /* Some addresses are known to be nonzero. We don't know
5106 their sign, but equality comparisons are known. */
5107 if (nonzero_address_p (trueop0))
5109 if (code == EQ || code == LEU)
5110 return const0_rtx;
5111 if (code == NE || code == GTU)
5112 return const_true_rtx;
5115 /* See if the first operand is an IOR with a constant. If so, we
5116 may be able to determine the result of this comparison. */
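/* For example, (ior x (const_int 4)) can never be zero, so comparing
   it EQ or LEU against zero folds to false and NE or GTU to true;
   if the constant also has the sign bit set, the signed comparisons
   against zero are known as well.  */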
5117 if (GET_CODE (op0) == IOR)
5119 rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
5120 if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
5122 int sign_bitnum = GET_MODE_PRECISION (mode) - 1;
5123 int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
5124 && (UINTVAL (inner_const)
5125 & ((unsigned HOST_WIDE_INT) 1
5126 << sign_bitnum)));
5128 switch (code)
5130 case EQ:
5131 case LEU:
5132 return const0_rtx;
5133 case NE:
5134 case GTU:
5135 return const_true_rtx;
5136 case LT:
5137 case LE:
5138 if (has_sign)
5139 return const_true_rtx;
5140 break;
5141 case GT:
5142 case GE:
5143 if (has_sign)
5144 return const0_rtx;
5145 break;
5146 default:
5147 break;
5153 /* Optimize comparison of ABS with zero. */
5154 if (trueop1 == CONST0_RTX (mode)
5155 && (GET_CODE (trueop0) == ABS
5156 || (GET_CODE (trueop0) == FLOAT_EXTEND
5157 && GET_CODE (XEXP (trueop0, 0)) == ABS)))
5159 switch (code)
5161 case LT:
5162 /* Optimize abs(x) < 0.0. */
5163 if (!HONOR_SNANS (mode)
5164 && (!INTEGRAL_MODE_P (mode)
5165 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
5167 if (INTEGRAL_MODE_P (mode)
5168 && (issue_strict_overflow_warning
5169 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
5170 warning (OPT_Wstrict_overflow,
5171 ("assuming signed overflow does not occur when "
5172 "assuming abs (x) < 0 is false"));
5173 return const0_rtx;
5175 break;
5177 case GE:
5178 /* Optimize abs(x) >= 0.0. */
5179 if (!HONOR_NANS (mode)
5180 && (!INTEGRAL_MODE_P (mode)
5181 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
5183 if (INTEGRAL_MODE_P (mode)
5184 && (issue_strict_overflow_warning
5185 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
5186 warning (OPT_Wstrict_overflow,
5187 ("assuming signed overflow does not occur when "
5188 "assuming abs (x) >= 0 is true"));
5189 return const_true_rtx;
5191 break;
5193 case UNGE:
5194 /* Optimize ! (abs(x) < 0.0). */
5195 return const_true_rtx;
5197 default:
5198 break;
5202 return 0;
5205 /* Simplify CODE, an operation with result mode MODE and three operands,
5206 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
5207 a constant. Return 0 if no simplification is possible. */
5210 simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
5211 enum machine_mode op0_mode, rtx op0, rtx op1,
5212 rtx op2)
5214 unsigned int width = GET_MODE_PRECISION (mode);
5215 bool any_change = false;
5216 rtx tem;
5218 /* VOIDmode means "infinite" precision. */
5219 if (width == 0)
5220 width = HOST_BITS_PER_WIDE_INT;
5222 switch (code)
5224 case FMA:
5225 /* Simplify negations around the multiplication. */
5226 /* -a * -b + c => a * b + c. */
5227 if (GET_CODE (op0) == NEG)
5229 tem = simplify_unary_operation (NEG, mode, op1, mode);
5230 if (tem)
5231 op1 = tem, op0 = XEXP (op0, 0), any_change = true;
5233 else if (GET_CODE (op1) == NEG)
5235 tem = simplify_unary_operation (NEG, mode, op0, mode);
5236 if (tem)
5237 op0 = tem, op1 = XEXP (op1, 0), any_change = true;
5240 /* Canonicalize the two multiplication operands. */
5241 /* a * -b + c => -b * a + c. */
5242 if (swap_commutative_operands_p (op0, op1))
5243 tem = op0, op0 = op1, op1 = tem, any_change = true;
5245 if (any_change)
5246 return gen_rtx_FMA (mode, op0, op1, op2);
5247 return NULL_RTX;
5249 case SIGN_EXTRACT:
5250 case ZERO_EXTRACT:
5251 if (CONST_INT_P (op0)
5252 && CONST_INT_P (op1)
5253 && CONST_INT_P (op2)
5254 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
5255 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
5257 /* Extracting a bit-field from a constant */
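/* E.g. (zero_extract:SI (const_int 0xab) (const_int 4) (const_int 4))
   evaluates to (const_int 0xa), assuming !BITS_BIG_ENDIAN.  */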
5258 unsigned HOST_WIDE_INT val = UINTVAL (op0);
5259 HOST_WIDE_INT op1val = INTVAL (op1);
5260 HOST_WIDE_INT op2val = INTVAL (op2);
5261 if (BITS_BIG_ENDIAN)
5262 val >>= GET_MODE_PRECISION (op0_mode) - op2val - op1val;
5263 else
5264 val >>= op2val;
5266 if (HOST_BITS_PER_WIDE_INT != op1val)
5268 /* First zero-extend. */
5269 val &= ((unsigned HOST_WIDE_INT) 1 << op1val) - 1;
5270 /* If desired, propagate sign bit. */
5271 if (code == SIGN_EXTRACT
5272 && (val & ((unsigned HOST_WIDE_INT) 1 << (op1val - 1)))
5273 != 0)
5274 val |= ~ (((unsigned HOST_WIDE_INT) 1 << op1val) - 1);
5277 return gen_int_mode (val, mode);
5279 break;
5281 case IF_THEN_ELSE:
5282 if (CONST_INT_P (op0))
5283 return op0 != const0_rtx ? op1 : op2;
5285 /* Convert c ? a : a into "a". */
5286 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
5287 return op1;
5289 /* Convert a != b ? a : b into "a". */
5290 if (GET_CODE (op0) == NE
5291 && ! side_effects_p (op0)
5292 && ! HONOR_NANS (mode)
5293 && ! HONOR_SIGNED_ZEROS (mode)
5294 && ((rtx_equal_p (XEXP (op0, 0), op1)
5295 && rtx_equal_p (XEXP (op0, 1), op2))
5296 || (rtx_equal_p (XEXP (op0, 0), op2)
5297 && rtx_equal_p (XEXP (op0, 1), op1))))
5298 return op1;
5300 /* Convert a == b ? a : b into "b". */
5301 if (GET_CODE (op0) == EQ
5302 && ! side_effects_p (op0)
5303 && ! HONOR_NANS (mode)
5304 && ! HONOR_SIGNED_ZEROS (mode)
5305 && ((rtx_equal_p (XEXP (op0, 0), op1)
5306 && rtx_equal_p (XEXP (op0, 1), op2))
5307 || (rtx_equal_p (XEXP (op0, 0), op2)
5308 && rtx_equal_p (XEXP (op0, 1), op1))))
5309 return op2;
5311 if (COMPARISON_P (op0) && ! side_effects_p (op0))
5313 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
5314 ? GET_MODE (XEXP (op0, 1))
5315 : GET_MODE (XEXP (op0, 0)));
5316 rtx temp;
5318 /* Look for happy constants in op1 and op2. */
5319 if (CONST_INT_P (op1) && CONST_INT_P (op2))
5321 HOST_WIDE_INT t = INTVAL (op1);
5322 HOST_WIDE_INT f = INTVAL (op2);
5324 if (t == STORE_FLAG_VALUE && f == 0)
5325 code = GET_CODE (op0);
5326 else if (t == 0 && f == STORE_FLAG_VALUE)
5328 enum rtx_code tmp;
5329 tmp = reversed_comparison_code (op0, NULL_RTX);
5330 if (tmp == UNKNOWN)
5331 break;
5332 code = tmp;
5334 else
5335 break;
5337 return simplify_gen_relational (code, mode, cmp_mode,
5338 XEXP (op0, 0), XEXP (op0, 1));
5341 if (cmp_mode == VOIDmode)
5342 cmp_mode = op0_mode;
5343 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
5344 cmp_mode, XEXP (op0, 0),
5345 XEXP (op0, 1));
5347 /* See if any simplifications were possible. */
5348 if (temp)
5350 if (CONST_INT_P (temp))
5351 return temp == const0_rtx ? op2 : op1;
5352 else if (temp)
5353 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
5356 break;
5358 case VEC_MERGE:
5359 gcc_assert (GET_MODE (op0) == mode);
5360 gcc_assert (GET_MODE (op1) == mode);
5361 gcc_assert (VECTOR_MODE_P (mode));
5362 op2 = avoid_constant_pool_reference (op2);
5363 if (CONST_INT_P (op2))
5365 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
5366 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
5367 int mask = (1 << n_elts) - 1;
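/* Bit I of OP2 selects element I from OP0 when set and from OP1 when
   clear; e.g. a mask of (const_int 5) in V4SImode takes elements 0
   and 2 from OP0 and elements 1 and 3 from OP1.  */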
5369 if (!(INTVAL (op2) & mask))
5370 return op1;
5371 if ((INTVAL (op2) & mask) == mask)
5372 return op0;
5374 op0 = avoid_constant_pool_reference (op0);
5375 op1 = avoid_constant_pool_reference (op1);
5376 if (GET_CODE (op0) == CONST_VECTOR
5377 && GET_CODE (op1) == CONST_VECTOR)
5379 rtvec v = rtvec_alloc (n_elts);
5380 unsigned int i;
5382 for (i = 0; i < n_elts; i++)
5383 RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
5384 ? CONST_VECTOR_ELT (op0, i)
5385 : CONST_VECTOR_ELT (op1, i));
5386 return gen_rtx_CONST_VECTOR (mode, v);
5389 break;
5391 default:
5392 gcc_unreachable ();
5395 return 0;
5398 /* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_FIXED
5399 or CONST_VECTOR,
5400 returning another CONST_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.
5402 Works by unpacking OP into a collection of 8-bit values
5403 represented as a little-endian array of 'unsigned char', selecting by BYTE,
5404 and then repacking them again for OUTERMODE. */
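/* For example, on a little-endian target a SUBREG:HI at byte 0 of
   (const_int 0x12345678) in SImode unpacks to the byte array
   {0x78, 0x56, 0x34, 0x12} and repacks the first two bytes as
   (const_int 0x5678).  */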
5406 static rtx
5407 simplify_immed_subreg (enum machine_mode outermode, rtx op,
5408 enum machine_mode innermode, unsigned int byte)
5410 /* We support up to 512-bit values (for V8DFmode). */
5411 enum {
5412 max_bitsize = 512,
5413 value_bit = 8,
5414 value_mask = (1 << value_bit) - 1
5416 unsigned char value[max_bitsize / value_bit];
5417 int value_start;
5418 int i;
5419 int elem;
5421 int num_elem;
5422 rtx * elems;
5423 int elem_bitsize;
5424 rtx result_s;
5425 rtvec result_v = NULL;
5426 enum mode_class outer_class;
5427 enum machine_mode outer_submode;
5429 /* Some ports misuse CCmode. */
5430 if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
5431 return op;
5433 /* We have no way to represent a complex constant at the rtl level. */
5434 if (COMPLEX_MODE_P (outermode))
5435 return NULL_RTX;
5437 /* Unpack the value. */
5439 if (GET_CODE (op) == CONST_VECTOR)
5441 num_elem = CONST_VECTOR_NUNITS (op);
5442 elems = &CONST_VECTOR_ELT (op, 0);
5443 elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
5445 else
5447 num_elem = 1;
5448 elems = &op;
5449 elem_bitsize = max_bitsize;
5451 /* If this asserts, it is too complicated; reducing value_bit may help. */
5452 gcc_assert (BITS_PER_UNIT % value_bit == 0);
5453 /* I don't know how to handle endianness of sub-units. */
5454 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
5456 for (elem = 0; elem < num_elem; elem++)
5458 unsigned char * vp;
5459 rtx el = elems[elem];
5461 /* Vectors are kept in target memory order. (This is probably
5462 a mistake.) */
5464 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5465 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5466 / BITS_PER_UNIT);
5467 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5468 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5469 unsigned bytele = (subword_byte % UNITS_PER_WORD
5470 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5471 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
5474 switch (GET_CODE (el))
5476 case CONST_INT:
5477 for (i = 0;
5478 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5479 i += value_bit)
5480 *vp++ = INTVAL (el) >> i;
5481 /* CONST_INTs are always logically sign-extended. */
5482 for (; i < elem_bitsize; i += value_bit)
5483 *vp++ = INTVAL (el) < 0 ? -1 : 0;
5484 break;
5486 case CONST_DOUBLE:
5487 if (GET_MODE (el) == VOIDmode)
5489 unsigned char extend = 0;
5490 /* If this triggers, someone should have generated a
5491 CONST_INT instead. */
5492 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
5494 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5495 *vp++ = CONST_DOUBLE_LOW (el) >> i;
5496 while (i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize)
5498 *vp++
5499 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
5500 i += value_bit;
5503 if (CONST_DOUBLE_HIGH (el) >> (HOST_BITS_PER_WIDE_INT - 1))
5504 extend = -1;
5505 for (; i < elem_bitsize; i += value_bit)
5506 *vp++ = extend;
5508 else
5510 long tmp[max_bitsize / 32];
5511 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
5513 gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
5514 gcc_assert (bitsize <= elem_bitsize);
5515 gcc_assert (bitsize % value_bit == 0);
5517 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
5518 GET_MODE (el));
5520 /* real_to_target produces its result in words affected by
5521 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5522 and use WORDS_BIG_ENDIAN instead; see the documentation
5523 of SUBREG in rtl.texi. */
5524 for (i = 0; i < bitsize; i += value_bit)
5526 int ibase;
5527 if (WORDS_BIG_ENDIAN)
5528 ibase = bitsize - 1 - i;
5529 else
5530 ibase = i;
5531 *vp++ = tmp[ibase / 32] >> i % 32;
5534 /* It shouldn't matter what's done here, so fill it with
5535 zero. */
5536 for (; i < elem_bitsize; i += value_bit)
5537 *vp++ = 0;
5539 break;
5541 case CONST_FIXED:
5542 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
5544 for (i = 0; i < elem_bitsize; i += value_bit)
5545 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5547 else
5549 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5550 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5551 for (; i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize;
5552 i += value_bit)
5553 *vp++ = CONST_FIXED_VALUE_HIGH (el)
5554 >> (i - HOST_BITS_PER_WIDE_INT);
5555 for (; i < elem_bitsize; i += value_bit)
5556 *vp++ = 0;
5558 break;
5560 default:
5561 gcc_unreachable ();
5565 /* Now, pick the right byte to start with. */
5566 /* Renumber BYTE so that the least-significant byte is byte 0. A special
5567 case is paradoxical SUBREGs, which shouldn't be adjusted since they
5568 will already have offset 0. */
5569 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
5571 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
5572 - byte);
5573 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5574 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5575 byte = (subword_byte % UNITS_PER_WORD
5576 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5579 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
5580 so if it's become negative it will instead be very large.) */
5581 gcc_assert (byte < GET_MODE_SIZE (innermode));
5583 /* Convert from bytes to chunks of size value_bit. */
5584 value_start = byte * (BITS_PER_UNIT / value_bit);
5586 /* Re-pack the value. */
5588 if (VECTOR_MODE_P (outermode))
5590 num_elem = GET_MODE_NUNITS (outermode);
5591 result_v = rtvec_alloc (num_elem);
5592 elems = &RTVEC_ELT (result_v, 0);
5593 outer_submode = GET_MODE_INNER (outermode);
5595 else
5597 num_elem = 1;
5598 elems = &result_s;
5599 outer_submode = outermode;
5602 outer_class = GET_MODE_CLASS (outer_submode);
5603 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
5605 gcc_assert (elem_bitsize % value_bit == 0);
5606 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
5608 for (elem = 0; elem < num_elem; elem++)
5610 unsigned char *vp;
5612 /* Vectors are stored in target memory order. (This is probably
5613 a mistake.) */
5615 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5616 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5617 / BITS_PER_UNIT);
5618 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5619 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5620 unsigned bytele = (subword_byte % UNITS_PER_WORD
5621 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5622 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
5625 switch (outer_class)
5627 case MODE_INT:
5628 case MODE_PARTIAL_INT:
5630 unsigned HOST_WIDE_INT hi = 0, lo = 0;
5632 for (i = 0;
5633 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5634 i += value_bit)
5635 lo |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5636 for (; i < elem_bitsize; i += value_bit)
5637 hi |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask)
5638 << (i - HOST_BITS_PER_WIDE_INT);
5640 /* immed_double_const doesn't call trunc_int_for_mode. I don't
5641 know why. */
5642 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
5643 elems[elem] = gen_int_mode (lo, outer_submode);
5644 else if (elem_bitsize <= HOST_BITS_PER_DOUBLE_INT)
5645 elems[elem] = immed_double_const (lo, hi, outer_submode);
5646 else
5647 return NULL_RTX;
5649 break;
5651 case MODE_FLOAT:
5652 case MODE_DECIMAL_FLOAT:
5654 REAL_VALUE_TYPE r;
5655 long tmp[max_bitsize / 32];
5657 /* real_from_target wants its input in words affected by
5658 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5659 and use WORDS_BIG_ENDIAN instead; see the documentation
5660 of SUBREG in rtl.texi. */
5661 for (i = 0; i < max_bitsize / 32; i++)
5662 tmp[i] = 0;
5663 for (i = 0; i < elem_bitsize; i += value_bit)
5665 int ibase;
5666 if (WORDS_BIG_ENDIAN)
5667 ibase = elem_bitsize - 1 - i;
5668 else
5669 ibase = i;
5670 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
5673 real_from_target (&r, tmp, outer_submode);
5674 elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
5676 break;
5678 case MODE_FRACT:
5679 case MODE_UFRACT:
5680 case MODE_ACCUM:
5681 case MODE_UACCUM:
5683 FIXED_VALUE_TYPE f;
5684 f.data.low = 0;
5685 f.data.high = 0;
5686 f.mode = outer_submode;
5688 for (i = 0;
5689 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5690 i += value_bit)
5691 f.data.low |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5692 for (; i < elem_bitsize; i += value_bit)
5693 f.data.high |= ((unsigned HOST_WIDE_INT)(*vp++ & value_mask)
5694 << (i - HOST_BITS_PER_WIDE_INT));
5696 elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
5698 break;
5700 default:
5701 gcc_unreachable ();
5704 if (VECTOR_MODE_P (outermode))
5705 return gen_rtx_CONST_VECTOR (outermode, result_v);
5706 else
5707 return result_s;
5710 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
5711 Return 0 if no simplifications are possible. */
5713 simplify_subreg (enum machine_mode outermode, rtx op,
5714 enum machine_mode innermode, unsigned int byte)
5716 /* Little bit of sanity checking. */
5717 gcc_assert (innermode != VOIDmode);
5718 gcc_assert (outermode != VOIDmode);
5719 gcc_assert (innermode != BLKmode);
5720 gcc_assert (outermode != BLKmode);
5722 gcc_assert (GET_MODE (op) == innermode
5723 || GET_MODE (op) == VOIDmode);
5725 if ((byte % GET_MODE_SIZE (outermode)) != 0)
5726 return NULL_RTX;
5728 if (byte >= GET_MODE_SIZE (innermode))
5729 return NULL_RTX;
5731 if (outermode == innermode && !byte)
5732 return op;
5734 if (CONST_SCALAR_INT_P (op)
5735 || CONST_DOUBLE_AS_FLOAT_P (op)
5736 || GET_CODE (op) == CONST_FIXED
5737 || GET_CODE (op) == CONST_VECTOR)
5738 return simplify_immed_subreg (outermode, op, innermode, byte);
5740 /* Changing mode twice with SUBREG => just change it once,
5741 or not at all if changing back to op's starting mode. */
5742 if (GET_CODE (op) == SUBREG)
5744 enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
5745 int final_offset = byte + SUBREG_BYTE (op);
5746 rtx newx;
5748 if (outermode == innermostmode
5749 && byte == 0 && SUBREG_BYTE (op) == 0)
5750 return SUBREG_REG (op);
5752 /* The SUBREG_BYTE represents the offset, as if the value were stored
5753 in memory. The irritating exception is a paradoxical subreg, where
5754 we define SUBREG_BYTE to be 0; on big-endian machines this value
5755 would otherwise be negative. For a moment, undo this exception. */
5756 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5758 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
5759 if (WORDS_BIG_ENDIAN)
5760 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5761 if (BYTES_BIG_ENDIAN)
5762 final_offset += difference % UNITS_PER_WORD;
5764 if (SUBREG_BYTE (op) == 0
5765 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
5767 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
5768 if (WORDS_BIG_ENDIAN)
5769 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5770 if (BYTES_BIG_ENDIAN)
5771 final_offset += difference % UNITS_PER_WORD;
5774 /* See whether resulting subreg will be paradoxical. */
5775 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
5777 /* In nonparadoxical subregs we can't handle negative offsets. */
5778 if (final_offset < 0)
5779 return NULL_RTX;
5780 /* Bail out in case resulting subreg would be incorrect. */
5781 if (final_offset % GET_MODE_SIZE (outermode)
5782 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
5783 return NULL_RTX;
5785 else
5787 int offset = 0;
5788 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
5790 /* In a paradoxical subreg, see if we are still looking at the lower part.
5791 If so, our SUBREG_BYTE will be 0. */
5792 if (WORDS_BIG_ENDIAN)
5793 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5794 if (BYTES_BIG_ENDIAN)
5795 offset += difference % UNITS_PER_WORD;
5796 if (offset == final_offset)
5797 final_offset = 0;
5798 else
5799 return NULL_RTX;
5802 /* Recurse for further possible simplifications. */
5803 newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
5804 final_offset);
5805 if (newx)
5806 return newx;
5807 if (validate_subreg (outermode, innermostmode,
5808 SUBREG_REG (op), final_offset))
5810 newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
5811 if (SUBREG_PROMOTED_VAR_P (op)
5812 && SUBREG_PROMOTED_UNSIGNED_P (op) >= 0
5813 && GET_MODE_CLASS (outermode) == MODE_INT
5814 && IN_RANGE (GET_MODE_SIZE (outermode),
5815 GET_MODE_SIZE (innermode),
5816 GET_MODE_SIZE (innermostmode))
5817 && subreg_lowpart_p (newx))
5819 SUBREG_PROMOTED_VAR_P (newx) = 1;
5820 SUBREG_PROMOTED_UNSIGNED_SET
5821 (newx, SUBREG_PROMOTED_UNSIGNED_P (op));
5823 return newx;
5825 return NULL_RTX;
5828 /* SUBREG of a hard register => just change the register number
5829 and/or mode. If the hard register is not valid in that mode,
5830 suppress this simplification. If the hard register is the stack,
5831 frame, or argument pointer, leave this as a SUBREG. */
5833 if (REG_P (op) && HARD_REGISTER_P (op))
5835 unsigned int regno, final_regno;
5837 regno = REGNO (op);
5838 final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
5839 if (HARD_REGISTER_NUM_P (final_regno))
5841 rtx x;
5842 int final_offset = byte;
5844 /* Adjust offset for paradoxical subregs. */
5845 if (byte == 0
5846 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5848 int difference = (GET_MODE_SIZE (innermode)
5849 - GET_MODE_SIZE (outermode));
5850 if (WORDS_BIG_ENDIAN)
5851 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5852 if (BYTES_BIG_ENDIAN)
5853 final_offset += difference % UNITS_PER_WORD;
5856 x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);
5858 /* Propagate the original regno. We don't have any way to specify
5859 the offset inside the original regno, so do so only for the lowpart.
5860 The information is used only by alias analysis, which cannot
5861 grok partial registers anyway. */
5863 if (subreg_lowpart_offset (outermode, innermode) == byte)
5864 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
5865 return x;
  /* If we have a SUBREG of a register that we are replacing and we are
     replacing it with a MEM, make a new MEM and try replacing the
     SUBREG with it.  Don't do this if the MEM has a mode-dependent address
     or if we would be widening it.  */
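  /* Worked example (assuming the address is not mode-dependent): on a
     little-endian target, (subreg:QI (mem:SI (reg:SI P)) 3) can become
     (mem:QI (plus:SI (reg:SI P) (const_int 3))) via adjust_address_nv.  */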
  if (MEM_P (op)
      && ! mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op))
      /* Allow splitting of volatile memory references in case we don't
         have an instruction to move the whole thing.  */
      && (! MEM_VOLATILE_P (op)
          || ! have_insn_for (SET, innermode))
      && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
    return adjust_address_nv (op, outermode, byte);
  /* Handle complex values represented as a CONCAT
     of the real and imaginary parts.  */
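  /* Worked example: for (concat:SC (reg:SF RE) (reg:SF IM)) with a 4-byte
     SFmode, a byte offset of 0 selects RE and a byte offset of 4 selects IM,
     so (subreg:SF (concat:SC RE IM) 4) simplifies to IM.  */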
  if (GET_CODE (op) == CONCAT)
    {
      unsigned int part_size, final_offset;
      rtx part, res;

      part_size = GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)));
      if (byte < part_size)
        {
          part = XEXP (op, 0);
          final_offset = byte;
        }
      else
        {
          part = XEXP (op, 1);
          final_offset = byte - part_size;
        }

      if (final_offset + GET_MODE_SIZE (outermode) > part_size)
        return NULL_RTX;

      res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
      if (res)
        return res;
      if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
        return gen_rtx_SUBREG (outermode, part, final_offset);
      return NULL_RTX;
    }
  /* A SUBREG resulting from a zero extension may fold to zero if
     it extracts higher bits than the ZERO_EXTEND's source has.  */
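  /* Worked example (assuming a little-endian target):
     (subreg:SI (zero_extend:DI (reg:SI X)) 4) extracts bits 32-63, which
     are all known to be zero, so it folds to (const_int 0).  */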
  if (GET_CODE (op) == ZERO_EXTEND && SCALAR_INT_MODE_P (innermode))
    {
      unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
      if (bitpos >= GET_MODE_PRECISION (GET_MODE (XEXP (op, 0))))
        return CONST0_RTX (outermode);
    }
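  /* A lowpart SUBREG to a narrower scalar integer mode acts as a
     truncation, so try the shared truncation simplifier.  Illustrative
     example (assuming a little-endian target):
     (subreg:SI (sign_extend:DI (reg:SI X)) 0) can simplify back to
     (reg:SI X).  */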
  if (SCALAR_INT_MODE_P (outermode)
      && SCALAR_INT_MODE_P (innermode)
      && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode)
      && byte == subreg_lowpart_offset (outermode, innermode))
    {
      rtx tem = simplify_truncation (outermode, op, innermode);
      if (tem)
        return tem;
    }

  return NULL_RTX;
}
/* Make a SUBREG operation or equivalent if it folds.  */
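/* Illustrative usage (assuming the requested subreg is representable):
   simplify_gen_subreg (SImode, X, DImode, 0) on a little-endian target
   yields the SImode lowpart of X, either fully simplified or as a fresh
   (subreg:SI X 0); it returns NULL_RTX when neither is possible.  */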
rtx
simplify_gen_subreg (enum machine_mode outermode, rtx op,
                     enum machine_mode innermode, unsigned int byte)
{
  rtx newx;

  newx = simplify_subreg (outermode, op, innermode, byte);
  if (newx)
    return newx;

  if (GET_CODE (op) == SUBREG
      || GET_CODE (op) == CONCAT
      || GET_MODE (op) == VOIDmode)
    return NULL_RTX;

  if (validate_subreg (outermode, innermode, op, byte))
    return gen_rtx_SUBREG (outermode, op, byte);

  return NULL_RTX;
}
/* Simplify X, an rtx expression.

   Return the simplified expression or NULL if no simplifications
   were possible.

   This is the preferred entry point into the simplification routines;
   however, we still allow passes to call the more specific routines.

   Right now GCC has three (yes, three) major bodies of RTL simplification
   code that need to be unified.

        1. fold_rtx in cse.c.  This code uses various CSE-specific
           information to aid in RTL simplification.

        2. simplify_rtx in combine.c.  Similar to fold_rtx, except that
           it uses combine-specific information to aid in RTL
           simplification.

        3. The routines in this file.

   Long term we want to have only one body of simplification code; to
   get to that state I recommend the following steps:

        1. Pore over fold_rtx & simplify_rtx and move any simplifications
           which do not depend on pass-specific state into these routines.

        2. As code is moved by #1, change fold_rtx & simplify_rtx to
           use this routine whenever possible.

        3. Allow for pass-dependent state to be provided to these
           routines and add simplifications based on the pass-dependent
           state.  Remove code from cse.c & combine.c that becomes
           redundant/dead.

   It will take time, but ultimately the compiler will be easier to
   maintain and improve.  It's totally silly that when we add a
   simplification it needs to be added in 4 places (3 for RTL
   simplification and 1 for tree simplification).  */
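/* Illustrative usage: simplify_rtx on (plus:SI (const_int 2) (reg:SI R))
   canonicalizes the commutative operands, yielding
   (plus:SI (reg:SI R) (const_int 2)).  */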
rtx
simplify_rtx (const_rtx x)
{
  const enum rtx_code code = GET_CODE (x);
  const enum machine_mode mode = GET_MODE (x);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      return simplify_unary_operation (code, mode,
                                       XEXP (x, 0), GET_MODE (XEXP (x, 0)));
    case RTX_COMM_ARITH:
      if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
        return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));

      /* Fall through....  */

    case RTX_BIN_ARITH:
      return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
                                         XEXP (x, 0), XEXP (x, 1),
                                         XEXP (x, 2));

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      return simplify_relational_operation (code, mode,
                                            ((GET_MODE (XEXP (x, 0))
                                              != VOIDmode)
                                             ? GET_MODE (XEXP (x, 0))
                                             : GET_MODE (XEXP (x, 1))),
                                            XEXP (x, 0),
                                            XEXP (x, 1));

    case RTX_EXTRA:
      if (code == SUBREG)
        return simplify_subreg (mode, SUBREG_REG (x),
                                GET_MODE (SUBREG_REG (x)),
                                SUBREG_BYTE (x));
      break;

    case RTX_OBJ:
      if (code == LO_SUM)
        {
          /* Convert (lo_sum (high FOO) FOO) to FOO.  */
          if (GET_CODE (XEXP (x, 0)) == HIGH
              && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
            return XEXP (x, 1);
        }
      break;

    default:
      break;
    }

  return NULL;
}