Fix typos in comment
[official-gcc.git] / gcc / simplify-rtx.c
blob67f7babfc1ffd395b9e3c123fab119da0e3cebad
1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000 Free Software Foundation, Inc.
5 This file is part of GNU CC.
7 GNU CC is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 2, or (at your option)
10 any later version.
12 GNU CC is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with GNU CC; see the file COPYING. If not, write to
19 the Free Software Foundation, 59 Temple Place - Suite 330,
20 Boston, MA 02111-1307, USA. */
23 #include "config.h"
24 #include "system.h"
25 #include <setjmp.h>
27 #include "rtl.h"
28 #include "tm_p.h"
29 #include "regs.h"
30 #include "hard-reg-set.h"
31 #include "flags.h"
32 #include "real.h"
33 #include "insn-config.h"
34 #include "recog.h"
35 #include "function.h"
36 #include "expr.h"
37 #include "toplev.h"
38 #include "output.h"
39 #include "ggc.h"
40 #include "obstack.h"
41 #include "hashtab.h"
42 #include "cselib.h"
44 /* Simplification and canonicalization of RTL. */
/* Nonzero if X has the form (PLUS frame-pointer integer).  We check for
   virtual regs here because the simplify_*_operation routines are called
   by integrate.c, which is called before virtual register instantiation.

   ?!? FIXED_BASE_PLUS_P and NONZERO_BASE_PLUS_P need to move into
   a header file so that their definitions can be shared with the
   simplification routines in simplify-rtx.c.  Until then, do not
   change these macros without also changing the copy in simplify-rtx.c.  */

/* Note: inside the PLUS clause X is known to be a PLUS, so the test for
   the arg pointer must look at XEXP (X, 0), not at X itself.  */
#define FIXED_BASE_PLUS_P(X)					\
  ((X) == frame_pointer_rtx || (X) == hard_frame_pointer_rtx	\
   || ((X) == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM])\
   || (X) == virtual_stack_vars_rtx				\
   || (X) == virtual_incoming_args_rtx				\
   || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
       && (XEXP (X, 0) == frame_pointer_rtx			\
	   || XEXP (X, 0) == hard_frame_pointer_rtx		\
	   || (XEXP (X, 0) == arg_pointer_rtx			\
	       && fixed_regs[ARG_POINTER_REGNUM])		\
	   || XEXP (X, 0) == virtual_stack_vars_rtx		\
	   || XEXP (X, 0) == virtual_incoming_args_rtx))	\
   || GET_CODE (X) == ADDRESSOF)
/* Similar, but also allows reference to the stack pointer.

   This used to include FIXED_BASE_PLUS_P, however, we can't assume that
   arg_pointer_rtx by itself is nonzero, because on at least one machine,
   the i960, the arg pointer is zero when it is unused.  */

/* Note: inside the first PLUS clause X is known to be a PLUS, so the test
   for the arg pointer must look at XEXP (X, 0), not at X itself.  */
#define NONZERO_BASE_PLUS_P(X)					\
  ((X) == frame_pointer_rtx || (X) == hard_frame_pointer_rtx	\
   || (X) == virtual_stack_vars_rtx				\
   || (X) == virtual_incoming_args_rtx				\
   || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
       && (XEXP (X, 0) == frame_pointer_rtx			\
	   || XEXP (X, 0) == hard_frame_pointer_rtx		\
	   || (XEXP (X, 0) == arg_pointer_rtx			\
	       && fixed_regs[ARG_POINTER_REGNUM])		\
	   || XEXP (X, 0) == virtual_stack_vars_rtx		\
	   || XEXP (X, 0) == virtual_incoming_args_rtx))	\
   || (X) == stack_pointer_rtx					\
   || (X) == virtual_stack_dynamic_rtx				\
   || (X) == virtual_outgoing_args_rtx				\
   || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
       && (XEXP (X, 0) == stack_pointer_rtx			\
	   || XEXP (X, 0) == virtual_stack_dynamic_rtx		\
	   || XEXP (X, 0) == virtual_outgoing_args_rtx))	\
   || GET_CODE (X) == ADDRESSOF)
/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */

/* The argument is parenthesized so that an expression argument (e.g.
   A + B) is cast as a whole rather than having the cast bind to its
   first operand only.  */
#define HWI_SIGN_EXTEND(low) \
 (((HOST_WIDE_INT) (low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
102 static rtx simplify_plus_minus PARAMS ((enum rtx_code,
103 enum machine_mode, rtx, rtx));
104 static void check_fold_consts PARAMS ((PTR));
105 static int entry_and_rtx_equal_p PARAMS ((const void *, const void *));
106 static unsigned int get_value_hash PARAMS ((const void *));
107 static struct elt_list *new_elt_list PARAMS ((struct elt_list *,
108 cselib_val *));
109 static struct elt_loc_list *new_elt_loc_list PARAMS ((struct elt_loc_list *,
110 rtx));
111 static void unchain_one_value PARAMS ((cselib_val *));
112 static void unchain_one_elt_list PARAMS ((struct elt_list **));
113 static void unchain_one_elt_loc_list PARAMS ((struct elt_loc_list **));
114 static void clear_table PARAMS ((void));
115 static int discard_useless_locs PARAMS ((void **, void *));
116 static int discard_useless_values PARAMS ((void **, void *));
117 static void remove_useless_values PARAMS ((void));
118 static unsigned int hash_rtx PARAMS ((rtx, enum machine_mode, int));
119 static cselib_val *new_cselib_val PARAMS ((unsigned int,
120 enum machine_mode));
121 static void add_mem_for_addr PARAMS ((cselib_val *, cselib_val *,
122 rtx));
123 static cselib_val *cselib_lookup_mem PARAMS ((rtx, int));
124 static rtx cselib_subst_to_values PARAMS ((rtx));
125 static void cselib_invalidate_regno PARAMS ((unsigned int,
126 enum machine_mode));
127 static int cselib_mem_conflict_p PARAMS ((rtx, rtx));
128 static int cselib_invalidate_mem_1 PARAMS ((void **, void *));
129 static void cselib_invalidate_mem PARAMS ((rtx));
130 static void cselib_invalidate_rtx PARAMS ((rtx, rtx, void *));
131 static void cselib_record_set PARAMS ((rtx, cselib_val *,
132 cselib_val *));
133 static void cselib_record_sets PARAMS ((rtx));
135 /* There are three ways in which cselib can look up an rtx:
136 - for a REG, the reg_values table (which is indexed by regno) is used
137 - for a MEM, we recursively look up its address and then follow the
138 addr_list of that value
139 - for everything else, we compute a hash value and go through the hash
140 table. Since different rtx's can still have the same hash value,
141 this involves walking the table entries for a given value and comparing
142 the locations of the entries with the rtx we are looking up. */
144 /* A table that enables us to look up elts by their value. */
145 static htab_t hash_table;
147 /* This is a global so we don't have to pass this through every function.
148 It is used in new_elt_loc_list to set SETTING_INSN. */
149 static rtx cselib_current_insn;
151 /* Every new unknown value gets a unique number. */
152 static unsigned int next_unknown_value;
154 /* The number of registers we had when the varrays were last resized. */
155 static unsigned int cselib_nregs;
157 /* Count values without known locations. Whenever this grows too big, we
158 remove these useless values from the table. */
159 static int n_useless_values;
161 /* Number of useless values before we remove them from the hash table. */
162 #define MAX_USELESS_VALUES 32
164 /* This table maps from register number to values. It does not contain
165 pointers to cselib_val structures, but rather elt_lists. The purpose is
166 to be able to refer to the same register in different modes. */
167 static varray_type reg_values;
168 #define REG_VALUES(I) VARRAY_ELT_LIST (reg_values, (I))
170 /* We pass this to cselib_invalidate_mem to invalidate all of
171 memory for a non-const call instruction. */
172 static rtx callmem;
174 /* Memory for our structures is allocated from this obstack. */
175 static struct obstack cselib_obstack;
177 /* Used to quickly free all memory. */
178 static char *cselib_startobj;
180 /* Caches for unused structures. */
181 static cselib_val *empty_vals;
182 static struct elt_list *empty_elt_lists;
183 static struct elt_loc_list *empty_elt_loc_lists;
185 /* Set by discard_useless_locs if it deleted the last location of any
186 value. */
187 static int values_became_useless;
189 /* Make a binary operation by properly ordering the operands and
190 seeing if the expression folds. */
193 simplify_gen_binary (code, mode, op0, op1)
194 enum rtx_code code;
195 enum machine_mode mode;
196 rtx op0, op1;
198 rtx tem;
200 /* Put complex operands first and constants second if commutative. */
201 if (GET_RTX_CLASS (code) == 'c'
202 && ((CONSTANT_P (op0) && GET_CODE (op1) != CONST_INT)
203 || (GET_RTX_CLASS (GET_CODE (op0)) == 'o'
204 && GET_RTX_CLASS (GET_CODE (op1)) != 'o')
205 || (GET_CODE (op0) == SUBREG
206 && GET_RTX_CLASS (GET_CODE (SUBREG_REG (op0))) == 'o'
207 && GET_RTX_CLASS (GET_CODE (op1)) != 'o')))
208 tem = op0, op0 = op1, op1 = tem;
210 /* If this simplifies, do it. */
211 tem = simplify_binary_operation (code, mode, op0, op1);
213 if (tem)
214 return tem;
216 /* Handle addition and subtraction of CONST_INT specially. Otherwise,
217 just form the operation. */
219 if (code == PLUS && GET_CODE (op1) == CONST_INT
220 && GET_MODE (op0) != VOIDmode)
221 return plus_constant (op0, INTVAL (op1));
222 else if (code == MINUS && GET_CODE (op1) == CONST_INT
223 && GET_MODE (op0) != VOIDmode)
224 return plus_constant (op0, - INTVAL (op1));
225 else
226 return gen_rtx_fmt_ee (code, mode, op0, op1);
229 /* Try to simplify a unary operation CODE whose output mode is to be
230 MODE with input operand OP whose mode was originally OP_MODE.
231 Return zero if no simplification can be made. */
234 simplify_unary_operation (code, mode, op, op_mode)
235 enum rtx_code code;
236 enum machine_mode mode;
237 rtx op;
238 enum machine_mode op_mode;
240 unsigned int width = GET_MODE_BITSIZE (mode);
242 /* The order of these tests is critical so that, for example, we don't
243 check the wrong mode (input vs. output) for a conversion operation,
244 such as FIX. At some point, this should be simplified. */
246 #if !defined(REAL_IS_NOT_DOUBLE) || defined(REAL_ARITHMETIC)
248 if (code == FLOAT && GET_MODE (op) == VOIDmode
249 && (GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT))
251 HOST_WIDE_INT hv, lv;
252 REAL_VALUE_TYPE d;
254 if (GET_CODE (op) == CONST_INT)
255 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
256 else
257 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
259 #ifdef REAL_ARITHMETIC
260 REAL_VALUE_FROM_INT (d, lv, hv, mode);
261 #else
262 if (hv < 0)
264 d = (double) (~ hv);
265 d *= ((double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2))
266 * (double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2)));
267 d += (double) (unsigned HOST_WIDE_INT) (~ lv);
268 d = (- d - 1.0);
270 else
272 d = (double) hv;
273 d *= ((double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2))
274 * (double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2)));
275 d += (double) (unsigned HOST_WIDE_INT) lv;
277 #endif /* REAL_ARITHMETIC */
278 d = real_value_truncate (mode, d);
279 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
281 else if (code == UNSIGNED_FLOAT && GET_MODE (op) == VOIDmode
282 && (GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT))
284 HOST_WIDE_INT hv, lv;
285 REAL_VALUE_TYPE d;
287 if (GET_CODE (op) == CONST_INT)
288 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
289 else
290 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
292 if (op_mode == VOIDmode)
294 /* We don't know how to interpret negative-looking numbers in
295 this case, so don't try to fold those. */
296 if (hv < 0)
297 return 0;
299 else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
301 else
302 hv = 0, lv &= GET_MODE_MASK (op_mode);
304 #ifdef REAL_ARITHMETIC
305 REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
306 #else
308 d = (double) (unsigned HOST_WIDE_INT) hv;
309 d *= ((double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2))
310 * (double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2)));
311 d += (double) (unsigned HOST_WIDE_INT) lv;
312 #endif /* REAL_ARITHMETIC */
313 d = real_value_truncate (mode, d);
314 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
316 #endif
318 if (GET_CODE (op) == CONST_INT
319 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
321 register HOST_WIDE_INT arg0 = INTVAL (op);
322 register HOST_WIDE_INT val;
324 switch (code)
326 case NOT:
327 val = ~ arg0;
328 break;
330 case NEG:
331 val = - arg0;
332 break;
334 case ABS:
335 val = (arg0 >= 0 ? arg0 : - arg0);
336 break;
338 case FFS:
339 /* Don't use ffs here. Instead, get low order bit and then its
340 number. If arg0 is zero, this will return 0, as desired. */
341 arg0 &= GET_MODE_MASK (mode);
342 val = exact_log2 (arg0 & (- arg0)) + 1;
343 break;
345 case TRUNCATE:
346 val = arg0;
347 break;
349 case ZERO_EXTEND:
350 if (op_mode == VOIDmode)
351 op_mode = mode;
352 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
354 /* If we were really extending the mode,
355 we would have to distinguish between zero-extension
356 and sign-extension. */
357 if (width != GET_MODE_BITSIZE (op_mode))
358 abort ();
359 val = arg0;
361 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
362 val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
363 else
364 return 0;
365 break;
367 case SIGN_EXTEND:
368 if (op_mode == VOIDmode)
369 op_mode = mode;
370 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
372 /* If we were really extending the mode,
373 we would have to distinguish between zero-extension
374 and sign-extension. */
375 if (width != GET_MODE_BITSIZE (op_mode))
376 abort ();
377 val = arg0;
379 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
382 = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
383 if (val
384 & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
385 val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
387 else
388 return 0;
389 break;
391 case SQRT:
392 return 0;
394 default:
395 abort ();
398 val = trunc_int_for_mode (val, mode);
400 return GEN_INT (val);
403 /* We can do some operations on integer CONST_DOUBLEs. Also allow
404 for a DImode operation on a CONST_INT. */
405 else if (GET_MODE (op) == VOIDmode && width <= HOST_BITS_PER_INT * 2
406 && (GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT))
408 unsigned HOST_WIDE_INT l1, lv;
409 HOST_WIDE_INT h1, hv;
411 if (GET_CODE (op) == CONST_DOUBLE)
412 l1 = CONST_DOUBLE_LOW (op), h1 = CONST_DOUBLE_HIGH (op);
413 else
414 l1 = INTVAL (op), h1 = HWI_SIGN_EXTEND (l1);
416 switch (code)
418 case NOT:
419 lv = ~ l1;
420 hv = ~ h1;
421 break;
423 case NEG:
424 neg_double (l1, h1, &lv, &hv);
425 break;
427 case ABS:
428 if (h1 < 0)
429 neg_double (l1, h1, &lv, &hv);
430 else
431 lv = l1, hv = h1;
432 break;
434 case FFS:
435 hv = 0;
436 if (l1 == 0)
437 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & (-h1)) + 1;
438 else
439 lv = exact_log2 (l1 & (-l1)) + 1;
440 break;
442 case TRUNCATE:
443 /* This is just a change-of-mode, so do nothing. */
444 lv = l1, hv = h1;
445 break;
447 case ZERO_EXTEND:
448 if (op_mode == VOIDmode
449 || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
450 return 0;
452 hv = 0;
453 lv = l1 & GET_MODE_MASK (op_mode);
454 break;
456 case SIGN_EXTEND:
457 if (op_mode == VOIDmode
458 || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
459 return 0;
460 else
462 lv = l1 & GET_MODE_MASK (op_mode);
463 if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
464 && (lv & ((HOST_WIDE_INT) 1
465 << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
466 lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
468 hv = HWI_SIGN_EXTEND (lv);
470 break;
472 case SQRT:
473 return 0;
475 default:
476 return 0;
479 return immed_double_const (lv, hv, mode);
482 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
483 else if (GET_CODE (op) == CONST_DOUBLE
484 && GET_MODE_CLASS (mode) == MODE_FLOAT)
486 REAL_VALUE_TYPE d;
487 jmp_buf handler;
488 rtx x;
490 if (setjmp (handler))
491 /* There used to be a warning here, but that is inadvisable.
492 People may want to cause traps, and the natural way
493 to do it should not get a warning. */
494 return 0;
496 set_float_handler (handler);
498 REAL_VALUE_FROM_CONST_DOUBLE (d, op);
500 switch (code)
502 case NEG:
503 d = REAL_VALUE_NEGATE (d);
504 break;
506 case ABS:
507 if (REAL_VALUE_NEGATIVE (d))
508 d = REAL_VALUE_NEGATE (d);
509 break;
511 case FLOAT_TRUNCATE:
512 d = real_value_truncate (mode, d);
513 break;
515 case FLOAT_EXTEND:
516 /* All this does is change the mode. */
517 break;
519 case FIX:
520 d = REAL_VALUE_RNDZINT (d);
521 break;
523 case UNSIGNED_FIX:
524 d = REAL_VALUE_UNSIGNED_RNDZINT (d);
525 break;
527 case SQRT:
528 return 0;
530 default:
531 abort ();
534 x = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
535 set_float_handler (NULL_PTR);
536 return x;
539 else if (GET_CODE (op) == CONST_DOUBLE
540 && GET_MODE_CLASS (GET_MODE (op)) == MODE_FLOAT
541 && GET_MODE_CLASS (mode) == MODE_INT
542 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
544 REAL_VALUE_TYPE d;
545 jmp_buf handler;
546 HOST_WIDE_INT val;
548 if (setjmp (handler))
549 return 0;
551 set_float_handler (handler);
553 REAL_VALUE_FROM_CONST_DOUBLE (d, op);
555 switch (code)
557 case FIX:
558 val = REAL_VALUE_FIX (d);
559 break;
561 case UNSIGNED_FIX:
562 val = REAL_VALUE_UNSIGNED_FIX (d);
563 break;
565 default:
566 abort ();
569 set_float_handler (NULL_PTR);
571 val = trunc_int_for_mode (val, mode);
573 return GEN_INT (val);
575 #endif
576 /* This was formerly used only for non-IEEE float.
577 eggert@twinsun.com says it is safe for IEEE also. */
578 else
580 /* There are some simplifications we can do even if the operands
581 aren't constant. */
582 switch (code)
584 case NEG:
585 case NOT:
586 /* (not (not X)) == X, similarly for NEG. */
587 if (GET_CODE (op) == code)
588 return XEXP (op, 0);
589 break;
591 case SIGN_EXTEND:
592 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
593 becomes just the MINUS if its mode is MODE. This allows
594 folding switch statements on machines using casesi (such as
595 the Vax). */
596 if (GET_CODE (op) == TRUNCATE
597 && GET_MODE (XEXP (op, 0)) == mode
598 && GET_CODE (XEXP (op, 0)) == MINUS
599 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
600 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
601 return XEXP (op, 0);
603 #ifdef POINTERS_EXTEND_UNSIGNED
604 if (! POINTERS_EXTEND_UNSIGNED
605 && mode == Pmode && GET_MODE (op) == ptr_mode
606 && CONSTANT_P (op))
607 return convert_memory_address (Pmode, op);
608 #endif
609 break;
611 #ifdef POINTERS_EXTEND_UNSIGNED
612 case ZERO_EXTEND:
613 if (POINTERS_EXTEND_UNSIGNED
614 && mode == Pmode && GET_MODE (op) == ptr_mode
615 && CONSTANT_P (op))
616 return convert_memory_address (Pmode, op);
617 break;
618 #endif
620 default:
621 break;
624 return 0;
628 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
629 and OP1. Return 0 if no simplification is possible.
631 Don't use this for relational operations such as EQ or LT.
632 Use simplify_relational_operation instead. */
635 simplify_binary_operation (code, mode, op0, op1)
636 enum rtx_code code;
637 enum machine_mode mode;
638 rtx op0, op1;
640 register HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
641 HOST_WIDE_INT val;
642 unsigned int width = GET_MODE_BITSIZE (mode);
643 rtx tem;
645 /* Relational operations don't work here. We must know the mode
646 of the operands in order to do the comparison correctly.
647 Assuming a full word can give incorrect results.
648 Consider comparing 128 with -128 in QImode. */
650 if (GET_RTX_CLASS (code) == '<')
651 abort ();
653 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
654 if (GET_MODE_CLASS (mode) == MODE_FLOAT
655 && GET_CODE (op0) == CONST_DOUBLE && GET_CODE (op1) == CONST_DOUBLE
656 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
658 REAL_VALUE_TYPE f0, f1, value;
659 jmp_buf handler;
661 if (setjmp (handler))
662 return 0;
664 set_float_handler (handler);
666 REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
667 REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
668 f0 = real_value_truncate (mode, f0);
669 f1 = real_value_truncate (mode, f1);
671 #ifdef REAL_ARITHMETIC
672 #ifndef REAL_INFINITY
673 if (code == DIV && REAL_VALUES_EQUAL (f1, dconst0))
674 return 0;
675 #endif
676 REAL_ARITHMETIC (value, rtx_to_tree_code (code), f0, f1);
677 #else
678 switch (code)
680 case PLUS:
681 value = f0 + f1;
682 break;
683 case MINUS:
684 value = f0 - f1;
685 break;
686 case MULT:
687 value = f0 * f1;
688 break;
689 case DIV:
690 #ifndef REAL_INFINITY
691 if (f1 == 0)
692 return 0;
693 #endif
694 value = f0 / f1;
695 break;
696 case SMIN:
697 value = MIN (f0, f1);
698 break;
699 case SMAX:
700 value = MAX (f0, f1);
701 break;
702 default:
703 abort ();
705 #endif
707 value = real_value_truncate (mode, value);
708 set_float_handler (NULL_PTR);
709 return CONST_DOUBLE_FROM_REAL_VALUE (value, mode);
711 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
713 /* We can fold some multi-word operations. */
714 if (GET_MODE_CLASS (mode) == MODE_INT
715 && width == HOST_BITS_PER_WIDE_INT * 2
716 && (GET_CODE (op0) == CONST_DOUBLE || GET_CODE (op0) == CONST_INT)
717 && (GET_CODE (op1) == CONST_DOUBLE || GET_CODE (op1) == CONST_INT))
719 unsigned HOST_WIDE_INT l1, l2, lv;
720 HOST_WIDE_INT h1, h2, hv;
722 if (GET_CODE (op0) == CONST_DOUBLE)
723 l1 = CONST_DOUBLE_LOW (op0), h1 = CONST_DOUBLE_HIGH (op0);
724 else
725 l1 = INTVAL (op0), h1 = HWI_SIGN_EXTEND (l1);
727 if (GET_CODE (op1) == CONST_DOUBLE)
728 l2 = CONST_DOUBLE_LOW (op1), h2 = CONST_DOUBLE_HIGH (op1);
729 else
730 l2 = INTVAL (op1), h2 = HWI_SIGN_EXTEND (l2);
732 switch (code)
734 case MINUS:
735 /* A - B == A + (-B). */
736 neg_double (l2, h2, &lv, &hv);
737 l2 = lv, h2 = hv;
739 /* .. fall through ... */
741 case PLUS:
742 add_double (l1, h1, l2, h2, &lv, &hv);
743 break;
745 case MULT:
746 mul_double (l1, h1, l2, h2, &lv, &hv);
747 break;
749 case DIV: case MOD: case UDIV: case UMOD:
750 /* We'd need to include tree.h to do this and it doesn't seem worth
751 it. */
752 return 0;
754 case AND:
755 lv = l1 & l2, hv = h1 & h2;
756 break;
758 case IOR:
759 lv = l1 | l2, hv = h1 | h2;
760 break;
762 case XOR:
763 lv = l1 ^ l2, hv = h1 ^ h2;
764 break;
766 case SMIN:
767 if (h1 < h2
768 || (h1 == h2
769 && ((unsigned HOST_WIDE_INT) l1
770 < (unsigned HOST_WIDE_INT) l2)))
771 lv = l1, hv = h1;
772 else
773 lv = l2, hv = h2;
774 break;
776 case SMAX:
777 if (h1 > h2
778 || (h1 == h2
779 && ((unsigned HOST_WIDE_INT) l1
780 > (unsigned HOST_WIDE_INT) l2)))
781 lv = l1, hv = h1;
782 else
783 lv = l2, hv = h2;
784 break;
786 case UMIN:
787 if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
788 || (h1 == h2
789 && ((unsigned HOST_WIDE_INT) l1
790 < (unsigned HOST_WIDE_INT) l2)))
791 lv = l1, hv = h1;
792 else
793 lv = l2, hv = h2;
794 break;
796 case UMAX:
797 if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
798 || (h1 == h2
799 && ((unsigned HOST_WIDE_INT) l1
800 > (unsigned HOST_WIDE_INT) l2)))
801 lv = l1, hv = h1;
802 else
803 lv = l2, hv = h2;
804 break;
806 case LSHIFTRT: case ASHIFTRT:
807 case ASHIFT:
808 case ROTATE: case ROTATERT:
809 #ifdef SHIFT_COUNT_TRUNCATED
810 if (SHIFT_COUNT_TRUNCATED)
811 l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
812 #endif
814 if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
815 return 0;
817 if (code == LSHIFTRT || code == ASHIFTRT)
818 rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
819 code == ASHIFTRT);
820 else if (code == ASHIFT)
821 lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
822 else if (code == ROTATE)
823 lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
824 else /* code == ROTATERT */
825 rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
826 break;
828 default:
829 return 0;
832 return immed_double_const (lv, hv, mode);
835 if (GET_CODE (op0) != CONST_INT || GET_CODE (op1) != CONST_INT
836 || width > HOST_BITS_PER_WIDE_INT || width == 0)
838 /* Even if we can't compute a constant result,
839 there are some cases worth simplifying. */
841 switch (code)
843 case PLUS:
844 /* In IEEE floating point, x+0 is not the same as x. Similarly
845 for the other optimizations below. */
846 if (TARGET_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
847 && FLOAT_MODE_P (mode) && ! flag_fast_math)
848 break;
850 if (op1 == CONST0_RTX (mode))
851 return op0;
853 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)) */
854 if (GET_CODE (op0) == NEG)
855 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
856 else if (GET_CODE (op1) == NEG)
857 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
859 /* Handle both-operands-constant cases. We can only add
860 CONST_INTs to constants since the sum of relocatable symbols
861 can't be handled by most assemblers. Don't add CONST_INT
862 to CONST_INT since overflow won't be computed properly if wider
863 than HOST_BITS_PER_WIDE_INT. */
865 if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
866 && GET_CODE (op1) == CONST_INT)
867 return plus_constant (op0, INTVAL (op1));
868 else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
869 && GET_CODE (op0) == CONST_INT)
870 return plus_constant (op1, INTVAL (op0));
872 /* See if this is something like X * C - X or vice versa or
873 if the multiplication is written as a shift. If so, we can
874 distribute and make a new multiply, shift, or maybe just
875 have X (if C is 2 in the example above). But don't make
876 real multiply if we didn't have one before. */
878 if (! FLOAT_MODE_P (mode))
880 HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
881 rtx lhs = op0, rhs = op1;
882 int had_mult = 0;
884 if (GET_CODE (lhs) == NEG)
885 coeff0 = -1, lhs = XEXP (lhs, 0);
886 else if (GET_CODE (lhs) == MULT
887 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
889 coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
890 had_mult = 1;
892 else if (GET_CODE (lhs) == ASHIFT
893 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
894 && INTVAL (XEXP (lhs, 1)) >= 0
895 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
897 coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
898 lhs = XEXP (lhs, 0);
901 if (GET_CODE (rhs) == NEG)
902 coeff1 = -1, rhs = XEXP (rhs, 0);
903 else if (GET_CODE (rhs) == MULT
904 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
906 coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
907 had_mult = 1;
909 else if (GET_CODE (rhs) == ASHIFT
910 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
911 && INTVAL (XEXP (rhs, 1)) >= 0
912 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
914 coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
915 rhs = XEXP (rhs, 0);
918 if (rtx_equal_p (lhs, rhs))
920 tem = simplify_gen_binary (MULT, mode, lhs,
921 GEN_INT (coeff0 + coeff1));
922 return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
926 /* If one of the operands is a PLUS or a MINUS, see if we can
927 simplify this by the associative law.
928 Don't use the associative law for floating point.
929 The inaccuracy makes it nonassociative,
930 and subtle programs can break if operations are associated. */
932 if (INTEGRAL_MODE_P (mode)
933 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
934 || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS)
935 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
936 return tem;
937 break;
939 case COMPARE:
940 #ifdef HAVE_cc0
941 /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
942 using cc0, in which case we want to leave it as a COMPARE
943 so we can distinguish it from a register-register-copy.
945 In IEEE floating point, x-0 is not the same as x. */
947 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
948 || ! FLOAT_MODE_P (mode) || flag_fast_math)
949 && op1 == CONST0_RTX (mode))
950 return op0;
951 #endif
953 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
954 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
955 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
956 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
958 rtx xop00 = XEXP (op0, 0);
959 rtx xop10 = XEXP (op1, 0);
961 #ifdef HAVE_cc0
962 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
963 #else
964 if (GET_CODE (xop00) == REG && GET_CODE (xop10) == REG
965 && GET_MODE (xop00) == GET_MODE (xop10)
966 && REGNO (xop00) == REGNO (xop10)
967 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
968 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
969 #endif
970 return xop00;
973 break;
974 case MINUS:
975 /* None of these optimizations can be done for IEEE
976 floating point. */
977 if (TARGET_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
978 && FLOAT_MODE_P (mode) && ! flag_fast_math)
979 break;
981 /* We can't assume x-x is 0 even with non-IEEE floating point,
982 but since it is zero except in very strange circumstances, we
983 will treat it as zero with -ffast-math. */
984 if (rtx_equal_p (op0, op1)
985 && ! side_effects_p (op0)
986 && (! FLOAT_MODE_P (mode) || flag_fast_math))
987 return CONST0_RTX (mode);
989 /* Change subtraction from zero into negation. */
990 if (op0 == CONST0_RTX (mode))
991 return gen_rtx_NEG (mode, op1);
993 /* (-1 - a) is ~a. */
994 if (op0 == constm1_rtx)
995 return gen_rtx_NOT (mode, op1);
997 /* Subtracting 0 has no effect. */
998 if (op1 == CONST0_RTX (mode))
999 return op0;
1001 /* See if this is something like X * C - X or vice versa or
1002 if the multiplication is written as a shift. If so, we can
1003 distribute and make a new multiply, shift, or maybe just
1004 have X (if C is 2 in the example above). But don't make
1005 real multiply if we didn't have one before. */
1007 if (! FLOAT_MODE_P (mode))
1009 HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
1010 rtx lhs = op0, rhs = op1;
1011 int had_mult = 0;
1013 if (GET_CODE (lhs) == NEG)
1014 coeff0 = -1, lhs = XEXP (lhs, 0);
1015 else if (GET_CODE (lhs) == MULT
1016 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1018 coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
1019 had_mult = 1;
1021 else if (GET_CODE (lhs) == ASHIFT
1022 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1023 && INTVAL (XEXP (lhs, 1)) >= 0
1024 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1026 coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1027 lhs = XEXP (lhs, 0);
1030 if (GET_CODE (rhs) == NEG)
1031 coeff1 = - 1, rhs = XEXP (rhs, 0);
1032 else if (GET_CODE (rhs) == MULT
1033 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1035 coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
1036 had_mult = 1;
1038 else if (GET_CODE (rhs) == ASHIFT
1039 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1040 && INTVAL (XEXP (rhs, 1)) >= 0
1041 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1043 coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
1044 rhs = XEXP (rhs, 0);
1047 if (rtx_equal_p (lhs, rhs))
1049 tem = simplify_gen_binary (MULT, mode, lhs,
1050 GEN_INT (coeff0 - coeff1));
1051 return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
1055 /* (a - (-b)) -> (a + b). */
1056 if (GET_CODE (op1) == NEG)
1057 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
1059 /* If one of the operands is a PLUS or a MINUS, see if we can
1060 simplify this by the associative law.
1061 Don't use the associative law for floating point.
1062 The inaccuracy makes it nonassociative,
1063 and subtle programs can break if operations are associated. */
1065 if (INTEGRAL_MODE_P (mode)
1066 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
1067 || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS)
1068 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
1069 return tem;
1071 /* Don't let a relocatable value get a negative coeff. */
1072 if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
1073 return plus_constant (op0, - INTVAL (op1));
1075 /* (x - (x & y)) -> (x & ~y) */
1076 if (GET_CODE (op1) == AND)
1078 if (rtx_equal_p (op0, XEXP (op1, 0)))
1079 return simplify_gen_binary (AND, mode, op0,
1080 gen_rtx_NOT (mode, XEXP (op1, 1)));
1081 if (rtx_equal_p (op0, XEXP (op1, 1)))
1082 return simplify_gen_binary (AND, mode, op0,
1083 gen_rtx_NOT (mode, XEXP (op1, 0)));
1085 break;
1087 case MULT:
1088 if (op1 == constm1_rtx)
1090 tem = simplify_unary_operation (NEG, mode, op0, mode);
1092 return tem ? tem : gen_rtx_NEG (mode, op0);
1095 /* In IEEE floating point, x*0 is not always 0. */
1096 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
1097 || ! FLOAT_MODE_P (mode) || flag_fast_math)
1098 && op1 == CONST0_RTX (mode)
1099 && ! side_effects_p (op0))
1100 return op1;
1102 /* In IEEE floating point, x*1 is not equivalent to x for nans.
1103 However, ANSI says we can drop signals,
1104 so we can do this anyway. */
1105 if (op1 == CONST1_RTX (mode))
1106 return op0;
1108 /* Convert multiply by constant power of two into shift unless
1109 we are still generating RTL. This test is a kludge. */
1110 if (GET_CODE (op1) == CONST_INT
1111 && (val = exact_log2 (INTVAL (op1))) >= 0
1112 /* If the mode is larger than the host word size, and the
1113 uppermost bit is set, then this isn't a power of two due
1114 to implicit sign extension. */
1115 && (width <= HOST_BITS_PER_WIDE_INT
1116 || val != HOST_BITS_PER_WIDE_INT - 1)
1117 && ! rtx_equal_function_value_matters)
1118 return gen_rtx_ASHIFT (mode, op0, GEN_INT (val));
1120 if (GET_CODE (op1) == CONST_DOUBLE
1121 && GET_MODE_CLASS (GET_MODE (op1)) == MODE_FLOAT)
1123 REAL_VALUE_TYPE d;
1124 jmp_buf handler;
1125 int op1is2, op1ism1;
1127 if (setjmp (handler))
1128 return 0;
1130 set_float_handler (handler);
1131 REAL_VALUE_FROM_CONST_DOUBLE (d, op1);
1132 op1is2 = REAL_VALUES_EQUAL (d, dconst2);
1133 op1ism1 = REAL_VALUES_EQUAL (d, dconstm1);
1134 set_float_handler (NULL_PTR);
1136 /* x*2 is x+x and x*(-1) is -x */
1137 if (op1is2 && GET_MODE (op0) == mode)
1138 return gen_rtx_PLUS (mode, op0, copy_rtx (op0));
1140 else if (op1ism1 && GET_MODE (op0) == mode)
1141 return gen_rtx_NEG (mode, op0);
1143 break;
1145 case IOR:
1146 if (op1 == const0_rtx)
1147 return op0;
1148 if (GET_CODE (op1) == CONST_INT
1149 && (INTVAL (op1) & GET_MODE_MASK (mode)) == GET_MODE_MASK (mode))
1150 return op1;
1151 if (rtx_equal_p (op0, op1) && ! side_effects_p (op0))
1152 return op0;
1153 /* A | (~A) -> -1 */
1154 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
1155 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
1156 && ! side_effects_p (op0)
1157 && GET_MODE_CLASS (mode) != MODE_CC)
1158 return constm1_rtx;
1159 break;
1161 case XOR:
1162 if (op1 == const0_rtx)
1163 return op0;
1164 if (GET_CODE (op1) == CONST_INT
1165 && (INTVAL (op1) & GET_MODE_MASK (mode)) == GET_MODE_MASK (mode))
1166 return gen_rtx_NOT (mode, op0);
1167 if (op0 == op1 && ! side_effects_p (op0)
1168 && GET_MODE_CLASS (mode) != MODE_CC)
1169 return const0_rtx;
1170 break;
1172 case AND:
1173 if (op1 == const0_rtx && ! side_effects_p (op0))
1174 return const0_rtx;
1175 if (GET_CODE (op1) == CONST_INT
1176 && (INTVAL (op1) & GET_MODE_MASK (mode)) == GET_MODE_MASK (mode))
1177 return op0;
1178 if (op0 == op1 && ! side_effects_p (op0)
1179 && GET_MODE_CLASS (mode) != MODE_CC)
1180 return op0;
1181 /* A & (~A) -> 0 */
1182 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
1183 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
1184 && ! side_effects_p (op0)
1185 && GET_MODE_CLASS (mode) != MODE_CC)
1186 return const0_rtx;
1187 break;
1189 case UDIV:
1190 /* Convert divide by power of two into shift (divide by 1 handled
1191 below). */
1192 if (GET_CODE (op1) == CONST_INT
1193 && (arg1 = exact_log2 (INTVAL (op1))) > 0)
1194 return gen_rtx_LSHIFTRT (mode, op0, GEN_INT (arg1));
1196 /* ... fall through ... */
1198 case DIV:
1199 if (op1 == CONST1_RTX (mode))
1200 return op0;
1202 /* In IEEE floating point, 0/x is not always 0. */
1203 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
1204 || ! FLOAT_MODE_P (mode) || flag_fast_math)
1205 && op0 == CONST0_RTX (mode)
1206 && ! side_effects_p (op1))
1207 return op0;
1209 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
1210 /* Change division by a constant into multiplication. Only do
1211 this with -ffast-math until an expert says it is safe in
1212 general. */
1213 else if (GET_CODE (op1) == CONST_DOUBLE
1214 && GET_MODE_CLASS (GET_MODE (op1)) == MODE_FLOAT
1215 && op1 != CONST0_RTX (mode)
1216 && flag_fast_math)
1218 REAL_VALUE_TYPE d;
1219 REAL_VALUE_FROM_CONST_DOUBLE (d, op1);
1221 if (! REAL_VALUES_EQUAL (d, dconst0))
1223 #if defined (REAL_ARITHMETIC)
1224 REAL_ARITHMETIC (d, rtx_to_tree_code (DIV), dconst1, d);
1225 return gen_rtx_MULT (mode, op0,
1226 CONST_DOUBLE_FROM_REAL_VALUE (d, mode));
1227 #else
1228 return
1229 gen_rtx_MULT (mode, op0,
1230 CONST_DOUBLE_FROM_REAL_VALUE (1./d, mode));
1231 #endif
1234 #endif
1235 break;
1237 case UMOD:
1238 /* Handle modulus by power of two (mod with 1 handled below). */
1239 if (GET_CODE (op1) == CONST_INT
1240 && exact_log2 (INTVAL (op1)) > 0)
1241 return gen_rtx_AND (mode, op0, GEN_INT (INTVAL (op1) - 1));
1243 /* ... fall through ... */
1245 case MOD:
1246 if ((op0 == const0_rtx || op1 == const1_rtx)
1247 && ! side_effects_p (op0) && ! side_effects_p (op1))
1248 return const0_rtx;
1249 break;
1251 case ROTATERT:
1252 case ROTATE:
1253 /* Rotating ~0 always results in ~0. */
1254 if (GET_CODE (op0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
1255 && (unsigned HOST_WIDE_INT) INTVAL (op0) == GET_MODE_MASK (mode)
1256 && ! side_effects_p (op1))
1257 return op0;
1259 /* ... fall through ... */
1261 case ASHIFT:
1262 case ASHIFTRT:
1263 case LSHIFTRT:
1264 if (op1 == const0_rtx)
1265 return op0;
1266 if (op0 == const0_rtx && ! side_effects_p (op1))
1267 return op0;
1268 break;
1270 case SMIN:
1271 if (width <= HOST_BITS_PER_WIDE_INT && GET_CODE (op1) == CONST_INT
1272 && INTVAL (op1) == (HOST_WIDE_INT) 1 << (width -1)
1273 && ! side_effects_p (op0))
1274 return op1;
1275 else if (rtx_equal_p (op0, op1) && ! side_effects_p (op0))
1276 return op0;
1277 break;
1279 case SMAX:
1280 if (width <= HOST_BITS_PER_WIDE_INT && GET_CODE (op1) == CONST_INT
1281 && ((unsigned HOST_WIDE_INT) INTVAL (op1)
1282 == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
1283 && ! side_effects_p (op0))
1284 return op1;
1285 else if (rtx_equal_p (op0, op1) && ! side_effects_p (op0))
1286 return op0;
1287 break;
1289 case UMIN:
1290 if (op1 == const0_rtx && ! side_effects_p (op0))
1291 return op1;
1292 else if (rtx_equal_p (op0, op1) && ! side_effects_p (op0))
1293 return op0;
1294 break;
1296 case UMAX:
1297 if (op1 == constm1_rtx && ! side_effects_p (op0))
1298 return op1;
1299 else if (rtx_equal_p (op0, op1) && ! side_effects_p (op0))
1300 return op0;
1301 break;
1303 default:
1304 abort ();
1307 return 0;
1310 /* Get the integer argument values in two forms:
1311 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
1313 arg0 = INTVAL (op0);
1314 arg1 = INTVAL (op1);
1316 if (width < HOST_BITS_PER_WIDE_INT)
1318 arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
1319 arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;
1321 arg0s = arg0;
1322 if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
1323 arg0s |= ((HOST_WIDE_INT) (-1) << width);
1325 arg1s = arg1;
1326 if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
1327 arg1s |= ((HOST_WIDE_INT) (-1) << width);
1329 else
1331 arg0s = arg0;
1332 arg1s = arg1;
1335 /* Compute the value of the arithmetic. */
1337 switch (code)
1339 case PLUS:
1340 val = arg0s + arg1s;
1341 break;
1343 case MINUS:
1344 val = arg0s - arg1s;
1345 break;
1347 case MULT:
1348 val = arg0s * arg1s;
1349 break;
1351 case DIV:
1352 if (arg1s == 0)
1353 return 0;
1354 val = arg0s / arg1s;
1355 break;
1357 case MOD:
1358 if (arg1s == 0)
1359 return 0;
1360 val = arg0s % arg1s;
1361 break;
1363 case UDIV:
1364 if (arg1 == 0)
1365 return 0;
1366 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
1367 break;
1369 case UMOD:
1370 if (arg1 == 0)
1371 return 0;
1372 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
1373 break;
1375 case AND:
1376 val = arg0 & arg1;
1377 break;
1379 case IOR:
1380 val = arg0 | arg1;
1381 break;
1383 case XOR:
1384 val = arg0 ^ arg1;
1385 break;
1387 case LSHIFTRT:
1388 /* If shift count is undefined, don't fold it; let the machine do
1389 what it wants. But truncate it if the machine will do that. */
1390 if (arg1 < 0)
1391 return 0;
1393 #ifdef SHIFT_COUNT_TRUNCATED
1394 if (SHIFT_COUNT_TRUNCATED)
1395 arg1 %= width;
1396 #endif
1398 val = ((unsigned HOST_WIDE_INT) arg0) >> arg1;
1399 break;
1401 case ASHIFT:
1402 if (arg1 < 0)
1403 return 0;
1405 #ifdef SHIFT_COUNT_TRUNCATED
1406 if (SHIFT_COUNT_TRUNCATED)
1407 arg1 %= width;
1408 #endif
1410 val = ((unsigned HOST_WIDE_INT) arg0) << arg1;
1411 break;
1413 case ASHIFTRT:
1414 if (arg1 < 0)
1415 return 0;
1417 #ifdef SHIFT_COUNT_TRUNCATED
1418 if (SHIFT_COUNT_TRUNCATED)
1419 arg1 %= width;
1420 #endif
1422 val = arg0s >> arg1;
1424 /* Bootstrap compiler may not have sign extended the right shift.
1425 Manually extend the sign to ensure bootstrap cc matches gcc. */
1426 if (arg0s < 0 && arg1 > 0)
1427 val |= ((HOST_WIDE_INT) -1) << (HOST_BITS_PER_WIDE_INT - arg1);
1429 break;
1431 case ROTATERT:
1432 if (arg1 < 0)
1433 return 0;
1435 arg1 %= width;
1436 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
1437 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
1438 break;
1440 case ROTATE:
1441 if (arg1 < 0)
1442 return 0;
1444 arg1 %= width;
1445 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
1446 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
1447 break;
1449 case COMPARE:
1450 /* Do nothing here. */
1451 return 0;
1453 case SMIN:
1454 val = arg0s <= arg1s ? arg0s : arg1s;
1455 break;
1457 case UMIN:
1458 val = ((unsigned HOST_WIDE_INT) arg0
1459 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
1460 break;
1462 case SMAX:
1463 val = arg0s > arg1s ? arg0s : arg1s;
1464 break;
1466 case UMAX:
1467 val = ((unsigned HOST_WIDE_INT) arg0
1468 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
1469 break;
1471 default:
1472 abort ();
1475 val = trunc_int_for_mode (val, mode);
1477 return GEN_INT (val);
1480 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
1481 PLUS or MINUS.
1483 Rather than test for specific case, we do this by a brute-force method
1484 and do all possible simplifications until no more changes occur. Then
1485 we rebuild the operation. */
1487 static rtx
1488 simplify_plus_minus (code, mode, op0, op1)
1489 enum rtx_code code;
1490 enum machine_mode mode;
1491 rtx op0, op1;
1493 rtx ops[8];
1494 int negs[8];
1495 rtx result, tem;
1496 int n_ops = 2, input_ops = 2, input_consts = 0, n_consts = 0;
1497 int first = 1, negate = 0, changed;
1498 int i, j;
1500 bzero ((char *) ops, sizeof ops);
1502 /* Set up the two operands and then expand them until nothing has been
1503 changed. If we run out of room in our array, give up; this should
1504 almost never happen. */
1506 ops[0] = op0, ops[1] = op1, negs[0] = 0, negs[1] = (code == MINUS);
1508 changed = 1;
1509 while (changed)
1511 changed = 0;
1513 for (i = 0; i < n_ops; i++)
1514 switch (GET_CODE (ops[i]))
1516 case PLUS:
1517 case MINUS:
1518 if (n_ops == 7)
1519 return 0;
1521 ops[n_ops] = XEXP (ops[i], 1);
1522 negs[n_ops++] = GET_CODE (ops[i]) == MINUS ? !negs[i] : negs[i];
1523 ops[i] = XEXP (ops[i], 0);
1524 input_ops++;
1525 changed = 1;
1526 break;
1528 case NEG:
1529 ops[i] = XEXP (ops[i], 0);
1530 negs[i] = ! negs[i];
1531 changed = 1;
1532 break;
1534 case CONST:
1535 ops[i] = XEXP (ops[i], 0);
1536 input_consts++;
1537 changed = 1;
1538 break;
1540 case NOT:
1541 /* ~a -> (-a - 1) */
1542 if (n_ops != 7)
1544 ops[n_ops] = constm1_rtx;
1545 negs[n_ops++] = negs[i];
1546 ops[i] = XEXP (ops[i], 0);
1547 negs[i] = ! negs[i];
1548 changed = 1;
1550 break;
1552 case CONST_INT:
1553 if (negs[i])
1554 ops[i] = GEN_INT (- INTVAL (ops[i])), negs[i] = 0, changed = 1;
1555 break;
1557 default:
1558 break;
1562 /* If we only have two operands, we can't do anything. */
1563 if (n_ops <= 2)
1564 return 0;
1566 /* Now simplify each pair of operands until nothing changes. The first
1567 time through just simplify constants against each other. */
1569 changed = 1;
1570 while (changed)
1572 changed = first;
1574 for (i = 0; i < n_ops - 1; i++)
1575 for (j = i + 1; j < n_ops; j++)
1576 if (ops[i] != 0 && ops[j] != 0
1577 && (! first || (CONSTANT_P (ops[i]) && CONSTANT_P (ops[j]))))
1579 rtx lhs = ops[i], rhs = ops[j];
1580 enum rtx_code ncode = PLUS;
1582 if (negs[i] && ! negs[j])
1583 lhs = ops[j], rhs = ops[i], ncode = MINUS;
1584 else if (! negs[i] && negs[j])
1585 ncode = MINUS;
1587 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
1588 if (tem)
1590 ops[i] = tem, ops[j] = 0;
1591 negs[i] = negs[i] && negs[j];
1592 if (GET_CODE (tem) == NEG)
1593 ops[i] = XEXP (tem, 0), negs[i] = ! negs[i];
1595 if (GET_CODE (ops[i]) == CONST_INT && negs[i])
1596 ops[i] = GEN_INT (- INTVAL (ops[i])), negs[i] = 0;
1597 changed = 1;
1601 first = 0;
1604 /* Pack all the operands to the lower-numbered entries and give up if
1605 we didn't reduce the number of operands we had. Make sure we
1606 count a CONST as two operands. If we have the same number of
1607 operands, but have made more CONSTs than we had, this is also
1608 an improvement, so accept it. */
1610 for (i = 0, j = 0; j < n_ops; j++)
1611 if (ops[j] != 0)
1613 ops[i] = ops[j], negs[i++] = negs[j];
1614 if (GET_CODE (ops[j]) == CONST)
1615 n_consts++;
1618 if (i + n_consts > input_ops
1619 || (i + n_consts == input_ops && n_consts <= input_consts))
1620 return 0;
1622 n_ops = i;
1624 /* If we have a CONST_INT, put it last. */
1625 for (i = 0; i < n_ops - 1; i++)
1626 if (GET_CODE (ops[i]) == CONST_INT)
1628 tem = ops[n_ops - 1], ops[n_ops - 1] = ops[i] , ops[i] = tem;
1629 j = negs[n_ops - 1], negs[n_ops - 1] = negs[i], negs[i] = j;
1632 /* Put a non-negated operand first. If there aren't any, make all
1633 operands positive and negate the whole thing later. */
1634 for (i = 0; i < n_ops && negs[i]; i++)
1637 if (i == n_ops)
1639 for (i = 0; i < n_ops; i++)
1640 negs[i] = 0;
1641 negate = 1;
1643 else if (i != 0)
1645 tem = ops[0], ops[0] = ops[i], ops[i] = tem;
1646 j = negs[0], negs[0] = negs[i], negs[i] = j;
1649 /* Now make the result by performing the requested operations. */
1650 result = ops[0];
1651 for (i = 1; i < n_ops; i++)
1652 result = simplify_gen_binary (negs[i] ? MINUS : PLUS, mode, result, ops[i]);
1654 return negate ? gen_rtx_NEG (mode, result) : result;
1657 struct cfc_args
1659 rtx op0, op1; /* Input */
1660 int equal, op0lt, op1lt; /* Output */
1663 static void
1664 check_fold_consts (data)
1665 PTR data;
1667 struct cfc_args *args = (struct cfc_args *) data;
1668 REAL_VALUE_TYPE d0, d1;
1670 REAL_VALUE_FROM_CONST_DOUBLE (d0, args->op0);
1671 REAL_VALUE_FROM_CONST_DOUBLE (d1, args->op1);
1672 args->equal = REAL_VALUES_EQUAL (d0, d1);
1673 args->op0lt = REAL_VALUES_LESS (d0, d1);
1674 args->op1lt = REAL_VALUES_LESS (d1, d0);
1677 /* Like simplify_binary_operation except used for relational operators.
1678 MODE is the mode of the operands, not that of the result. If MODE
1679 is VOIDmode, both operands must also be VOIDmode and we compare the
1680 operands in "infinite precision".
1682 If no simplification is possible, this function returns zero. Otherwise,
1683 it returns either const_true_rtx or const0_rtx. */
1686 simplify_relational_operation (code, mode, op0, op1)
1687 enum rtx_code code;
1688 enum machine_mode mode;
1689 rtx op0, op1;
1691 int equal, op0lt, op0ltu, op1lt, op1ltu;
1692 rtx tem;
1694 /* If op0 is a compare, extract the comparison arguments from it. */
1695 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
1696 op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);
1698 /* We can't simplify MODE_CC values since we don't know what the
1699 actual comparison is. */
1700 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC
1701 #ifdef HAVE_cc0
1702 || op0 == cc0_rtx
1703 #endif
1705 return 0;
1707 /* Make sure the constant is second. */
1708 if ((CONSTANT_P (op0) && ! CONSTANT_P (op1))
1709 || (GET_CODE (op0) == CONST_INT && GET_CODE (op1) != CONST_INT))
1711 tem = op0, op0 = op1, op1 = tem;
1712 code = swap_condition (code);
1715 /* For integer comparisons of A and B maybe we can simplify A - B and can
1716 then simplify a comparison of that with zero. If A and B are both either
1717 a register or a CONST_INT, this can't help; testing for these cases will
1718 prevent infinite recursion here and speed things up.
1720 If CODE is an unsigned comparison, then we can never do this optimization,
1721 because it gives an incorrect result if the subtraction wraps around zero.
1722 ANSI C defines unsigned operations such that they never overflow, and
1723 thus such cases can not be ignored. */
1725 if (INTEGRAL_MODE_P (mode) && op1 != const0_rtx
1726 && ! ((GET_CODE (op0) == REG || GET_CODE (op0) == CONST_INT)
1727 && (GET_CODE (op1) == REG || GET_CODE (op1) == CONST_INT))
1728 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
1729 && code != GTU && code != GEU && code != LTU && code != LEU)
1730 return simplify_relational_operation (signed_condition (code),
1731 mode, tem, const0_rtx);
1733 /* For non-IEEE floating-point, if the two operands are equal, we know the
1734 result. */
1735 if (rtx_equal_p (op0, op1)
1736 && (TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
1737 || ! FLOAT_MODE_P (GET_MODE (op0)) || flag_fast_math))
1738 equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;
1740 /* If the operands are floating-point constants, see if we can fold
1741 the result. */
1742 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
1743 else if (GET_CODE (op0) == CONST_DOUBLE && GET_CODE (op1) == CONST_DOUBLE
1744 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_FLOAT)
1746 struct cfc_args args;
1748 /* Setup input for check_fold_consts() */
1749 args.op0 = op0;
1750 args.op1 = op1;
1752 if (do_float_handler(check_fold_consts, (PTR) &args) == 0)
1753 /* We got an exception from check_fold_consts() */
1754 return 0;
1756 /* Receive output from check_fold_consts() */
1757 equal = args.equal;
1758 op0lt = op0ltu = args.op0lt;
1759 op1lt = op1ltu = args.op1lt;
1761 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
1763 /* Otherwise, see if the operands are both integers. */
1764 else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
1765 && (GET_CODE (op0) == CONST_DOUBLE || GET_CODE (op0) == CONST_INT)
1766 && (GET_CODE (op1) == CONST_DOUBLE || GET_CODE (op1) == CONST_INT))
1768 int width = GET_MODE_BITSIZE (mode);
1769 HOST_WIDE_INT l0s, h0s, l1s, h1s;
1770 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
1772 /* Get the two words comprising each integer constant. */
1773 if (GET_CODE (op0) == CONST_DOUBLE)
1775 l0u = l0s = CONST_DOUBLE_LOW (op0);
1776 h0u = h0s = CONST_DOUBLE_HIGH (op0);
1778 else
1780 l0u = l0s = INTVAL (op0);
1781 h0u = h0s = HWI_SIGN_EXTEND (l0s);
1784 if (GET_CODE (op1) == CONST_DOUBLE)
1786 l1u = l1s = CONST_DOUBLE_LOW (op1);
1787 h1u = h1s = CONST_DOUBLE_HIGH (op1);
1789 else
1791 l1u = l1s = INTVAL (op1);
1792 h1u = h1s = HWI_SIGN_EXTEND (l1s);
1795 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
1796 we have to sign or zero-extend the values. */
1797 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
1798 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
1800 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
1802 l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
1803 l1u &= ((HOST_WIDE_INT) 1 << width) - 1;
1805 if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
1806 l0s |= ((HOST_WIDE_INT) (-1) << width);
1808 if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
1809 l1s |= ((HOST_WIDE_INT) (-1) << width);
1812 equal = (h0u == h1u && l0u == l1u);
1813 op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
1814 op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
1815 op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
1816 op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
1819 /* Otherwise, there are some code-specific tests we can make. */
1820 else
1822 switch (code)
1824 case EQ:
1825 /* References to the frame plus a constant or labels cannot
1826 be zero, but a SYMBOL_REF can due to #pragma weak. */
1827 if (((NONZERO_BASE_PLUS_P (op0) && op1 == const0_rtx)
1828 || GET_CODE (op0) == LABEL_REF)
1829 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
1830 /* On some machines, the ap reg can be 0 sometimes. */
1831 && op0 != arg_pointer_rtx
1832 #endif
1834 return const0_rtx;
1835 break;
1837 case NE:
1838 if (((NONZERO_BASE_PLUS_P (op0) && op1 == const0_rtx)
1839 || GET_CODE (op0) == LABEL_REF)
1840 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
1841 && op0 != arg_pointer_rtx
1842 #endif
1844 return const_true_rtx;
1845 break;
1847 case GEU:
1848 /* Unsigned values are never negative. */
1849 if (op1 == const0_rtx)
1850 return const_true_rtx;
1851 break;
1853 case LTU:
1854 if (op1 == const0_rtx)
1855 return const0_rtx;
1856 break;
1858 case LEU:
1859 /* Unsigned values are never greater than the largest
1860 unsigned value. */
1861 if (GET_CODE (op1) == CONST_INT
1862 && (unsigned HOST_WIDE_INT) INTVAL (op1) == GET_MODE_MASK (mode)
1863 && INTEGRAL_MODE_P (mode))
1864 return const_true_rtx;
1865 break;
1867 case GTU:
1868 if (GET_CODE (op1) == CONST_INT
1869 && (unsigned HOST_WIDE_INT) INTVAL (op1) == GET_MODE_MASK (mode)
1870 && INTEGRAL_MODE_P (mode))
1871 return const0_rtx;
1872 break;
1874 default:
1875 break;
1878 return 0;
1881 /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
1882 as appropriate. */
1883 switch (code)
1885 case EQ:
1886 return equal ? const_true_rtx : const0_rtx;
1887 case NE:
1888 return ! equal ? const_true_rtx : const0_rtx;
1889 case LT:
1890 return op0lt ? const_true_rtx : const0_rtx;
1891 case GT:
1892 return op1lt ? const_true_rtx : const0_rtx;
1893 case LTU:
1894 return op0ltu ? const_true_rtx : const0_rtx;
1895 case GTU:
1896 return op1ltu ? const_true_rtx : const0_rtx;
1897 case LE:
1898 return equal || op0lt ? const_true_rtx : const0_rtx;
1899 case GE:
1900 return equal || op1lt ? const_true_rtx : const0_rtx;
1901 case LEU:
1902 return equal || op0ltu ? const_true_rtx : const0_rtx;
1903 case GEU:
1904 return equal || op1ltu ? const_true_rtx : const0_rtx;
1905 default:
1906 abort ();
1910 /* Simplify CODE, an operation with result mode MODE and three operands,
1911 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
1912 a constant. Return 0 if no simplifications is possible. */
1915 simplify_ternary_operation (code, mode, op0_mode, op0, op1, op2)
1916 enum rtx_code code;
1917 enum machine_mode mode, op0_mode;
1918 rtx op0, op1, op2;
1920 unsigned int width = GET_MODE_BITSIZE (mode);
1922 /* VOIDmode means "infinite" precision. */
1923 if (width == 0)
1924 width = HOST_BITS_PER_WIDE_INT;
1926 switch (code)
1928 case SIGN_EXTRACT:
1929 case ZERO_EXTRACT:
1930 if (GET_CODE (op0) == CONST_INT
1931 && GET_CODE (op1) == CONST_INT
1932 && GET_CODE (op2) == CONST_INT
1933 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
1934 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
1936 /* Extracting a bit-field from a constant */
1937 HOST_WIDE_INT val = INTVAL (op0);
1939 if (BITS_BIG_ENDIAN)
1940 val >>= (GET_MODE_BITSIZE (op0_mode)
1941 - INTVAL (op2) - INTVAL (op1));
1942 else
1943 val >>= INTVAL (op2);
1945 if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
1947 /* First zero-extend. */
1948 val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
1949 /* If desired, propagate sign bit. */
1950 if (code == SIGN_EXTRACT
1951 && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
1952 val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
1955 /* Clear the bits that don't belong in our mode,
1956 unless they and our sign bit are all one.
1957 So we get either a reasonable negative value or a reasonable
1958 unsigned value for this mode. */
1959 if (width < HOST_BITS_PER_WIDE_INT
1960 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
1961 != ((HOST_WIDE_INT) (-1) << (width - 1))))
1962 val &= ((HOST_WIDE_INT) 1 << width) - 1;
1964 return GEN_INT (val);
1966 break;
1968 case IF_THEN_ELSE:
1969 if (GET_CODE (op0) == CONST_INT)
1970 return op0 != const0_rtx ? op1 : op2;
1972 /* Convert a == b ? b : a to "a". */
1973 if (GET_CODE (op0) == NE && ! side_effects_p (op0)
1974 && rtx_equal_p (XEXP (op0, 0), op1)
1975 && rtx_equal_p (XEXP (op0, 1), op2))
1976 return op1;
1977 else if (GET_CODE (op0) == EQ && ! side_effects_p (op0)
1978 && rtx_equal_p (XEXP (op0, 1), op1)
1979 && rtx_equal_p (XEXP (op0, 0), op2))
1980 return op2;
1981 else if (GET_RTX_CLASS (GET_CODE (op0)) == '<' && ! side_effects_p (op0))
1983 rtx temp
1984 = simplify_relational_operation (GET_CODE (op0), op0_mode,
1985 XEXP (op0, 0), XEXP (op0, 1));
1987 /* See if any simplifications were possible. */
1988 if (temp == const0_rtx)
1989 return op2;
1990 else if (temp == const1_rtx)
1991 return op1;
1992 else if (temp)
1993 op0 = temp;
1995 /* Look for happy constants in op1 and op2. */
1996 if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
1998 HOST_WIDE_INT t = INTVAL (op1);
1999 HOST_WIDE_INT f = INTVAL (op2);
2001 if (t == STORE_FLAG_VALUE && f == 0)
2002 code = GET_CODE (op0);
2003 else if (t == 0 && f == STORE_FLAG_VALUE
2004 && can_reverse_comparison_p (op0, NULL_RTX))
2005 code = reverse_condition (GET_CODE (op0));
2006 else
2007 break;
2009 return gen_rtx_fmt_ee (code, mode, XEXP (op0, 0), XEXP (op0, 1));
2012 break;
2014 default:
2015 abort ();
2018 return 0;
2021 /* Simplify X, an rtx expression.
2023 Return the simplified expression or NULL if no simplifications
2024 were possible.
2026 This is the preferred entry point into the simplification routines;
2027 however, we still allow passes to call the more specific routines.
2029 Right now GCC has three (yes, three) major bodies of RTL simplficiation
2030 code that need to be unified.
2032 1. fold_rtx in cse.c. This code uses various CSE specific
2033 information to aid in RTL simplification.
2035 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
2036 it uses combine specific information to aid in RTL
2037 simplification.
2039 3. The routines in this file.
2042 Long term we want to only have one body of simplification code; to
2043 get to that state I recommend the following steps:
2045 1. Pour over fold_rtx & simplify_rtx and move any simplifications
2046 which are not pass dependent state into these routines.
2048 2. As code is moved by #1, change fold_rtx & simplify_rtx to
2049 use this routine whenever possible.
2051 3. Allow for pass dependent state to be provided to these
2052 routines and add simplifications based on the pass dependent
2053 state. Remove code from cse.c & combine.c that becomes
2054 redundant/dead.
2056 It will take time, but ultimately the compiler will be easier to
2057 maintain and improve. It's totally silly that when we add a
2058 simplification that it needs to be added to 4 places (3 for RTL
2059 simplification and 1 for tree simplification. */
2062 simplify_rtx (x)
2063 rtx x;
2065 enum rtx_code code;
2066 enum machine_mode mode;
2068 mode = GET_MODE (x);
2069 code = GET_CODE (x);
2071 switch (GET_RTX_CLASS (code))
2073 case '1':
2074 return simplify_unary_operation (code, mode,
2075 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
2076 case '2':
2077 case 'c':
2078 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
2080 case '3':
2081 case 'b':
2082 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
2083 XEXP (x, 0), XEXP (x, 1), XEXP (x, 2));
2085 case '<':
2086 return simplify_relational_operation (code, GET_MODE (XEXP (x, 0)),
2087 XEXP (x, 0), XEXP (x, 1));
2088 default:
2089 return NULL;
2094 /* Allocate a struct elt_list and fill in its two elements with the
2095 arguments. */
2097 static struct elt_list *
2098 new_elt_list (next, elt)
2099 struct elt_list *next;
2100 cselib_val *elt;
2102 struct elt_list *el = empty_elt_lists;
2104 if (el)
2105 empty_elt_lists = el->next;
2106 else
2107 el = (struct elt_list *) obstack_alloc (&cselib_obstack,
2108 sizeof (struct elt_list));
2109 el->next = next;
2110 el->elt = elt;
2111 return el;
2114 /* Allocate a struct elt_loc_list and fill in its two elements with the
2115 arguments. */
2117 static struct elt_loc_list *
2118 new_elt_loc_list (next, loc)
2119 struct elt_loc_list *next;
2120 rtx loc;
2122 struct elt_loc_list *el = empty_elt_loc_lists;
2124 if (el)
2125 empty_elt_loc_lists = el->next;
2126 else
2127 el = (struct elt_loc_list *) obstack_alloc (&cselib_obstack,
2128 sizeof (struct elt_loc_list));
2129 el->next = next;
2130 el->loc = loc;
2131 el->setting_insn = cselib_current_insn;
2132 return el;
2135 /* The elt_list at *PL is no longer needed. Unchain it and free its
2136 storage. */
2138 static void
2139 unchain_one_elt_list (pl)
2140 struct elt_list **pl;
2142 struct elt_list *l = *pl;
2144 *pl = l->next;
2145 l->next = empty_elt_lists;
2146 empty_elt_lists = l;
2149 /* Likewise for elt_loc_lists. */
2151 static void
2152 unchain_one_elt_loc_list (pl)
2153 struct elt_loc_list **pl;
2155 struct elt_loc_list *l = *pl;
2157 *pl = l->next;
2158 l->next = empty_elt_loc_lists;
2159 empty_elt_loc_lists = l;
2162 /* Likewise for cselib_vals. This also frees the addr_list associated with
2163 V. */
2165 static void
2166 unchain_one_value (v)
2167 cselib_val *v;
2169 while (v->addr_list)
2170 unchain_one_elt_list (&v->addr_list);
2172 v->u.next_free = empty_vals;
2173 empty_vals = v;
2176 /* Remove all entries from the hash table. Also used during
2177 initialization. */
2179 static void
2180 clear_table ()
2182 unsigned int i;
2184 for (i = 0; i < cselib_nregs; i++)
2185 REG_VALUES (i) = 0;
2187 htab_empty (hash_table);
2188 obstack_free (&cselib_obstack, cselib_startobj);
2190 empty_vals = 0;
2191 empty_elt_lists = 0;
2192 empty_elt_loc_lists = 0;
2193 n_useless_values = 0;
2195 next_unknown_value = 0;
2198 /* The equality test for our hash table. The first argument ENTRY is a table
2199 element (i.e. a cselib_val), while the second arg X is an rtx. */
2201 static int
2202 entry_and_rtx_equal_p (entry, x_arg)
2203 const void *entry, *x_arg;
2205 struct elt_loc_list *l;
2206 const cselib_val *v = (const cselib_val *) entry;
2207 rtx x = (rtx) x_arg;
2209 /* We don't guarantee that distinct rtx's have different hash values,
2210 so we need to do a comparison. */
2211 for (l = v->locs; l; l = l->next)
2212 if (rtx_equal_for_cselib_p (l->loc, x))
2213 return 1;
2215 return 0;
2218 /* The hash function for our hash table. The value is always computed with
2219 hash_rtx when adding an element; this function just extracts the hash
2220 value from a cselib_val structure. */
2222 static unsigned int
2223 get_value_hash (entry)
2224 const void *entry;
2226 const cselib_val *v = (const cselib_val *) entry;
2227 return v->value;
2230 /* Return true if X contains a VALUE rtx. If ONLY_USELESS is set, we
2231 only return true for values which point to a cselib_val whose value
2232 element has been set to zero, which implies the cselib_val will be
2233 removed. */
2236 references_value_p (x, only_useless)
2237 rtx x;
2238 int only_useless;
2240 enum rtx_code code = GET_CODE (x);
2241 const char *fmt = GET_RTX_FORMAT (code);
2242 int i, j;
2244 if (GET_CODE (x) == VALUE
2245 && (! only_useless || CSELIB_VAL_PTR (x)->locs == 0))
2246 return 1;
2248 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2250 if (fmt[i] == 'e' && references_value_p (XEXP (x, i), only_useless))
2251 return 1;
2252 else if (fmt[i] == 'E')
2253 for (j = 0; j < XVECLEN (x, i); j++)
2254 if (references_value_p (XVECEXP (x, i, j), only_useless))
2255 return 1;
2258 return 0;
2261 /* For all locations found in X, delete locations that reference useless
2262 values (i.e. values without any location). Called through
2263 htab_traverse. */
2265 static int
2266 discard_useless_locs (x, info)
2267 void **x;
2268 void *info ATTRIBUTE_UNUSED;
2270 cselib_val *v = (cselib_val *)*x;
2271 struct elt_loc_list **p = &v->locs;
2272 int had_locs = v->locs != 0;
2274 while (*p)
2276 if (references_value_p ((*p)->loc, 1))
2277 unchain_one_elt_loc_list (p);
2278 else
2279 p = &(*p)->next;
2282 if (had_locs && v->locs == 0)
2284 n_useless_values++;
2285 values_became_useless = 1;
2287 return 1;
2290 /* If X is a value with no locations, remove it from the hashtable. */
2292 static int
2293 discard_useless_values (x, info)
2294 void **x;
2295 void *info ATTRIBUTE_UNUSED;
2297 cselib_val *v = (cselib_val *)*x;
2299 if (v->locs == 0)
2301 htab_clear_slot (hash_table, x);
2302 unchain_one_value (v);
2303 n_useless_values--;
2306 return 1;
2309 /* Clean out useless values (i.e. those which no longer have locations
2310 associated with them) from the hash table. */
2312 static void
2313 remove_useless_values ()
2315 /* First pass: eliminate locations that reference the value. That in
2316 turn can make more values useless. */
2319 values_became_useless = 0;
2320 htab_traverse (hash_table, discard_useless_locs, 0);
2322 while (values_became_useless);
2324 /* Second pass: actually remove the values. */
2325 htab_traverse (hash_table, discard_useless_values, 0);
2327 if (n_useless_values != 0)
2328 abort ();
2331 /* Return nonzero if we can prove that X and Y contain the same value, taking
2332 our gathered information into account. */
2335 rtx_equal_for_cselib_p (x, y)
2336 rtx x, y;
2338 enum rtx_code code;
2339 const char *fmt;
2340 int i;
2342 if (GET_CODE (x) == REG || GET_CODE (x) == MEM)
2344 cselib_val *e = cselib_lookup (x, VOIDmode, 0);
2346 if (e)
2347 x = e->u.val_rtx;
2350 if (GET_CODE (y) == REG || GET_CODE (y) == MEM)
2352 cselib_val *e = cselib_lookup (y, VOIDmode, 0);
2354 if (e)
2355 y = e->u.val_rtx;
2358 if (x == y)
2359 return 1;
2361 if (GET_CODE (x) == VALUE && GET_CODE (y) == VALUE)
2362 return CSELIB_VAL_PTR (x) == CSELIB_VAL_PTR (y);
2364 if (GET_CODE (x) == VALUE)
2366 cselib_val *e = CSELIB_VAL_PTR (x);
2367 struct elt_loc_list *l;
2369 for (l = e->locs; l; l = l->next)
2371 rtx t = l->loc;
2373 /* Avoid infinite recursion. */
2374 if (GET_CODE (t) == REG || GET_CODE (t) == MEM)
2375 continue;
2376 else if (rtx_equal_for_cselib_p (t, y))
2377 return 1;
2380 return 0;
2383 if (GET_CODE (y) == VALUE)
2385 cselib_val *e = CSELIB_VAL_PTR (y);
2386 struct elt_loc_list *l;
2388 for (l = e->locs; l; l = l->next)
2390 rtx t = l->loc;
2392 if (GET_CODE (t) == REG || GET_CODE (t) == MEM)
2393 continue;
2394 else if (rtx_equal_for_cselib_p (x, t))
2395 return 1;
2398 return 0;
2401 if (GET_CODE (x) != GET_CODE (y) || GET_MODE (x) != GET_MODE (y))
2402 return 0;
2404 /* This won't be handled correctly by the code below. */
2405 if (GET_CODE (x) == LABEL_REF)
2406 return XEXP (x, 0) == XEXP (y, 0);
2408 code = GET_CODE (x);
2409 fmt = GET_RTX_FORMAT (code);
2411 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2413 int j;
2415 switch (fmt[i])
2417 case 'w':
2418 if (XWINT (x, i) != XWINT (y, i))
2419 return 0;
2420 break;
2422 case 'n':
2423 case 'i':
2424 if (XINT (x, i) != XINT (y, i))
2425 return 0;
2426 break;
2428 case 'V':
2429 case 'E':
2430 /* Two vectors must have the same length. */
2431 if (XVECLEN (x, i) != XVECLEN (y, i))
2432 return 0;
2434 /* And the corresponding elements must match. */
2435 for (j = 0; j < XVECLEN (x, i); j++)
2436 if (! rtx_equal_for_cselib_p (XVECEXP (x, i, j),
2437 XVECEXP (y, i, j)))
2438 return 0;
2439 break;
2441 case 'e':
2442 if (! rtx_equal_for_cselib_p (XEXP (x, i), XEXP (y, i)))
2443 return 0;
2444 break;
2446 case 'S':
2447 case 's':
2448 if (strcmp (XSTR (x, i), XSTR (y, i)))
2449 return 0;
2450 break;
2452 case 'u':
2453 /* These are just backpointers, so they don't matter. */
2454 break;
2456 case '0':
2457 case 't':
2458 break;
2460 /* It is believed that rtx's at this level will never
2461 contain anything but integers and other rtx's,
2462 except for within LABEL_REFs and SYMBOL_REFs. */
2463 default:
2464 abort ();
2467 return 1;
2470 /* Hash an rtx. Return 0 if we couldn't hash the rtx.
2471 For registers and memory locations, we look up their cselib_val structure
2472 and return its VALUE element.
2473 Possible reasons for return 0 are: the object is volatile, or we couldn't
2474 find a register or memory location in the table and CREATE is zero. If
2475 CREATE is nonzero, table elts are created for regs and mem.
2476 MODE is used in hashing for CONST_INTs only;
2477 otherwise the mode of X is used. */
2479 static unsigned int
2480 hash_rtx (x, mode, create)
2481 rtx x;
2482 enum machine_mode mode;
2483 int create;
2485 cselib_val *e;
2486 int i, j;
2487 enum rtx_code code;
2488 const char *fmt;
2489 unsigned int hash = 0;
2491 /* repeat is used to turn tail-recursion into iteration. */
2492 repeat:
2493 code = GET_CODE (x);
2494 hash += (unsigned) code + (unsigned) GET_MODE (x);
2496 switch (code)
2498 case MEM:
2499 case REG:
2500 e = cselib_lookup (x, GET_MODE (x), create);
2501 if (! e)
2502 return 0;
2504 hash += e->value;
2505 return hash;
2507 case CONST_INT:
2508 hash += ((unsigned) CONST_INT << 7) + (unsigned) mode + INTVAL (x);
2509 return hash ? hash : CONST_INT;
2511 case CONST_DOUBLE:
2512 /* This is like the general case, except that it only counts
2513 the integers representing the constant. */
2514 hash += (unsigned) code + (unsigned) GET_MODE (x);
2515 if (GET_MODE (x) != VOIDmode)
2516 for (i = 2; i < GET_RTX_LENGTH (CONST_DOUBLE); i++)
2517 hash += XWINT (x, i);
2518 else
2519 hash += ((unsigned) CONST_DOUBLE_LOW (x)
2520 + (unsigned) CONST_DOUBLE_HIGH (x));
2521 return hash ? hash : CONST_DOUBLE;
2523 /* Assume there is only one rtx object for any given label. */
2524 case LABEL_REF:
2525 hash
2526 += ((unsigned) LABEL_REF << 7) + (unsigned long) XEXP (x, 0);
2527 return hash ? hash : LABEL_REF;
2529 case SYMBOL_REF:
2530 hash
2531 += ((unsigned) SYMBOL_REF << 7) + (unsigned long) XSTR (x, 0);
2532 return hash ? hash : SYMBOL_REF;
2534 case PRE_DEC:
2535 case PRE_INC:
2536 case POST_DEC:
2537 case POST_INC:
2538 case PC:
2539 case CC0:
2540 case CALL:
2541 case UNSPEC_VOLATILE:
2542 return 0;
2544 case ASM_OPERANDS:
2545 if (MEM_VOLATILE_P (x))
2546 return 0;
2548 break;
2550 default:
2551 break;
2554 i = GET_RTX_LENGTH (code) - 1;
2555 fmt = GET_RTX_FORMAT (code);
2556 for (; i >= 0; i--)
2558 if (fmt[i] == 'e')
2560 rtx tem = XEXP (x, i);
2561 unsigned int tem_hash;
2563 /* If we are about to do the last recursive call
2564 needed at this level, change it into iteration.
2565 This function is called enough to be worth it. */
2566 if (i == 0)
2568 x = tem;
2569 goto repeat;
2572 tem_hash = hash_rtx (tem, 0, create);
2573 if (tem_hash == 0)
2574 return 0;
2576 hash += tem_hash;
2578 else if (fmt[i] == 'E')
2579 for (j = 0; j < XVECLEN (x, i); j++)
2581 unsigned int tem_hash = hash_rtx (XVECEXP (x, i, j), 0, create);
2583 if (tem_hash == 0)
2584 return 0;
2586 hash += tem_hash;
2588 else if (fmt[i] == 's')
2590 const unsigned char *p = (const unsigned char *) XSTR (x, i);
2592 if (p)
2593 while (*p)
2594 hash += *p++;
2596 else if (fmt[i] == 'i')
2597 hash += XINT (x, i);
2598 else if (fmt[i] == '0' || fmt[i] == 't')
2599 /* unused */;
2600 else
2601 abort ();
2604 return hash ? hash : 1 + GET_CODE (x);
2607 /* Create a new value structure for VALUE and initialize it. The mode of the
2608 value is MODE. */
2610 static cselib_val *
2611 new_cselib_val (value, mode)
2612 unsigned int value;
2613 enum machine_mode mode;
2615 cselib_val *e = empty_vals;
2617 if (e)
2618 empty_vals = e->u.next_free;
2619 else
2620 e = (cselib_val *) obstack_alloc (&cselib_obstack, sizeof (cselib_val));
2622 if (value == 0)
2623 abort ();
2625 e->value = value;
2626 e->u.val_rtx = gen_rtx_VALUE (mode);
2627 CSELIB_VAL_PTR (e->u.val_rtx) = e;
2628 e->addr_list = 0;
2629 e->locs = 0;
2630 return e;
2633 /* ADDR_ELT is a value that is used as address. MEM_ELT is the value that
2634 contains the data at this address. X is a MEM that represents the
2635 value. Update the two value structures to represent this situation. */
2637 static void
2638 add_mem_for_addr (addr_elt, mem_elt, x)
2639 cselib_val *addr_elt, *mem_elt;
2640 rtx x;
2642 rtx new;
2643 struct elt_loc_list *l;
2645 /* Avoid duplicates. */
2646 for (l = mem_elt->locs; l; l = l->next)
2647 if (GET_CODE (l->loc) == MEM
2648 && CSELIB_VAL_PTR (XEXP (l->loc, 0)) == addr_elt)
2649 return;
2651 new = gen_rtx_MEM (GET_MODE (x), addr_elt->u.val_rtx);
2652 MEM_COPY_ATTRIBUTES (new, x);
2654 addr_elt->addr_list = new_elt_list (addr_elt->addr_list, mem_elt);
2655 mem_elt->locs = new_elt_loc_list (mem_elt->locs, new);
2658 /* Subroutine of cselib_lookup. Return a value for X, which is a MEM rtx.
2659 If CREATE, make a new one if we haven't seen it before. */
2661 static cselib_val *
2662 cselib_lookup_mem (x, create)
2663 rtx x;
2664 int create;
2666 void **slot;
2667 cselib_val *addr;
2668 cselib_val *mem_elt;
2669 struct elt_list *l;
2671 if (MEM_VOLATILE_P (x) || GET_MODE (x) == BLKmode
2672 || (FLOAT_MODE_P (GET_MODE (x)) && flag_float_store))
2673 return 0;
2675 /* Look up the value for the address. */
2676 addr = cselib_lookup (XEXP (x, 0), GET_MODE (x), create);
2677 if (! addr)
2678 return 0;
2680 /* Find a value that describes a value of our mode at that address. */
2681 for (l = addr->addr_list; l; l = l->next)
2682 if (GET_MODE (l->elt->u.val_rtx) == GET_MODE (x))
2683 return l->elt;
2685 if (! create)
2686 return 0;
2688 mem_elt = new_cselib_val (++next_unknown_value, GET_MODE (x));
2689 add_mem_for_addr (addr, mem_elt, x);
2690 slot = htab_find_slot_with_hash (hash_table, x, mem_elt->value, INSERT);
2691 *slot = mem_elt;
2692 return mem_elt;
2695 /* Walk rtx X and replace all occurrences of REG and MEM subexpressions
2696 with VALUE expressions. This way, it becomes independent of changes
2697 to registers and memory.
2698 X isn't actually modified; if modifications are needed, new rtl is
2699 allocated. However, the return value can share rtl with X. */
2701 static rtx
2702 cselib_subst_to_values (x)
2703 rtx x;
2705 enum rtx_code code = GET_CODE (x);
2706 const char *fmt = GET_RTX_FORMAT (code);
2707 cselib_val *e;
2708 struct elt_list *l;
2709 rtx copy = x;
2710 int i;
2712 switch (code)
2714 case REG:
2715 for (l = REG_VALUES (REGNO (x)); l; l = l->next)
2716 if (GET_MODE (l->elt->u.val_rtx) == GET_MODE (x))
2717 return l->elt->u.val_rtx;
2719 abort ();
2721 case MEM:
2722 e = cselib_lookup_mem (x, 0);
2723 if (! e)
2724 abort ();
2725 return e->u.val_rtx;
2727 /* CONST_DOUBLEs must be special-cased here so that we won't try to
2728 look up the CONST_DOUBLE_MEM inside. */
2729 case CONST_DOUBLE:
2730 case CONST_INT:
2731 return x;
2733 default:
2734 break;
2737 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2739 if (fmt[i] == 'e')
2741 rtx t = cselib_subst_to_values (XEXP (x, i));
2743 if (t != XEXP (x, i) && x == copy)
2744 copy = shallow_copy_rtx (x);
2746 XEXP (copy, i) = t;
2748 else if (fmt[i] == 'E')
2750 int j, k;
2752 for (j = 0; j < XVECLEN (x, i); j++)
2754 rtx t = cselib_subst_to_values (XVECEXP (x, i, j));
2756 if (t != XVECEXP (x, i, j) && XVEC (x, i) == XVEC (copy, i))
2758 if (x == copy)
2759 copy = shallow_copy_rtx (x);
2761 XVEC (copy, i) = rtvec_alloc (XVECLEN (x, i));
2762 for (k = 0; k < j; k++)
2763 XVECEXP (copy, i, k) = XVECEXP (x, i, k);
2766 XVECEXP (copy, i, j) = t;
2771 return copy;
2774 /* Look up the rtl expression X in our tables and return the value it has.
2775 If CREATE is zero, we return NULL if we don't know the value. Otherwise,
2776 we create a new one if possible, using mode MODE if X doesn't have a mode
2777 (i.e. because it's a constant). */
2779 cselib_val *
2780 cselib_lookup (x, mode, create)
2781 rtx x;
2782 enum machine_mode mode;
2783 int create;
2785 void **slot;
2786 cselib_val *e;
2787 unsigned int hashval;
2789 if (GET_MODE (x) != VOIDmode)
2790 mode = GET_MODE (x);
2792 if (GET_CODE (x) == VALUE)
2793 return CSELIB_VAL_PTR (x);
2795 if (GET_CODE (x) == REG)
2797 struct elt_list *l;
2798 unsigned int i = REGNO (x);
2800 for (l = REG_VALUES (i); l; l = l->next)
2801 if (mode == GET_MODE (l->elt->u.val_rtx))
2802 return l->elt;
2804 if (! create)
2805 return 0;
2807 e = new_cselib_val (++next_unknown_value, GET_MODE (x));
2808 e->locs = new_elt_loc_list (e->locs, x);
2809 REG_VALUES (i) = new_elt_list (REG_VALUES (i), e);
2810 slot = htab_find_slot_with_hash (hash_table, x, e->value, INSERT);
2811 *slot = e;
2812 return e;
2815 if (GET_CODE (x) == MEM)
2816 return cselib_lookup_mem (x, create);
2818 hashval = hash_rtx (x, mode, create);
2819 /* Can't even create if hashing is not possible. */
2820 if (! hashval)
2821 return 0;
2823 slot = htab_find_slot_with_hash (hash_table, x, hashval,
2824 create ? INSERT : NO_INSERT);
2825 if (slot == 0)
2826 return 0;
2828 e = (cselib_val *) *slot;
2829 if (e)
2830 return e;
2832 e = new_cselib_val (hashval, mode);
2834 /* We have to fill the slot before calling cselib_subst_to_values:
2835 the hash table is inconsistent until we do so, and
2836 cselib_subst_to_values will need to do lookups. */
2837 *slot = (void *) e;
2838 e->locs = new_elt_loc_list (e->locs, cselib_subst_to_values (x));
2839 return e;
2842 /* Invalidate any entries in reg_values that overlap REGNO. This is called
2843 if REGNO is changing. MODE is the mode of the assignment to REGNO, which
2844 is used to determine how many hard registers are being changed. If MODE
2845 is VOIDmode, then only REGNO is being changed; this is used when
2846 invalidating call clobbered registers across a call. */
2848 static void
2849 cselib_invalidate_regno (regno, mode)
2850 unsigned int regno;
2851 enum machine_mode mode;
2853 unsigned int endregno;
2854 unsigned int i;
2856 /* If we see pseudos after reload, something is _wrong_. */
2857 if (reload_completed && regno >= FIRST_PSEUDO_REGISTER
2858 && reg_renumber[regno] >= 0)
2859 abort ();
2861 /* Determine the range of registers that must be invalidated. For
2862 pseudos, only REGNO is affected. For hard regs, we must take MODE
2863 into account, and we must also invalidate lower register numbers
2864 if they contain values that overlap REGNO. */
2865 endregno = regno + 1;
2866 if (regno < FIRST_PSEUDO_REGISTER && mode != VOIDmode)
2867 endregno = regno + HARD_REGNO_NREGS (regno, mode);
2869 for (i = 0; i < endregno; i++)
2871 struct elt_list **l = &REG_VALUES (i);
2873 /* Go through all known values for this reg; if it overlaps the range
2874 we're invalidating, remove the value. */
2875 while (*l)
2877 cselib_val *v = (*l)->elt;
2878 struct elt_loc_list **p;
2879 unsigned int this_last = i;
2881 if (i < FIRST_PSEUDO_REGISTER)
2882 this_last += HARD_REGNO_NREGS (i, GET_MODE (v->u.val_rtx)) - 1;
2884 if (this_last < regno)
2886 l = &(*l)->next;
2887 continue;
2890 /* We have an overlap. */
2891 unchain_one_elt_list (l);
2893 /* Now, we clear the mapping from value to reg. It must exist, so
2894 this code will crash intentionally if it doesn't. */
2895 for (p = &v->locs; ; p = &(*p)->next)
2897 rtx x = (*p)->loc;
2899 if (GET_CODE (x) == REG && REGNO (x) == i)
2901 unchain_one_elt_loc_list (p);
2902 break;
2905 if (v->locs == 0)
2906 n_useless_values++;
2911 /* The memory at address MEM_BASE is being changed.
2912 Return whether this change will invalidate VAL. */
2914 static int
2915 cselib_mem_conflict_p (mem_base, val)
2916 rtx mem_base;
2917 rtx val;
2919 enum rtx_code code;
2920 const char *fmt;
2921 int i, j;
2923 code = GET_CODE (val);
2924 switch (code)
2926 /* Get rid of a few simple cases quickly. */
2927 case REG:
2928 case PC:
2929 case CC0:
2930 case SCRATCH:
2931 case CONST:
2932 case CONST_INT:
2933 case CONST_DOUBLE:
2934 case SYMBOL_REF:
2935 case LABEL_REF:
2936 return 0;
2938 case MEM:
2939 if (GET_MODE (mem_base) == BLKmode
2940 || GET_MODE (val) == BLKmode
2941 || anti_dependence (val, mem_base))
2942 return 1;
2944 /* The address may contain nested MEMs. */
2945 break;
2947 default:
2948 break;
2951 fmt = GET_RTX_FORMAT (code);
2952 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2954 if (fmt[i] == 'e')
2956 if (cselib_mem_conflict_p (mem_base, XEXP (val, i)))
2957 return 1;
2959 else if (fmt[i] == 'E')
2960 for (j = 0; j < XVECLEN (val, i); j++)
2961 if (cselib_mem_conflict_p (mem_base, XVECEXP (val, i, j)))
2962 return 1;
2965 return 0;
2968 /* For the value found in SLOT, walk its locations to determine if any overlap
2969 INFO (which is a MEM rtx). */
2971 static int
2972 cselib_invalidate_mem_1 (slot, info)
2973 void **slot;
2974 void *info;
2976 cselib_val *v = (cselib_val *) *slot;
2977 rtx mem_rtx = (rtx) info;
2978 struct elt_loc_list **p = &v->locs;
2979 int had_locs = v->locs != 0;
2981 while (*p)
2983 rtx x = (*p)->loc;
2984 cselib_val *addr;
2985 struct elt_list **mem_chain;
2987 /* MEMs may occur in locations only at the top level; below
2988 that every MEM or REG is substituted by its VALUE. */
2989 if (GET_CODE (x) != MEM
2990 || ! cselib_mem_conflict_p (mem_rtx, x))
2992 p = &(*p)->next;
2993 continue;
2996 /* This one overlaps. */
2997 /* We must have a mapping from this MEM's address to the
2998 value (E). Remove that, too. */
2999 addr = cselib_lookup (XEXP (x, 0), VOIDmode, 0);
3000 mem_chain = &addr->addr_list;
3001 for (;;)
3003 if ((*mem_chain)->elt == v)
3005 unchain_one_elt_list (mem_chain);
3006 break;
3009 mem_chain = &(*mem_chain)->next;
3012 unchain_one_elt_loc_list (p);
3015 if (had_locs && v->locs == 0)
3016 n_useless_values++;
3018 return 1;
3021 /* Invalidate any locations in the table which are changed because of a
3022 store to MEM_RTX. If this is called because of a non-const call
3023 instruction, MEM_RTX is (mem:BLK const0_rtx). */
3025 static void
3026 cselib_invalidate_mem (mem_rtx)
3027 rtx mem_rtx;
3029 htab_traverse (hash_table, cselib_invalidate_mem_1, mem_rtx);
3032 /* Invalidate DEST, which is being assigned to or clobbered. The second and
3033 the third parameter exist so that this function can be passed to
3034 note_stores; they are ignored. */
3036 static void
3037 cselib_invalidate_rtx (dest, ignore, data)
3038 rtx dest;
3039 rtx ignore ATTRIBUTE_UNUSED;
3040 void *data ATTRIBUTE_UNUSED;
3042 while (GET_CODE (dest) == STRICT_LOW_PART || GET_CODE (dest) == SIGN_EXTRACT
3043 || GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SUBREG)
3044 dest = XEXP (dest, 0);
3046 if (GET_CODE (dest) == REG)
3047 cselib_invalidate_regno (REGNO (dest), GET_MODE (dest));
3048 else if (GET_CODE (dest) == MEM)
3049 cselib_invalidate_mem (dest);
3051 /* Some machines don't define AUTO_INC_DEC, but they still use push
3052 instructions. We need to catch that case here in order to
3053 invalidate the stack pointer correctly. Note that invalidating
3054 the stack pointer is different from invalidating DEST. */
3055 if (push_operand (dest, GET_MODE (dest)))
3056 cselib_invalidate_rtx (stack_pointer_rtx, NULL_RTX, NULL);
3059 /* Record the result of a SET instruction. DEST is being set; the source
3060 contains the value described by SRC_ELT. If DEST is a MEM, DEST_ADDR_ELT
3061 describes its address. */
3063 static void
3064 cselib_record_set (dest, src_elt, dest_addr_elt)
3065 rtx dest;
3066 cselib_val *src_elt, *dest_addr_elt;
3068 int dreg = GET_CODE (dest) == REG ? (int) REGNO (dest) : -1;
3070 if (src_elt == 0 || side_effects_p (dest))
3071 return;
3073 if (dreg >= 0)
3075 REG_VALUES (dreg) = new_elt_list (REG_VALUES (dreg), src_elt);
3076 if (src_elt->locs == 0)
3077 n_useless_values--;
3078 src_elt->locs = new_elt_loc_list (src_elt->locs, dest);
3080 else if (GET_CODE (dest) == MEM && dest_addr_elt != 0)
3082 if (src_elt->locs == 0)
3083 n_useless_values--;
3084 add_mem_for_addr (dest_addr_elt, src_elt, dest);
3088 /* Describe a single set that is part of an insn. */
3089 struct set
3091 rtx src;
3092 rtx dest;
3093 cselib_val *src_elt;
3094 cselib_val *dest_addr_elt;
3097 /* There is no good way to determine how many elements there can be
3098 in a PARALLEL. Since it's fairly cheap, use a really large number. */
3099 #define MAX_SETS (FIRST_PSEUDO_REGISTER * 2)
3101 /* Record the effects of any sets in INSN. */
3102 static void
3103 cselib_record_sets (insn)
3104 rtx insn;
3106 int n_sets = 0;
3107 int i;
3108 struct set sets[MAX_SETS];
3109 rtx body = PATTERN (insn);
3111 body = PATTERN (insn);
3112 /* Find all sets. */
3113 if (GET_CODE (body) == SET)
3115 sets[0].src = SET_SRC (body);
3116 sets[0].dest = SET_DEST (body);
3117 n_sets = 1;
3119 else if (GET_CODE (body) == PARALLEL)
3121 /* Look through the PARALLEL and record the values being
3122 set, if possible. Also handle any CLOBBERs. */
3123 for (i = XVECLEN (body, 0) - 1; i >= 0; --i)
3125 rtx x = XVECEXP (body, 0, i);
3127 if (GET_CODE (x) == SET)
3129 sets[n_sets].src = SET_SRC (x);
3130 sets[n_sets].dest = SET_DEST (x);
3131 n_sets++;
3136 /* Look up the values that are read. Do this before invalidating the
3137 locations that are written. */
3138 for (i = 0; i < n_sets; i++)
3140 sets[i].src_elt = cselib_lookup (sets[i].src, GET_MODE (sets[i].dest),
3142 if (GET_CODE (sets[i].dest) == MEM)
3143 sets[i].dest_addr_elt = cselib_lookup (XEXP (sets[i].dest, 0), Pmode,
3145 else
3146 sets[i].dest_addr_elt = 0;
3149 /* Invalidate all locations written by this insn. Note that the elts we
3150 looked up in the previous loop aren't affected, just some of their
3151 locations may go away. */
3152 note_stores (body, cselib_invalidate_rtx, NULL);
3154 /* Now enter the equivalences in our tables. */
3155 for (i = 0; i < n_sets; i++)
3156 cselib_record_set (sets[i].dest, sets[i].src_elt, sets[i].dest_addr_elt);
3159 /* Record the effects of INSN. */
3161 void
3162 cselib_process_insn (insn)
3163 rtx insn;
3165 int i;
3166 rtx x;
3168 cselib_current_insn = insn;
3170 /* Forget everything at a CODE_LABEL, a volatile asm, or a setjmp. */
3171 if (GET_CODE (insn) == CODE_LABEL
3172 || (GET_CODE (insn) == NOTE
3173 && NOTE_LINE_NUMBER (insn) == NOTE_INSN_SETJMP)
3174 || (GET_CODE (insn) == INSN
3175 && GET_CODE (PATTERN (insn)) == ASM_OPERANDS
3176 && MEM_VOLATILE_P (PATTERN (insn))))
3178 clear_table ();
3179 return;
3182 if (GET_RTX_CLASS (GET_CODE (insn)) != 'i')
3184 cselib_current_insn = 0;
3185 return;
3188 /* If this is a call instruction, forget anything stored in a
3189 call clobbered register, or, if this is not a const call, in
3190 memory. */
3191 if (GET_CODE (insn) == CALL_INSN)
3193 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3194 if (call_used_regs[i])
3195 cselib_invalidate_regno (i, VOIDmode);
3197 if (! CONST_CALL_P (insn))
3198 cselib_invalidate_mem (callmem);
3201 cselib_record_sets (insn);
3203 #ifdef AUTO_INC_DEC
3204 /* Clobber any registers which appear in REG_INC notes. We
3205 could keep track of the changes to their values, but it is
3206 unlikely to help. */
3207 for (x = REG_NOTES (insn); x; x = XEXP (x, 1))
3208 if (REG_NOTE_KIND (x) == REG_INC)
3209 cselib_invalidate_rtx (XEXP (x, 0), NULL_RTX, NULL);
3210 #endif
3212 /* Look for any CLOBBERs in CALL_INSN_FUNCTION_USAGE, but only
3213 after we have processed the insn. */
3214 if (GET_CODE (insn) == CALL_INSN)
3215 for (x = CALL_INSN_FUNCTION_USAGE (insn); x; x = XEXP (x, 1))
3216 if (GET_CODE (XEXP (x, 0)) == CLOBBER)
3217 cselib_invalidate_rtx (XEXP (XEXP (x, 0), 0), NULL_RTX, NULL);
3219 cselib_current_insn = 0;
3221 if (n_useless_values > MAX_USELESS_VALUES)
3222 remove_useless_values ();
3225 /* Make sure our varrays are big enough. Not called from any cselib routines;
3226 it must be called by the user if it allocated new registers. */
3228 void
3229 cselib_update_varray_sizes ()
3231 unsigned int nregs = max_reg_num ();
3233 if (nregs == cselib_nregs)
3234 return;
3236 cselib_nregs = nregs;
3237 VARRAY_GROW (reg_values, nregs);
3240 /* Initialize cselib for one pass. The caller must also call
3241 init_alias_analysis. */
3243 void
3244 cselib_init ()
3246 /* These are only created once. */
3247 if (! callmem)
3249 extern struct obstack permanent_obstack;
3251 gcc_obstack_init (&cselib_obstack);
3252 cselib_startobj = obstack_alloc (&cselib_obstack, 0);
3254 push_obstacks (&permanent_obstack, &permanent_obstack);
3255 callmem = gen_rtx_MEM (BLKmode, const0_rtx);
3256 pop_obstacks ();
3257 ggc_add_rtx_root (&callmem, 1);
3260 cselib_nregs = max_reg_num ();
3261 VARRAY_ELT_LIST_INIT (reg_values, cselib_nregs, "reg_values");
3262 hash_table = htab_create (31, get_value_hash, entry_and_rtx_equal_p, NULL);
3263 clear_table ();
3266 /* Called when the current user is done with cselib. */
3268 void
3269 cselib_finish ()
3271 clear_table ();
3272 htab_delete (hash_table);