1 /* Common subexpression elimination for GNU compiler.
2 Copyright (C) 1987, 88, 89, 92-7, 1998, 1999 Free Software Foundation, Inc.
4 This file is part of GNU CC.
6 GNU CC is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2, or (at your option)
9 any later version.
11 GNU CC is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with GNU CC; see the file COPYING. If not, write to
18 the Free Software Foundation, 59 Temple Place - Suite 330,
19 Boston, MA 02111-1307, USA. */
22 #include "config.h"
23 /* stdio.h must precede rtl.h for FFS. */
24 #include "system.h"
25 #include <setjmp.h>
27 #include "rtl.h"
28 #include "tm_p.h"
29 #include "regs.h"
30 #include "hard-reg-set.h"
31 #include "flags.h"
32 #include "real.h"
33 #include "insn-config.h"
34 #include "recog.h"
35 #include "function.h"
36 #include "expr.h"
37 #include "toplev.h"
38 #include "output.h"
39 #include "splay-tree.h"
40 #include "ggc.h"
42 /* The basic idea of common subexpression elimination is to go
43 through the code, keeping a record of expressions that would
44 have the same value at the current scan point, and replacing
45 expressions encountered with the cheapest equivalent expression.
47 It is too complicated to keep track of the different possibilities
48 when control paths merge; so, at each label, we forget all that is
49 known and start fresh. This can be described as processing each
50 basic block separately. Note, however, that these are not quite
51 the same as the basic blocks found by a later pass and used for
52 data flow analysis and register packing. We do not need to start fresh
53 after a conditional jump instruction if there is no label there.
55 We use two data structures to record the equivalent expressions:
56 a hash table for most expressions, and several vectors together
57 with "quantity numbers" to record equivalent (pseudo) registers.
59 The use of the special data structure for registers is desirable
60 because it is faster.  It is possible because register references
61 contain a fairly small number, the register number, taken from
62 a contiguously allocated series, and two register references are
63 identical if they have the same number. General expressions
64 do not have any such thing, so the only way to retrieve the
65 information recorded on an expression other than a register
66 is to keep it in a hash table.
68 Registers and "quantity numbers":
70 At the start of each basic block, all of the (hardware and pseudo)
71 registers used in the function are given distinct quantity
72 numbers to indicate their contents. During scan, when the code
73 copies one register into another, we copy the quantity number.
74 When a register is loaded in any other way, we allocate a new
75 quantity number to describe the value generated by this operation.
76 `reg_qty' records what quantity a register is currently thought
77 of as containing.
79 All real quantity numbers are greater than or equal to `max_reg'.
80 If register N has not been assigned a quantity, reg_qty[N] will equal N.
82 Quantity numbers below `max_reg' do not exist and none of the `qty_...'
83 variables should be referenced with an index below `max_reg'.
85 We also maintain a bidirectional chain of registers for each
86 quantity number. `qty_first_reg', `qty_last_reg',
87 `reg_next_eqv' and `reg_prev_eqv' hold these chains.
89 The first register in a chain is the one whose lifespan is least local.
90 Among equals, it is the one that was seen first.
91 We replace any equivalent register with that one.
93 If two registers have the same quantity number, REG expressions with
94 the quantity's mode (`qty_mode') must be in the hash table for both
95 registers and must be in the same class.
97 The converse is not true. Since hard registers may be referenced in
98 any mode, two REG expressions might be equivalent in the hash table
99 but not have the same quantity number if the quantity of one of the
100 registers does not have the same mode as those expressions.
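
   As an illustrative sketch (with made-up register numbers, not output
   of this pass), scanning

	(set (reg 101) (reg 100))
	(set (reg 102) (const_int 4))

   copies the quantity number of reg 100 to reg 101, so the two registers
   end up on one equivalence chain and either may stand in for the other.
   The load of reg 102 is not a register copy, so reg 102 is given a
   freshly allocated quantity number.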
102 Constants and quantity numbers:
104 When a quantity has a known constant value, that value is stored
105 in the appropriate element of qty_const. This is in addition to
106 putting the constant in the hash table as is usual for non-regs.
108 Whether a reg or a constant is preferred is determined by the configuration
109 macro CONST_COSTS and will often depend on the constant value. In any
110 event, expressions containing constants can be simplified by fold_rtx.
112 When a quantity has a known nearly constant value (such as an address
113 of a stack slot), that value is stored in the appropriate element
114 of qty_const.
116 Integer constants don't have a machine mode. However, cse
117 determines the intended machine mode from the destination
118 of the instruction that moves the constant. The machine mode
119 is recorded in the hash table along with the actual RTL
120 constant expression so that different modes are kept separate.
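
   For example, given the hypothetical insn

	(set (reg:SI 100) (const_int 4))

   cse records (const_int 4) in the hash table with mode SImode, taken
   from the destination; moving the same constant into a DImode register
   would create a separate entry recorded with DImode.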
122 Other expressions:
124 To record known equivalences among expressions in general
125 we use a hash table called `table'. It has a fixed number of buckets
126 that contain chains of `struct table_elt' elements for expressions.
127 These chains connect the elements whose expressions have the same
128 hash codes.
130 Other chains through the same elements connect the elements which
131 currently have equivalent values.
133 Register references in an expression are canonicalized before hashing
134 the expression. This is done using `reg_qty' and `qty_first_reg'.
135 The hash code of a register reference is computed using the quantity
136 number, not the register number.
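
   For example, if reg 100 and reg 101 currently share a quantity number,
   the hypothetical expressions (plus (reg 100) (const_int 4)) and
   (plus (reg 101) (const_int 4)) hash identically, since each register
   reference contributes its quantity number rather than its register
   number.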
138 When the value of an expression changes, it is necessary to remove from the
139 hash table not just that expression but all expressions whose values
140 could be different as a result.
142 1. If the value being changed is in memory, then except in special cases
143 ANYTHING referring to memory could be changed. That is because
144 nobody knows where a pointer does not point.
145 The function `invalidate_memory' removes what is necessary.
147 The special cases are when the address is constant or is
148 a constant plus a fixed register such as the frame pointer
149 or a static chain pointer. When such addresses are stored in,
150 we can tell exactly which other such addresses must be invalidated
151 due to overlap. `invalidate' does this.
152 All expressions that refer to non-constant
153 memory addresses are also invalidated. `invalidate_memory' does this.
155 2. If the value being changed is a register, all expressions
156 containing references to that register, and only those,
157 must be removed.
159 Because searching the entire hash table for expressions that contain
160 a register is very slow, we try to figure out when it isn't necessary.
161 Precisely, this is necessary only when expressions have been
162 entered in the hash table using this register, and then the value has
163 changed, and then another expression wants to be added to refer to
164 the register's new value. This sequence of circumstances is rare
165 within any one basic block.
167 The vectors `reg_tick' and `reg_in_table' are used to detect this case.
168 reg_tick[i] is incremented whenever a value is stored in register i.
169 reg_in_table[i] holds -1 if no references to register i have been
170 entered in the table; otherwise, it contains the value reg_tick[i] had
171 when the references were entered. If we want to enter a reference
172 and reg_in_table[i] != reg_tick[i], we must scan and remove old references.
173 Until we want to enter a new entry, the mere fact that the two vectors
174 don't match causes the entries to be ignored if anyone tries to match them.
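
   A sketch of the mechanism, with made-up numbers: if an expression
   mentioning register 7 is entered while reg_tick[7] == 3, then
   reg_in_table[7] becomes 3.  A later store into register 7 merely bumps
   reg_tick[7] to 4; the stale entries stay in the table but can no
   longer be matched.  Only when a new reference to register 7 is about
   to be entered do we pay for the scan that removes them.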
176 Registers themselves are entered in the hash table as well as in
177 the equivalent-register chains. However, the vectors `reg_tick'
178 and `reg_in_table' do not apply to expressions which are simple
179 register references. These expressions are removed from the table
180 immediately when they become invalid, and this can be done even if
181 we do not immediately search for all the expressions that refer to
182 the register.
184 A CLOBBER rtx in an instruction invalidates its operand for further
185 reuse. A CLOBBER or SET rtx whose operand is a MEM:BLK
186 invalidates everything that resides in memory.
188 Related expressions:
190 Constant expressions that differ only by an additive integer
191 are called related. When a constant expression is put in
192 the table, the related expression with no constant term
193 is also entered. These are made to point at each other
194 so that it is possible to find out if there exists any
195 register equivalent to an expression related to a given expression. */
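/* For instance (an illustrative example), when
   (const (plus (symbol_ref "x") (const_int 8))) is entered, the
   integer-free expression (symbol_ref "x") is entered as well, and the
   two elements are linked through their `related_value' fields.  If some
   register is later known to hold (symbol_ref "x"), the related constant
   can be computed from that register with a single addition.  */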
197 /* One plus largest register number used in this function. */
199 static int max_reg;
201 /* One plus largest instruction UID used in this function at time of
202 cse_main call. */
204 static int max_insn_uid;
206 /* Length of vectors indexed by quantity number.
207 We know in advance we will not need a quantity number this big. */
209 static int max_qty;
211 /* Next quantity number to be allocated.
212 This is 1 + the largest number needed so far. */
214 static int next_qty;
216 /* Indexed by quantity number, gives the first (or last) register
217 in the chain of registers that currently contain this quantity. */
219 static int *qty_first_reg;
220 static int *qty_last_reg;
222 /* Indexed by quantity number, gives the mode of the quantity. */
224 static enum machine_mode *qty_mode;
226 /* Indexed by quantity number, gives the rtx of the constant value of the
227 quantity, or zero if it does not have a known value.
228 A sum of the frame pointer (or arg pointer) plus a constant
229 can also be entered here. */
231 static rtx *qty_const;
233 /* Indexed by qty number, gives the insn that stored the constant value
234 recorded in `qty_const'. */
236 static rtx *qty_const_insn;
238 /* The next three variables are used to track when a comparison between a
239 quantity and some constant or register has been passed. In that case, we
240 know the results of the comparison in case we see it again. These variables
241 record a comparison that is known to be true. */
243 /* Indexed by qty number, gives the rtx code of a comparison with a known
244 result involving this quantity. If none, it is UNKNOWN. */
245 static enum rtx_code *qty_comparison_code;
247 /* Indexed by qty number, gives the constant being compared against in a
248 comparison of known result. If no such comparison, it is undefined.
249 If the comparison is not with a constant, it is zero. */
251 static rtx *qty_comparison_const;
253 /* Indexed by qty number, gives the quantity being compared against in a
254 comparison of known result. If no such comparison, it is undefined.
255 If the comparison is not with a register, it is -1. */
257 static int *qty_comparison_qty;
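/* As an illustrative sketch with made-up numbers: after falling through
   a conditional branch that is taken only when (gt (reg 100) (const_int 0))
   holds, record_jump_equiv can set, for the quantity of reg 100,
   qty_comparison_code to LE and qty_comparison_const to (const_int 0),
   so a later test of the same condition in this basic block folds to a
   known result.  */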
259 #ifdef HAVE_cc0
260 /* For machines that have a CC0, we do not record its value in the hash
261 table since its use is guaranteed to be the insn immediately following
262 its definition and any other insn is presumed to invalidate it.
264 Instead, we store below the value last assigned to CC0. If it should
265 happen to be a constant, it is stored in preference to the actual
266 assigned value. In case it is a constant, we store the mode in which
267 the constant should be interpreted. */
269 static rtx prev_insn_cc0;
270 static enum machine_mode prev_insn_cc0_mode;
271 #endif
273 /* Previous actual insn. 0 if at first insn of basic block. */
275 static rtx prev_insn;
277 /* Insn being scanned. */
279 static rtx this_insn;
281 /* Indexed by register number, gives the number of the next (or
282 previous) register in the chain of registers sharing the same
283 value.
285 Or -1 if this register is at the end of the chain.
287 If reg_qty[N] == N, reg_next_eqv[N] is undefined. */
289 static int *reg_next_eqv;
290 static int *reg_prev_eqv;
292 struct cse_reg_info {
293 union {
294 /* The number of times the register has been altered in the current
295 basic block. */
296 int reg_tick;
298 /* The next cse_reg_info structure in the free list. */
299 struct cse_reg_info* next;
300 } variant;
302 /* The REG_TICK value at which rtx's containing this register are
303 valid in the hash table. If this does not equal the current
304 reg_tick value, such expressions existing in the hash table are
305 invalid. */
306 int reg_in_table;
308 /* The quantity number of the register's current contents. */
309 int reg_qty;
310 };
312 /* A free list of cse_reg_info entries. */
313 static struct cse_reg_info *cse_reg_info_free_list;
315 /* A mapping from registers to cse_reg_info data structures. */
316 static splay_tree cse_reg_info_tree;
318 /* The last lookup we did into the cse_reg_info_tree. This allows us
319 to cache repeated lookups. */
320 static int cached_regno;
321 static struct cse_reg_info *cached_cse_reg_info;
323 /* A HARD_REG_SET containing all the hard registers for which there is
324 currently a REG expression in the hash table. Note the difference
325 from the above variables, which indicate if the REG is mentioned in some
326 expression in the table. */
328 static HARD_REG_SET hard_regs_in_table;
330 /* A HARD_REG_SET containing all the hard registers that are invalidated
331 by a CALL_INSN. */
333 static HARD_REG_SET regs_invalidated_by_call;
335 /* CUID of insn that starts the basic block currently being cse-processed. */
337 static int cse_basic_block_start;
339 /* CUID of insn that ends the basic block currently being cse-processed. */
341 static int cse_basic_block_end;
343 /* Vector mapping INSN_UIDs to cuids.
344 The cuids always increase monotonically.
345 We use them to see whether a reg is used outside a given basic block. */
347 static int *uid_cuid;
349 /* Highest UID in UID_CUID. */
350 static int max_uid;
352 /* Get the cuid of an insn. */
354 #define INSN_CUID(INSN) (uid_cuid[INSN_UID (INSN)])
356 /* Nonzero if cse has altered conditional jump insns
357 in such a way that jump optimization should be redone. */
359 static int cse_jumps_altered;
361 /* Nonzero if we put a LABEL_REF into the hash table. Since we may have put
362 it into an INSN without a REG_LABEL, we have to rerun jump after CSE
363 to put in the note. */
364 static int recorded_label_ref;
366 /* canon_hash stores 1 in do_not_record
367 if it notices a reference to CC0, PC, or some other volatile
368 subexpression. */
370 static int do_not_record;
372 #ifdef LOAD_EXTEND_OP
374 /* Scratch rtl used when looking for load-extended copy of a MEM. */
375 static rtx memory_extend_rtx;
376 #endif
378 /* canon_hash stores 1 in hash_arg_in_memory
379 if it notices a reference to memory within the expression being hashed. */
381 static int hash_arg_in_memory;
383 /* canon_hash stores 1 in hash_arg_in_struct
384 if it notices a reference to memory that's part of a structure. */
386 static int hash_arg_in_struct;
388 /* The hash table contains buckets which are chains of `struct table_elt's,
389 each recording one expression's information.
390 That expression is in the `exp' field.
392 Those elements with the same hash code are chained in both directions
393 through the `next_same_hash' and `prev_same_hash' fields.
395 Each set of expressions with equivalent values
396 are on a two-way chain through the `next_same_value'
397 and `prev_same_value' fields, and all point with
398 the `first_same_value' field at the first element in
399 that chain. The chain is in order of increasing cost.
400 Each element's cost value is in its `cost' field.
402 The `in_memory' field is nonzero for elements that
403 involve any reference to memory. These elements are removed
404 whenever a write is done to an unidentified location in memory.
405 To be safe, we assume that a memory address is unidentified unless
406 the address is either a symbol constant or a constant plus
407 the frame pointer or argument pointer.
409 The `in_struct' field is nonzero for elements that
410 involve any reference to memory inside a structure or array.
412 The `related_value' field is used to connect related expressions
413 (that differ by adding an integer).
414 The related expressions are chained in a circular fashion.
415 `related_value' is zero for expressions for which this
416 chain is not useful.
418 The `cost' field stores the cost of this element's expression.
420 The `is_const' flag is set if the element is a constant (including
421 a fixed address).
423 The `flag' field is used as a temporary during some search routines.
425 The `mode' field is usually the same as GET_MODE (`exp'), but
426 if `exp' is a CONST_INT and has no machine mode then the `mode'
427 field is the mode it was being used as. Each constant is
428 recorded separately for each mode it is used with. */
431 struct table_elt
432 {
433 rtx exp;
434 struct table_elt *next_same_hash;
435 struct table_elt *prev_same_hash;
436 struct table_elt *next_same_value;
437 struct table_elt *prev_same_value;
438 struct table_elt *first_same_value;
439 struct table_elt *related_value;
440 int cost;
441 enum machine_mode mode;
442 char in_memory;
443 char in_struct;
444 char is_const;
445 char flag;
446 };
448 /* We don't want a lot of buckets, because we rarely have very many
449 things stored in the hash table, and a lot of buckets slows
450 down a lot of loops that happen frequently. */
451 #define NBUCKETS 31
453 /* Compute hash code of X in mode M. Special-case the case where X is a pseudo
454 register (hard registers may require `do_not_record' to be set). */
456 #define HASH(X, M) \
457 (GET_CODE (X) == REG && REGNO (X) >= FIRST_PSEUDO_REGISTER \
458 ? (((unsigned) REG << 7) + (unsigned) REG_QTY (REGNO (X))) % NBUCKETS \
459 : canon_hash (X, M) % NBUCKETS)
461 /* Determine whether register number N is considered a fixed register for CSE.
462 It is desirable to replace other regs with fixed regs, to reduce need for
463 non-fixed hard regs.
464 A reg wins if it is either the frame pointer or designated as fixed,
465 but not if it is an overlapping register. */
466 #ifdef OVERLAPPING_REGNO_P
467 #define FIXED_REGNO_P(N) \
468 (((N) == FRAME_POINTER_REGNUM || (N) == HARD_FRAME_POINTER_REGNUM \
469 || fixed_regs[N] || global_regs[N]) \
470 && ! OVERLAPPING_REGNO_P ((N)))
471 #else
472 #define FIXED_REGNO_P(N) \
473 ((N) == FRAME_POINTER_REGNUM || (N) == HARD_FRAME_POINTER_REGNUM \
474 || fixed_regs[N] || global_regs[N])
475 #endif
477 /* Compute cost of X, as stored in the `cost' field of a table_elt. Fixed
478 hard registers and pointers into the frame are the cheapest with a cost
479 of 0. Next come pseudos with a cost of 1 and other hard registers with
480 a cost of 2. Aside from these special cases, call `rtx_cost'. */
482 #define CHEAP_REGNO(N) \
483 ((N) == FRAME_POINTER_REGNUM || (N) == HARD_FRAME_POINTER_REGNUM \
484 || (N) == STACK_POINTER_REGNUM || (N) == ARG_POINTER_REGNUM \
485 || ((N) >= FIRST_VIRTUAL_REGISTER && (N) <= LAST_VIRTUAL_REGISTER) \
486 || ((N) < FIRST_PSEUDO_REGISTER \
487 && FIXED_REGNO_P (N) && REGNO_REG_CLASS (N) != NO_REGS))
489 /* A register is cheap if it is a user variable assigned to the register
490 or if its register number always corresponds to a cheap register. */
492 #define CHEAP_REG(N) \
493 ((REG_USERVAR_P (N) && REGNO (N) < FIRST_PSEUDO_REGISTER) \
494 || CHEAP_REGNO (REGNO (N)))
496 #define COST(X) \
497 (GET_CODE (X) == REG \
498 ? (CHEAP_REG (X) ? 0 \
499 : REGNO (X) >= FIRST_PSEUDO_REGISTER ? 1 \
500 : 2) \
501 : notreg_cost(X))
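/* So, for example, COST (frame_pointer_rtx) is 0, the COST of an
   ordinary pseudo register is 1, the COST of a non-fixed hard register
   is 2, and anything that is not a REG falls through to notreg_cost and
   ultimately to rtx_cost.  */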
503 /* Get the info associated with register N. */
505 #define GET_CSE_REG_INFO(N) \
506 (((N) == cached_regno && cached_cse_reg_info) \
507 ? cached_cse_reg_info : get_cse_reg_info ((N)))
509 /* Get the number of times this register has been updated in this
510 basic block. */
512 #define REG_TICK(N) ((GET_CSE_REG_INFO (N))->variant.reg_tick)
514 /* Get the point at which REG was recorded in the table. */
516 #define REG_IN_TABLE(N) ((GET_CSE_REG_INFO (N))->reg_in_table)
518 /* Get the quantity number for REG. */
520 #define REG_QTY(N) ((GET_CSE_REG_INFO (N))->reg_qty)
522 /* Determine if the quantity number for register X represents a valid index
523 into the `qty_...' variables. */
525 #define REGNO_QTY_VALID_P(N) (REG_QTY (N) != (N))
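/* For example, immediately after new_basic_block every register N has
   REG_QTY (N) == N, so REGNO_QTY_VALID_P (N) is false until make_new_qty
   or make_regs_eqv assigns N a real quantity number.  */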
527 #ifdef ADDRESS_COST
528 /* The ADDRESS_COST macro does not deal with ADDRESSOF nodes. But,
529 during CSE, such nodes are present. Using an ADDRESSOF node which
530 refers to the address of a REG is a good thing because we can then
531 turn (MEM (ADDRESSOF (REG))) into just plain REG. */
532 #define CSE_ADDRESS_COST(RTX) \
533 ((GET_CODE (RTX) == ADDRESSOF && REG_P (XEXP ((RTX), 0))) \
534 ? -1 : ADDRESS_COST(RTX))
535 #endif
537 static struct table_elt *table[NBUCKETS];
539 /* Chain of `struct table_elt's made so far for this function
540 but currently removed from the table. */
542 static struct table_elt *free_element_chain;
544 /* Number of `struct table_elt' structures made so far for this function. */
546 static int n_elements_made;
548 /* Maximum value `n_elements_made' has had so far in this compilation
549 for functions previously processed. */
551 static int max_elements_made;
553 /* Surviving equivalence class when two equivalence classes are merged
554 by recording the effects of a jump in the last insn. Zero if the
555 last insn was not a conditional jump. */
557 static struct table_elt *last_jump_equiv_class;
559 /* Set to the cost of a constant pool reference if one was found for a
560 symbolic constant. If this was found, it means we should try to
561 convert constants into constant pool entries if they don't fit in
562 the insn. */
564 static int constant_pool_entries_cost;
566 /* Define maximum length of a branch path. */
568 #define PATHLENGTH 10
570 /* This data describes a block that will be processed by cse_basic_block. */
572 struct cse_basic_block_data {
573 /* Lowest CUID value of insns in block. */
574 int low_cuid;
575 /* Highest CUID value of insns in block. */
576 int high_cuid;
577 /* Total number of SETs in block. */
578 int nsets;
579 /* Last insn in the block. */
580 rtx last;
581 /* Size of current branch path, if any. */
582 int path_size;
583 /* Current branch path, indicating which branches will be taken. */
584 struct branch_path {
585 /* The branch insn. */
586 rtx branch;
587 /* Whether it should be taken or not. AROUND is the same as taken
588 except that it is used when the destination label is not preceded
589 by a BARRIER. */
590 enum taken {TAKEN, NOT_TAKEN, AROUND} status;
591 } path[PATHLENGTH];
592 };
594 /* Nonzero if X has the form (PLUS frame-pointer integer). We check for
595 virtual regs here because the simplify_*_operation routines are called
596 by integrate.c, which is called before virtual register instantiation. */
598 #define FIXED_BASE_PLUS_P(X) \
599 ((X) == frame_pointer_rtx || (X) == hard_frame_pointer_rtx \
600 || ((X) == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM])\
601 || (X) == virtual_stack_vars_rtx \
602 || (X) == virtual_incoming_args_rtx \
603 || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
604 && (XEXP (X, 0) == frame_pointer_rtx \
605 || XEXP (X, 0) == hard_frame_pointer_rtx \
606 || (XEXP (X, 0) == arg_pointer_rtx \
607 && fixed_regs[ARG_POINTER_REGNUM]) \
608 || XEXP (X, 0) == virtual_stack_vars_rtx \
609 || XEXP (X, 0) == virtual_incoming_args_rtx)) \
610 || GET_CODE (X) == ADDRESSOF)
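/* For example, frame_pointer_rtx itself satisfies FIXED_BASE_PLUS_P, as
   does the hypothetical address (plus frame_pointer_rtx (const_int 8)),
   while (plus (reg 100) (const_int 8)) does not, since a pseudo register
   is not a fixed base.  */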
612 /* Similar, but also allows reference to the stack pointer.
614 This used to include FIXED_BASE_PLUS_P, however, we can't assume that
615 arg_pointer_rtx by itself is nonzero, because on at least one machine,
616 the i960, the arg pointer is zero when it is unused. */
618 #define NONZERO_BASE_PLUS_P(X) \
619 ((X) == frame_pointer_rtx || (X) == hard_frame_pointer_rtx \
620 || (X) == virtual_stack_vars_rtx \
621 || (X) == virtual_incoming_args_rtx \
622 || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
623 && (XEXP (X, 0) == frame_pointer_rtx \
624 || XEXP (X, 0) == hard_frame_pointer_rtx \
625 || (XEXP (X, 0) == arg_pointer_rtx \
626 && fixed_regs[ARG_POINTER_REGNUM]) \
627 || XEXP (X, 0) == virtual_stack_vars_rtx \
628 || XEXP (X, 0) == virtual_incoming_args_rtx)) \
629 || (X) == stack_pointer_rtx \
630 || (X) == virtual_stack_dynamic_rtx \
631 || (X) == virtual_outgoing_args_rtx \
632 || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
633 && (XEXP (X, 0) == stack_pointer_rtx \
634 || XEXP (X, 0) == virtual_stack_dynamic_rtx \
635 || XEXP (X, 0) == virtual_outgoing_args_rtx)) \
636 || GET_CODE (X) == ADDRESSOF)
638 static int notreg_cost PROTO((rtx));
639 static void new_basic_block PROTO((void));
640 static void make_new_qty PROTO((int));
641 static void make_regs_eqv PROTO((int, int));
642 static void delete_reg_equiv PROTO((int));
643 static int mention_regs PROTO((rtx));
644 static int insert_regs PROTO((rtx, struct table_elt *, int));
645 static void free_element PROTO((struct table_elt *));
646 static void remove_from_table PROTO((struct table_elt *, unsigned));
647 static struct table_elt *get_element PROTO((void));
648 static struct table_elt *lookup PROTO((rtx, unsigned, enum machine_mode)),
649 *lookup_for_remove PROTO((rtx, unsigned, enum machine_mode));
650 static rtx lookup_as_function PROTO((rtx, enum rtx_code));
651 static struct table_elt *insert PROTO((rtx, struct table_elt *, unsigned,
652 enum machine_mode));
653 static void merge_equiv_classes PROTO((struct table_elt *,
654 struct table_elt *));
655 static void invalidate PROTO((rtx, enum machine_mode));
656 static int cse_rtx_varies_p PROTO((rtx));
657 static void remove_invalid_refs PROTO((int));
658 static void remove_invalid_subreg_refs PROTO((int, int, enum machine_mode));
659 static void rehash_using_reg PROTO((rtx));
660 static void invalidate_memory PROTO((void));
661 static void invalidate_for_call PROTO((void));
662 static rtx use_related_value PROTO((rtx, struct table_elt *));
663 static unsigned canon_hash PROTO((rtx, enum machine_mode));
664 static unsigned safe_hash PROTO((rtx, enum machine_mode));
665 static int exp_equiv_p PROTO((rtx, rtx, int, int));
666 static void set_nonvarying_address_components PROTO((rtx, int, rtx *,
667 HOST_WIDE_INT *,
668 HOST_WIDE_INT *));
669 static int refers_to_p PROTO((rtx, rtx));
670 static rtx canon_reg PROTO((rtx, rtx));
671 static void find_best_addr PROTO((rtx, rtx *));
672 static enum rtx_code find_comparison_args PROTO((enum rtx_code, rtx *, rtx *,
673 enum machine_mode *,
674 enum machine_mode *));
675 static rtx cse_gen_binary PROTO((enum rtx_code, enum machine_mode,
676 rtx, rtx));
677 static rtx simplify_plus_minus PROTO((enum rtx_code, enum machine_mode,
678 rtx, rtx));
679 static rtx fold_rtx PROTO((rtx, rtx));
680 static rtx equiv_constant PROTO((rtx));
681 static void record_jump_equiv PROTO((rtx, int));
682 static void record_jump_cond PROTO((enum rtx_code, enum machine_mode,
683 rtx, rtx, int));
684 static void cse_insn PROTO((rtx, rtx));
685 static int note_mem_written PROTO((rtx));
686 static void invalidate_from_clobbers PROTO((rtx));
687 static rtx cse_process_notes PROTO((rtx, rtx));
688 static void cse_around_loop PROTO((rtx));
689 static void invalidate_skipped_set PROTO((rtx, rtx));
690 static void invalidate_skipped_block PROTO((rtx));
691 static void cse_check_loop_start PROTO((rtx, rtx));
692 static void cse_set_around_loop PROTO((rtx, rtx, rtx));
693 static rtx cse_basic_block PROTO((rtx, rtx, struct branch_path *, int));
694 static void count_reg_usage PROTO((rtx, int *, rtx, int));
695 extern void dump_class PROTO((struct table_elt*));
696 static void check_fold_consts PROTO((PTR));
697 static struct cse_reg_info* get_cse_reg_info PROTO((int));
698 static void free_cse_reg_info PROTO((splay_tree_value));
699 static void flush_hash_table PROTO((void));
701 /* Dump the expressions in the equivalence class indicated by CLASSP.
702 This function is used only for debugging. */
703 void
704 dump_class (classp)
705 struct table_elt *classp;
707 struct table_elt *elt;
709 fprintf (stderr, "Equivalence chain for ");
710 print_rtl (stderr, classp->exp);
711 fprintf (stderr, ": \n");
713 for (elt = classp->first_same_value; elt; elt = elt->next_same_value)
715 print_rtl (stderr, elt->exp);
716 fprintf (stderr, "\n");
720 /* Return an estimate of the cost of computing rtx X.
721 One use is in cse, to decide which expression to keep in the hash table.
722 Another is in rtl generation, to pick the cheapest way to multiply.
723 Other uses like the latter are expected in the future. */
725 /* Internal function, to compute cost when X is not a register; called
726 from COST macro to keep it simple. */
728 static int
729 notreg_cost (x)
730 rtx x;
732 return ((GET_CODE (x) == SUBREG
733 && GET_CODE (SUBREG_REG (x)) == REG
734 && GET_MODE_CLASS (GET_MODE (x)) == MODE_INT
735 && GET_MODE_CLASS (GET_MODE (SUBREG_REG (x))) == MODE_INT
736 && (GET_MODE_SIZE (GET_MODE (x))
737 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x))))
738 && subreg_lowpart_p (x)
739 && TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (GET_MODE (x)),
740 GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (x)))))
741 ? (CHEAP_REG (SUBREG_REG (x)) ? 0
742 : (REGNO (SUBREG_REG (x)) >= FIRST_PSEUDO_REGISTER ? 1
743 : 2))
744 : rtx_cost (x, SET) * 2);
747 /* Return the right cost to give to an operation
748 to make the cost of the corresponding register-to-register instruction
749 N times that of a fast register-to-register instruction. */
751 #define COSTS_N_INSNS(N) ((N) * 4 - 2)
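/* For example, COSTS_N_INSNS (1) == 2, the cost of a fast
   register-to-register instruction, and the MULT and DIV defaults in
   rtx_cost below work out to COSTS_N_INSNS (5) == 18 and
   COSTS_N_INSNS (7) == 26 respectively.  */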
754 rtx_cost (x, outer_code)
755 rtx x;
756 enum rtx_code outer_code ATTRIBUTE_UNUSED;
758 register int i, j;
759 register enum rtx_code code;
760 register const char *fmt;
761 register int total;
763 if (x == 0)
764 return 0;
766 /* Compute the default costs of certain things.
767 Note that RTX_COSTS can override the defaults. */
769 code = GET_CODE (x);
770 switch (code)
772 case MULT:
773 /* Count multiplication by 2**n as a shift,
774 because if we are considering it, we would output it as a shift. */
775 if (GET_CODE (XEXP (x, 1)) == CONST_INT
776 && exact_log2 (INTVAL (XEXP (x, 1))) >= 0)
777 total = 2;
778 else
779 total = COSTS_N_INSNS (5);
780 break;
781 case DIV:
782 case UDIV:
783 case MOD:
784 case UMOD:
785 total = COSTS_N_INSNS (7);
786 break;
787 case USE:
788 /* Used in loop.c and combine.c as a marker. */
789 total = 0;
790 break;
791 case ASM_OPERANDS:
792 /* We don't want these to be used in substitutions because
793 we have no way of validating the resulting insn. So assign
794 anything containing an ASM_OPERANDS a very high cost. */
795 total = 1000;
796 break;
797 default:
798 total = 2;
801 switch (code)
803 case REG:
804 return ! CHEAP_REG (x);
806 case SUBREG:
807 /* If we can't tie these modes, make this expensive. The larger
808 the mode, the more expensive it is. */
809 if (! MODES_TIEABLE_P (GET_MODE (x), GET_MODE (SUBREG_REG (x))))
810 return COSTS_N_INSNS (2
811 + GET_MODE_SIZE (GET_MODE (x)) / UNITS_PER_WORD);
812 return 2;
813 #ifdef RTX_COSTS
814 RTX_COSTS (x, code, outer_code);
815 #endif
816 #ifdef CONST_COSTS
817 CONST_COSTS (x, code, outer_code);
818 #endif
820 default:
821 #ifdef DEFAULT_RTX_COSTS
822 DEFAULT_RTX_COSTS(x, code, outer_code);
823 #endif
824 break;
827 /* Sum the costs of the sub-rtx's, plus cost of this operation,
828 which is already in total. */
830 fmt = GET_RTX_FORMAT (code);
831 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
832 if (fmt[i] == 'e')
833 total += rtx_cost (XEXP (x, i), code);
834 else if (fmt[i] == 'E')
835 for (j = 0; j < XVECLEN (x, i); j++)
836 total += rtx_cost (XVECEXP (x, i, j), code);
838 return total;
841 static struct cse_reg_info *
842 get_cse_reg_info (regno)
843 int regno;
845 struct cse_reg_info *cri;
846 splay_tree_node n;
848 /* See if we already have this entry. */
849 n = splay_tree_lookup (cse_reg_info_tree,
850 (splay_tree_key) regno);
851 if (n)
852 cri = (struct cse_reg_info *) (n->value);
853 else
855 /* Get a new cse_reg_info structure. */
856 if (cse_reg_info_free_list)
858 cri = cse_reg_info_free_list;
859 cse_reg_info_free_list = cri->variant.next;
861 else
862 cri = (struct cse_reg_info *) xmalloc (sizeof (struct cse_reg_info));
864 /* Initialize it. */
865 cri->variant.reg_tick = 0;
866 cri->reg_in_table = -1;
867 cri->reg_qty = regno;
869 splay_tree_insert (cse_reg_info_tree,
870 (splay_tree_key) regno,
871 (splay_tree_value) cri);
874 /* Cache this lookup; we tend to be looking up information about the
875 same register several times in a row. */
876 cached_regno = regno;
877 cached_cse_reg_info = cri;
879 return cri;
882 static void
883 free_cse_reg_info (v)
884 splay_tree_value v;
886 struct cse_reg_info *cri = (struct cse_reg_info *) v;
888 cri->variant.next = cse_reg_info_free_list;
889 cse_reg_info_free_list = cri;
892 /* Clear the hash table and initialize each register with its own quantity,
893 for a new basic block. */
895 static void
896 new_basic_block ()
898 register int i;
900 next_qty = max_reg;
902 if (cse_reg_info_tree)
904 splay_tree_delete (cse_reg_info_tree);
905 cached_cse_reg_info = 0;
908 cse_reg_info_tree = splay_tree_new (splay_tree_compare_ints, 0,
909 free_cse_reg_info);
911 CLEAR_HARD_REG_SET (hard_regs_in_table);
913 /* The per-quantity values used to be initialized here, but it is
914 much faster to initialize each as it is made in `make_new_qty'. */
916 for (i = 0; i < NBUCKETS; i++)
918 register struct table_elt *this, *next;
919 for (this = table[i]; this; this = next)
921 next = this->next_same_hash;
922 free_element (this);
926 bzero ((char *) table, sizeof table);
928 prev_insn = 0;
930 #ifdef HAVE_cc0
931 prev_insn_cc0 = 0;
932 #endif
935 /* Say that register REG contains a quantity not previously held in any
936 register, and initialize that quantity. */
938 static void
939 make_new_qty (reg)
940 register int reg;
942 register int q;
944 if (next_qty >= max_qty)
945 abort ();
947 q = REG_QTY (reg) = next_qty++;
948 qty_first_reg[q] = reg;
949 qty_last_reg[q] = reg;
950 qty_const[q] = qty_const_insn[q] = 0;
951 qty_comparison_code[q] = UNKNOWN;
953 reg_next_eqv[reg] = reg_prev_eqv[reg] = -1;
956 /* Make reg NEW equivalent to reg OLD.
957 OLD is not changing; NEW is. */
959 static void
960 make_regs_eqv (new, old)
961 register int new, old;
963 register int lastr, firstr;
964 register int q = REG_QTY (old);
966 /* Nothing should become eqv until it has a "non-invalid" qty number. */
967 if (! REGNO_QTY_VALID_P (old))
968 abort ();
970 REG_QTY (new) = q;
971 firstr = qty_first_reg[q];
972 lastr = qty_last_reg[q];
974 /* Prefer fixed hard registers to anything. Prefer pseudo regs to other
975 hard regs. Among pseudos, if NEW will live longer than any other reg
976 of the same qty, and that is beyond the current basic block,
977 make it the new canonical replacement for this qty. */
978 if (! (firstr < FIRST_PSEUDO_REGISTER && FIXED_REGNO_P (firstr))
979 /* Certain fixed registers might be of the class NO_REGS. This means
980 that not only can they not be allocated by the compiler, but
981 they cannot be used in substitutions or canonicalizations
982 either. */
983 && (new >= FIRST_PSEUDO_REGISTER || REGNO_REG_CLASS (new) != NO_REGS)
984 && ((new < FIRST_PSEUDO_REGISTER && FIXED_REGNO_P (new))
985 || (new >= FIRST_PSEUDO_REGISTER
986 && (firstr < FIRST_PSEUDO_REGISTER
987 || ((uid_cuid[REGNO_LAST_UID (new)] > cse_basic_block_end
988 || (uid_cuid[REGNO_FIRST_UID (new)]
989 < cse_basic_block_start))
990 && (uid_cuid[REGNO_LAST_UID (new)]
991 > uid_cuid[REGNO_LAST_UID (firstr)]))))))
993 reg_prev_eqv[firstr] = new;
994 reg_next_eqv[new] = firstr;
995 reg_prev_eqv[new] = -1;
996 qty_first_reg[q] = new;
998 else
1000 /* If NEW is a hard reg (known to be non-fixed), insert at end.
1001 Otherwise, insert before any non-fixed hard regs that are at the
1002 end. Registers of class NO_REGS cannot be used as an
1003 equivalent for anything. */
1004 while (lastr < FIRST_PSEUDO_REGISTER && reg_prev_eqv[lastr] >= 0
1005 && (REGNO_REG_CLASS (lastr) == NO_REGS || ! FIXED_REGNO_P (lastr))
1006 && new >= FIRST_PSEUDO_REGISTER)
1007 lastr = reg_prev_eqv[lastr];
1008 reg_next_eqv[new] = reg_next_eqv[lastr];
1009 if (reg_next_eqv[lastr] >= 0)
1010 reg_prev_eqv[reg_next_eqv[lastr]] = new;
1011 else
1012 qty_last_reg[q] = new;
1013 reg_next_eqv[lastr] = new;
1014 reg_prev_eqv[new] = lastr;
1018 /* Remove REG from its equivalence class. */
1020 static void
1021 delete_reg_equiv (reg)
1022 register int reg;
1024 register int q = REG_QTY (reg);
1025 register int p, n;
1027 /* If invalid, do nothing. */
1028 if (q == reg)
1029 return;
1031 p = reg_prev_eqv[reg];
1032 n = reg_next_eqv[reg];
1034 if (n != -1)
1035 reg_prev_eqv[n] = p;
1036 else
1037 qty_last_reg[q] = p;
1038 if (p != -1)
1039 reg_next_eqv[p] = n;
1040 else
1041 qty_first_reg[q] = n;
1043 REG_QTY (reg) = reg;
1046 /* Remove any invalid expressions from the hash table
1047 that refer to any of the registers contained in expression X.
1049 Make sure that newly inserted references to those registers
1050 as subexpressions will be considered valid.
1052 mention_regs is not called when a register itself
1053 is being stored in the table.
1055 Return 1 if we have done something that may have changed the hash code
1056 of X. */
1058 static int
1059 mention_regs (x)
1060 rtx x;
1062 register enum rtx_code code;
1063 register int i, j;
1064 register const char *fmt;
1065 register int changed = 0;
1067 if (x == 0)
1068 return 0;
1070 code = GET_CODE (x);
1071 if (code == REG)
1073 register int regno = REGNO (x);
1074 register int endregno
1075 = regno + (regno >= FIRST_PSEUDO_REGISTER ? 1
1076 : HARD_REGNO_NREGS (regno, GET_MODE (x)));
1077 int i;
1079 for (i = regno; i < endregno; i++)
1081 if (REG_IN_TABLE (i) >= 0 && REG_IN_TABLE (i) != REG_TICK (i))
1082 remove_invalid_refs (i);
1084 REG_IN_TABLE (i) = REG_TICK (i);
1087 return 0;
1090 /* If this is a SUBREG, we don't want to discard other SUBREGs of the same
1091 pseudo if they don't use overlapping words. We handle only pseudos
1092 here for simplicity. */
1093 if (code == SUBREG && GET_CODE (SUBREG_REG (x)) == REG
1094 && REGNO (SUBREG_REG (x)) >= FIRST_PSEUDO_REGISTER)
1096 int i = REGNO (SUBREG_REG (x));
1098 if (REG_IN_TABLE (i) >= 0 && REG_IN_TABLE (i) != REG_TICK (i))
1100 /* If reg_tick has been incremented more than once since
1101 reg_in_table was last set, that means that the entire
1102 register has been set before, so discard anything memorized
1103 for the entire register, including all SUBREG expressions. */
1104 if (REG_IN_TABLE (i) != REG_TICK (i) - 1)
1105 remove_invalid_refs (i);
1106 else
1107 remove_invalid_subreg_refs (i, SUBREG_WORD (x), GET_MODE (x));
1110 REG_IN_TABLE (i) = REG_TICK (i);
1111 return 0;
1114 /* If X is a comparison or a COMPARE and either operand is a register
1115 that does not have a quantity, give it one. This is so that a later
1116 call to record_jump_equiv won't cause X to be assigned a different
1117 hash code and thus not be found in the table after that call.
1119 It is not necessary to do this here, since rehash_using_reg can
1120 fix up the table later, but doing this here eliminates the need to
1121 call that expensive function in the most common case where the only
1122 use of the register is in the comparison. */
1124 if (code == COMPARE || GET_RTX_CLASS (code) == '<')
1126 if (GET_CODE (XEXP (x, 0)) == REG
1127 && ! REGNO_QTY_VALID_P (REGNO (XEXP (x, 0))))
1128 if (insert_regs (XEXP (x, 0), NULL_PTR, 0))
1130 rehash_using_reg (XEXP (x, 0));
1131 changed = 1;
1134 if (GET_CODE (XEXP (x, 1)) == REG
1135 && ! REGNO_QTY_VALID_P (REGNO (XEXP (x, 1))))
1136 if (insert_regs (XEXP (x, 1), NULL_PTR, 0))
1138 rehash_using_reg (XEXP (x, 1));
1139 changed = 1;
1143 fmt = GET_RTX_FORMAT (code);
1144 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1145 if (fmt[i] == 'e')
1146 changed |= mention_regs (XEXP (x, i));
1147 else if (fmt[i] == 'E')
1148 for (j = 0; j < XVECLEN (x, i); j++)
1149 changed |= mention_regs (XVECEXP (x, i, j));
1151 return changed;
1154 /* Update the register quantities for inserting X into the hash table
1155 with a value equivalent to CLASSP.
1156 (If the class does not contain a REG, it is irrelevant.)
1157 If MODIFIED is nonzero, X is a destination; it is being modified.
1158 Note that delete_reg_equiv should be called on a register
1159 before insert_regs is done on that register with MODIFIED != 0.
1161 Nonzero value means that elements of reg_qty have changed
1162 so X's hash code may be different. */
1164 static int
1165 insert_regs (x, classp, modified)
1166 rtx x;
1167 struct table_elt *classp;
1168 int modified;
1170 if (GET_CODE (x) == REG)
1172 register int regno = REGNO (x);
1174 /* If REGNO is in the equivalence table already but is of the
1175 wrong mode for that equivalence, don't do anything here. */
1177 if (REGNO_QTY_VALID_P (regno)
1178 && qty_mode[REG_QTY (regno)] != GET_MODE (x))
1179 return 0;
1181 if (modified || ! REGNO_QTY_VALID_P (regno))
1183 if (classp)
1184 for (classp = classp->first_same_value;
1185 classp != 0;
1186 classp = classp->next_same_value)
1187 if (GET_CODE (classp->exp) == REG
1188 && GET_MODE (classp->exp) == GET_MODE (x))
1190 make_regs_eqv (regno, REGNO (classp->exp));
1191 return 1;
1194 make_new_qty (regno);
1195 qty_mode[REG_QTY (regno)] = GET_MODE (x);
1196 return 1;
1199 return 0;
1202 /* If X is a SUBREG, we will likely be inserting the inner register in the
1203 table. If that register doesn't have an assigned quantity number at
1204 this point but does later, the insertion that we will be doing now will
1205 not be accessible because its hash code will have changed. So assign
1206 a quantity number now. */
1208 else if (GET_CODE (x) == SUBREG && GET_CODE (SUBREG_REG (x)) == REG
1209 && ! REGNO_QTY_VALID_P (REGNO (SUBREG_REG (x))))
1211 int regno = REGNO (SUBREG_REG (x));
1213 insert_regs (SUBREG_REG (x), NULL_PTR, 0);
1214 /* Mention_regs checks if REG_TICK is exactly one larger than
1215 REG_IN_TABLE to find out if there was only a single preceding
1216 invalidation - for the SUBREG - or another one, which would be
1217 for the full register. Since we don't invalidate the SUBREG
1218 here first, we might have to bump up REG_TICK so that mention_regs
1219 will do the right thing. */
1220 if (REG_IN_TABLE (regno) >= 0
1221 && REG_TICK (regno) == REG_IN_TABLE (regno) + 1)
1222 REG_TICK (regno)++;
1223 mention_regs (x);
1224 return 1;
1226 else
1227 return mention_regs (x);
1230 /* Look in or update the hash table. */
1232 /* Put the element ELT on the list of free elements. */
1234 static void
1235 free_element (elt)
1236 struct table_elt *elt;
1238 elt->next_same_hash = free_element_chain;
1239 free_element_chain = elt;
1242 /* Return an element that is free for use. */
1244 static struct table_elt *
1245 get_element ()
1247 struct table_elt *elt = free_element_chain;
1248 if (elt)
1250 free_element_chain = elt->next_same_hash;
1251 return elt;
1253 n_elements_made++;
1254 return (struct table_elt *) oballoc (sizeof (struct table_elt));
1257 /* Remove table element ELT from use in the table.
1258 HASH is its hash code, made using the HASH macro.
1259 It's an argument because often that is known in advance
1260 and we save much time not recomputing it. */
1262 static void
1263 remove_from_table (elt, hash)
1264 register struct table_elt *elt;
1265 unsigned hash;
1267 if (elt == 0)
1268 return;
1270 /* Mark this element as removed. See cse_insn. */
1271 elt->first_same_value = 0;
1273 /* Remove the table element from its equivalence class. */
1276 register struct table_elt *prev = elt->prev_same_value;
1277 register struct table_elt *next = elt->next_same_value;
1279 if (next) next->prev_same_value = prev;
1281 if (prev)
1282 prev->next_same_value = next;
1283 else
1285 register struct table_elt *newfirst = next;
1286 while (next)
1288 next->first_same_value = newfirst;
1289 next = next->next_same_value;
1294 /* Remove the table element from its hash bucket. */
1297 register struct table_elt *prev = elt->prev_same_hash;
1298 register struct table_elt *next = elt->next_same_hash;
1300 if (next) next->prev_same_hash = prev;
1302 if (prev)
1303 prev->next_same_hash = next;
1304 else if (table[hash] == elt)
1305 table[hash] = next;
1306 else
1308 /* This entry is not in the proper hash bucket. This can happen
1309 when two classes were merged by `merge_equiv_classes'. Search
1310 for the hash bucket that it heads. This happens only very
1311 rarely, so the cost is acceptable. */
1312 for (hash = 0; hash < NBUCKETS; hash++)
1313 if (table[hash] == elt)
1314 table[hash] = next;
1318 /* Remove the table element from its related-value circular chain. */
1320 if (elt->related_value != 0 && elt->related_value != elt)
1322 register struct table_elt *p = elt->related_value;
1323 while (p->related_value != elt)
1324 p = p->related_value;
1325 p->related_value = elt->related_value;
1326 if (p->related_value == p)
1327 p->related_value = 0;
1330 free_element (elt);
1333 /* Look up X in the hash table and return its table element,
1334 or 0 if X is not in the table.
1336 MODE is the machine-mode of X, or if X is an integer constant
1337 with VOIDmode then MODE is the mode with which X will be used.
1339 Here we are satisfied to find an expression whose tree structure
1340 looks like X. */
1342 static struct table_elt *
1343 lookup (x, hash, mode)
1344 rtx x;
1345 unsigned hash;
1346 enum machine_mode mode;
1348 register struct table_elt *p;
1350 for (p = table[hash]; p; p = p->next_same_hash)
1351 if (mode == p->mode && ((x == p->exp && GET_CODE (x) == REG)
1352 || exp_equiv_p (x, p->exp, GET_CODE (x) != REG, 0)))
1353 return p;
1355 return 0;
1358 /* Like `lookup' but don't care whether the table element uses invalid regs.
1359 Also ignore discrepancies in the machine mode of a register. */
1361 static struct table_elt *
1362 lookup_for_remove (x, hash, mode)
1363 rtx x;
1364 unsigned hash;
1365 enum machine_mode mode;
1367 register struct table_elt *p;
1369 if (GET_CODE (x) == REG)
1371 int regno = REGNO (x);
1372 /* Don't check the machine mode when comparing registers;
1373 invalidating (REG:SI 0) also invalidates (REG:DF 0). */
1374 for (p = table[hash]; p; p = p->next_same_hash)
1375 if (GET_CODE (p->exp) == REG
1376 && REGNO (p->exp) == regno)
1377 return p;
1379 else
1381 for (p = table[hash]; p; p = p->next_same_hash)
1382 if (mode == p->mode && (x == p->exp || exp_equiv_p (x, p->exp, 0, 0)))
1383 return p;
1386 return 0;
1389 /* Look for an expression equivalent to X and with code CODE.
1390 If one is found, return that expression. */
1392 static rtx
1393 lookup_as_function (x, code)
1394 rtx x;
1395 enum rtx_code code;
1397 register struct table_elt *p = lookup (x, safe_hash (x, VOIDmode) % NBUCKETS,
1398 GET_MODE (x));
1399 /* If we are looking for a CONST_INT, the mode doesn't really matter, as
1400 long as we are narrowing. So if we looked in vain for a mode narrower
1401 than word_mode before, look for word_mode now. */
1402 if (p == 0 && code == CONST_INT
1403 && GET_MODE_SIZE (GET_MODE (x)) < GET_MODE_SIZE (word_mode))
1405 x = copy_rtx (x);
1406 PUT_MODE (x, word_mode);
1407 p = lookup (x, safe_hash (x, VOIDmode) % NBUCKETS, word_mode);
1410 if (p == 0)
1411 return 0;
1413 for (p = p->first_same_value; p; p = p->next_same_value)
1415 if (GET_CODE (p->exp) == code
1416 /* Make sure this is a valid entry in the table. */
1417 && exp_equiv_p (p->exp, p->exp, 1, 0))
1418 return p->exp;
1421 return 0;
1424 /* Insert X in the hash table, assuming HASH is its hash code
1425 and CLASSP is an element of the class it should go in
1426 (or 0 if a new class should be made).
1427 It is inserted at the proper position to keep the class in
1428 the order cheapest first.
1430 MODE is the machine-mode of X, or if X is an integer constant
1431 with VOIDmode then MODE is the mode with which X will be used.
1433 For elements of equal cheapness, the most recent one
1434 goes in front, except that the first element in the list
1435 remains first unless a cheaper element is added. The order of
1436 pseudo-registers does not matter, as canon_reg will be called to
1437 find the cheapest when a register is retrieved from the table.
1439 The in_memory field in the hash table element is set to 0.
1440 The caller must set it nonzero if appropriate.
1442 You should call insert_regs (X, CLASSP, MODIFY) before calling here,
1443 and if insert_regs returns a nonzero value
1444 you must then recompute its hash code before calling here.
1446 If necessary, update table showing constant values of quantities. */
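/* A typical call sequence, sketched (compare the code in
   merge_equiv_classes below):

	if (insert_regs (exp, classp, 0))
	  {
	    rehash_using_reg (exp);
	    hash = HASH (exp, mode);
	  }
	elt = insert (exp, classp, hash, mode);
*/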
1448 #define CHEAPER(X,Y) ((X)->cost < (Y)->cost)
1450 static struct table_elt *
1451 insert (x, classp, hash, mode)
1452 register rtx x;
1453 register struct table_elt *classp;
1454 unsigned hash;
1455 enum machine_mode mode;
1457 register struct table_elt *elt;
1459 /* If X is a register and we haven't made a quantity for it,
1460 something is wrong. */
1461 if (GET_CODE (x) == REG && ! REGNO_QTY_VALID_P (REGNO (x)))
1462 abort ();
1464 /* If X is a hard register, show it is being put in the table. */
1465 if (GET_CODE (x) == REG && REGNO (x) < FIRST_PSEUDO_REGISTER)
1467 int regno = REGNO (x);
1468 int endregno = regno + HARD_REGNO_NREGS (regno, GET_MODE (x));
1469 int i;
1471 for (i = regno; i < endregno; i++)
1472 SET_HARD_REG_BIT (hard_regs_in_table, i);
1475 /* If X is a label, show we recorded it. */
1476 if (GET_CODE (x) == LABEL_REF
1477 || (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == PLUS
1478 && GET_CODE (XEXP (XEXP (x, 0), 0)) == LABEL_REF))
1479 recorded_label_ref = 1;
1481 /* Put an element for X into the right hash bucket. */
1483 elt = get_element ();
1484 elt->exp = x;
1485 elt->cost = COST (x);
1486 elt->next_same_value = 0;
1487 elt->prev_same_value = 0;
1488 elt->next_same_hash = table[hash];
1489 elt->prev_same_hash = 0;
1490 elt->related_value = 0;
1491 elt->in_memory = 0;
1492 elt->mode = mode;
1493 elt->is_const = (CONSTANT_P (x)
1494 /* GNU C++ takes advantage of this for `this'
1495 (and other const values). */
1496 || (RTX_UNCHANGING_P (x)
1497 && GET_CODE (x) == REG
1498 && REGNO (x) >= FIRST_PSEUDO_REGISTER)
1499 || FIXED_BASE_PLUS_P (x));
1501 if (table[hash])
1502 table[hash]->prev_same_hash = elt;
1503 table[hash] = elt;
1505 /* Put it into the proper value-class. */
1506 if (classp)
1508 classp = classp->first_same_value;
1509 if (CHEAPER (elt, classp))
1510 /* Insert at the head of the class */
1512 register struct table_elt *p;
1513 elt->next_same_value = classp;
1514 classp->prev_same_value = elt;
1515 elt->first_same_value = elt;
1517 for (p = classp; p; p = p->next_same_value)
1518 p->first_same_value = elt;
1520 else
1522 /* Insert not at head of the class. */
1523 /* Put it after the last element cheaper than X. */
1524 register struct table_elt *p, *next;
1525 for (p = classp; (next = p->next_same_value) && CHEAPER (next, elt);
1526 p = next);
1527 /* Put it after P and before NEXT. */
1528 elt->next_same_value = next;
1529 if (next)
1530 next->prev_same_value = elt;
1531 elt->prev_same_value = p;
1532 p->next_same_value = elt;
1533 elt->first_same_value = classp;
1536 else
1537 elt->first_same_value = elt;
1539 /* If this is a constant being set equivalent to a register or a register
1540 being set equivalent to a constant, note the constant equivalence.
1542 If this is a constant, it cannot be equivalent to a different constant,
1543 and a constant is the only thing that can be cheaper than a register. So
1544 we know the register is the head of the class (before the constant was
1545 inserted).
1547 If this is a register that is not already known equivalent to a
1548 constant, we must check the entire class.
1550 If this is a register that is already known equivalent to a constant,
1551 update `qty_const_insn' to show that `this_insn' is the latest
1552 insn making that quantity equivalent to the constant. */
1554 if (elt->is_const && classp && GET_CODE (classp->exp) == REG
1555 && GET_CODE (x) != REG)
1557 qty_const[REG_QTY (REGNO (classp->exp))]
1558 = gen_lowpart_if_possible (qty_mode[REG_QTY (REGNO (classp->exp))], x);
1559 qty_const_insn[REG_QTY (REGNO (classp->exp))] = this_insn;
1562 else if (GET_CODE (x) == REG && classp && ! qty_const[REG_QTY (REGNO (x))]
1563 && ! elt->is_const)
1565 register struct table_elt *p;
1567 for (p = classp; p != 0; p = p->next_same_value)
1569 if (p->is_const && GET_CODE (p->exp) != REG)
1571 qty_const[REG_QTY (REGNO (x))]
1572 = gen_lowpart_if_possible (GET_MODE (x), p->exp);
1573 qty_const_insn[REG_QTY (REGNO (x))] = this_insn;
1574 break;
1579 else if (GET_CODE (x) == REG && qty_const[REG_QTY (REGNO (x))]
1580 && GET_MODE (x) == qty_mode[REG_QTY (REGNO (x))])
1581 qty_const_insn[REG_QTY (REGNO (x))] = this_insn;
1583 /* If this is a constant with symbolic value,
1584 and it has a term with an explicit integer value,
1585 link it up with related expressions. */
1586 if (GET_CODE (x) == CONST)
1588 rtx subexp = get_related_value (x);
1589 unsigned subhash;
1590 struct table_elt *subelt, *subelt_prev;
1592 if (subexp != 0)
1594 /* Get the integer-free subexpression in the hash table. */
1595 subhash = safe_hash (subexp, mode) % NBUCKETS;
1596 subelt = lookup (subexp, subhash, mode);
1597 if (subelt == 0)
1598 subelt = insert (subexp, NULL_PTR, subhash, mode);
1599 /* Initialize SUBELT's circular chain if it has none. */
1600 if (subelt->related_value == 0)
1601 subelt->related_value = subelt;
1602 /* Find the element in the circular chain that precedes SUBELT. */
1603 subelt_prev = subelt;
1604 while (subelt_prev->related_value != subelt)
1605 subelt_prev = subelt_prev->related_value;
1606 /* Put new ELT into SUBELT's circular chain just before SUBELT.
1607 This way the element that follows SUBELT is the oldest one. */
1608 elt->related_value = subelt_prev->related_value;
1609 subelt_prev->related_value = elt;
1613 return elt;
1616 /* Given two equivalence classes, CLASS1 and CLASS2, put all the entries from
1617 CLASS2 into CLASS1. This is done when we have reached an insn which makes
1618 the two classes equivalent.
1620 CLASS1 will be the surviving class; CLASS2 should not be used after this
1621 call.
1623 Any invalid entries in CLASS2 will not be copied. */
1625 static void
1626 merge_equiv_classes (class1, class2)
1627 struct table_elt *class1, *class2;
1629 struct table_elt *elt, *next, *new;
1631 /* Ensure we start with the head of the classes. */
1632 class1 = class1->first_same_value;
1633 class2 = class2->first_same_value;
1635 /* If they were already equal, forget it. */
1636 if (class1 == class2)
1637 return;
1639 for (elt = class2; elt; elt = next)
1641 unsigned hash;
1642 rtx exp = elt->exp;
1643 enum machine_mode mode = elt->mode;
1645 next = elt->next_same_value;
1647 /* Remove old entry, make a new one in CLASS1's class.
1648 Don't do this for invalid entries as we cannot find their
1649 hash code (it also isn't necessary). */
1650 if (GET_CODE (exp) == REG || exp_equiv_p (exp, exp, 1, 0))
1652 hash_arg_in_memory = 0;
1653 hash_arg_in_struct = 0;
1654 hash = HASH (exp, mode);
1656 if (GET_CODE (exp) == REG)
1657 delete_reg_equiv (REGNO (exp));
1659 remove_from_table (elt, hash);
1661 if (insert_regs (exp, class1, 0))
1663 rehash_using_reg (exp);
1664 hash = HASH (exp, mode);
1666 new = insert (exp, class1, hash, mode);
1667 new->in_memory = hash_arg_in_memory;
1668 new->in_struct = hash_arg_in_struct;
1674 /* Flush the entire hash table. */
1676 static void
1677 flush_hash_table ()
1679 int i;
1680 struct table_elt *p;
1682 for (i = 0; i < NBUCKETS; i++)
1683 for (p = table[i]; p; p = table[i])
1685 /* Note that invalidate can remove elements
1686 after P in the current hash chain. */
1687 if (GET_CODE (p->exp) == REG)
1688 invalidate (p->exp, p->mode);
1689 else
1690 remove_from_table (p, i);
1695 /* Remove from the hash table, or mark as invalid,
1696 all expressions whose values could be altered by storing in X.
1697 X is a register, a subreg, or a memory reference with nonvarying address
1698 (because, when a memory reference with a varying address is stored in,
1699 all memory references are removed by invalidate_memory
1700 so specific invalidation is superfluous).
1701 FULL_MODE, if not VOIDmode, indicates that this much should be invalidated
1702 instead of just the amount indicated by the mode of X. This is only used
1703 for bitfield stores into memory.
1705 A nonvarying address may be just a register or just
1706 a symbol reference, or it may be either of those plus
1707 a numeric offset. */
1709 static void
1710 invalidate (x, full_mode)
1711 rtx x;
1712 enum machine_mode full_mode;
1714 register int i;
1715 register struct table_elt *p;
1717 /* If X is a register, dependencies on its contents
1718 are recorded through the qty number mechanism.
1719 Just change the qty number of the register,
1720 mark it as invalid for expressions that refer to it,
1721 and remove it itself. */
1723 if (GET_CODE (x) == REG)
1725 register int regno = REGNO (x);
1726 register unsigned hash = HASH (x, GET_MODE (x));
1728 /* Remove REGNO from any quantity list it might be on and indicate
1729 that its value might have changed. If it is a pseudo, remove its
1730 entry from the hash table.
1732 For a hard register, we do the first two actions above for any
1733 additional hard registers corresponding to X. Then, if any of these
1734 registers are in the table, we must remove any REG entries that
1735 overlap these registers. */
1737 delete_reg_equiv (regno);
1738 REG_TICK (regno)++;
1740 if (regno >= FIRST_PSEUDO_REGISTER)
1742 /* Because a register can be referenced in more than one mode,
1743 we might have to remove more than one table entry. */
1745 struct table_elt *elt;
1747 while ((elt = lookup_for_remove (x, hash, GET_MODE (x))))
1748 remove_from_table (elt, hash);
1750 else
1752 HOST_WIDE_INT in_table
1753 = TEST_HARD_REG_BIT (hard_regs_in_table, regno);
1754 int endregno = regno + HARD_REGNO_NREGS (regno, GET_MODE (x));
1755 int tregno, tendregno;
1756 register struct table_elt *p, *next;
1758 CLEAR_HARD_REG_BIT (hard_regs_in_table, regno);
1760 for (i = regno + 1; i < endregno; i++)
1762 in_table |= TEST_HARD_REG_BIT (hard_regs_in_table, i);
1763 CLEAR_HARD_REG_BIT (hard_regs_in_table, i);
1764 delete_reg_equiv (i);
1765 REG_TICK (i)++;
1768 if (in_table)
1769 for (hash = 0; hash < NBUCKETS; hash++)
1770 for (p = table[hash]; p; p = next)
1772 next = p->next_same_hash;
1774 if (GET_CODE (p->exp) != REG
1775 || REGNO (p->exp) >= FIRST_PSEUDO_REGISTER)
1776 continue;
1778 tregno = REGNO (p->exp);
1779 tendregno
1780 = tregno + HARD_REGNO_NREGS (tregno, GET_MODE (p->exp));
1781 if (tendregno > regno && tregno < endregno)
1782 remove_from_table (p, hash);
1786 return;
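/* Example (not from the original source): if REGNO is 4 and X occupies hard
   regs 4..5 (ENDREGNO == 6), a table entry whose REG spans regs 5..6 has
   TREGNO == 5 and TENDREGNO == 7; since 7 > 4 and 5 < 6, the half-open
   ranges [4,6) and [5,7) intersect and the entry is removed.  The test
   above is the usual interval-overlap check.  */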
1789 if (GET_CODE (x) == SUBREG)
1791 if (GET_CODE (SUBREG_REG (x)) != REG)
1792 abort ();
1793 invalidate (SUBREG_REG (x), VOIDmode);
1794 return;
1797 /* If X is a parallel, invalidate all of its elements. */
1799 if (GET_CODE (x) == PARALLEL)
1801 for (i = XVECLEN (x, 0) - 1; i >= 0 ; --i)
1802 invalidate (XVECEXP (x, 0, i), VOIDmode);
1803 return;
1806 /* If X is an expr_list, this is part of a disjoint return value;
1807 extract the location in question ignoring the offset. */
1809 if (GET_CODE (x) == EXPR_LIST)
1811 invalidate (XEXP (x, 0), VOIDmode);
1812 return;
1815 /* X is not a register; it must be a memory reference with
1816 a nonvarying address. Remove all hash table elements
1817 that refer to overlapping pieces of memory. */
1819 if (GET_CODE (x) != MEM)
1820 abort ();
1822 if (full_mode == VOIDmode)
1823 full_mode = GET_MODE (x);
1825 for (i = 0; i < NBUCKETS; i++)
1827 register struct table_elt *next;
1828 for (p = table[i]; p; p = next)
1830 next = p->next_same_hash;
1831 /* Invalidate ASM_OPERANDS which reference memory (this is easier
1832 than checking all the aliases). */
1833 if (p->in_memory
1834 && (GET_CODE (p->exp) != MEM
1835 || true_dependence (x, full_mode, p->exp, cse_rtx_varies_p)))
1836 remove_from_table (p, i);
1841 /* Remove all expressions that refer to register REGNO,
1842 since they are already invalid, and we are about to
1843 mark that register valid again and don't want the old
1844 expressions to reappear as valid. */
1846 static void
1847 remove_invalid_refs (regno)
1848 int regno;
1850 register int i;
1851 register struct table_elt *p, *next;
1853 for (i = 0; i < NBUCKETS; i++)
1854 for (p = table[i]; p; p = next)
1856 next = p->next_same_hash;
1857 if (GET_CODE (p->exp) != REG
1858 && refers_to_regno_p (regno, regno + 1, p->exp, NULL_PTR))
1859 remove_from_table (p, i);
1863 /* Likewise for a SUBREG of register REGNO, with SUBREG_WORD WORD and mode MODE. */

1864 static void
1865 remove_invalid_subreg_refs (regno, word, mode)
1866 int regno;
1867 int word;
1868 enum machine_mode mode;
1870 register int i;
1871 register struct table_elt *p, *next;
1872 int end = word + (GET_MODE_SIZE (mode) - 1) / UNITS_PER_WORD;
1874 for (i = 0; i < NBUCKETS; i++)
1875 for (p = table[i]; p; p = next)
1877 rtx exp;
1878 next = p->next_same_hash;
1880 exp = p->exp;
1881 if (GET_CODE (p->exp) != REG
1882 && (GET_CODE (exp) != SUBREG
1883 || GET_CODE (SUBREG_REG (exp)) != REG
1884 || REGNO (SUBREG_REG (exp)) != regno
1885 || (((SUBREG_WORD (exp)
1886 + (GET_MODE_SIZE (GET_MODE (exp)) - 1) / UNITS_PER_WORD)
1887 >= word)
1888 && SUBREG_WORD (exp) <= end))
1889 && refers_to_regno_p (regno, regno + 1, p->exp, NULL_PTR))
1890 remove_from_table (p, i);
1894 /* Recompute the hash codes of any valid entries in the hash table that
1895 reference X, if X is a register, or SUBREG_REG (X) if X is a SUBREG.
1897 This is called when we make a jump equivalence. */
1899 static void
1900 rehash_using_reg (x)
1901 rtx x;
1903 unsigned int i;
1904 struct table_elt *p, *next;
1905 unsigned hash;
1907 if (GET_CODE (x) == SUBREG)
1908 x = SUBREG_REG (x);
1910 /* If X is not a register or if the register is known not to be in any
1911 valid entries in the table, we have no work to do. */
1913 if (GET_CODE (x) != REG
1914 || REG_IN_TABLE (REGNO (x)) < 0
1915 || REG_IN_TABLE (REGNO (x)) != REG_TICK (REGNO (x)))
1916 return;
1918 /* Scan all hash chains looking for valid entries that mention X.
1919 If we find one and it is in the wrong hash chain, move it. We can skip
1920 objects that are registers, since they are handled specially. */
1922 for (i = 0; i < NBUCKETS; i++)
1923 for (p = table[i]; p; p = next)
1925 next = p->next_same_hash;
1926 if (GET_CODE (p->exp) != REG && reg_mentioned_p (x, p->exp)
1927 && exp_equiv_p (p->exp, p->exp, 1, 0)
1928 && i != (hash = safe_hash (p->exp, p->mode) % NBUCKETS))
1930 if (p->next_same_hash)
1931 p->next_same_hash->prev_same_hash = p->prev_same_hash;
1933 if (p->prev_same_hash)
1934 p->prev_same_hash->next_same_hash = p->next_same_hash;
1935 else
1936 table[i] = p->next_same_hash;
1938 p->next_same_hash = table[hash];
1939 p->prev_same_hash = 0;
1940 if (table[hash])
1941 table[hash]->prev_same_hash = p;
1942 table[hash] = p;
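/* Illustrative aside (not part of the original source): the block above is
   an ordinary doubly linked list unlink followed by a push onto the front
   of the new bucket.  A sketch with a hypothetical node type:  */
#if 0
struct dnode { struct dnode *prev, *next; };

static void
move_to_front (from_head, to_head, p)
     struct dnode **from_head, **to_head, *p;
{
  if (p->next)				/* Unlink P from its old chain.  */
    p->next->prev = p->prev;
  if (p->prev)
    p->prev->next = p->next;
  else
    *from_head = p->next;
  p->next = *to_head;			/* Push P onto the new chain.  */
  p->prev = 0;
  if (*to_head)
    (*to_head)->prev = p;
  *to_head = p;
}
#endif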
1947 /* Remove from the hash table any expression that is a call-clobbered
1948 register. Also update their TICK values. */
1950 static void
1951 invalidate_for_call ()
1953 int regno, endregno;
1954 int i;
1955 unsigned hash;
1956 struct table_elt *p, *next;
1957 int in_table = 0;
1959 /* Go through all the hard registers. For each that is clobbered in
1960 a CALL_INSN, remove the register from quantity chains and update
1961 reg_tick if defined. Also see if any of these registers is currently
1962 in the table. */
1964 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
1965 if (TEST_HARD_REG_BIT (regs_invalidated_by_call, regno))
1967 delete_reg_equiv (regno);
1968 if (REG_TICK (regno) >= 0)
1969 REG_TICK (regno)++;
1971 in_table |= (TEST_HARD_REG_BIT (hard_regs_in_table, regno) != 0);
1974 /* In the case where we have no call-clobbered hard registers in the
1975 table, we are done. Otherwise, scan the table and remove any
1976 entry that overlaps a call-clobbered register. */
1978 if (in_table)
1979 for (hash = 0; hash < NBUCKETS; hash++)
1980 for (p = table[hash]; p; p = next)
1982 next = p->next_same_hash;
1984 if (p->in_memory)
1986 remove_from_table (p, hash);
1987 continue;
1990 if (GET_CODE (p->exp) != REG
1991 || REGNO (p->exp) >= FIRST_PSEUDO_REGISTER)
1992 continue;
1994 regno = REGNO (p->exp);
1995 endregno = regno + HARD_REGNO_NREGS (regno, GET_MODE (p->exp));
1997 for (i = regno; i < endregno; i++)
1998 if (TEST_HARD_REG_BIT (regs_invalidated_by_call, i))
2000 remove_from_table (p, hash);
2001 break;
2006 /* Given an expression X of type CONST,
2007 and ELT which is its table entry (or 0 if it
2008 is not in the hash table),
2009 return an alternate expression for X as a register plus integer.
2010 If none can be found, return 0. */
2012 static rtx
2013 use_related_value (x, elt)
2014 rtx x;
2015 struct table_elt *elt;
2017 register struct table_elt *relt = 0;
2018 register struct table_elt *p, *q;
2019 HOST_WIDE_INT offset;
2021 /* First, is there anything related known?
2022 If we have a table element, we can tell from that.
2023 Otherwise, must look it up. */
2025 if (elt != 0 && elt->related_value != 0)
2026 relt = elt;
2027 else if (elt == 0 && GET_CODE (x) == CONST)
2029 rtx subexp = get_related_value (x);
2030 if (subexp != 0)
2031 relt = lookup (subexp,
2032 safe_hash (subexp, GET_MODE (subexp)) % NBUCKETS,
2033 GET_MODE (subexp));
2036 if (relt == 0)
2037 return 0;
2039 /* Search all related table entries for one that has an
2040 equivalent register. */
2042 p = relt;
2043 while (1)
2045 /* This loop is strange in that it is executed in two different cases.
2046 The first is when X is already in the table. Then it is searching
2047 the RELATED_VALUE list of X's class (RELT). The second case is when
2048 X is not in the table. Then RELT points to a class for the related
2049 value.
2051 Ensure that, whatever case we are in, we ignore classes that have
2052 the same value as X. */
2054 if (rtx_equal_p (x, p->exp))
2055 q = 0;
2056 else
2057 for (q = p->first_same_value; q; q = q->next_same_value)
2058 if (GET_CODE (q->exp) == REG)
2059 break;
2061 if (q)
2062 break;
2064 p = p->related_value;
2066 /* We went all the way around, so there is nothing to be found.
2067 Alternatively, perhaps RELT was in the table for some other reason
2068 and it has no related values recorded. */
2069 if (p == relt || p == 0)
2070 break;
2073 if (q == 0)
2074 return 0;
2076 offset = (get_integer_term (x) - get_integer_term (p->exp));
2077 /* Note: OFFSET may be 0 if P->exp and X are related by commutativity. */
2078 return plus_constant (q->exp, offset);
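/* Example (not from the original source): suppose X is
   (const (plus (symbol_ref "tbl") (const_int 12))) and the related-value
   ring for (symbol_ref "tbl") contains
   (const (plus (symbol_ref "tbl") (const_int 4))), whose class also holds
   (reg 7).  Then OFFSET == 12 - 4 == 8 and the result is
   (plus (reg 7) (const_int 8)).  */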
2081 /* Hash an rtx. We are careful to make sure the value is never negative.
2082 Equivalent registers hash identically.
2083 MODE is used in hashing for CONST_INTs only;
2084 otherwise the mode of X is used.
2086 Store 1 in do_not_record if any subexpression is volatile.
2088 Store 1 in hash_arg_in_memory if X contains a MEM rtx
2089 which does not have the RTX_UNCHANGING_P bit set.
2090 In this case, also store 1 in hash_arg_in_struct
2091 if there is a MEM rtx which has the MEM_IN_STRUCT_P bit set.
2093 Note that cse_insn knows that the hash code of a MEM expression
2094 is just (int) MEM plus the hash code of the address. */
2096 static unsigned
2097 canon_hash (x, mode)
2098 rtx x;
2099 enum machine_mode mode;
2101 register int i, j;
2102 register unsigned hash = 0;
2103 register enum rtx_code code;
2104 register const char *fmt;
2106 /* repeat is used to turn tail-recursion into iteration. */
2107 repeat:
2108 if (x == 0)
2109 return hash;
2111 code = GET_CODE (x);
2112 switch (code)
2114 case REG:
2116 register int regno = REGNO (x);
2118 /* On some machines, we can't record any non-fixed hard register,
2119 because extending its life will cause reload problems. We
2120 consider ap, fp, and sp to be fixed for this purpose.
2122 We also consider CCmode registers to be fixed for this purpose;
2123 failure to do so leads to failure to simplify 0<100 type of
2124 conditionals.
2126 On all machines, we can't record any global registers. */
2128 if (regno < FIRST_PSEUDO_REGISTER
2129 && (global_regs[regno]
2130 || (SMALL_REGISTER_CLASSES
2131 && ! fixed_regs[regno]
2132 && regno != FRAME_POINTER_REGNUM
2133 && regno != HARD_FRAME_POINTER_REGNUM
2134 && regno != ARG_POINTER_REGNUM
2135 && regno != STACK_POINTER_REGNUM
2136 && GET_MODE_CLASS (GET_MODE (x)) != MODE_CC)))
2138 do_not_record = 1;
2139 return 0;
2141 hash += ((unsigned) REG << 7) + (unsigned) REG_QTY (regno);
2142 return hash;
2145 /* We handle SUBREG of a REG specially because the underlying
2146 reg changes its hash value with every value change; we don't
2147 want to have to forget unrelated subregs when one subreg changes. */
2148 case SUBREG:
2150 if (GET_CODE (SUBREG_REG (x)) == REG)
2152 hash += (((unsigned) SUBREG << 7)
2153 + REGNO (SUBREG_REG (x)) + SUBREG_WORD (x));
2154 return hash;
2156 break;
2159 case CONST_INT:
2161 unsigned HOST_WIDE_INT tem = INTVAL (x);
2162 hash += ((unsigned) CONST_INT << 7) + (unsigned) mode + tem;
2163 return hash;
2166 case CONST_DOUBLE:
2167 /* This is like the general case, except that it only counts
2168 the integers representing the constant. */
2169 hash += (unsigned) code + (unsigned) GET_MODE (x);
2170 if (GET_MODE (x) != VOIDmode)
2171 for (i = 2; i < GET_RTX_LENGTH (CONST_DOUBLE); i++)
2173 unsigned HOST_WIDE_INT tem = XWINT (x, i);
2174 hash += tem;
2176 else
2177 hash += ((unsigned) CONST_DOUBLE_LOW (x)
2178 + (unsigned) CONST_DOUBLE_HIGH (x));
2179 return hash;
2181 /* Assume there is only one rtx object for any given label. */
2182 case LABEL_REF:
2183 hash
2184 += ((unsigned) LABEL_REF << 7) + (unsigned long) XEXP (x, 0);
2185 return hash;
2187 case SYMBOL_REF:
2188 hash
2189 += ((unsigned) SYMBOL_REF << 7) + (unsigned long) XSTR (x, 0);
2190 return hash;
2192 case MEM:
2193 if (MEM_VOLATILE_P (x))
2195 do_not_record = 1;
2196 return 0;
2198 if (! RTX_UNCHANGING_P (x) || FIXED_BASE_PLUS_P (XEXP (x, 0)))
2200 hash_arg_in_memory = 1;
2201 if (MEM_IN_STRUCT_P (x)) hash_arg_in_struct = 1;
2203 /* Now that we have already found this special case,
2204 might as well speed it up as much as possible. */
2205 hash += (unsigned) MEM;
2206 x = XEXP (x, 0);
2207 goto repeat;
2209 case PRE_DEC:
2210 case PRE_INC:
2211 case POST_DEC:
2212 case POST_INC:
2213 case PC:
2214 case CC0:
2215 case CALL:
2216 case UNSPEC_VOLATILE:
2217 do_not_record = 1;
2218 return 0;
2220 case ASM_OPERANDS:
2221 if (MEM_VOLATILE_P (x))
2223 do_not_record = 1;
2224 return 0;
2226 break;
2228 default:
2229 break;
2232 i = GET_RTX_LENGTH (code) - 1;
2233 hash += (unsigned) code + (unsigned) GET_MODE (x);
2234 fmt = GET_RTX_FORMAT (code);
2235 for (; i >= 0; i--)
2237 if (fmt[i] == 'e')
2239 rtx tem = XEXP (x, i);
2241 /* If we are about to do the last recursive call
2242 needed at this level, change it into iteration.
2243 This function is called enough to be worth it. */
2244 if (i == 0)
2246 x = tem;
2247 goto repeat;
2249 hash += canon_hash (tem, 0);
2251 else if (fmt[i] == 'E')
2252 for (j = 0; j < XVECLEN (x, i); j++)
2253 hash += canon_hash (XVECEXP (x, i, j), 0);
2254 else if (fmt[i] == 's')
2256 register unsigned char *p = (unsigned char *) XSTR (x, i);
2257 if (p)
2258 while (*p)
2259 hash += *p++;
2261 else if (fmt[i] == 'i')
2263 register unsigned tem = XINT (x, i);
2264 hash += tem;
2266 else if (fmt[i] == '0' || fmt[i] == 't')
2267 /* unused */;
2268 else
2269 abort ();
2271 return hash;
2274 /* Like canon_hash but with no side effects. */
2276 static unsigned
2277 safe_hash (x, mode)
2278 rtx x;
2279 enum machine_mode mode;
2281 int save_do_not_record = do_not_record;
2282 int save_hash_arg_in_memory = hash_arg_in_memory;
2283 int save_hash_arg_in_struct = hash_arg_in_struct;
2284 unsigned hash = canon_hash (x, mode);
2285 hash_arg_in_memory = save_hash_arg_in_memory;
2286 hash_arg_in_struct = save_hash_arg_in_struct;
2287 do_not_record = save_do_not_record;
2288 return hash;
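/* Illustrative aside (not part of the original source): safe_hash lets a
   caller probe the table without disturbing the three flags canon_hash may
   set, e.g. when only a lookup is wanted:

	elt = lookup (x, safe_hash (x, mode) % NBUCKETS, mode);

   do_not_record, hash_arg_in_memory and hash_arg_in_struct are unchanged
   afterwards.  */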
2291 /* Return 1 iff X and Y would canonicalize into the same thing,
2292 without actually constructing the canonicalization of either one.
2293 If VALIDATE is nonzero,
2294 we assume X is an expression being processed from the rtl
2295 and Y was found in the hash table. We check register refs
2296 in Y for being marked as valid.
2298 If EQUAL_VALUES is nonzero, we allow a register to match a constant value
2299 that is known to be in the register. Ordinarily, we don't allow them
2300 to match, because letting them match would cause unpredictable results
2301 in all the places that search a hash table chain for an equivalent
2302 for a given value. A possible equivalent that has different structure
2303 has its hash code computed from different data. Whether the hash code
2304 is the same as that of the given value is pure luck. */
2306 static int
2307 exp_equiv_p (x, y, validate, equal_values)
2308 rtx x, y;
2309 int validate;
2310 int equal_values;
2312 register int i, j;
2313 register enum rtx_code code;
2314 register const char *fmt;
2316 /* Note: it is incorrect to assume an expression is equivalent to itself
2317 if VALIDATE is nonzero. */
2318 if (x == y && !validate)
2319 return 1;
2320 if (x == 0 || y == 0)
2321 return x == y;
2323 code = GET_CODE (x);
2324 if (code != GET_CODE (y))
2326 if (!equal_values)
2327 return 0;
2329 /* If X is a constant and Y is a register or vice versa, they may be
2330 equivalent. We only have to validate if Y is a register. */
2331 if (CONSTANT_P (x) && GET_CODE (y) == REG
2332 && REGNO_QTY_VALID_P (REGNO (y))
2333 && GET_MODE (y) == qty_mode[REG_QTY (REGNO (y))]
2334 && rtx_equal_p (x, qty_const[REG_QTY (REGNO (y))])
2335 && (! validate || REG_IN_TABLE (REGNO (y)) == REG_TICK (REGNO (y))))
2336 return 1;
2338 if (CONSTANT_P (y) && code == REG
2339 && REGNO_QTY_VALID_P (REGNO (x))
2340 && GET_MODE (x) == qty_mode[REG_QTY (REGNO (x))]
2341 && rtx_equal_p (y, qty_const[REG_QTY (REGNO (x))]))
2342 return 1;
2344 return 0;
2347 /* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent. */
2348 if (GET_MODE (x) != GET_MODE (y))
2349 return 0;
2351 switch (code)
2353 case PC:
2354 case CC0:
2355 return x == y;
2357 case CONST_INT:
2358 return INTVAL (x) == INTVAL (y);
2360 case LABEL_REF:
2361 return XEXP (x, 0) == XEXP (y, 0);
2363 case SYMBOL_REF:
2364 return XSTR (x, 0) == XSTR (y, 0);
2366 case REG:
2368 int regno = REGNO (y);
2369 int endregno
2370 = regno + (regno >= FIRST_PSEUDO_REGISTER ? 1
2371 : HARD_REGNO_NREGS (regno, GET_MODE (y)));
2372 int i;
2374 /* If the quantities are not the same, the expressions are not
2375 equivalent. If they are and we are not to validate, they
2376 are equivalent. Otherwise, ensure all regs are up-to-date. */
2378 if (REG_QTY (REGNO (x)) != REG_QTY (regno))
2379 return 0;
2381 if (! validate)
2382 return 1;
2384 for (i = regno; i < endregno; i++)
2385 if (REG_IN_TABLE (i) != REG_TICK (i))
2386 return 0;
2388 return 1;
2391 /* For commutative operations, check both orders. */
2392 case PLUS:
2393 case MULT:
2394 case AND:
2395 case IOR:
2396 case XOR:
2397 case NE:
2398 case EQ:
2399 return ((exp_equiv_p (XEXP (x, 0), XEXP (y, 0), validate, equal_values)
2400 && exp_equiv_p (XEXP (x, 1), XEXP (y, 1),
2401 validate, equal_values))
2402 || (exp_equiv_p (XEXP (x, 0), XEXP (y, 1),
2403 validate, equal_values)
2404 && exp_equiv_p (XEXP (x, 1), XEXP (y, 0),
2405 validate, equal_values)));
2407 default:
2408 break;
2411 /* Compare the elements. If any pair of corresponding elements
2412 fail to match, return 0 for the whole thing. */
2414 fmt = GET_RTX_FORMAT (code);
2415 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2417 switch (fmt[i])
2419 case 'e':
2420 if (! exp_equiv_p (XEXP (x, i), XEXP (y, i), validate, equal_values))
2421 return 0;
2422 break;
2424 case 'E':
2425 if (XVECLEN (x, i) != XVECLEN (y, i))
2426 return 0;
2427 for (j = 0; j < XVECLEN (x, i); j++)
2428 if (! exp_equiv_p (XVECEXP (x, i, j), XVECEXP (y, i, j),
2429 validate, equal_values))
2430 return 0;
2431 break;
2433 case 's':
2434 if (strcmp (XSTR (x, i), XSTR (y, i)))
2435 return 0;
2436 break;
2438 case 'i':
2439 if (XINT (x, i) != XINT (y, i))
2440 return 0;
2441 break;
2443 case 'w':
2444 if (XWINT (x, i) != XWINT (y, i))
2445 return 0;
2446 break;
2448 case '0':
2449 case 't':
2450 break;
2452 default:
2453 abort ();
2457 return 1;
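/* Example (not from the original source): (plus (reg 5) (const_int 4)) and
   (plus (const_int 4) (reg 5)) compare equal through the commutative case
   above, even though a plain structural walk would reject them.  */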
2460 /* Return 1 iff any subexpression of X matches Y.
2461 Here we do not require that X or Y be valid (for registers referred to)
2462 for being in the hash table. */
2464 static int
2465 refers_to_p (x, y)
2466 rtx x, y;
2468 register int i;
2469 register enum rtx_code code;
2470 register const char *fmt;
2472 repeat:
2473 if (x == y)
2474 return 1;
2475 if (x == 0 || y == 0)
2476 return 0;
2478 code = GET_CODE (x);
2479 /* If X as a whole has the same code as Y, they may match.
2480 If so, return 1. */
2481 if (code == GET_CODE (y))
2483 if (exp_equiv_p (x, y, 0, 1))
2484 return 1;
2487 /* X does not match, so try its subexpressions. */
2489 fmt = GET_RTX_FORMAT (code);
2490 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2491 if (fmt[i] == 'e')
2493 if (i == 0)
2495 x = XEXP (x, 0);
2496 goto repeat;
2498 else
2499 if (refers_to_p (XEXP (x, i), y))
2500 return 1;
2502 else if (fmt[i] == 'E')
2504 int j;
2505 for (j = 0; j < XVECLEN (x, i); j++)
2506 if (refers_to_p (XVECEXP (x, i, j), y))
2507 return 1;
2510 return 0;
2513 /* Given ADDR and SIZE (a memory address, and the size of the memory reference),
2514 set PBASE, PSTART, and PEND which correspond to the base of the address,
2515 the starting offset, and ending offset respectively.
2517 ADDR is known to be a nonvarying address. */
2519 /* ??? Despite what the comments say, this function is in fact frequently
2520 passed varying addresses. This does not appear to cause any problems. */
2522 static void
2523 set_nonvarying_address_components (addr, size, pbase, pstart, pend)
2524 rtx addr;
2525 int size;
2526 rtx *pbase;
2527 HOST_WIDE_INT *pstart, *pend;
2529 rtx base;
2530 HOST_WIDE_INT start, end;
2532 base = addr;
2533 start = 0;
2534 end = 0;
2536 if (flag_pic && GET_CODE (base) == PLUS
2537 && XEXP (base, 0) == pic_offset_table_rtx)
2538 base = XEXP (base, 1);
2540 /* Registers with nonvarying addresses usually have constant equivalents;
2541 but the frame pointer register is also possible. */
2542 if (GET_CODE (base) == REG
2543 && qty_const != 0
2544 && REGNO_QTY_VALID_P (REGNO (base))
2545 && qty_mode[REG_QTY (REGNO (base))] == GET_MODE (base)
2546 && qty_const[REG_QTY (REGNO (base))] != 0)
2547 base = qty_const[REG_QTY (REGNO (base))];
2548 else if (GET_CODE (base) == PLUS
2549 && GET_CODE (XEXP (base, 1)) == CONST_INT
2550 && GET_CODE (XEXP (base, 0)) == REG
2551 && qty_const != 0
2552 && REGNO_QTY_VALID_P (REGNO (XEXP (base, 0)))
2553 && (qty_mode[REG_QTY (REGNO (XEXP (base, 0)))]
2554 == GET_MODE (XEXP (base, 0)))
2555 && qty_const[REG_QTY (REGNO (XEXP (base, 0)))])
2557 start = INTVAL (XEXP (base, 1));
2558 base = qty_const[REG_QTY (REGNO (XEXP (base, 0)))];
2560 /* This can happen as the result of virtual register instantiation,
2561 if the initial offset is too large to be a valid address. */
2562 else if (GET_CODE (base) == PLUS
2563 && GET_CODE (XEXP (base, 0)) == REG
2564 && GET_CODE (XEXP (base, 1)) == REG
2565 && qty_const != 0
2566 && REGNO_QTY_VALID_P (REGNO (XEXP (base, 0)))
2567 && (qty_mode[REG_QTY (REGNO (XEXP (base, 0)))]
2568 == GET_MODE (XEXP (base, 0)))
2569 && qty_const[REG_QTY (REGNO (XEXP (base, 0)))]
2570 && REGNO_QTY_VALID_P (REGNO (XEXP (base, 1)))
2571 && (qty_mode[REG_QTY (REGNO (XEXP (base, 1)))]
2572 == GET_MODE (XEXP (base, 1)))
2573 && qty_const[REG_QTY (REGNO (XEXP (base, 1)))])
2575 rtx tem = qty_const[REG_QTY (REGNO (XEXP (base, 1)))];
2576 base = qty_const[REG_QTY (REGNO (XEXP (base, 0)))];
2578 /* One of the two values must be a constant. */
2579 if (GET_CODE (base) != CONST_INT)
2581 if (GET_CODE (tem) != CONST_INT)
2582 abort ();
2583 start = INTVAL (tem);
2585 else
2587 start = INTVAL (base);
2588 base = tem;
2592 /* Handle everything that we can find inside an address that has been
2593 viewed as constant. */
2595 while (1)
2597 /* If no part of this switch does a "continue", the code outside
2598 will exit this loop. */
2600 switch (GET_CODE (base))
2602 case LO_SUM:
2603 /* By definition, operand1 of a LO_SUM is the associated constant
2604 address. Use the associated constant address as the base
2605 instead. */
2606 base = XEXP (base, 1);
2607 continue;
2609 case CONST:
2610 /* Strip off CONST. */
2611 base = XEXP (base, 0);
2612 continue;
2614 case PLUS:
2615 if (GET_CODE (XEXP (base, 1)) == CONST_INT)
2617 start += INTVAL (XEXP (base, 1));
2618 base = XEXP (base, 0);
2619 continue;
2621 break;
2623 case AND:
2624 /* Handle the case of an AND which is the negative of a power of
2625 two. This is used to represent unaligned memory operations. */
2626 if (GET_CODE (XEXP (base, 1)) == CONST_INT
2627 && exact_log2 (- INTVAL (XEXP (base, 1))) > 0)
2629 set_nonvarying_address_components (XEXP (base, 0), size,
2630 pbase, pstart, pend);
2632 /* Assume the worst misalignment. START is affected, but not
2633 END, so compensate by adjusting SIZE. Don't lose any
2634 constant we already had. */
2636 size = *pend - *pstart - INTVAL (XEXP (base, 1)) - 1;
2637 start += *pstart + INTVAL (XEXP (base, 1)) + 1;
2638 end += *pend;
2639 base = *pbase;
2641 break;
2643 default:
2644 break;
2647 break;
2650 if (GET_CODE (base) == CONST_INT)
2652 start += INTVAL (base);
2653 base = const0_rtx;
2656 end = start + size;
2658 /* Set the return values. */
2659 *pbase = base;
2660 *pstart = start;
2661 *pend = end;
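/* Example (not from the original source): for ADDR ==
   (plus (reg 65) (const_int 8)) and SIZE == 4, where (reg 65) is known
   equivalent to (symbol_ref "buf"), the code above yields
   *PBASE == (symbol_ref "buf"), *PSTART == 8 and *PEND == 12.  */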
2664 /* Return 1 if X has a value that can vary even between two
2665 executions of the program. 0 means X can be compared reliably
2666 against certain constants or near-constants. */
2668 static int
2669 cse_rtx_varies_p (x)
2670 register rtx x;
2672 /* We need not check for X and the equivalence class being of the same
2673 mode because if X is equivalent to a constant in some mode, it
2674 doesn't vary in any mode. */
2676 if (GET_CODE (x) == REG
2677 && REGNO_QTY_VALID_P (REGNO (x))
2678 && GET_MODE (x) == qty_mode[REG_QTY (REGNO (x))]
2679 && qty_const[REG_QTY (REGNO (x))] != 0)
2680 return 0;
2682 if (GET_CODE (x) == PLUS
2683 && GET_CODE (XEXP (x, 1)) == CONST_INT
2684 && GET_CODE (XEXP (x, 0)) == REG
2685 && REGNO_QTY_VALID_P (REGNO (XEXP (x, 0)))
2686 && (GET_MODE (XEXP (x, 0))
2687 == qty_mode[REG_QTY (REGNO (XEXP (x, 0)))])
2688 && qty_const[REG_QTY (REGNO (XEXP (x, 0)))])
2689 return 0;
2691 /* This can happen as the result of virtual register instantiation, if
2692 the initial constant is too large to be a valid address. This gives
2693 us a three instruction sequence, load large offset into a register,
2694 load fp minus a constant into a register, then a MEM which is the
2695 sum of the two `constant' registers. */
2696 if (GET_CODE (x) == PLUS
2697 && GET_CODE (XEXP (x, 0)) == REG
2698 && GET_CODE (XEXP (x, 1)) == REG
2699 && REGNO_QTY_VALID_P (REGNO (XEXP (x, 0)))
2700 && (GET_MODE (XEXP (x, 0))
2701 == qty_mode[REG_QTY (REGNO (XEXP (x, 0)))])
2702 && qty_const[REG_QTY (REGNO (XEXP (x, 0)))]
2703 && REGNO_QTY_VALID_P (REGNO (XEXP (x, 1)))
2704 && (GET_MODE (XEXP (x, 1))
2705 == qty_mode[REG_QTY (REGNO (XEXP (x, 1)))])
2706 && qty_const[REG_QTY (REGNO (XEXP (x, 1)))])
2707 return 0;
2709 return rtx_varies_p (x);
2712 /* Canonicalize an expression:
2713 replace each register reference inside it
2714 with the "oldest" equivalent register.
2716 If INSN is non-zero and we are replacing a pseudo with a hard register
2717 or vice versa, validate_change is used to ensure that INSN remains valid
2718 after we make our substitution. The calls are made with IN_GROUP non-zero
2719 so apply_change_group must be called upon the outermost return from this
2720 function (unless INSN is zero). The result of apply_change_group can
2721 generally be discarded since the changes we are making are optional. */
2723 static rtx
2724 canon_reg (x, insn)
2725 rtx x;
2726 rtx insn;
2728 register int i;
2729 register enum rtx_code code;
2730 register const char *fmt;
2732 if (x == 0)
2733 return x;
2735 code = GET_CODE (x);
2736 switch (code)
2738 case PC:
2739 case CC0:
2740 case CONST:
2741 case CONST_INT:
2742 case CONST_DOUBLE:
2743 case SYMBOL_REF:
2744 case LABEL_REF:
2745 case ADDR_VEC:
2746 case ADDR_DIFF_VEC:
2747 return x;
2749 case REG:
2751 register int first;
2753 /* Never replace a hard reg, because hard regs can appear
2754 in more than one machine mode, and we must preserve the mode
2755 of each occurrence. Also, some hard regs appear in
2756 MEMs that are shared and mustn't be altered. Don't try to
2757 replace any reg that maps to a reg of class NO_REGS. */
2758 if (REGNO (x) < FIRST_PSEUDO_REGISTER
2759 || ! REGNO_QTY_VALID_P (REGNO (x)))
2760 return x;
2762 first = qty_first_reg[REG_QTY (REGNO (x))];
2763 return (first >= FIRST_PSEUDO_REGISTER ? regno_reg_rtx[first]
2764 : REGNO_REG_CLASS (first) == NO_REGS ? x
2765 : gen_rtx_REG (qty_mode[REG_QTY (REGNO (x))], first));
2768 default:
2769 break;
2772 fmt = GET_RTX_FORMAT (code);
2773 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2775 register int j;
2777 if (fmt[i] == 'e')
2779 rtx new = canon_reg (XEXP (x, i), insn);
2780 int insn_code;
2782 /* If replacing pseudo with hard reg or vice versa, ensure the
2783 insn remains valid. Likewise if the insn has MATCH_DUPs. */
2784 if (insn != 0 && new != 0
2785 && GET_CODE (new) == REG && GET_CODE (XEXP (x, i)) == REG
2786 && (((REGNO (new) < FIRST_PSEUDO_REGISTER)
2787 != (REGNO (XEXP (x, i)) < FIRST_PSEUDO_REGISTER))
2788 || (insn_code = recog_memoized (insn)) < 0
2789 || insn_data[insn_code].n_dups > 0))
2790 validate_change (insn, &XEXP (x, i), new, 1);
2791 else
2792 XEXP (x, i) = new;
2794 else if (fmt[i] == 'E')
2795 for (j = 0; j < XVECLEN (x, i); j++)
2796 XVECEXP (x, i, j) = canon_reg (XVECEXP (x, i, j), insn);
2799 return x;
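/* Example (not from the original source): if pseudos (reg 66) and (reg 80)
   share a quantity and (reg 66) was assigned to it first, canon_reg
   rewrites uses of (reg 80) as (reg 66), so structurally identical
   expressions hash and compare alike.  */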
2802 /* LOC is a location within INSN that is an operand address (the contents of
2803 a MEM). Find the best equivalent address to use that is valid for this
2804 insn.
2806 On most CISC machines, complicated address modes are costly, and rtx_cost
2807 is a good approximation for that cost. However, most RISC machines have
2808 only a few (usually only one) memory reference formats. If an address is
2809 valid at all, it is often just as cheap as any other address. Hence, for
2810 RISC machines, we use the configuration macro `ADDRESS_COST' to compare the
2811 costs of various addresses. For two addresses of equal cost, choose the one
2812 with the highest `rtx_cost' value as that has the potential of eliminating
2813 the most insns. For equal costs, we choose the first in the equivalence
2814 class. Note that we ignore the fact that pseudo registers are cheaper
2815 than hard registers here because we would also prefer the pseudo registers.
2818 static void
2819 find_best_addr (insn, loc)
2820 rtx insn;
2821 rtx *loc;
2823 struct table_elt *elt;
2824 rtx addr = *loc;
2825 #ifdef ADDRESS_COST
2826 struct table_elt *p;
2827 int found_better = 1;
2828 #endif
2829 int save_do_not_record = do_not_record;
2830 int save_hash_arg_in_memory = hash_arg_in_memory;
2831 int save_hash_arg_in_struct = hash_arg_in_struct;
2832 int addr_volatile;
2833 int regno;
2834 unsigned hash;
2836 /* Do not try to replace constant addresses or addresses of local and
2837 argument slots. These MEM expressions are made only once and inserted
2838 in many instructions, as well as being used to control symbol table
2839 output. It is not safe to clobber them.
2841 There are some uncommon cases where the address is already in a register
2842 for some reason, but we cannot take advantage of that because we have
2843 no easy way to unshare the MEM. In addition, looking up all stack
2844 addresses is costly. */
2845 if ((GET_CODE (addr) == PLUS
2846 && GET_CODE (XEXP (addr, 0)) == REG
2847 && GET_CODE (XEXP (addr, 1)) == CONST_INT
2848 && (regno = REGNO (XEXP (addr, 0)),
2849 regno == FRAME_POINTER_REGNUM || regno == HARD_FRAME_POINTER_REGNUM
2850 || regno == ARG_POINTER_REGNUM))
2851 || (GET_CODE (addr) == REG
2852 && (regno = REGNO (addr), regno == FRAME_POINTER_REGNUM
2853 || regno == HARD_FRAME_POINTER_REGNUM
2854 || regno == ARG_POINTER_REGNUM))
2855 || GET_CODE (addr) == ADDRESSOF
2856 || CONSTANT_ADDRESS_P (addr))
2857 return;
2859 /* If this address is not simply a register, try to fold it. This will
2860 sometimes simplify the expression. Many simplifications
2861 will not be valid, but some, usually applying the associative rule, will
2862 be valid and produce better code. */
2863 if (GET_CODE (addr) != REG)
2865 rtx folded = fold_rtx (copy_rtx (addr), NULL_RTX);
2867 if (1
2868 #ifdef ADDRESS_COST
2869 && (CSE_ADDRESS_COST (folded) < CSE_ADDRESS_COST (addr)
2870 || (CSE_ADDRESS_COST (folded) == CSE_ADDRESS_COST (addr)
2871 && rtx_cost (folded, MEM) > rtx_cost (addr, MEM)))
2872 #else
2873 && rtx_cost (folded, MEM) < rtx_cost (addr, MEM)
2874 #endif
2875 && validate_change (insn, loc, folded, 0))
2876 addr = folded;
2879 /* If this address is not in the hash table, we can't look for equivalences
2880 of the whole address. Also, ignore if volatile. */
2882 do_not_record = 0;
2883 hash = HASH (addr, Pmode);
2884 addr_volatile = do_not_record;
2885 do_not_record = save_do_not_record;
2886 hash_arg_in_memory = save_hash_arg_in_memory;
2887 hash_arg_in_struct = save_hash_arg_in_struct;
2889 if (addr_volatile)
2890 return;
2892 elt = lookup (addr, hash, Pmode);
2894 #ifndef ADDRESS_COST
2895 if (elt)
2897 int our_cost = elt->cost;
2899 /* Find the lowest cost below ours that works. */
2900 for (elt = elt->first_same_value; elt; elt = elt->next_same_value)
2901 if (elt->cost < our_cost
2902 && (GET_CODE (elt->exp) == REG
2903 || exp_equiv_p (elt->exp, elt->exp, 1, 0))
2904 && validate_change (insn, loc,
2905 canon_reg (copy_rtx (elt->exp), NULL_RTX), 0))
2906 return;
2908 #else
2910 if (elt)
2912 /* We need to find the best (under the criteria documented above) entry
2913 in the class that is valid. We use the `flag' field to indicate
2914 choices that were invalid and iterate until we can't find a better
2915 one that hasn't already been tried. */
2917 for (p = elt->first_same_value; p; p = p->next_same_value)
2918 p->flag = 0;
2920 while (found_better)
2922 int best_addr_cost = CSE_ADDRESS_COST (*loc);
2923 int best_rtx_cost = (elt->cost + 1) >> 1;
2924 struct table_elt *best_elt = elt;
2926 found_better = 0;
2927 for (p = elt->first_same_value; p; p = p->next_same_value)
2928 if (! p->flag)
2930 if ((GET_CODE (p->exp) == REG
2931 || exp_equiv_p (p->exp, p->exp, 1, 0))
2932 && (CSE_ADDRESS_COST (p->exp) < best_addr_cost
2933 || (CSE_ADDRESS_COST (p->exp) == best_addr_cost
2934 && (p->cost + 1) >> 1 > best_rtx_cost)))
2936 found_better = 1;
2937 best_addr_cost = CSE_ADDRESS_COST (p->exp);
2938 best_rtx_cost = (p->cost + 1) >> 1;
2939 best_elt = p;
2943 if (found_better)
2945 if (validate_change (insn, loc,
2946 canon_reg (copy_rtx (best_elt->exp),
2947 NULL_RTX), 0))
2948 return;
2949 else
2950 best_elt->flag = 1;
2955 /* If the address is a binary operation with the first operand a register
2956 and the second a constant, do the same as above, but looking for
2957 equivalences of the register. Then try to simplify before checking for
2958 the best address to use. This catches a few cases: the first is when we
2959 have REG+const and the register is another REG+const. We can often merge
2960 the constants and eliminate one insn and one register. It may also be
2961 that a machine has a cheap REG+REG+const. Finally, this improves the
2962 code on the Alpha for unaligned byte stores. */
2964 if (flag_expensive_optimizations
2965 && (GET_RTX_CLASS (GET_CODE (*loc)) == '2'
2966 || GET_RTX_CLASS (GET_CODE (*loc)) == 'c')
2967 && GET_CODE (XEXP (*loc, 0)) == REG
2968 && GET_CODE (XEXP (*loc, 1)) == CONST_INT)
2970 rtx c = XEXP (*loc, 1);
2972 do_not_record = 0;
2973 hash = HASH (XEXP (*loc, 0), Pmode);
2974 do_not_record = save_do_not_record;
2975 hash_arg_in_memory = save_hash_arg_in_memory;
2976 hash_arg_in_struct = save_hash_arg_in_struct;
2978 elt = lookup (XEXP (*loc, 0), hash, Pmode);
2979 if (elt == 0)
2980 return;
2982 /* We need to find the best (under the criteria documented above) entry
2983 in the class that is valid. We use the `flag' field to indicate
2984 choices that were invalid and iterate until we can't find a better
2985 one that hasn't already been tried. */
2987 for (p = elt->first_same_value; p; p = p->next_same_value)
2988 p->flag = 0;
2990 while (found_better)
2992 int best_addr_cost = CSE_ADDRESS_COST (*loc);
2993 int best_rtx_cost = (COST (*loc) + 1) >> 1;
2994 struct table_elt *best_elt = elt;
2995 rtx best_rtx = *loc;
2996 int count;
2998 /* This is in the worst case an O(n^2) algorithm, so limit our search
2999 to the first 32 elements on the list. This avoids trouble
3000 compiling code with very long basic blocks that can easily
3001 call cse_gen_binary so many times that we run out of memory. */
3003 found_better = 0;
3004 for (p = elt->first_same_value, count = 0;
3005 p && count < 32;
3006 p = p->next_same_value, count++)
3007 if (! p->flag
3008 && (GET_CODE (p->exp) == REG
3009 || exp_equiv_p (p->exp, p->exp, 1, 0)))
3011 rtx new = cse_gen_binary (GET_CODE (*loc), Pmode, p->exp, c);
3013 if ((CSE_ADDRESS_COST (new) < best_addr_cost
3014 || (CSE_ADDRESS_COST (new) == best_addr_cost
3015 && (COST (new) + 1) >> 1 > best_rtx_cost)))
3017 found_better = 1;
3018 best_addr_cost = CSE_ADDRESS_COST (new);
3019 best_rtx_cost = (COST (new) + 1) >> 1;
3020 best_elt = p;
3021 best_rtx = new;
3025 if (found_better)
3027 if (validate_change (insn, loc,
3028 canon_reg (copy_rtx (best_rtx),
3029 NULL_RTX), 0))
3030 return;
3031 else
3032 best_elt->flag = 1;
3036 #endif
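/* Example (not from the original source): if (reg 70) and
   (plus (reg 71) (const_int 4)) are valid equivalents with equal
   CSE_ADDRESS_COST, the loops above prefer the PLUS: its higher rtx_cost
   means substituting it here has the greater potential of letting the
   insns that computed it be eliminated, per the criteria documented
   before find_best_addr.  */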
3039 /* Given an operation (CODE, *PARG1, *PARG2), where code is a comparison
3040 operation (EQ, NE, GT, etc.), follow it back through the hash table and
3041 find what values are being compared.
3043 *PARG1 and *PARG2 are updated to contain the rtx representing the values
3044 actually being compared. For example, if *PARG1 was (cc0) and *PARG2
3045 was (const_int 0), *PARG1 and *PARG2 will be set to the objects that were
3046 compared to produce cc0.
3048 The return value is the comparison operator: either CODE itself,
3049 or the code corresponding to the inverse of the comparison. */
3051 static enum rtx_code
3052 find_comparison_args (code, parg1, parg2, pmode1, pmode2)
3053 enum rtx_code code;
3054 rtx *parg1, *parg2;
3055 enum machine_mode *pmode1, *pmode2;
3057 rtx arg1, arg2;
3059 arg1 = *parg1, arg2 = *parg2;
3061 /* If ARG2 is const0_rtx, see what ARG1 is equivalent to. */
3063 while (arg2 == CONST0_RTX (GET_MODE (arg1)))
3065 /* Set non-zero when we find something of interest. */
3066 rtx x = 0;
3067 int reverse_code = 0;
3068 struct table_elt *p = 0;
3070 /* If arg1 is a COMPARE, extract the comparison arguments from it.
3071 On machines with CC0, this is the only case that can occur, since
3072 fold_rtx will return the COMPARE or item being compared with zero
3073 when given CC0. */
3075 if (GET_CODE (arg1) == COMPARE && arg2 == const0_rtx)
3076 x = arg1;
3078 /* If ARG1 is a comparison operator and CODE is testing for
3079 STORE_FLAG_VALUE, get the inner arguments. */
3081 else if (GET_RTX_CLASS (GET_CODE (arg1)) == '<')
3083 if (code == NE
3084 || (GET_MODE_CLASS (GET_MODE (arg1)) == MODE_INT
3085 && code == LT && STORE_FLAG_VALUE == -1)
3086 #ifdef FLOAT_STORE_FLAG_VALUE
3087 || (GET_MODE_CLASS (GET_MODE (arg1)) == MODE_FLOAT
3088 && FLOAT_STORE_FLAG_VALUE < 0)
3089 #endif
3091 x = arg1;
3092 else if (code == EQ
3093 || (GET_MODE_CLASS (GET_MODE (arg1)) == MODE_INT
3094 && code == GE && STORE_FLAG_VALUE == -1)
3095 #ifdef FLOAT_STORE_FLAG_VALUE
3096 || (GET_MODE_CLASS (GET_MODE (arg1)) == MODE_FLOAT
3097 && FLOAT_STORE_FLAG_VALUE < 0)
3098 #endif
3100 x = arg1, reverse_code = 1;
3103 /* ??? We could also check for
3105 (ne (and (eq (...) (const_int 1))) (const_int 0))
3107 and related forms, but let's wait until we see them occurring. */
3109 if (x == 0)
3110 /* Look up ARG1 in the hash table and see if it has an equivalence
3111 that lets us see what is being compared. */
3112 p = lookup (arg1, safe_hash (arg1, GET_MODE (arg1)) % NBUCKETS,
3113 GET_MODE (arg1));
3114 if (p) p = p->first_same_value;
3116 for (; p; p = p->next_same_value)
3118 enum machine_mode inner_mode = GET_MODE (p->exp);
3120 /* If the entry isn't valid, skip it. */
3121 if (! exp_equiv_p (p->exp, p->exp, 1, 0))
3122 continue;
3124 if (GET_CODE (p->exp) == COMPARE
3125 /* Another possibility is that this machine has a compare insn
3126 that includes the comparison code. In that case, ARG1 would
3127 be equivalent to a comparison operation that would set ARG1 to
3128 either STORE_FLAG_VALUE or zero. If this is an NE operation,
3129 ORIG_CODE is the actual comparison being done; if it is an EQ,
3130 we must reverse ORIG_CODE. On machines with a negative value
3131 for STORE_FLAG_VALUE, also look at LT and GE operations. */
3132 || ((code == NE
3133 || (code == LT
3134 && GET_MODE_CLASS (inner_mode) == MODE_INT
3135 && (GET_MODE_BITSIZE (inner_mode)
3136 <= HOST_BITS_PER_WIDE_INT)
3137 && (STORE_FLAG_VALUE
3138 & ((HOST_WIDE_INT) 1
3139 << (GET_MODE_BITSIZE (inner_mode) - 1))))
3140 #ifdef FLOAT_STORE_FLAG_VALUE
3141 || (code == LT
3142 && GET_MODE_CLASS (inner_mode) == MODE_FLOAT
3143 && FLOAT_STORE_FLAG_VALUE < 0)
3144 #endif
3146 && GET_RTX_CLASS (GET_CODE (p->exp)) == '<'))
3148 x = p->exp;
3149 break;
3151 else if ((code == EQ
3152 || (code == GE
3153 && GET_MODE_CLASS (inner_mode) == MODE_INT
3154 && (GET_MODE_BITSIZE (inner_mode)
3155 <= HOST_BITS_PER_WIDE_INT)
3156 && (STORE_FLAG_VALUE
3157 & ((HOST_WIDE_INT) 1
3158 << (GET_MODE_BITSIZE (inner_mode) - 1))))
3159 #ifdef FLOAT_STORE_FLAG_VALUE
3160 || (code == GE
3161 && GET_MODE_CLASS (inner_mode) == MODE_FLOAT
3162 && FLOAT_STORE_FLAG_VALUE < 0)
3163 #endif
3165 && GET_RTX_CLASS (GET_CODE (p->exp)) == '<')
3167 reverse_code = 1;
3168 x = p->exp;
3169 break;
3172 /* If this is fp + constant, the equivalent is a better operand since
3173 it may let us predict the value of the comparison. */
3174 else if (NONZERO_BASE_PLUS_P (p->exp))
3176 arg1 = p->exp;
3177 continue;
3181 /* If we didn't find a useful equivalence for ARG1, we are done.
3182 Otherwise, set up for the next iteration. */
3183 if (x == 0)
3184 break;
3186 arg1 = XEXP (x, 0), arg2 = XEXP (x, 1);
3187 if (GET_RTX_CLASS (GET_CODE (x)) == '<')
3188 code = GET_CODE (x);
3190 if (reverse_code)
3191 code = reverse_condition (code);
3194 /* Return our results. Return the modes from before fold_rtx
3195 because fold_rtx might produce const_int, and then it's too late. */
3196 *pmode1 = GET_MODE (arg1), *pmode2 = GET_MODE (arg2);
3197 *parg1 = fold_rtx (arg1, 0), *parg2 = fold_rtx (arg2, 0);
3199 return code;
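/* Example (not from the original source): on a CC0 machine, for
   (eq (cc0) (const_int 0)) we may be handed ARG1 == (compare (reg 3)
   (reg 4)) and ARG2 == const0_rtx; the COMPARE is unwrapped above, so
   *PARG1 becomes (reg 3), *PARG2 becomes (reg 4), and EQ is returned
   unchanged.  */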
3202 /* Try to simplify a unary operation CODE whose output mode is to be
3203 MODE with input operand OP whose mode was originally OP_MODE.
3204 Return zero if no simplification can be made. */
3206 rtx
3207 simplify_unary_operation (code, mode, op, op_mode)
3208 enum rtx_code code;
3209 enum machine_mode mode;
3210 rtx op;
3211 enum machine_mode op_mode;
3213 register int width = GET_MODE_BITSIZE (mode);
3215 /* The order of these tests is critical so that, for example, we don't
3216 check the wrong mode (input vs. output) for a conversion operation,
3217 such as FIX. At some point, this should be simplified. */
3219 #if !defined(REAL_IS_NOT_DOUBLE) || defined(REAL_ARITHMETIC)
3221 if (code == FLOAT && GET_MODE (op) == VOIDmode
3222 && (GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT))
3224 HOST_WIDE_INT hv, lv;
3225 REAL_VALUE_TYPE d;
3227 if (GET_CODE (op) == CONST_INT)
3228 lv = INTVAL (op), hv = INTVAL (op) < 0 ? -1 : 0;
3229 else
3230 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
3232 #ifdef REAL_ARITHMETIC
3233 REAL_VALUE_FROM_INT (d, lv, hv, mode);
3234 #else
3235 if (hv < 0)
3237 d = (double) (~ hv);
3238 d *= ((double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2))
3239 * (double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2)));
3240 d += (double) (unsigned HOST_WIDE_INT) (~ lv);
3241 d = (- d - 1.0);
3243 else
3245 d = (double) hv;
3246 d *= ((double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2))
3247 * (double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2)));
3248 d += (double) (unsigned HOST_WIDE_INT) lv;
3250 #endif /* REAL_ARITHMETIC */
3251 d = real_value_truncate (mode, d);
3252 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
3254 else if (code == UNSIGNED_FLOAT && GET_MODE (op) == VOIDmode
3255 && (GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT))
3257 HOST_WIDE_INT hv, lv;
3258 REAL_VALUE_TYPE d;
3260 if (GET_CODE (op) == CONST_INT)
3261 lv = INTVAL (op), hv = INTVAL (op) < 0 ? -1 : 0;
3262 else
3263 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
3265 if (op_mode == VOIDmode)
3267 /* We don't know how to interpret negative-looking numbers in
3268 this case, so don't try to fold those. */
3269 if (hv < 0)
3270 return 0;
3272 else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
3273 ;
3274 else
3275 hv = 0, lv &= GET_MODE_MASK (op_mode);
3277 #ifdef REAL_ARITHMETIC
3278 REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
3279 #else
3281 d = (double) (unsigned HOST_WIDE_INT) hv;
3282 d *= ((double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2))
3283 * (double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2)));
3284 d += (double) (unsigned HOST_WIDE_INT) lv;
3285 #endif /* REAL_ARITHMETIC */
3286 d = real_value_truncate (mode, d);
3287 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
3289 #endif
3291 if (GET_CODE (op) == CONST_INT
3292 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
3294 register HOST_WIDE_INT arg0 = INTVAL (op);
3295 register HOST_WIDE_INT val;
3297 switch (code)
3299 case NOT:
3300 val = ~ arg0;
3301 break;
3303 case NEG:
3304 val = - arg0;
3305 break;
3307 case ABS:
3308 val = (arg0 >= 0 ? arg0 : - arg0);
3309 break;
3311 case FFS:
3312 /* Don't use ffs here. Instead, get low order bit and then its
3313 number. If arg0 is zero, this will return 0, as desired. */
3314 arg0 &= GET_MODE_MASK (mode);
3315 val = exact_log2 (arg0 & (- arg0)) + 1;
3316 break;
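/* Example (not from the original source): for arg0 == 12 (binary 1100),
   arg0 & -arg0 == 4 and exact_log2 (4) == 2, so val == 3: the lowest set
   bit is bit 3 counting from 1.  For arg0 == 0, exact_log2 returns -1 and
   val == 0, as FFS requires.  */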
3318 case TRUNCATE:
3319 val = arg0;
3320 break;
3322 case ZERO_EXTEND:
3323 if (op_mode == VOIDmode)
3324 op_mode = mode;
3325 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
3327 /* If we were really extending the mode,
3328 we would have to distinguish between zero-extension
3329 and sign-extension. */
3330 if (width != GET_MODE_BITSIZE (op_mode))
3331 abort ();
3332 val = arg0;
3334 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
3335 val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
3336 else
3337 return 0;
3338 break;
3340 case SIGN_EXTEND:
3341 if (op_mode == VOIDmode)
3342 op_mode = mode;
3343 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
3345 /* If we were really extending the mode,
3346 we would have to distinguish between zero-extension
3347 and sign-extension. */
3348 if (width != GET_MODE_BITSIZE (op_mode))
3349 abort ();
3350 val = arg0;
3352 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
3354 val
3355 = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
3356 if (val
3357 & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
3358 val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
3360 else
3361 return 0;
3362 break;
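/* Example (not from the original source): sign-extending the QImode value
   0xff gives val == 0xff; bit 7 is set, so 0x100 is subtracted, yielding
   -1, the correct signed interpretation.  */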
3364 case SQRT:
3365 return 0;
3367 default:
3368 abort ();
3371 val = trunc_int_for_mode (val, mode);
3373 return GEN_INT (val);
3376 /* We can do some operations on integer CONST_DOUBLEs. Also allow
3377 for a DImode operation on a CONST_INT. */
3378 else if (GET_MODE (op) == VOIDmode && width <= HOST_BITS_PER_INT * 2
3379 && (GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT))
3381 HOST_WIDE_INT l1, h1, lv, hv;
3383 if (GET_CODE (op) == CONST_DOUBLE)
3384 l1 = CONST_DOUBLE_LOW (op), h1 = CONST_DOUBLE_HIGH (op);
3385 else
3386 l1 = INTVAL (op), h1 = l1 < 0 ? -1 : 0;
3388 switch (code)
3390 case NOT:
3391 lv = ~ l1;
3392 hv = ~ h1;
3393 break;
3395 case NEG:
3396 neg_double (l1, h1, &lv, &hv);
3397 break;
3399 case ABS:
3400 if (h1 < 0)
3401 neg_double (l1, h1, &lv, &hv);
3402 else
3403 lv = l1, hv = h1;
3404 break;
3406 case FFS:
3407 hv = 0;
3408 if (l1 == 0)
3409 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & (-h1)) + 1;
3410 else
3411 lv = exact_log2 (l1 & (-l1)) + 1;
3412 break;
3414 case TRUNCATE:
3415 /* This is just a change-of-mode, so do nothing. */
3416 lv = l1, hv = h1;
3417 break;
3419 case ZERO_EXTEND:
3420 if (op_mode == VOIDmode
3421 || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
3422 return 0;
3424 hv = 0;
3425 lv = l1 & GET_MODE_MASK (op_mode);
3426 break;
3428 case SIGN_EXTEND:
3429 if (op_mode == VOIDmode
3430 || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
3431 return 0;
3432 else
3434 lv = l1 & GET_MODE_MASK (op_mode);
3435 if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
3436 && (lv & ((HOST_WIDE_INT) 1
3437 << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
3438 lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
3440 hv = (lv < 0) ? ~ (HOST_WIDE_INT) 0 : 0;
3442 break;
3444 case SQRT:
3445 return 0;
3447 default:
3448 return 0;
3451 return immed_double_const (lv, hv, mode);
3454 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
3455 else if (GET_CODE (op) == CONST_DOUBLE
3456 && GET_MODE_CLASS (mode) == MODE_FLOAT)
3458 REAL_VALUE_TYPE d;
3459 jmp_buf handler;
3460 rtx x;
3462 if (setjmp (handler))
3463 /* There used to be a warning here, but that is inadvisable.
3464 People may want to cause traps, and the natural way
3465 to do it should not get a warning. */
3466 return 0;
3468 set_float_handler (handler);
3470 REAL_VALUE_FROM_CONST_DOUBLE (d, op);
3472 switch (code)
3474 case NEG:
3475 d = REAL_VALUE_NEGATE (d);
3476 break;
3478 case ABS:
3479 if (REAL_VALUE_NEGATIVE (d))
3480 d = REAL_VALUE_NEGATE (d);
3481 break;
3483 case FLOAT_TRUNCATE:
3484 d = real_value_truncate (mode, d);
3485 break;
3487 case FLOAT_EXTEND:
3488 /* All this does is change the mode. */
3489 break;
3491 case FIX:
3492 d = REAL_VALUE_RNDZINT (d);
3493 break;
3495 case UNSIGNED_FIX:
3496 d = REAL_VALUE_UNSIGNED_RNDZINT (d);
3497 break;
3499 case SQRT:
3500 return 0;
3502 default:
3503 abort ();
3506 x = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
3507 set_float_handler (NULL_PTR);
3508 return x;
3511 else if (GET_CODE (op) == CONST_DOUBLE
3512 && GET_MODE_CLASS (GET_MODE (op)) == MODE_FLOAT
3513 && GET_MODE_CLASS (mode) == MODE_INT
3514 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
3516 REAL_VALUE_TYPE d;
3517 jmp_buf handler;
3518 HOST_WIDE_INT val;
3520 if (setjmp (handler))
3521 return 0;
3523 set_float_handler (handler);
3525 REAL_VALUE_FROM_CONST_DOUBLE (d, op);
3527 switch (code)
3529 case FIX:
3530 val = REAL_VALUE_FIX (d);
3531 break;
3533 case UNSIGNED_FIX:
3534 val = REAL_VALUE_UNSIGNED_FIX (d);
3535 break;
3537 default:
3538 abort ();
3541 set_float_handler (NULL_PTR);
3543 val = trunc_int_for_mode (val, mode);
3545 return GEN_INT (val);
3547 #endif
3548 /* This was formerly used only for non-IEEE float.
3549 eggert@twinsun.com says it is safe for IEEE also. */
3550 else
3552 /* There are some simplifications we can do even if the operands
3553 aren't constant. */
3554 switch (code)
3556 case NEG:
3557 case NOT:
3558 /* (not (not X)) == X, similarly for NEG. */
3559 if (GET_CODE (op) == code)
3560 return XEXP (op, 0);
3561 break;
3563 case SIGN_EXTEND:
3564 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
3565 becomes just the MINUS if its mode is MODE. This allows
3566 folding switch statements on machines using casesi (such as
3567 the Vax). */
3568 if (GET_CODE (op) == TRUNCATE
3569 && GET_MODE (XEXP (op, 0)) == mode
3570 && GET_CODE (XEXP (op, 0)) == MINUS
3571 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
3572 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
3573 return XEXP (op, 0);
3575 #ifdef POINTERS_EXTEND_UNSIGNED
3576 if (! POINTERS_EXTEND_UNSIGNED
3577 && mode == Pmode && GET_MODE (op) == ptr_mode
3578 && CONSTANT_P (op))
3579 return convert_memory_address (Pmode, op);
3580 #endif
3581 break;
3583 #ifdef POINTERS_EXTEND_UNSIGNED
3584 case ZERO_EXTEND:
3585 if (POINTERS_EXTEND_UNSIGNED
3586 && mode == Pmode && GET_MODE (op) == ptr_mode
3587 && CONSTANT_P (op))
3588 return convert_memory_address (Pmode, op);
3589 break;
3590 #endif
3592 default:
3593 break;
3596 return 0;
3600 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
3601 and OP1. Return 0 if no simplification is possible.
3603 Don't use this for relational operations such as EQ or LT.
3604 Use simplify_relational_operation instead. */
3606 rtx
3607 simplify_binary_operation (code, mode, op0, op1)
3608 enum rtx_code code;
3609 enum machine_mode mode;
3610 rtx op0, op1;
3612 register HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
3613 HOST_WIDE_INT val;
3614 int width = GET_MODE_BITSIZE (mode);
3615 rtx tem;
3617 /* Relational operations don't work here. We must know the mode
3618 of the operands in order to do the comparison correctly.
3619 Assuming a full word can give incorrect results.
3620 Consider comparing 128 with -128 in QImode. */
3622 if (GET_RTX_CLASS (code) == '<')
3623 abort ();
3625 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
3626 if (GET_MODE_CLASS (mode) == MODE_FLOAT
3627 && GET_CODE (op0) == CONST_DOUBLE && GET_CODE (op1) == CONST_DOUBLE
3628 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
3630 REAL_VALUE_TYPE f0, f1, value;
3631 jmp_buf handler;
3633 if (setjmp (handler))
3634 return 0;
3636 set_float_handler (handler);
3638 REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
3639 REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
3640 f0 = real_value_truncate (mode, f0);
3641 f1 = real_value_truncate (mode, f1);
3643 #ifdef REAL_ARITHMETIC
3644 #ifndef REAL_INFINITY
3645 if (code == DIV && REAL_VALUES_EQUAL (f1, dconst0))
3646 return 0;
3647 #endif
3648 REAL_ARITHMETIC (value, rtx_to_tree_code (code), f0, f1);
3649 #else
3650 switch (code)
3652 case PLUS:
3653 value = f0 + f1;
3654 break;
3655 case MINUS:
3656 value = f0 - f1;
3657 break;
3658 case MULT:
3659 value = f0 * f1;
3660 break;
3661 case DIV:
3662 #ifndef REAL_INFINITY
3663 if (f1 == 0)
3664 return 0;
3665 #endif
3666 value = f0 / f1;
3667 break;
3668 case SMIN:
3669 value = MIN (f0, f1);
3670 break;
3671 case SMAX:
3672 value = MAX (f0, f1);
3673 break;
3674 default:
3675 abort ();
3677 #endif
3679 value = real_value_truncate (mode, value);
3680 set_float_handler (NULL_PTR);
3681 return CONST_DOUBLE_FROM_REAL_VALUE (value, mode);
3683 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
3685 /* We can fold some multi-word operations. */
3686 if (GET_MODE_CLASS (mode) == MODE_INT
3687 && width == HOST_BITS_PER_WIDE_INT * 2
3688 && (GET_CODE (op0) == CONST_DOUBLE || GET_CODE (op0) == CONST_INT)
3689 && (GET_CODE (op1) == CONST_DOUBLE || GET_CODE (op1) == CONST_INT))
3691 HOST_WIDE_INT l1, l2, h1, h2, lv, hv;
3693 if (GET_CODE (op0) == CONST_DOUBLE)
3694 l1 = CONST_DOUBLE_LOW (op0), h1 = CONST_DOUBLE_HIGH (op0);
3695 else
3696 l1 = INTVAL (op0), h1 = l1 < 0 ? -1 : 0;
3698 if (GET_CODE (op1) == CONST_DOUBLE)
3699 l2 = CONST_DOUBLE_LOW (op1), h2 = CONST_DOUBLE_HIGH (op1);
3700 else
3701 l2 = INTVAL (op1), h2 = l2 < 0 ? -1 : 0;
3703 switch (code)
3705 case MINUS:
3706 /* A - B == A + (-B). */
3707 neg_double (l2, h2, &lv, &hv);
3708 l2 = lv, h2 = hv;
3710 /* ... fall through ... */
3712 case PLUS:
3713 add_double (l1, h1, l2, h2, &lv, &hv);
3714 break;
3716 case MULT:
3717 mul_double (l1, h1, l2, h2, &lv, &hv);
3718 break;
3720 case DIV: case MOD: case UDIV: case UMOD:
3721 /* We'd need to include tree.h to do this and it doesn't seem worth
3722 it. */
3723 return 0;
3725 case AND:
3726 lv = l1 & l2, hv = h1 & h2;
3727 break;
3729 case IOR:
3730 lv = l1 | l2, hv = h1 | h2;
3731 break;
3733 case XOR:
3734 lv = l1 ^ l2, hv = h1 ^ h2;
3735 break;
3737 case SMIN:
3738 if (h1 < h2
3739 || (h1 == h2
3740 && ((unsigned HOST_WIDE_INT) l1
3741 < (unsigned HOST_WIDE_INT) l2)))
3742 lv = l1, hv = h1;
3743 else
3744 lv = l2, hv = h2;
3745 break;
3747 case SMAX:
3748 if (h1 > h2
3749 || (h1 == h2
3750 && ((unsigned HOST_WIDE_INT) l1
3751 > (unsigned HOST_WIDE_INT) l2)))
3752 lv = l1, hv = h1;
3753 else
3754 lv = l2, hv = h2;
3755 break;
3757 case UMIN:
3758 if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
3759 || (h1 == h2
3760 && ((unsigned HOST_WIDE_INT) l1
3761 < (unsigned HOST_WIDE_INT) l2)))
3762 lv = l1, hv = h1;
3763 else
3764 lv = l2, hv = h2;
3765 break;
3767 case UMAX:
3768 if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
3769 || (h1 == h2
3770 && ((unsigned HOST_WIDE_INT) l1
3771 > (unsigned HOST_WIDE_INT) l2)))
3772 lv = l1, hv = h1;
3773 else
3774 lv = l2, hv = h2;
3775 break;
3777 case LSHIFTRT: case ASHIFTRT:
3778 case ASHIFT:
3779 case ROTATE: case ROTATERT:
3780 #ifdef SHIFT_COUNT_TRUNCATED
3781 if (SHIFT_COUNT_TRUNCATED)
3782 l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
3783 #endif
3785 if (h2 != 0 || l2 < 0 || l2 >= GET_MODE_BITSIZE (mode))
3786 return 0;
3788 if (code == LSHIFTRT || code == ASHIFTRT)
3789 rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
3790 code == ASHIFTRT);
3791 else if (code == ASHIFT)
3792 lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
3793 else if (code == ROTATE)
3794 lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
3795 else /* code == ROTATERT */
3796 rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
3797 break;
3799 default:
3800 return 0;
3803 return immed_double_const (lv, hv, mode);
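/* Editorial sketch (illustration only, not part of the pass): how the
   two-word arithmetic above behaves on a pair of 64-bit host words.  The
   `hwi' typedefs and word width are assumptions for the sketch; the real
   work is done by neg_double, add_double, etc.  */
#if 0
typedef long long hwi;
typedef unsigned long long uhwi;

/* Add two double-word values; a carry out of the low word propagates
   into the high word.  */
static void
demo_add_double (hwi l1, hwi h1, hwi l2, hwi h2, hwi *lv, hwi *hv)
{
  uhwi l = (uhwi) l1 + (uhwi) l2;
  *lv = (hwi) l;
  /* A carry occurred iff the unsigned sum wrapped below an addend.  */
  *hv = (hwi) ((uhwi) h1 + (uhwi) h2 + (l < (uhwi) l1));
}

/* Negate a double-word value, used above to turn A - B into A + (-B).  */
static void
demo_neg_double (hwi l, hwi h, hwi *lv, hwi *hv)
{
  *lv = (hwi) (0 - (uhwi) l);
  /* The +1 of two's complement carries into the high word only when
     the low word was zero.  */
  *hv = (hwi) (~(uhwi) h + (*lv == 0));
}
#endif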
3806 if (GET_CODE (op0) != CONST_INT || GET_CODE (op1) != CONST_INT
3807 || width > HOST_BITS_PER_WIDE_INT || width == 0)
3809 /* Even if we can't compute a constant result,
3810 there are some cases worth simplifying. */
3812 switch (code)
3814 case PLUS:
3815 /* In IEEE floating point, x+0 is not the same as x. Similarly
3816 for the other optimizations below. */
3817 if (TARGET_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
3818 && FLOAT_MODE_P (mode) && ! flag_fast_math)
3819 break;
3821 if (op1 == CONST0_RTX (mode))
3822 return op0;
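/* Editorial note on the IEEE guard above (illustration only): x + 0 is
   not an identity for x == -0.0, because -0.0 + 0.0 yields +0.0 under
   IEEE rules; hence the fold is skipped unless -ffast-math is given.  */
#if 0
static double
demo_ieee_plus_zero (void)
{
  return -0.0 + 0.0;		/* +0.0, not -0.0 */
}
#endif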
3824 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)) */
3825 if (GET_CODE (op0) == NEG)
3826 return cse_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
3827 else if (GET_CODE (op1) == NEG)
3828 return cse_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
3830 /* Handle both-operands-constant cases. We can only add
3831 CONST_INTs to constants since the sum of relocatable symbols
3832 can't be handled by most assemblers. Don't add CONST_INT
3833 to CONST_INT since overflow won't be computed properly if wider
3834 than HOST_BITS_PER_WIDE_INT. */
3836 if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
3837 && GET_CODE (op1) == CONST_INT)
3838 return plus_constant (op0, INTVAL (op1));
3839 else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
3840 && GET_CODE (op0) == CONST_INT)
3841 return plus_constant (op1, INTVAL (op0));
3843 /* See if this is something like X * C + X or vice versa or
3844 if the multiplication is written as a shift. If so, we can
3845 distribute and make a new multiply, shift, or maybe just
3846 have X (if C is 2 in the example above). But don't make
3847 a real multiply if we didn't have one before. */
3849 if (! FLOAT_MODE_P (mode))
3851 HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
3852 rtx lhs = op0, rhs = op1;
3853 int had_mult = 0;
3855 if (GET_CODE (lhs) == NEG)
3856 coeff0 = -1, lhs = XEXP (lhs, 0);
3857 else if (GET_CODE (lhs) == MULT
3858 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
3860 coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
3861 had_mult = 1;
3863 else if (GET_CODE (lhs) == ASHIFT
3864 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
3865 && INTVAL (XEXP (lhs, 1)) >= 0
3866 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
3868 coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
3869 lhs = XEXP (lhs, 0);
3872 if (GET_CODE (rhs) == NEG)
3873 coeff1 = -1, rhs = XEXP (rhs, 0);
3874 else if (GET_CODE (rhs) == MULT
3875 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
3877 coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
3878 had_mult = 1;
3880 else if (GET_CODE (rhs) == ASHIFT
3881 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
3882 && INTVAL (XEXP (rhs, 1)) >= 0
3883 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
3885 coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
3886 rhs = XEXP (rhs, 0);
3889 if (rtx_equal_p (lhs, rhs))
3891 tem = cse_gen_binary (MULT, mode, lhs,
3892 GEN_INT (coeff0 + coeff1));
3893 return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
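/* Editorial sketch (illustration only, not part of the pass): the
   coefficient walk above rewrites a sum of scaled copies of one value as
   a single multiply, treating NEG as coefficient -1 and ASHIFT by N as
   coefficient 1 << N.  On host integers the identities look like this;
   the function is hypothetical:  */
#if 0
static long
demo_distribute (long x)
{
  long a = (x << 2) + x;	/* coeff0 = 4, coeff1 = 1  ->  x * 5 */
  long b = -x + x;		/* coeff0 = -1, coeff1 = 1 ->  x * 0 */
  return a + b;			/* == x * 5 */
}
#endif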
3897 /* If one of the operands is a PLUS or a MINUS, see if we can
3898 simplify this by the associative law.
3899 Don't use the associative law for floating point.
3900 The inaccuracy makes it nonassociative,
3901 and subtle programs can break if operations are associated. */
3903 if (INTEGRAL_MODE_P (mode)
3904 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
3905 || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS)
3906 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
3907 return tem;
3908 break;
3910 case COMPARE:
3911 #ifdef HAVE_cc0
3912 /* Convert (compare FOO (const_int 0)) to FOO when we are using cc0;
3913 without cc0 we want to leave it as a COMPARE
3914 so we can distinguish it from a register-register copy.
3916 In IEEE floating point, x-0 is not the same as x. */
3918 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
3919 || ! FLOAT_MODE_P (mode) || flag_fast_math)
3920 && op1 == CONST0_RTX (mode))
3921 return op0;
3922 #else
3923 /* Do nothing here. */
3924 #endif
3925 break;
3927 case MINUS:
3928 /* None of these optimizations can be done for IEEE
3929 floating point. */
3930 if (TARGET_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
3931 && FLOAT_MODE_P (mode) && ! flag_fast_math)
3932 break;
3934 /* We can't assume x-x is 0 even with non-IEEE floating point,
3935 but since it is zero except in very strange circumstances, we
3936 will treat it as zero with -ffast-math. */
3937 if (rtx_equal_p (op0, op1)
3938 && ! side_effects_p (op0)
3939 && (! FLOAT_MODE_P (mode) || flag_fast_math))
3940 return CONST0_RTX (mode);
3942 /* Change subtraction from zero into negation. */
3943 if (op0 == CONST0_RTX (mode))
3944 return gen_rtx_NEG (mode, op1);
3946 /* (-1 - a) is ~a. */
3947 if (op0 == constm1_rtx)
3948 return gen_rtx_NOT (mode, op1);
3950 /* Subtracting 0 has no effect. */
3951 if (op1 == CONST0_RTX (mode))
3952 return op0;
3954 /* See if this is something like X * C - X or vice versa or
3955 if the multiplication is written as a shift. If so, we can
3956 distribute and make a new multiply, shift, or maybe just
3957 have X (if C is 2 in the example above). But don't make
3958 a real multiply if we didn't have one before. */
3960 if (! FLOAT_MODE_P (mode))
3962 HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
3963 rtx lhs = op0, rhs = op1;
3964 int had_mult = 0;
3966 if (GET_CODE (lhs) == NEG)
3967 coeff0 = -1, lhs = XEXP (lhs, 0);
3968 else if (GET_CODE (lhs) == MULT
3969 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
3971 coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
3972 had_mult = 1;
3974 else if (GET_CODE (lhs) == ASHIFT
3975 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
3976 && INTVAL (XEXP (lhs, 1)) >= 0
3977 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
3979 coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
3980 lhs = XEXP (lhs, 0);
3983 if (GET_CODE (rhs) == NEG)
3984 coeff1 = -1, rhs = XEXP (rhs, 0);
3985 else if (GET_CODE (rhs) == MULT
3986 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
3988 coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
3989 had_mult = 1;
3991 else if (GET_CODE (rhs) == ASHIFT
3992 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
3993 && INTVAL (XEXP (rhs, 1)) >= 0
3994 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
3996 coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
3997 rhs = XEXP (rhs, 0);
4000 if (rtx_equal_p (lhs, rhs))
4002 tem = cse_gen_binary (MULT, mode, lhs,
4003 GEN_INT (coeff0 - coeff1));
4004 return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
4008 /* (a - (-b)) -> (a + b). */
4009 if (GET_CODE (op1) == NEG)
4010 return cse_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
4012 /* If one of the operands is a PLUS or a MINUS, see if we can
4013 simplify this by the associative law.
4014 Don't use the associative law for floating point.
4015 The inaccuracy makes it nonassociative,
4016 and subtle programs can break if operations are associated. */
4018 if (INTEGRAL_MODE_P (mode)
4019 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
4020 || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS)
4021 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
4022 return tem;
4024 /* Don't let a relocatable value get a negative coeff. */
4025 if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
4026 return plus_constant (op0, - INTVAL (op1));
4028 /* (x - (x & y)) -> (x & ~y) */
4029 if (GET_CODE (op1) == AND)
4031 if (rtx_equal_p (op0, XEXP (op1, 0)))
4032 return cse_gen_binary (AND, mode, op0,
4033 gen_rtx_NOT (mode, XEXP (op1, 1)));
4034 if (rtx_equal_p (op0, XEXP (op1, 1)))
4035 return cse_gen_binary (AND, mode, op0,
4036 gen_rtx_NOT (mode, XEXP (op1, 0)));
4038 break;
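/* Editorial sketch (illustration only, not part of the pass): the
   (x - (x & y)) -> (x & ~y) rewrite above is the usual bit-clearing
   identity; since x & y contains only bits already set in x, the
   subtraction never borrows.  */
#if 0
static int
demo_sub_and (unsigned x, unsigned y)
{
  return x - (x & y) == (x & ~y);	/* always 1 */
}
#endif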
4040 case MULT:
4041 if (op1 == constm1_rtx)
4043 tem = simplify_unary_operation (NEG, mode, op0, mode);
4045 return tem ? tem : gen_rtx_NEG (mode, op0);
4048 /* In IEEE floating point, x*0 is not always 0. */
4049 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
4050 || ! FLOAT_MODE_P (mode) || flag_fast_math)
4051 && op1 == CONST0_RTX (mode)
4052 && ! side_effects_p (op0))
4053 return op1;
4055 /* In IEEE floating point, x*1 is not equivalent to x for nans.
4056 However, ANSI says we can drop signals,
4057 so we can do this anyway. */
4058 if (op1 == CONST1_RTX (mode))
4059 return op0;
4061 /* Convert multiply by constant power of two into shift unless
4062 we are still generating RTL. This test is a kludge. */
4063 if (GET_CODE (op1) == CONST_INT
4064 && (val = exact_log2 (INTVAL (op1))) >= 0
4065 /* If the mode is larger than the host word size, and the
4066 uppermost bit is set, then this isn't a power of two due
4067 to implicit sign extension. */
4068 && (width <= HOST_BITS_PER_WIDE_INT
4069 || val != HOST_BITS_PER_WIDE_INT - 1)
4070 && ! rtx_equal_function_value_matters)
4071 return gen_rtx_ASHIFT (mode, op0, GEN_INT (val));
4073 if (GET_CODE (op1) == CONST_DOUBLE
4074 && GET_MODE_CLASS (GET_MODE (op1)) == MODE_FLOAT)
4076 REAL_VALUE_TYPE d;
4077 jmp_buf handler;
4078 int op1is2, op1ism1;
4080 if (setjmp (handler))
4081 return 0;
4083 set_float_handler (handler);
4084 REAL_VALUE_FROM_CONST_DOUBLE (d, op1);
4085 op1is2 = REAL_VALUES_EQUAL (d, dconst2);
4086 op1ism1 = REAL_VALUES_EQUAL (d, dconstm1);
4087 set_float_handler (NULL_PTR);
4089 /* x*2 is x+x and x*(-1) is -x */
4090 if (op1is2 && GET_MODE (op0) == mode)
4091 return gen_rtx_PLUS (mode, op0, copy_rtx (op0));
4093 else if (op1ism1 && GET_MODE (op0) == mode)
4094 return gen_rtx_NEG (mode, op0);
4096 break;
4098 case IOR:
4099 if (op1 == const0_rtx)
4100 return op0;
4101 if (GET_CODE (op1) == CONST_INT
4102 && (INTVAL (op1) & GET_MODE_MASK (mode)) == GET_MODE_MASK (mode))
4103 return op1;
4104 if (rtx_equal_p (op0, op1) && ! side_effects_p (op0))
4105 return op0;
4106 /* A | (~A) -> -1 */
4107 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
4108 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
4109 && ! side_effects_p (op0)
4110 && GET_MODE_CLASS (mode) != MODE_CC)
4111 return constm1_rtx;
4112 break;
4114 case XOR:
4115 if (op1 == const0_rtx)
4116 return op0;
4117 if (GET_CODE (op1) == CONST_INT
4118 && (INTVAL (op1) & GET_MODE_MASK (mode)) == GET_MODE_MASK (mode))
4119 return gen_rtx_NOT (mode, op0);
4120 if (op0 == op1 && ! side_effects_p (op0)
4121 && GET_MODE_CLASS (mode) != MODE_CC)
4122 return const0_rtx;
4123 break;
4125 case AND:
4126 if (op1 == const0_rtx && ! side_effects_p (op0))
4127 return const0_rtx;
4128 if (GET_CODE (op1) == CONST_INT
4129 && (INTVAL (op1) & GET_MODE_MASK (mode)) == GET_MODE_MASK (mode))
4130 return op0;
4131 if (op0 == op1 && ! side_effects_p (op0)
4132 && GET_MODE_CLASS (mode) != MODE_CC)
4133 return op0;
4134 /* A & (~A) -> 0 */
4135 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
4136 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
4137 && ! side_effects_p (op0)
4138 && GET_MODE_CLASS (mode) != MODE_CC)
4139 return const0_rtx;
4140 break;
4142 case UDIV:
4143 /* Convert divide by power of two into shift (divide by 1 handled
4144 below). */
4145 if (GET_CODE (op1) == CONST_INT
4146 && (arg1 = exact_log2 (INTVAL (op1))) > 0)
4147 return gen_rtx_LSHIFTRT (mode, op0, GEN_INT (arg1));
4149 /* ... fall through ... */
4151 case DIV:
4152 if (op1 == CONST1_RTX (mode))
4153 return op0;
4155 /* In IEEE floating point, 0/x is not always 0. */
4156 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
4157 || ! FLOAT_MODE_P (mode) || flag_fast_math)
4158 && op0 == CONST0_RTX (mode)
4159 && ! side_effects_p (op1))
4160 return op0;
4162 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
4163 /* Change division by a constant into multiplication. Only do
4164 this with -ffast-math until an expert says it is safe in
4165 general. */
4166 else if (GET_CODE (op1) == CONST_DOUBLE
4167 && GET_MODE_CLASS (GET_MODE (op1)) == MODE_FLOAT
4168 && op1 != CONST0_RTX (mode)
4169 && flag_fast_math)
4171 REAL_VALUE_TYPE d;
4172 REAL_VALUE_FROM_CONST_DOUBLE (d, op1);
4174 if (! REAL_VALUES_EQUAL (d, dconst0))
4176 #if defined (REAL_ARITHMETIC)
4177 REAL_ARITHMETIC (d, rtx_to_tree_code (DIV), dconst1, d);
4178 return gen_rtx_MULT (mode, op0,
4179 CONST_DOUBLE_FROM_REAL_VALUE (d, mode));
4180 #else
4181 return
4182 gen_rtx_MULT (mode, op0,
4183 CONST_DOUBLE_FROM_REAL_VALUE (1./d, mode));
4184 #endif
4187 #endif
4188 break;
4190 case UMOD:
4191 /* Handle modulus by power of two (mod with 1 handled below). */
4192 if (GET_CODE (op1) == CONST_INT
4193 && exact_log2 (INTVAL (op1)) > 0)
4194 return gen_rtx_AND (mode, op0, GEN_INT (INTVAL (op1) - 1));
4196 /* ... fall through ... */
4198 case MOD:
4199 if ((op0 == const0_rtx || op1 == const1_rtx)
4200 && ! side_effects_p (op0) && ! side_effects_p (op1))
4201 return const0_rtx;
4202 break;
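/* Editorial sketch (illustration only, not part of the pass): the UMOD
   rewrite above relies on unsigned modulus by a power of two being a
   bit mask.  */
#if 0
static unsigned
demo_umod_pow2 (unsigned x)
{
  return x & (8 - 1);		/* same as x % 8 for unsigned x */
}
#endif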
4204 case ROTATERT:
4205 case ROTATE:
4206 /* Rotating ~0 always results in ~0. */
4207 if (GET_CODE (op0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
4208 && (unsigned HOST_WIDE_INT) INTVAL (op0) == GET_MODE_MASK (mode)
4209 && ! side_effects_p (op1))
4210 return op0;
4212 /* ... fall through ... */
4214 case ASHIFT:
4215 case ASHIFTRT:
4216 case LSHIFTRT:
4217 if (op1 == const0_rtx)
4218 return op0;
4219 if (op0 == const0_rtx && ! side_effects_p (op1))
4220 return op0;
4221 break;
4223 case SMIN:
4224 if (width <= HOST_BITS_PER_WIDE_INT && GET_CODE (op1) == CONST_INT
4225 && INTVAL (op1) == (HOST_WIDE_INT) 1 << (width - 1)
4226 && ! side_effects_p (op0))
4227 return op1;
4228 else if (rtx_equal_p (op0, op1) && ! side_effects_p (op0))
4229 return op0;
4230 break;
4232 case SMAX:
4233 if (width <= HOST_BITS_PER_WIDE_INT && GET_CODE (op1) == CONST_INT
4234 && ((unsigned HOST_WIDE_INT) INTVAL (op1)
4235 == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
4236 && ! side_effects_p (op0))
4237 return op1;
4238 else if (rtx_equal_p (op0, op1) && ! side_effects_p (op0))
4239 return op0;
4240 break;
4242 case UMIN:
4243 if (op1 == const0_rtx && ! side_effects_p (op0))
4244 return op1;
4245 else if (rtx_equal_p (op0, op1) && ! side_effects_p (op0))
4246 return op0;
4247 break;
4249 case UMAX:
4250 if (op1 == constm1_rtx && ! side_effects_p (op0))
4251 return op1;
4252 else if (rtx_equal_p (op0, op1) && ! side_effects_p (op0))
4253 return op0;
4254 break;
4256 default:
4257 abort ();
4260 return 0;
4263 /* Get the integer argument values in two forms:
4264 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
4266 arg0 = INTVAL (op0);
4267 arg1 = INTVAL (op1);
4269 if (width < HOST_BITS_PER_WIDE_INT)
4271 arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
4272 arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;
4274 arg0s = arg0;
4275 if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
4276 arg0s |= ((HOST_WIDE_INT) (-1) << width);
4278 arg1s = arg1;
4279 if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
4280 arg1s |= ((HOST_WIDE_INT) (-1) << width);
4282 else
4284 arg0s = arg0;
4285 arg1s = arg1;
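/* Editorial sketch (illustration only, not part of the pass): for
   width = 8 the extension above turns the raw bits 0xff into 255 in
   ARG0 (zero-extended) and -1 in ARG0S (sign-extended).  The types and
   two's-complement representation are assumptions of the sketch.  */
#if 0
static void
demo_extend (long long *arg0, long long *arg0s)
{
  const int width = 8;
  *arg0 &= ((long long) 1 << width) - 1;	/* zero-extend */
  *arg0s = *arg0;
  if (*arg0s & ((long long) 1 << (width - 1)))	/* sign bit of the field */
    *arg0s |= -(((long long) 1) << width);	/* sign-extend upward */
}
#endif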
4288 /* Compute the value of the arithmetic. */
4290 switch (code)
4292 case PLUS:
4293 val = arg0s + arg1s;
4294 break;
4296 case MINUS:
4297 val = arg0s - arg1s;
4298 break;
4300 case MULT:
4301 val = arg0s * arg1s;
4302 break;
4304 case DIV:
4305 if (arg1s == 0)
4306 return 0;
4307 val = arg0s / arg1s;
4308 break;
4310 case MOD:
4311 if (arg1s == 0)
4312 return 0;
4313 val = arg0s % arg1s;
4314 break;
4316 case UDIV:
4317 if (arg1 == 0)
4318 return 0;
4319 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
4320 break;
4322 case UMOD:
4323 if (arg1 == 0)
4324 return 0;
4325 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
4326 break;
4328 case AND:
4329 val = arg0 & arg1;
4330 break;
4332 case IOR:
4333 val = arg0 | arg1;
4334 break;
4336 case XOR:
4337 val = arg0 ^ arg1;
4338 break;
4340 case LSHIFTRT:
4341 /* If shift count is undefined, don't fold it; let the machine do
4342 what it wants. But truncate it if the machine will do that. */
4343 if (arg1 < 0)
4344 return 0;
4346 #ifdef SHIFT_COUNT_TRUNCATED
4347 if (SHIFT_COUNT_TRUNCATED)
4348 arg1 %= width;
4349 #endif
4351 val = ((unsigned HOST_WIDE_INT) arg0) >> arg1;
4352 break;
4354 case ASHIFT:
4355 if (arg1 < 0)
4356 return 0;
4358 #ifdef SHIFT_COUNT_TRUNCATED
4359 if (SHIFT_COUNT_TRUNCATED)
4360 arg1 %= width;
4361 #endif
4363 val = ((unsigned HOST_WIDE_INT) arg0) << arg1;
4364 break;
4366 case ASHIFTRT:
4367 if (arg1 < 0)
4368 return 0;
4370 #ifdef SHIFT_COUNT_TRUNCATED
4371 if (SHIFT_COUNT_TRUNCATED)
4372 arg1 %= width;
4373 #endif
4375 val = arg0s >> arg1;
4377 /* Bootstrap compiler may not have sign-extended the right shift.
4378 Manually extend the sign to ensure bootstrap cc matches gcc. */
4379 if (arg0s < 0 && arg1 > 0)
4380 val |= ((HOST_WIDE_INT) -1) << (HOST_BITS_PER_WIDE_INT - arg1);
4382 break;
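/* Editorial sketch of the manual extension above (illustration only):
   if the host's ">>" shifts zeros into a negative value, OR the sign
   bits back in by hand.  The 64-bit word size is an assumption.  */
#if 0
static long long
demo_ashiftrt (long long x, int n)
{
  long long v = x >> n;			/* may or may not sign-extend */
  if (x < 0 && n > 0)
    v |= (long long) (~0ULL << (64 - n));	/* force sign-bit copies */
  return v;
}
#endif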
4384 case ROTATERT:
4385 if (arg1 < 0)
4386 return 0;
4388 arg1 %= width;
4389 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
4390 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
4391 break;
4393 case ROTATE:
4394 if (arg1 < 0)
4395 return 0;
4397 arg1 %= width;
4398 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
4399 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
4400 break;
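/* Editorial sketch (illustration only, not part of the pass): the rotate
   arms above compose two shifts into a rotate.  The demo assumes
   0 < n < width so neither shift count reaches the full word size.  */
#if 0
static unsigned
demo_rotate_left (unsigned x, int n)
{
  const int width = 32;			/* assumed operand width */
  return (x << n) | (x >> (width - n));
}
#endif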
4402 case COMPARE:
4403 /* Do nothing here. */
4404 return 0;
4406 case SMIN:
4407 val = arg0s <= arg1s ? arg0s : arg1s;
4408 break;
4410 case UMIN:
4411 val = ((unsigned HOST_WIDE_INT) arg0
4412 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
4413 break;
4415 case SMAX:
4416 val = arg0s > arg1s ? arg0s : arg1s;
4417 break;
4419 case UMAX:
4420 val = ((unsigned HOST_WIDE_INT) arg0
4421 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
4422 break;
4424 default:
4425 abort ();
4428 val = trunc_int_for_mode (val, mode);
4430 return GEN_INT (val);
4433 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
4434 PLUS or MINUS.
4436 Rather than test for specific cases, we do this by a brute-force method
4437 and do all possible simplifications until no more changes occur. Then
4438 we rebuild the operation. */
4440 static rtx
4441 simplify_plus_minus (code, mode, op0, op1)
4442 enum rtx_code code;
4443 enum machine_mode mode;
4444 rtx op0, op1;
4446 rtx ops[8];
4447 int negs[8];
4448 rtx result, tem;
4449 int n_ops = 2, input_ops = 2, input_consts = 0, n_consts = 0;
4450 int first = 1, negate = 0, changed;
4451 int i, j;
4453 bzero ((char *) ops, sizeof ops);
4455 /* Set up the two operands and then expand them until nothing has been
4456 changed. If we run out of room in our array, give up; this should
4457 almost never happen. */
4459 ops[0] = op0, ops[1] = op1, negs[0] = 0, negs[1] = (code == MINUS);
4461 changed = 1;
4462 while (changed)
4464 changed = 0;
4466 for (i = 0; i < n_ops; i++)
4467 switch (GET_CODE (ops[i]))
4469 case PLUS:
4470 case MINUS:
4471 if (n_ops == 7)
4472 return 0;
4474 ops[n_ops] = XEXP (ops[i], 1);
4475 negs[n_ops++] = GET_CODE (ops[i]) == MINUS ? !negs[i] : negs[i];
4476 ops[i] = XEXP (ops[i], 0);
4477 input_ops++;
4478 changed = 1;
4479 break;
4481 case NEG:
4482 ops[i] = XEXP (ops[i], 0);
4483 negs[i] = ! negs[i];
4484 changed = 1;
4485 break;
4487 case CONST:
4488 ops[i] = XEXP (ops[i], 0);
4489 input_consts++;
4490 changed = 1;
4491 break;
4493 case NOT:
4494 /* ~a -> (-a - 1) */
4495 if (n_ops != 7)
4497 ops[n_ops] = constm1_rtx;
4498 negs[n_ops++] = negs[i];
4499 ops[i] = XEXP (ops[i], 0);
4500 negs[i] = ! negs[i];
4501 changed = 1;
4503 break;
4505 case CONST_INT:
4506 if (negs[i])
4507 ops[i] = GEN_INT (- INTVAL (ops[i])), negs[i] = 0, changed = 1;
4508 break;
4510 default:
4511 break;
4515 /* If we only have two operands, we can't do anything. */
4516 if (n_ops <= 2)
4517 return 0;
4519 /* Now simplify each pair of operands until nothing changes. The first
4520 time through just simplify constants against each other. */
4522 changed = 1;
4523 while (changed)
4525 changed = first;
4527 for (i = 0; i < n_ops - 1; i++)
4528 for (j = i + 1; j < n_ops; j++)
4529 if (ops[i] != 0 && ops[j] != 0
4530 && (! first || (CONSTANT_P (ops[i]) && CONSTANT_P (ops[j]))))
4532 rtx lhs = ops[i], rhs = ops[j];
4533 enum rtx_code ncode = PLUS;
4535 if (negs[i] && ! negs[j])
4536 lhs = ops[j], rhs = ops[i], ncode = MINUS;
4537 else if (! negs[i] && negs[j])
4538 ncode = MINUS;
4540 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
4541 if (tem)
4543 ops[i] = tem, ops[j] = 0;
4544 negs[i] = negs[i] && negs[j];
4545 if (GET_CODE (tem) == NEG)
4546 ops[i] = XEXP (tem, 0), negs[i] = ! negs[i];
4548 if (GET_CODE (ops[i]) == CONST_INT && negs[i])
4549 ops[i] = GEN_INT (- INTVAL (ops[i])), negs[i] = 0;
4550 changed = 1;
4554 first = 0;
4557 /* Pack all the operands to the lower-numbered entries and give up if
4558 we didn't reduce the number of operands we had. Make sure we
4559 count a CONST as two operands. If we have the same number of
4560 operands, but have made more CONSTs than we had, this is also
4561 an improvement, so accept it. */
4563 for (i = 0, j = 0; j < n_ops; j++)
4564 if (ops[j] != 0)
4566 ops[i] = ops[j], negs[i++] = negs[j];
4567 if (GET_CODE (ops[j]) == CONST)
4568 n_consts++;
4571 if (i + n_consts > input_ops
4572 || (i + n_consts == input_ops && n_consts <= input_consts))
4573 return 0;
4575 n_ops = i;
4577 /* If we have a CONST_INT, put it last. */
4578 for (i = 0; i < n_ops - 1; i++)
4579 if (GET_CODE (ops[i]) == CONST_INT)
4581 tem = ops[n_ops - 1], ops[n_ops - 1] = ops[i], ops[i] = tem;
4582 j = negs[n_ops - 1], negs[n_ops - 1] = negs[i], negs[i] = j;
4585 /* Put a non-negated operand first. If there aren't any, make all
4586 operands positive and negate the whole thing later. */
4587 for (i = 0; i < n_ops && negs[i]; i++)
4590 if (i == n_ops)
4592 for (i = 0; i < n_ops; i++)
4593 negs[i] = 0;
4594 negate = 1;
4596 else if (i != 0)
4598 tem = ops[0], ops[0] = ops[i], ops[i] = tem;
4599 j = negs[0], negs[0] = negs[i], negs[i] = j;
4602 /* Now make the result by performing the requested operations. */
4603 result = ops[0];
4604 for (i = 1; i < n_ops; i++)
4605 result = cse_gen_binary (negs[i] ? MINUS : PLUS, mode, result, ops[i]);
4607 return negate ? gen_rtx_NEG (mode, result) : result;
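/* Editorial walk-through of the brute-force method above, on a
   hypothetical input (illustration only): (x + 3) - (x - 5) flattens to
   the signed operand list { +x, +3, -x, +5 }; the pairwise passes fold
   +3 with +5 into 8 and +x with -x into 0, and the rebuilt result is
   the constant 8.  */
#if 0
static long
demo_plus_minus (long x)
{
  return (x + 3) - (x - 5);	/* simplifies to the constant 8 */
}
#endif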
4610 /* Make a binary operation by properly ordering the operands and
4611 seeing if the expression folds. */
4613 static rtx
4614 cse_gen_binary (code, mode, op0, op1)
4615 enum rtx_code code;
4616 enum machine_mode mode;
4617 rtx op0, op1;
4619 rtx tem;
4621 /* Put complex operands first and constants second if commutative. */
4622 if (GET_RTX_CLASS (code) == 'c'
4623 && ((CONSTANT_P (op0) && GET_CODE (op1) != CONST_INT)
4624 || (GET_RTX_CLASS (GET_CODE (op0)) == 'o'
4625 && GET_RTX_CLASS (GET_CODE (op1)) != 'o')
4626 || (GET_CODE (op0) == SUBREG
4627 && GET_RTX_CLASS (GET_CODE (SUBREG_REG (op0))) == 'o'
4628 && GET_RTX_CLASS (GET_CODE (op1)) != 'o')))
4629 tem = op0, op0 = op1, op1 = tem;
4631 /* If this simplifies, do it. */
4632 tem = simplify_binary_operation (code, mode, op0, op1);
4634 if (tem)
4635 return tem;
4637 /* Handle addition and subtraction of CONST_INT specially. Otherwise,
4638 just form the operation. */
4640 if (code == PLUS && GET_CODE (op1) == CONST_INT
4641 && GET_MODE (op0) != VOIDmode)
4642 return plus_constant (op0, INTVAL (op1));
4643 else if (code == MINUS && GET_CODE (op1) == CONST_INT
4644 && GET_MODE (op0) != VOIDmode)
4645 return plus_constant (op0, - INTVAL (op1));
4646 else
4647 return gen_rtx_fmt_ee (code, mode, op0, op1);
4650 struct cfc_args
4652 /* Input */
4653 rtx op0, op1;
4654 /* Output */
4655 int equal, op0lt, op1lt;
4658 static void
4659 check_fold_consts (data)
4660 PTR data;
4662 struct cfc_args * args = (struct cfc_args *) data;
4663 REAL_VALUE_TYPE d0, d1;
4665 REAL_VALUE_FROM_CONST_DOUBLE (d0, args->op0);
4666 REAL_VALUE_FROM_CONST_DOUBLE (d1, args->op1);
4667 args->equal = REAL_VALUES_EQUAL (d0, d1);
4668 args->op0lt = REAL_VALUES_LESS (d0, d1);
4669 args->op1lt = REAL_VALUES_LESS (d1, d0);
4672 /* Like simplify_binary_operation except used for relational operators.
4673 MODE is the mode of the operands, not that of the result. If MODE
4674 is VOIDmode, both operands must also be VOIDmode and we compare the
4675 operands in "infinite precision".
4677 If no simplification is possible, this function returns zero. Otherwise,
4678 it returns either const_true_rtx or const0_rtx. */
4680 rtx
4681 simplify_relational_operation (code, mode, op0, op1)
4682 enum rtx_code code;
4683 enum machine_mode mode;
4684 rtx op0, op1;
4686 int equal, op0lt, op0ltu, op1lt, op1ltu;
4687 rtx tem;
4689 /* If op0 is a compare, extract the comparison arguments from it. */
4690 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4691 op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);
4693 /* We can't simplify MODE_CC values since we don't know what the
4694 actual comparison is. */
4695 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC
4696 #ifdef HAVE_cc0
4697 || op0 == cc0_rtx
4698 #endif
4700 return 0;
4702 /* For integer comparisons of A and B maybe we can simplify A - B and can
4703 then simplify a comparison of that with zero. If A and B are both either
4704 a register or a CONST_INT, this can't help; testing for these cases will
4705 prevent infinite recursion here and speed things up.
4707 If CODE is an unsigned comparison, then we can never do this optimization,
4708 because it gives an incorrect result if the subtraction wraps around zero.
4709 ANSI C defines unsigned operations such that they never overflow, and
4710 thus such cases cannot be ignored. */
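/* Editorial counterexample for the unsigned restriction above
   (illustration only): with 32-bit operands, 0 LTU 1 is true, but
   0 - 1 wraps to 0xffffffff, and a signed comparison of that
   difference against zero would answer "greater".  */
#if 0
static int
demo_ltu_wrap (void)
{
  unsigned a = 0, b = 1;
  return (a - b) == 0xffffffffu;	/* the subtraction wrapped */
}
#endif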
4712 if (INTEGRAL_MODE_P (mode) && op1 != const0_rtx
4713 && ! ((GET_CODE (op0) == REG || GET_CODE (op0) == CONST_INT)
4714 && (GET_CODE (op1) == REG || GET_CODE (op1) == CONST_INT))
4715 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
4716 && code != GTU && code != GEU && code != LTU && code != LEU)
4717 return simplify_relational_operation (signed_condition (code),
4718 mode, tem, const0_rtx);
4720 /* For non-IEEE floating-point, if the two operands are equal, we know the
4721 result. */
4722 if (rtx_equal_p (op0, op1)
4723 && (TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
4724 || ! FLOAT_MODE_P (GET_MODE (op0)) || flag_fast_math))
4725 equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;
4727 /* If the operands are floating-point constants, see if we can fold
4728 the result. */
4729 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
4730 else if (GET_CODE (op0) == CONST_DOUBLE && GET_CODE (op1) == CONST_DOUBLE
4731 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_FLOAT)
4733 struct cfc_args args;
4735 /* Set up input for check_fold_consts (). */
4736 args.op0 = op0;
4737 args.op1 = op1;
4739 if (do_float_handler (check_fold_consts, (PTR) &args) == 0)
4740 /* We got an exception from check_fold_consts (). */
4741 return 0;
4743 /* Receive output from check_fold_consts() */
4744 equal = args.equal;
4745 op0lt = op0ltu = args.op0lt;
4746 op1lt = op1ltu = args.op1lt;
4748 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
4750 /* Otherwise, see if the operands are both integers. */
4751 else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
4752 && (GET_CODE (op0) == CONST_DOUBLE || GET_CODE (op0) == CONST_INT)
4753 && (GET_CODE (op1) == CONST_DOUBLE || GET_CODE (op1) == CONST_INT))
4755 int width = GET_MODE_BITSIZE (mode);
4756 HOST_WIDE_INT l0s, h0s, l1s, h1s;
4757 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
4759 /* Get the two words comprising each integer constant. */
4760 if (GET_CODE (op0) == CONST_DOUBLE)
4762 l0u = l0s = CONST_DOUBLE_LOW (op0);
4763 h0u = h0s = CONST_DOUBLE_HIGH (op0);
4765 else
4767 l0u = l0s = INTVAL (op0);
4768 h0u = h0s = l0s < 0 ? -1 : 0;
4771 if (GET_CODE (op1) == CONST_DOUBLE)
4773 l1u = l1s = CONST_DOUBLE_LOW (op1);
4774 h1u = h1s = CONST_DOUBLE_HIGH (op1);
4776 else
4778 l1u = l1s = INTVAL (op1);
4779 h1u = h1s = l1s < 0 ? -1 : 0;
4782 /* If WIDTH is nonzero and no larger than HOST_BITS_PER_WIDE_INT,
4783 we have to sign or zero-extend the values. */
4784 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
4785 h0u = h1u = 0, h0s = l0s < 0 ? -1 : 0, h1s = l1s < 0 ? -1 : 0;
4787 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
4789 l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
4790 l1u &= ((HOST_WIDE_INT) 1 << width) - 1;
4792 if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
4793 l0s |= ((HOST_WIDE_INT) (-1) << width);
4795 if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
4796 l1s |= ((HOST_WIDE_INT) (-1) << width);
4799 equal = (h0u == h1u && l0u == l1u);
4800 op0lt = (h0s < h1s || (h0s == h1s && l0s < l1s));
4801 op1lt = (h1s < h0s || (h1s == h0s && l1s < l0s));
4802 op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
4803 op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
4806 /* Otherwise, there are some code-specific tests we can make. */
4807 else
4809 switch (code)
4811 case EQ:
4812 /* References to the frame plus a constant or labels cannot
4813 be zero, but a SYMBOL_REF can due to #pragma weak. */
4814 if (((NONZERO_BASE_PLUS_P (op0) && op1 == const0_rtx)
4815 || GET_CODE (op0) == LABEL_REF)
4816 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
4817 /* On some machines, the ap reg can be 0 sometimes. */
4818 && op0 != arg_pointer_rtx
4819 #endif
4821 return const0_rtx;
4822 break;
4824 case NE:
4825 if (((NONZERO_BASE_PLUS_P (op0) && op1 == const0_rtx)
4826 || GET_CODE (op0) == LABEL_REF)
4827 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
4828 && op0 != arg_pointer_rtx
4829 #endif
4831 return const_true_rtx;
4832 break;
4834 case GEU:
4835 /* Unsigned values are never negative. */
4836 if (op1 == const0_rtx)
4837 return const_true_rtx;
4838 break;
4840 case LTU:
4841 if (op1 == const0_rtx)
4842 return const0_rtx;
4843 break;
4845 case LEU:
4846 /* Unsigned values are never greater than the largest
4847 unsigned value. */
4848 if (GET_CODE (op1) == CONST_INT
4849 && (unsigned HOST_WIDE_INT) INTVAL (op1) == GET_MODE_MASK (mode)
4850 && INTEGRAL_MODE_P (mode))
4851 return const_true_rtx;
4852 break;
4854 case GTU:
4855 if (GET_CODE (op1) == CONST_INT
4856 && (unsigned HOST_WIDE_INT) INTVAL (op1) == GET_MODE_MASK (mode)
4857 && INTEGRAL_MODE_P (mode))
4858 return const0_rtx;
4859 break;
4861 default:
4862 break;
4865 return 0;
4868 /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
4869 as appropriate. */
4870 switch (code)
4872 case EQ:
4873 return equal ? const_true_rtx : const0_rtx;
4874 case NE:
4875 return ! equal ? const_true_rtx : const0_rtx;
4876 case LT:
4877 return op0lt ? const_true_rtx : const0_rtx;
4878 case GT:
4879 return op1lt ? const_true_rtx : const0_rtx;
4880 case LTU:
4881 return op0ltu ? const_true_rtx : const0_rtx;
4882 case GTU:
4883 return op1ltu ? const_true_rtx : const0_rtx;
4884 case LE:
4885 return equal || op0lt ? const_true_rtx : const0_rtx;
4886 case GE:
4887 return equal || op1lt ? const_true_rtx : const0_rtx;
4888 case LEU:
4889 return equal || op0ltu ? const_true_rtx : const0_rtx;
4890 case GEU:
4891 return equal || op1ltu ? const_true_rtx : const0_rtx;
4892 default:
4893 abort ();
4897 /* Simplify CODE, an operation with result mode MODE and three operands,
4898 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
4899 a constant. Return 0 if no simplification is possible. */
4901 rtx
4902 simplify_ternary_operation (code, mode, op0_mode, op0, op1, op2)
4903 enum rtx_code code;
4904 enum machine_mode mode, op0_mode;
4905 rtx op0, op1, op2;
4907 int width = GET_MODE_BITSIZE (mode);
4909 /* VOIDmode means "infinite" precision. */
4910 if (width == 0)
4911 width = HOST_BITS_PER_WIDE_INT;
4913 switch (code)
4915 case SIGN_EXTRACT:
4916 case ZERO_EXTRACT:
4917 if (GET_CODE (op0) == CONST_INT
4918 && GET_CODE (op1) == CONST_INT
4919 && GET_CODE (op2) == CONST_INT
4920 && INTVAL (op1) + INTVAL (op2) <= GET_MODE_BITSIZE (op0_mode)
4921 && width <= HOST_BITS_PER_WIDE_INT)
4923 /* Extracting a bit-field from a constant */
4924 HOST_WIDE_INT val = INTVAL (op0);
4926 if (BITS_BIG_ENDIAN)
4927 val >>= (GET_MODE_BITSIZE (op0_mode)
4928 - INTVAL (op2) - INTVAL (op1));
4929 else
4930 val >>= INTVAL (op2);
4932 if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
4934 /* First zero-extend. */
4935 val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
4936 /* If desired, propagate sign bit. */
4937 if (code == SIGN_EXTRACT
4938 && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
4939 val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
4942 /* Clear the bits that don't belong in our mode,
4943 unless they and our sign bit are all one.
4944 So we get either a reasonable negative value or a reasonable
4945 unsigned value for this mode. */
4946 if (width < HOST_BITS_PER_WIDE_INT
4947 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
4948 != ((HOST_WIDE_INT) (-1) << (width - 1))))
4949 val &= ((HOST_WIDE_INT) 1 << width) - 1;
4951 return GEN_INT (val);
4953 break;
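/* Editorial sketch of the extraction above (illustration only): pulling
   the 4-bit field at bit 4 out of the constant 0xab yields 0xa for
   ZERO_EXTRACT, or -6 for SIGN_EXTRACT once the field's sign bit is
   propagated.  Little-endian bit numbering is assumed here.  */
#if 0
static long
demo_extract_field (int want_sign)
{
  long val = 0xab >> 4;			/* shift the field down to bit 0 */
  val &= (1L << 4) - 1;			/* zero-extend: 0xa */
  if (want_sign && (val & (1L << 3)))	/* field's sign bit is set */
    val |= ~((1L << 4) - 1);		/* sign-extend: -6 */
  return val;
}
#endif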
4955 case IF_THEN_ELSE:
4956 if (GET_CODE (op0) == CONST_INT)
4957 return op0 != const0_rtx ? op1 : op2;
4959 /* Convert "a != b ? a : b" and "a == b ? b : a" to "a". */
4960 if (GET_CODE (op0) == NE && ! side_effects_p (op0)
4961 && rtx_equal_p (XEXP (op0, 0), op1)
4962 && rtx_equal_p (XEXP (op0, 1), op2))
4963 return op1;
4964 else if (GET_CODE (op0) == EQ && ! side_effects_p (op0)
4965 && rtx_equal_p (XEXP (op0, 1), op1)
4966 && rtx_equal_p (XEXP (op0, 0), op2))
4967 return op2;
4968 else if (GET_RTX_CLASS (GET_CODE (op0)) == '<' && ! side_effects_p (op0))
4970 rtx temp;
4971 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
4972 XEXP (op0, 0), XEXP (op0, 1));
4973 /* See if any simplifications were possible. */
4974 if (temp == const0_rtx)
4975 return op2;
4976 else if (temp == const1_rtx)
4977 return op1;
4979 break;
4981 default:
4982 abort ();
4985 return 0;
4988 /* If X is a nontrivial arithmetic operation on an argument
4989 for which a constant value can be determined, return
4990 the result of operating on that value, as a constant.
4991 Otherwise, return X, possibly with one or more operands
4992 modified by recursive calls to this function.
4994 If X is a register whose contents are known, we do NOT
4995 return those contents here. equiv_constant is called to
4996 perform that task.
4998 INSN is the insn that we may be modifying. If it is 0, make a copy
4999 of X before modifying it. */
5001 static rtx
5002 fold_rtx (x, insn)
5003 rtx x;
5004 rtx insn;
5006 register enum rtx_code code;
5007 register enum machine_mode mode;
5008 register const char *fmt;
5009 register int i;
5010 rtx new = 0;
5011 int copied = 0;
5012 int must_swap = 0;
5014 /* Folded equivalents of first two operands of X. */
5015 rtx folded_arg0;
5016 rtx folded_arg1;
5018 /* Constant equivalents of first three operands of X;
5019 0 when no such equivalent is known. */
5020 rtx const_arg0;
5021 rtx const_arg1;
5022 rtx const_arg2;
5024 /* The mode of the first operand of X. We need this for sign and zero
5025 extends. */
5026 enum machine_mode mode_arg0;
5028 if (x == 0)
5029 return x;
5031 mode = GET_MODE (x);
5032 code = GET_CODE (x);
5033 switch (code)
5035 case CONST:
5036 case CONST_INT:
5037 case CONST_DOUBLE:
5038 case SYMBOL_REF:
5039 case LABEL_REF:
5040 case REG:
5041 /* No use simplifying an EXPR_LIST
5042 since they are used only for lists of args
5043 in a function call's REG_EQUAL note. */
5044 case EXPR_LIST:
5045 /* Changing anything inside an ADDRESSOF is incorrect; we don't
5046 want to (e.g.,) make (addressof (const_int 0)) just because
5047 the location is known to be zero. */
5048 case ADDRESSOF:
5049 return x;
5051 #ifdef HAVE_cc0
5052 case CC0:
5053 return prev_insn_cc0;
5054 #endif
5056 case PC:
5057 /* If the next insn is a CODE_LABEL followed by a jump table,
5058 PC's value is a LABEL_REF pointing to that label. That
5059 lets us fold switch statements on the VAX. */
5060 if (insn && GET_CODE (insn) == JUMP_INSN)
5062 rtx next = next_nonnote_insn (insn);
5064 if (next && GET_CODE (next) == CODE_LABEL
5065 && NEXT_INSN (next) != 0
5066 && GET_CODE (NEXT_INSN (next)) == JUMP_INSN
5067 && (GET_CODE (PATTERN (NEXT_INSN (next))) == ADDR_VEC
5068 || GET_CODE (PATTERN (NEXT_INSN (next))) == ADDR_DIFF_VEC))
5069 return gen_rtx_LABEL_REF (Pmode, next);
5071 break;
5073 case SUBREG:
5074 /* See if we previously assigned a constant value to this SUBREG. */
5075 if ((new = lookup_as_function (x, CONST_INT)) != 0
5076 || (new = lookup_as_function (x, CONST_DOUBLE)) != 0)
5077 return new;
5079 /* If this is a paradoxical SUBREG, we have no idea what value the
5080 extra bits would have. However, if the operand is equivalent
5081 to a SUBREG whose operand is the same as our mode, and all the
5082 modes are within a word, we can just use the inner operand
5083 because these SUBREGs just say how to treat the register.
5085 Similarly if we find an integer constant. */
5087 if (GET_MODE_SIZE (mode) > GET_MODE_SIZE (GET_MODE (SUBREG_REG (x))))
5089 enum machine_mode imode = GET_MODE (SUBREG_REG (x));
5090 struct table_elt *elt;
5092 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
5093 && GET_MODE_SIZE (imode) <= UNITS_PER_WORD
5094 && (elt = lookup (SUBREG_REG (x), HASH (SUBREG_REG (x), imode),
5095 imode)) != 0)
5096 for (elt = elt->first_same_value;
5097 elt; elt = elt->next_same_value)
5099 if (CONSTANT_P (elt->exp)
5100 && GET_MODE (elt->exp) == VOIDmode)
5101 return elt->exp;
5103 if (GET_CODE (elt->exp) == SUBREG
5104 && GET_MODE (SUBREG_REG (elt->exp)) == mode
5105 && exp_equiv_p (elt->exp, elt->exp, 1, 0))
5106 return copy_rtx (SUBREG_REG (elt->exp));
5109 return x;
5112 /* Fold SUBREG_REG. If it changed, see if we can simplify the SUBREG.
5113 We might be able to if the SUBREG is extracting a single word in an
5114 integral mode or extracting the low part. */
5116 folded_arg0 = fold_rtx (SUBREG_REG (x), insn);
5117 const_arg0 = equiv_constant (folded_arg0);
5118 if (const_arg0)
5119 folded_arg0 = const_arg0;
5121 if (folded_arg0 != SUBREG_REG (x))
5123 new = 0;
5125 if (GET_MODE_CLASS (mode) == MODE_INT
5126 && GET_MODE_SIZE (mode) == UNITS_PER_WORD
5127 && GET_MODE (SUBREG_REG (x)) != VOIDmode)
5128 new = operand_subword (folded_arg0, SUBREG_WORD (x), 0,
5129 GET_MODE (SUBREG_REG (x)));
5130 if (new == 0 && subreg_lowpart_p (x))
5131 new = gen_lowpart_if_possible (mode, folded_arg0);
5132 if (new)
5133 return new;
5136 /* If this is a narrowing SUBREG and our operand is a REG, see if
5137 we can find an equivalence for REG that is an arithmetic operation
5138 in a wider mode where both operands are paradoxical SUBREGs
5139 from objects of our result mode. In that case, we couldn't report
5140 an equivalent value for that operation, since we don't know what the
5141 extra bits will be. But we can find an equivalence for this SUBREG
5142 by folding that operation in the narrow mode. This allows us to
5143 fold arithmetic in narrow modes when the machine only supports
5144 word-sized arithmetic.
5146 Also look for a case where we have a SUBREG whose operand is the
5147 same as our result. If both modes are smaller than a word, we
5148 are simply interpreting a register in different modes and we
5149 can use the inner value. */
5151 if (GET_CODE (folded_arg0) == REG
5152 && GET_MODE_SIZE (mode) < GET_MODE_SIZE (GET_MODE (folded_arg0))
5153 && subreg_lowpart_p (x))
5155 struct table_elt *elt;
5157 /* We can use HASH here since we know that canon_hash won't be
5158 called. */
5159 elt = lookup (folded_arg0,
5160 HASH (folded_arg0, GET_MODE (folded_arg0)),
5161 GET_MODE (folded_arg0));
5163 if (elt)
5164 elt = elt->first_same_value;
5166 for (; elt; elt = elt->next_same_value)
5168 enum rtx_code eltcode = GET_CODE (elt->exp);
5170 /* Just check for unary and binary operations. */
5171 if (GET_RTX_CLASS (GET_CODE (elt->exp)) == '1'
5172 && GET_CODE (elt->exp) != SIGN_EXTEND
5173 && GET_CODE (elt->exp) != ZERO_EXTEND
5174 && GET_CODE (XEXP (elt->exp, 0)) == SUBREG
5175 && GET_MODE (SUBREG_REG (XEXP (elt->exp, 0))) == mode)
5177 rtx op0 = SUBREG_REG (XEXP (elt->exp, 0));
5179 if (GET_CODE (op0) != REG && ! CONSTANT_P (op0))
5180 op0 = fold_rtx (op0, NULL_RTX);
5182 op0 = equiv_constant (op0);
5183 if (op0)
5184 new = simplify_unary_operation (GET_CODE (elt->exp), mode,
5185 op0, mode);
5187 else if ((GET_RTX_CLASS (GET_CODE (elt->exp)) == '2'
5188 || GET_RTX_CLASS (GET_CODE (elt->exp)) == 'c')
5189 && eltcode != DIV && eltcode != MOD
5190 && eltcode != UDIV && eltcode != UMOD
5191 && eltcode != ASHIFTRT && eltcode != LSHIFTRT
5192 && eltcode != ROTATE && eltcode != ROTATERT
5193 && ((GET_CODE (XEXP (elt->exp, 0)) == SUBREG
5194 && (GET_MODE (SUBREG_REG (XEXP (elt->exp, 0)))
5195 == mode))
5196 || CONSTANT_P (XEXP (elt->exp, 0)))
5197 && ((GET_CODE (XEXP (elt->exp, 1)) == SUBREG
5198 && (GET_MODE (SUBREG_REG (XEXP (elt->exp, 1)))
5199 == mode))
5200 || CONSTANT_P (XEXP (elt->exp, 1))))
5202 rtx op0 = gen_lowpart_common (mode, XEXP (elt->exp, 0));
5203 rtx op1 = gen_lowpart_common (mode, XEXP (elt->exp, 1));
5205 if (op0 && GET_CODE (op0) != REG && ! CONSTANT_P (op0))
5206 op0 = fold_rtx (op0, NULL_RTX);
5208 if (op0)
5209 op0 = equiv_constant (op0);
5211 if (op1 && GET_CODE (op1) != REG && ! CONSTANT_P (op1))
5212 op1 = fold_rtx (op1, NULL_RTX);
5214 if (op1)
5215 op1 = equiv_constant (op1);
5217 /* If we are looking for the low SImode part of
5218 (ashift:DI c (const_int 32)), it doesn't work
5219 to compute that in SImode, because a 32-bit shift
5220 in SImode is unpredictable. We know the value is 0. */
5221 if (op0 && op1
5222 && GET_CODE (elt->exp) == ASHIFT
5223 && GET_CODE (op1) == CONST_INT
5224 && INTVAL (op1) >= GET_MODE_BITSIZE (mode))
5226 if (INTVAL (op1) < GET_MODE_BITSIZE (GET_MODE (elt->exp)))
5228 /* If the count fits in the inner mode's width,
5229 but exceeds the outer mode's width,
5230 the value will get truncated to 0
5231 by the subreg. */
5232 new = const0_rtx;
5233 else
5234 /* If the count exceeds even the inner mode's width,
5235 don't fold this expression. */
5236 new = 0;
5238 else if (op0 && op1)
5239 new = simplify_binary_operation (GET_CODE (elt->exp), mode,
5240 op0, op1);
5243 else if (GET_CODE (elt->exp) == SUBREG
5244 && GET_MODE (SUBREG_REG (elt->exp)) == mode
5245 && (GET_MODE_SIZE (GET_MODE (folded_arg0))
5246 <= UNITS_PER_WORD)
5247 && exp_equiv_p (elt->exp, elt->exp, 1, 0))
5248 new = copy_rtx (SUBREG_REG (elt->exp));
5250 if (new)
5251 return new;
5255 return x;
5257 case NOT:
5258 case NEG:
5259 /* If we have (NOT Y), see if Y is known to be (NOT Z).
5260 If so, (NOT Y) simplifies to Z. Similarly for NEG. */
5261 new = lookup_as_function (XEXP (x, 0), code);
5262 if (new)
5263 return fold_rtx (copy_rtx (XEXP (new, 0)), insn);
5264 break;
5266 case MEM:
5267 /* If we are not actually processing an insn, don't try to find the
5268 best address. Not only don't we care, but we could modify the
5269 MEM in an invalid way since we have no insn to validate against. */
5270 if (insn != 0)
5271 find_best_addr (insn, &XEXP (x, 0));
5274 /* Even if we don't fold in the insn itself,
5275 we can safely do so here, in hopes of getting a constant. */
5276 rtx addr = fold_rtx (XEXP (x, 0), NULL_RTX);
5277 rtx base = 0;
5278 HOST_WIDE_INT offset = 0;
5280 if (GET_CODE (addr) == REG
5281 && REGNO_QTY_VALID_P (REGNO (addr))
5282 && GET_MODE (addr) == qty_mode[REG_QTY (REGNO (addr))]
5283 && qty_const[REG_QTY (REGNO (addr))] != 0)
5284 addr = qty_const[REG_QTY (REGNO (addr))];
5286 /* If address is constant, split it into a base and integer offset. */
5287 if (GET_CODE (addr) == SYMBOL_REF || GET_CODE (addr) == LABEL_REF)
5288 base = addr;
5289 else if (GET_CODE (addr) == CONST && GET_CODE (XEXP (addr, 0)) == PLUS
5290 && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST_INT)
5292 base = XEXP (XEXP (addr, 0), 0);
5293 offset = INTVAL (XEXP (XEXP (addr, 0), 1));
5295 else if (GET_CODE (addr) == LO_SUM
5296 && GET_CODE (XEXP (addr, 1)) == SYMBOL_REF)
5297 base = XEXP (addr, 1);
5298 else if (GET_CODE (addr) == ADDRESSOF)
5299 return change_address (x, VOIDmode, addr);
5301 /* If this is a constant pool reference, we can fold it into its
5302 constant to allow better value tracking. */
5303 if (base && GET_CODE (base) == SYMBOL_REF
5304 && CONSTANT_POOL_ADDRESS_P (base))
5306 rtx constant = get_pool_constant (base);
5307 enum machine_mode const_mode = get_pool_mode (base);
5308 rtx new;
5310 if (CONSTANT_P (constant) && GET_CODE (constant) != CONST_INT)
5311 constant_pool_entries_cost = COST (constant);
5313 /* If we are loading the full constant, we have an equivalence. */
5314 if (offset == 0 && mode == const_mode)
5315 return constant;
5317 /* If this actually isn't a constant (weird!), we can't do
5318 anything. Otherwise, handle the two most common cases:
5319 extracting a word from a multi-word constant, and extracting
5320 the low-order bits. Other cases don't seem common enough to
5321 worry about. */
5322 if (! CONSTANT_P (constant))
5323 return x;
5325 if (GET_MODE_CLASS (mode) == MODE_INT
5326 && GET_MODE_SIZE (mode) == UNITS_PER_WORD
5327 && offset % UNITS_PER_WORD == 0
5328 && (new = operand_subword (constant,
5329 offset / UNITS_PER_WORD,
5330 0, const_mode)) != 0)
5331 return new;
5333 if (((BYTES_BIG_ENDIAN
5334 && offset == GET_MODE_SIZE (GET_MODE (constant)) - 1)
5335 || (! BYTES_BIG_ENDIAN && offset == 0))
5336 && (new = gen_lowpart_if_possible (mode, constant)) != 0)
5337 return new;
5340 /* If this is a reference to a label at a known position in a jump
5341 table, we also know its value. */
5342 if (base && GET_CODE (base) == LABEL_REF)
5344 rtx label = XEXP (base, 0);
5345 rtx table_insn = NEXT_INSN (label);
5347 if (table_insn && GET_CODE (table_insn) == JUMP_INSN
5348 && GET_CODE (PATTERN (table_insn)) == ADDR_VEC)
5350 rtx table = PATTERN (table_insn);
5352 if (offset >= 0
5353 && (offset / GET_MODE_SIZE (GET_MODE (table))
5354 < XVECLEN (table, 0)))
5355 return XVECEXP (table, 0,
5356 offset / GET_MODE_SIZE (GET_MODE (table)));
5358 if (table_insn && GET_CODE (table_insn) == JUMP_INSN
5359 && GET_CODE (PATTERN (table_insn)) == ADDR_DIFF_VEC)
5361 rtx table = PATTERN (table_insn);
5363 if (offset >= 0
5364 && (offset / GET_MODE_SIZE (GET_MODE (table))
5365 < XVECLEN (table, 1)))
5367 offset /= GET_MODE_SIZE (GET_MODE (table));
5368 new = gen_rtx_MINUS (Pmode, XVECEXP (table, 1, offset),
5369 XEXP (table, 0));
5371 if (GET_MODE (table) != Pmode)
5372 new = gen_rtx_TRUNCATE (GET_MODE (table), new);
5374 /* Indicate this is a constant. This isn't a
5375 valid form of CONST, but it will only be used
5376 to fold the next insns and then discarded, so
5377 it should be safe.
5379 Note this expression must be explicitly discarded
5380 by cse_insn, else it may end up in a REG_EQUAL note
5381 and "escape" to cause problems elsewhere. */
5382 return gen_rtx_CONST (GET_MODE (new), new);
5387 return x;
5390 case ASM_OPERANDS:
5391 for (i = XVECLEN (x, 3) - 1; i >= 0; i--)
5392 validate_change (insn, &XVECEXP (x, 3, i),
5393 fold_rtx (XVECEXP (x, 3, i), insn), 0);
5394 break;
5396 default:
5397 break;
5400 const_arg0 = 0;
5401 const_arg1 = 0;
5402 const_arg2 = 0;
5403 mode_arg0 = VOIDmode;
5405 /* Try folding our operands.
5406 Then see which ones have constant values known. */
5408 fmt = GET_RTX_FORMAT (code);
5409 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
5410 if (fmt[i] == 'e')
5412 rtx arg = XEXP (x, i);
5413 rtx folded_arg = arg, const_arg = 0;
5414 enum machine_mode mode_arg = GET_MODE (arg);
5415 rtx cheap_arg, expensive_arg;
5416 rtx replacements[2];
5417 int j;
5419 /* Most arguments are cheap, so handle them specially. */
5420 switch (GET_CODE (arg))
5422 case REG:
5423 /* This is the same as calling equiv_constant; it is duplicated
5424 here for speed. */
5425 if (REGNO_QTY_VALID_P (REGNO (arg))
5426 && qty_const[REG_QTY (REGNO (arg))] != 0
5427 && GET_CODE (qty_const[REG_QTY (REGNO (arg))]) != REG
5428 && GET_CODE (qty_const[REG_QTY (REGNO (arg))]) != PLUS)
5429 const_arg
5430 = gen_lowpart_if_possible (GET_MODE (arg),
5431 qty_const[REG_QTY (REGNO (arg))]);
5432 break;
5434 case CONST:
5435 case CONST_INT:
5436 case SYMBOL_REF:
5437 case LABEL_REF:
5438 case CONST_DOUBLE:
5439 const_arg = arg;
5440 break;
5442 #ifdef HAVE_cc0
5443 case CC0:
5444 folded_arg = prev_insn_cc0;
5445 mode_arg = prev_insn_cc0_mode;
5446 const_arg = equiv_constant (folded_arg);
5447 break;
5448 #endif
5450 default:
5451 folded_arg = fold_rtx (arg, insn);
5452 const_arg = equiv_constant (folded_arg);
5455 /* For the first three operands, see if the operand
5456 is constant or equivalent to a constant. */
5457 switch (i)
5459 case 0:
5460 folded_arg0 = folded_arg;
5461 const_arg0 = const_arg;
5462 mode_arg0 = mode_arg;
5463 break;
5464 case 1:
5465 folded_arg1 = folded_arg;
5466 const_arg1 = const_arg;
5467 break;
5468 case 2:
5469 const_arg2 = const_arg;
5470 break;
5473 /* Pick the least expensive of the folded argument and an
5474 equivalent constant argument. */
5475 if (const_arg == 0 || const_arg == folded_arg
5476 || COST (const_arg) > COST (folded_arg))
5477 cheap_arg = folded_arg, expensive_arg = const_arg;
5478 else
5479 cheap_arg = const_arg, expensive_arg = folded_arg;
5481 /* Try to replace the operand with the cheapest of the two
5482 possibilities. If it doesn't work and this is either of the first
5483 two operands of a commutative operation, try swapping them.
5484 If THAT fails, try the more expensive one, provided it is cheaper
5485 than what is already there. */
5487 if (cheap_arg == XEXP (x, i))
5488 continue;
5490 if (insn == 0 && ! copied)
5492 x = copy_rtx (x);
5493 copied = 1;
5496 replacements[0] = cheap_arg, replacements[1] = expensive_arg;
5497 for (j = 0;
5498 j < 2 && replacements[j]
5499 && COST (replacements[j]) < COST (XEXP (x, i));
5500 j++)
5502 if (validate_change (insn, &XEXP (x, i), replacements[j], 0))
5503 break;
5505 if (code == NE || code == EQ || GET_RTX_CLASS (code) == 'c')
5507 validate_change (insn, &XEXP (x, i), XEXP (x, 1 - i), 1);
5508 validate_change (insn, &XEXP (x, 1 - i), replacements[j], 1);
5510 if (apply_change_group ())
5512 /* Swap them back to be invalid so that this loop can
5513 continue and flag them to be swapped back later. */
5514 rtx tem;
5516 tem = XEXP (x, 0); XEXP (x, 0) = XEXP (x, 1);
5517 XEXP (x, 1) = tem;
5518 must_swap = 1;
5519 break;
5525 else
5527 if (fmt[i] == 'E')
5528 /* Don't try to fold inside of a vector of expressions.
5529 Doing nothing is harmless. */
5530 {;}
5533 /* If a commutative operation, place a constant integer as the second
5534 operand unless the first operand is also a constant integer. Otherwise,
5535 place any constant second unless the first operand is also a constant. */
5537 if (code == EQ || code == NE || GET_RTX_CLASS (code) == 'c')
5539 if (must_swap || (const_arg0
5540 && (const_arg1 == 0
5541 || (GET_CODE (const_arg0) == CONST_INT
5542 && GET_CODE (const_arg1) != CONST_INT))))
5544 register rtx tem = XEXP (x, 0);
5546 if (insn == 0 && ! copied)
5548 x = copy_rtx (x);
5549 copied = 1;
5552 validate_change (insn, &XEXP (x, 0), XEXP (x, 1), 1);
5553 validate_change (insn, &XEXP (x, 1), tem, 1);
5554 if (apply_change_group ())
5556 tem = const_arg0, const_arg0 = const_arg1, const_arg1 = tem;
5557 tem = folded_arg0, folded_arg0 = folded_arg1, folded_arg1 = tem;
5562 /* If X is an arithmetic operation, see if we can simplify it. */
5564 switch (GET_RTX_CLASS (code))
5566 case '1':
5568 int is_const = 0;
5570 /* We can't simplify extension ops unless we know the
5571 original mode. */
5572 if ((code == ZERO_EXTEND || code == SIGN_EXTEND)
5573 && mode_arg0 == VOIDmode)
5574 break;
5576 /* If we had a CONST, strip it off and put it back later if we
5577 fold. */
5578 if (const_arg0 != 0 && GET_CODE (const_arg0) == CONST)
5579 is_const = 1, const_arg0 = XEXP (const_arg0, 0);
5581 new = simplify_unary_operation (code, mode,
5582 const_arg0 ? const_arg0 : folded_arg0,
5583 mode_arg0);
5584 if (new != 0 && is_const)
5585 new = gen_rtx_CONST (mode, new);
5587 break;
5589 case '<':
5590 /* See what items are actually being compared and set FOLDED_ARG[01]
5591 to those values and CODE to the actual comparison code. If any are
5592 constant, set CONST_ARG0 and CONST_ARG1 appropriately. We needn't
5593 do anything if both operands are already known to be constant. */
5595 if (const_arg0 == 0 || const_arg1 == 0)
5597 struct table_elt *p0, *p1;
5598 rtx true = const_true_rtx, false = const0_rtx;
5599 enum machine_mode mode_arg1;
5601 #ifdef FLOAT_STORE_FLAG_VALUE
5602 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
5604 true = CONST_DOUBLE_FROM_REAL_VALUE (FLOAT_STORE_FLAG_VALUE,
5605 mode);
5606 false = CONST0_RTX (mode);
5608 #endif
5610 code = find_comparison_args (code, &folded_arg0, &folded_arg1,
5611 &mode_arg0, &mode_arg1);
5612 const_arg0 = equiv_constant (folded_arg0);
5613 const_arg1 = equiv_constant (folded_arg1);
5615 /* If the mode is VOIDmode or a MODE_CC mode, we don't know
5616 what kinds of things are being compared, so we can't do
5617 anything with this comparison. */
5619 if (mode_arg0 == VOIDmode || GET_MODE_CLASS (mode_arg0) == MODE_CC)
5620 break;
5622 /* If we do not now have two constants being compared, see
5623 if we can nevertheless deduce some things about the
5624 comparison. */
5625 if (const_arg0 == 0 || const_arg1 == 0)
5627 /* Is FOLDED_ARG0 frame-pointer plus a constant? Or
5628 non-explicit constant? These aren't zero, but we
5629 don't know their sign. */
5630 if (const_arg1 == const0_rtx
5631 && (NONZERO_BASE_PLUS_P (folded_arg0)
5632 #if 0 /* Sad to say, on sysvr4, #pragma weak can make a symbol address
5633 come out as 0. */
5634 || GET_CODE (folded_arg0) == SYMBOL_REF
5635 #endif
5636 || GET_CODE (folded_arg0) == LABEL_REF
5637 || GET_CODE (folded_arg0) == CONST))
5639 if (code == EQ)
5640 return false;
5641 else if (code == NE)
5642 return true;
5645 /* See if the two operands are the same. We don't do this
5646 for IEEE floating-point since we can't assume x == x
5647 since x might be a NaN. */
5649 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
5650 || ! FLOAT_MODE_P (mode_arg0) || flag_fast_math)
5651 && (folded_arg0 == folded_arg1
5652 || (GET_CODE (folded_arg0) == REG
5653 && GET_CODE (folded_arg1) == REG
5654 && (REG_QTY (REGNO (folded_arg0))
5655 == REG_QTY (REGNO (folded_arg1))))
5656 || ((p0 = lookup (folded_arg0,
5657 (safe_hash (folded_arg0, mode_arg0)
5658 % NBUCKETS), mode_arg0))
5659 && (p1 = lookup (folded_arg1,
5660 (safe_hash (folded_arg1, mode_arg0)
5661 % NBUCKETS), mode_arg0))
5662 && p0->first_same_value == p1->first_same_value)))
5663 return ((code == EQ || code == LE || code == GE
5664 || code == LEU || code == GEU)
5665 ? true : false);
5667 /* If FOLDED_ARG0 is a register, see if the comparison we are
5668 doing now is either the same as we did before or the reverse
5669 (we only check the reverse if not floating-point). */
5670 else if (GET_CODE (folded_arg0) == REG)
5672 int qty = REG_QTY (REGNO (folded_arg0));
5674 if (REGNO_QTY_VALID_P (REGNO (folded_arg0))
5675 && (comparison_dominates_p (qty_comparison_code[qty], code)
5676 || (comparison_dominates_p (qty_comparison_code[qty],
5677 reverse_condition (code))
5678 && ! FLOAT_MODE_P (mode_arg0)))
5679 && (rtx_equal_p (qty_comparison_const[qty], folded_arg1)
5680 || (const_arg1
5681 && rtx_equal_p (qty_comparison_const[qty],
5682 const_arg1))
5683 || (GET_CODE (folded_arg1) == REG
5684 && (REG_QTY (REGNO (folded_arg1))
5685 == qty_comparison_qty[qty]))))
5686 return (comparison_dominates_p (qty_comparison_code[qty],
5687 code)
5688 ? true : false);
5693 /* If we are comparing against zero, see if the first operand is
5694 equivalent to an IOR with a constant. If so, we may be able to
5695 determine the result of this comparison. */
5697 if (const_arg1 == const0_rtx)
5699 rtx y = lookup_as_function (folded_arg0, IOR);
5700 rtx inner_const;
5702 if (y != 0
5703 && (inner_const = equiv_constant (XEXP (y, 1))) != 0
5704 && GET_CODE (inner_const) == CONST_INT
5705 && INTVAL (inner_const) != 0)
5707 int sign_bitnum = GET_MODE_BITSIZE (mode_arg0) - 1;
5708 int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
5709 && (INTVAL (inner_const)
5710 & ((HOST_WIDE_INT) 1 << sign_bitnum)));
5711 rtx true = const_true_rtx, false = const0_rtx;
5713 #ifdef FLOAT_STORE_FLAG_VALUE
5714 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
5716 true = CONST_DOUBLE_FROM_REAL_VALUE (FLOAT_STORE_FLAG_VALUE,
5717 mode);
5718 false = CONST0_RTX (mode);
5720 #endif
5722 switch (code)
5724 case EQ:
5725 return false;
5726 case NE:
5727 return true;
5728 case LT: case LE:
5729 if (has_sign)
5730 return true;
5731 break;
5732 case GT: case GE:
5733 if (has_sign)
5734 return false;
5735 break;
5736 default:
5737 break;
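/* Editorial sketch behind the IOR test above (illustration only):
   x | c is nonzero whenever c is, which settles EQ/NE against zero
   outright; and if c contributes the sign bit, the value is known
   negative, settling LT/LE and GT/GE as well.  */
#if 0
static int
demo_ior_nonzero (int x)
{
  return (x | 4) != 0;		/* always 1: bit 2 is forced on */
}
#endif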
5742 new = simplify_relational_operation (code, mode_arg0,
5743 const_arg0 ? const_arg0 : folded_arg0,
5744 const_arg1 ? const_arg1 : folded_arg1);
5745 #ifdef FLOAT_STORE_FLAG_VALUE
5746 if (new != 0 && GET_MODE_CLASS (mode) == MODE_FLOAT)
5747 new = ((new == const0_rtx) ? CONST0_RTX (mode)
5748 : CONST_DOUBLE_FROM_REAL_VALUE (FLOAT_STORE_FLAG_VALUE, mode));
5749 #endif
5750 break;
5752 case '2':
5753 case 'c':
5754 switch (code)
5756 case PLUS:
5757 /* If the second operand is a LABEL_REF, see if the first is a MINUS
5758 with that LABEL_REF as its second operand. If so, the result is
5759 the first operand of that MINUS. This handles switches with an
5760 ADDR_DIFF_VEC table. */
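/* That is, (plus (minus X (label_ref L)) (label_ref L))
simplifies to X. */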
5761 if (const_arg1 && GET_CODE (const_arg1) == LABEL_REF)
5763 rtx y
5764 = GET_CODE (folded_arg0) == MINUS ? folded_arg0
5765 : lookup_as_function (folded_arg0, MINUS);
5767 if (y != 0 && GET_CODE (XEXP (y, 1)) == LABEL_REF
5768 && XEXP (XEXP (y, 1), 0) == XEXP (const_arg1, 0))
5769 return XEXP (y, 0);
5771 /* Now try for a CONST of a MINUS like the above. */
5772 if ((y = (GET_CODE (folded_arg0) == CONST ? folded_arg0
5773 : lookup_as_function (folded_arg0, CONST))) != 0
5774 && GET_CODE (XEXP (y, 0)) == MINUS
5775 && GET_CODE (XEXP (XEXP (y, 0), 1)) == LABEL_REF
5776 && XEXP (XEXP (XEXP (y, 0), 1), 0) == XEXP (const_arg1, 0))
5777 return XEXP (XEXP (y, 0), 0);
5780 /* Likewise if the operands are in the other order. */
5781 if (const_arg0 && GET_CODE (const_arg0) == LABEL_REF)
5783 rtx y
5784 = GET_CODE (folded_arg1) == MINUS ? folded_arg1
5785 : lookup_as_function (folded_arg1, MINUS);
5787 if (y != 0 && GET_CODE (XEXP (y, 1)) == LABEL_REF
5788 && XEXP (XEXP (y, 1), 0) == XEXP (const_arg0, 0))
5789 return XEXP (y, 0);
5791 /* Now try for a CONST of a MINUS like the above. */
5792 if ((y = (GET_CODE (folded_arg1) == CONST ? folded_arg1
5793 : lookup_as_function (folded_arg1, CONST))) != 0
5794 && GET_CODE (XEXP (y, 0)) == MINUS
5795 && GET_CODE (XEXP (XEXP (y, 0), 1)) == LABEL_REF
5796 && XEXP (XEXP (XEXP (y, 0), 1), 0) == XEXP (const_arg0, 0))
5797 return XEXP (XEXP (y, 0), 0);
5800 /* If second operand is a register equivalent to a negative
5801 CONST_INT, see if we can find a register equivalent to the
5802 positive constant. Make a MINUS if so. Don't do this for
5803 a non-negative constant since we might then alternate between
5804 choosing positive and negative constants. Having the positive
5805 constant previously-used is the more common case. Be sure
5806 the resulting constant is non-negative; if const_arg1 were
5807 the smallest negative number this would overflow: depending
5808 on the mode, this would either just be the same value (and
5809 hence not save anything) or be incorrect. */
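/* E.g., given (plus (reg 70) (reg 72)) where (reg 72) is known to
hold (const_int -4), we produce (minus (reg 70) (reg 71)) if some
(reg 71) is known to hold (const_int 4). */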
5810 if (const_arg1 != 0 && GET_CODE (const_arg1) == CONST_INT
5811 && INTVAL (const_arg1) < 0
5812 /* This used to test
5814 - INTVAL (const_arg1) >= 0
5816 But the Sun V5.0 compilers mis-compiled that test. So
5817 instead we test for the problematic value in a more direct
5818 manner and hope the Sun compilers get it correct. */
5819 && INTVAL (const_arg1) !=
5820 ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1))
5821 && GET_CODE (folded_arg1) == REG)
5823 rtx new_const = GEN_INT (- INTVAL (const_arg1));
5824 struct table_elt *p
5825 = lookup (new_const, safe_hash (new_const, mode) % NBUCKETS,
5826 mode);
5828 if (p)
5829 for (p = p->first_same_value; p; p = p->next_same_value)
5830 if (GET_CODE (p->exp) == REG)
5831 return cse_gen_binary (MINUS, mode, folded_arg0,
5832 canon_reg (p->exp, NULL_RTX));
5834 goto from_plus;
5836 case MINUS:
5837 /* If we have (MINUS Y C), see if Y is known to be (PLUS Z C2).
5838 If so, produce (PLUS Z C2-C). */
5839 if (const_arg1 != 0 && GET_CODE (const_arg1) == CONST_INT)
5841 rtx y = lookup_as_function (XEXP (x, 0), PLUS);
5842 if (y && GET_CODE (XEXP (y, 1)) == CONST_INT)
5843 return fold_rtx (plus_constant (copy_rtx (y),
5844 -INTVAL (const_arg1)),
5845 NULL_RTX);
5848 /* ... fall through ... */
5850 from_plus:
5851 case SMIN: case SMAX: case UMIN: case UMAX:
5852 case IOR: case AND: case XOR:
5853 case MULT: case DIV: case UDIV:
5854 case ASHIFT: case LSHIFTRT: case ASHIFTRT:
5855 /* If we have (<op> <reg> <const_int>) for an associative OP and REG
5856 is known to be of similar form, we may be able to replace the
5857 operation with a combined operation. This may eliminate the
5858 intermediate operation if every use is simplified in this way.
5859 Note that the similar optimization done by combine.c only works
5860 if the intermediate operation's result has only one reference. */
5862 if (GET_CODE (folded_arg0) == REG
5863 && const_arg1 && GET_CODE (const_arg1) == CONST_INT)
5865 int is_shift
5866 = (code == ASHIFT || code == ASHIFTRT || code == LSHIFTRT);
5867 rtx y = lookup_as_function (folded_arg0, code);
5868 rtx inner_const;
5869 enum rtx_code associate_code;
5870 rtx new_const;
5872 if (y == 0
5873 || 0 == (inner_const
5874 = equiv_constant (fold_rtx (XEXP (y, 1), 0)))
5875 || GET_CODE (inner_const) != CONST_INT
5876 /* If we have compiled a statement like
5877 "if (x == (x & mask1))", and now are looking at
5878 "x & mask2", we will have a case where the first operand
5879 of Y is the same as our first operand. Unless we detect
5880 this case, an infinite loop will result. */
5881 || XEXP (y, 0) == folded_arg0)
5882 break;
5884 /* Don't associate these operations if they are a PLUS with the
5885 same constant and it is a power of two. These might be doable
5886 with a pre- or post-increment. Similarly for two subtracts of
5887 identical powers of two with post decrement. */
5889 if (code == PLUS && INTVAL (const_arg1) == INTVAL (inner_const)
5890 && ((HAVE_PRE_INCREMENT
5891 && exact_log2 (INTVAL (const_arg1)) >= 0)
5892 || (HAVE_POST_INCREMENT
5893 && exact_log2 (INTVAL (const_arg1)) >= 0)
5894 || (HAVE_PRE_DECREMENT
5895 && exact_log2 (- INTVAL (const_arg1)) >= 0)
5896 || (HAVE_POST_DECREMENT
5897 && exact_log2 (- INTVAL (const_arg1)) >= 0)))
5898 break;
5900 /* Compute the code used to compose the constants. For example,
5901 A/C1/C2 is A/(C1 * C2), so if CODE == DIV, we want MULT. */
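/* Likewise two shifts of the same kind compose by adding their
counts: (ashiftrt (ashiftrt X C1) C2) is (ashiftrt X (plus C1 C2)),
so for the shift codes we want PLUS. */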
5903 associate_code
5904 = (code == MULT || code == DIV || code == UDIV ? MULT
5905 : is_shift || code == PLUS || code == MINUS ? PLUS : code);
5907 new_const = simplify_binary_operation (associate_code, mode,
5908 const_arg1, inner_const);
5910 if (new_const == 0)
5911 break;
5913 /* If we are associating shift operations, don't let this
5914 produce a shift of the size of the object or larger.
5915 This could occur when we follow a sign-extend by a right
5916 shift on a machine that does a sign-extend as a pair
5917 of shifts. */
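/* E.g., in SImode two ASHIFTRT counts of 16 would combine into a
shift by 32; for ASHIFTRT we can use 31 instead, which still
replicates the sign bit across the whole word. */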
5919 if (is_shift && GET_CODE (new_const) == CONST_INT
5920 && INTVAL (new_const) >= GET_MODE_BITSIZE (mode))
5922 /* As an exception, we can turn an ASHIFTRT of this
5923 form into a shift of the number of bits - 1. */
5924 if (code == ASHIFTRT)
5925 new_const = GEN_INT (GET_MODE_BITSIZE (mode) - 1);
5926 else
5927 break;
5930 y = copy_rtx (XEXP (y, 0));
5932 /* If Y contains our first operand (the most common way this
5933 can happen is if Y is a MEM), we would go into an infinite
5934 loop if we tried to fold it. So don't in that case. */
5936 if (! reg_mentioned_p (folded_arg0, y))
5937 y = fold_rtx (y, insn);
5939 return cse_gen_binary (code, mode, y, new_const);
5941 break;
5943 default:
5944 break;
5947 new = simplify_binary_operation (code, mode,
5948 const_arg0 ? const_arg0 : folded_arg0,
5949 const_arg1 ? const_arg1 : folded_arg1);
5950 break;
5952 case 'o':
5953 /* (lo_sum (high X) X) is simply X. */
5954 if (code == LO_SUM && const_arg0 != 0
5955 && GET_CODE (const_arg0) == HIGH
5956 && rtx_equal_p (XEXP (const_arg0, 0), const_arg1))
5957 return const_arg1;
5958 break;
5960 case '3':
5961 case 'b':
5962 new = simplify_ternary_operation (code, mode, mode_arg0,
5963 const_arg0 ? const_arg0 : folded_arg0,
5964 const_arg1 ? const_arg1 : folded_arg1,
5965 const_arg2 ? const_arg2 : XEXP (x, 2));
5966 break;
5968 case 'x':
5969 /* Always eliminate CONSTANT_P_RTX at this stage. */
5970 if (code == CONSTANT_P_RTX)
5971 return (const_arg0 ? const1_rtx : const0_rtx);
5972 break;
5975 return new ? new : x;
5978 /* Return a constant value currently equivalent to X.
5979 Return 0 if we don't know one. */
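/* E.g., if (reg 70)'s quantity records (const_int 4) in qty_const,
we return (const_int 4), adjusted to the register's mode by
gen_lowpart_if_possible when necessary. */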
5981 static rtx
5982 equiv_constant (x)
5983 rtx x;
5985 if (GET_CODE (x) == REG
5986 && REGNO_QTY_VALID_P (REGNO (x))
5987 && qty_const[REG_QTY (REGNO (x))])
5988 x = gen_lowpart_if_possible (GET_MODE (x), qty_const[REG_QTY (REGNO (x))]);
5990 if (x == 0 || CONSTANT_P (x))
5991 return x;
5993 /* If X is a MEM, try to fold it outside the context of any insn to see if
5994 it might be equivalent to a constant. That handles the case where it
5995 is a constant-pool reference. Then try to look it up in the hash table
5996 in case it is something whose value we have seen before. */
5998 if (GET_CODE (x) == MEM)
6000 struct table_elt *elt;
6002 x = fold_rtx (x, NULL_RTX);
6003 if (CONSTANT_P (x))
6004 return x;
6006 elt = lookup (x, safe_hash (x, GET_MODE (x)) % NBUCKETS, GET_MODE (x));
6007 if (elt == 0)
6008 return 0;
6010 for (elt = elt->first_same_value; elt; elt = elt->next_same_value)
6011 if (elt->is_const && CONSTANT_P (elt->exp))
6012 return elt->exp;
6015 return 0;
6018 /* Assuming that X is an rtx (e.g., MEM, REG or SUBREG) for a fixed-point
6019 number, return an rtx (MEM, SUBREG, or CONST_INT) that refers to the
6020 least-significant part of X.
6021 MODE specifies how big a part of X to return.
6023 If the requested operation cannot be done, 0 is returned.
6025 This is similar to gen_lowpart in emit-rtl.c. */
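/* E.g., the SImode lowpart of a DImode MEM uses the same address
on a little-endian target but, assuming 4-byte words, is offset
by 4 bytes when WORDS_BIG_ENDIAN, as computed below. */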
6027 rtx
6028 gen_lowpart_if_possible (mode, x)
6029 enum machine_mode mode;
6030 register rtx x;
6032 rtx result = gen_lowpart_common (mode, x);
6034 if (result)
6035 return result;
6036 else if (GET_CODE (x) == MEM)
6038 /* This is the only other case we handle. */
6039 register int offset = 0;
6040 rtx new;
6042 if (WORDS_BIG_ENDIAN)
6043 offset = (MAX (GET_MODE_SIZE (GET_MODE (x)), UNITS_PER_WORD)
6044 - MAX (GET_MODE_SIZE (mode), UNITS_PER_WORD));
6045 if (BYTES_BIG_ENDIAN)
6046 /* Adjust the address so that the address-after-the-data is
6047 unchanged. */
6048 offset -= (MIN (UNITS_PER_WORD, GET_MODE_SIZE (mode))
6049 - MIN (UNITS_PER_WORD, GET_MODE_SIZE (GET_MODE (x))));
6050 new = gen_rtx_MEM (mode, plus_constant (XEXP (x, 0), offset));
6051 if (! memory_address_p (mode, XEXP (new, 0)))
6052 return 0;
6053 RTX_UNCHANGING_P (new) = RTX_UNCHANGING_P (x);
6054 MEM_COPY_ATTRIBUTES (new, x);
6055 return new;
6057 else
6058 return 0;
6061 /* Given INSN, a jump insn, TAKEN indicates if we are following the "taken"
6062 branch. It will be zero if not.
6064 In certain cases, this can cause us to add an equivalence. For example,
6065 if we are following the taken case of
6066 if (i == 2)
6067 we can add the fact that `i' and '2' are now equivalent.
6069 In any case, we can record that this comparison was passed. If the same
6070 comparison is seen later, we will know its value. */
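/* The jump pattern is (set (pc) (if_then_else COND A B)), where
one arm is (label_ref L) and the other is (pc). COND was true
exactly when control followed arm A: if we took the branch, COND
held iff B is (pc); on fall-through, iff A is (pc). That is what
the pc_rtx tests below determine. */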
6072 static void
6073 record_jump_equiv (insn, taken)
6074 rtx insn;
6075 int taken;
6077 int cond_known_true;
6078 rtx op0, op1;
6079 enum machine_mode mode, mode0, mode1;
6080 int reversed_nonequality = 0;
6081 enum rtx_code code;
6083 /* Ensure this is the right kind of insn. */
6084 if (! condjump_p (insn) || simplejump_p (insn))
6085 return;
6087 /* See if this jump condition is known true or false. */
6088 if (taken)
6089 cond_known_true = (XEXP (SET_SRC (PATTERN (insn)), 2) == pc_rtx);
6090 else
6091 cond_known_true = (XEXP (SET_SRC (PATTERN (insn)), 1) == pc_rtx);
6093 /* Get the type of comparison being done and the operands being compared.
6094 If we had to reverse a non-equality condition, record that fact so we
6095 know that it isn't valid for floating-point. */
6096 code = GET_CODE (XEXP (SET_SRC (PATTERN (insn)), 0));
6097 op0 = fold_rtx (XEXP (XEXP (SET_SRC (PATTERN (insn)), 0), 0), insn);
6098 op1 = fold_rtx (XEXP (XEXP (SET_SRC (PATTERN (insn)), 0), 1), insn);
6100 code = find_comparison_args (code, &op0, &op1, &mode0, &mode1);
6101 if (! cond_known_true)
6103 reversed_nonequality = (code != EQ && code != NE);
6104 code = reverse_condition (code);
6107 /* The mode is the mode of the non-constant. */
6108 mode = mode0;
6109 if (mode1 != VOIDmode)
6110 mode = mode1;
6112 record_jump_cond (code, mode, op0, op1, reversed_nonequality);
6115 /* We know that comparison CODE applied to OP0 and OP1 in MODE is true.
6116 REVERSED_NONEQUALITY is nonzero if CODE had to be swapped.
6117 Make any useful entries we can with that information. Called from
6118 above function and called recursively. */
6120 static void
6121 record_jump_cond (code, mode, op0, op1, reversed_nonequality)
6122 enum rtx_code code;
6123 enum machine_mode mode;
6124 rtx op0, op1;
6125 int reversed_nonequality;
6127 unsigned op0_hash, op1_hash;
6128 int op0_in_memory, op0_in_struct, op1_in_memory, op1_in_struct;
6129 struct table_elt *op0_elt, *op1_elt;
6131 /* If OP0 and OP1 are known equal, and either is a paradoxical SUBREG,
6132 we know that they are also equal in the smaller mode (this is also
6133 true for all smaller modes whether or not there is a SUBREG, but
6134 is not worth testing for with no SUBREG). */
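/* E.g., knowing (subreg:DI (reg:SI 70) 0) == (reg:DI 80) lets us
also record (reg:SI 70) == (the SImode lowpart of (reg:DI 80)). */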
6136 /* Note that GET_MODE (op0) may not equal MODE. */
6137 if (code == EQ && GET_CODE (op0) == SUBREG
6138 && (GET_MODE_SIZE (GET_MODE (op0))
6139 > GET_MODE_SIZE (GET_MODE (SUBREG_REG (op0)))))
6141 enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op0));
6142 rtx tem = gen_lowpart_if_possible (inner_mode, op1);
6144 record_jump_cond (code, mode, SUBREG_REG (op0),
6145 tem ? tem : gen_rtx_SUBREG (inner_mode, op1, 0),
6146 reversed_nonequality);
6149 if (code == EQ && GET_CODE (op1) == SUBREG
6150 && (GET_MODE_SIZE (GET_MODE (op1))
6151 > GET_MODE_SIZE (GET_MODE (SUBREG_REG (op1)))))
6153 enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op1));
6154 rtx tem = gen_lowpart_if_possible (inner_mode, op0);
6156 record_jump_cond (code, mode, SUBREG_REG (op1),
6157 tem ? tem : gen_rtx_SUBREG (inner_mode, op0, 0),
6158 reversed_nonequality);
6161 /* Similarly, if this is an NE comparison, and either is a SUBREG
6162 making a smaller mode, we know the whole thing is also NE. */
6164 /* Note that GET_MODE (op0) may not equal MODE;
6165 if we test MODE instead, we can get an infinite recursion
6166 alternating between two modes each wider than MODE. */
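/* E.g., (subreg:QI (reg:SI 70) 0) != (reg:QI 71) lets us record
that (reg:SI 70) differs from any SImode value whose low byte
equals (reg:QI 71); equal wholes would have equal low parts. */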
6168 if (code == NE && GET_CODE (op0) == SUBREG
6169 && subreg_lowpart_p (op0)
6170 && (GET_MODE_SIZE (GET_MODE (op0))
6171 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op0)))))
6173 enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op0));
6174 rtx tem = gen_lowpart_if_possible (inner_mode, op1);
6176 record_jump_cond (code, mode, SUBREG_REG (op0),
6177 tem ? tem : gen_rtx_SUBREG (inner_mode, op1, 0),
6178 reversed_nonequality);
6181 if (code == NE && GET_CODE (op1) == SUBREG
6182 && subreg_lowpart_p (op1)
6183 && (GET_MODE_SIZE (GET_MODE (op1))
6184 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op1)))))
6186 enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op1));
6187 rtx tem = gen_lowpart_if_possible (inner_mode, op0);
6189 record_jump_cond (code, mode, SUBREG_REG (op1),
6190 tem ? tem : gen_rtx_SUBREG (inner_mode, op0, 0),
6191 reversed_nonequality);
6194 /* Hash both operands. */
6196 do_not_record = 0;
6197 hash_arg_in_memory = 0;
6198 hash_arg_in_struct = 0;
6199 op0_hash = HASH (op0, mode);
6200 op0_in_memory = hash_arg_in_memory;
6201 op0_in_struct = hash_arg_in_struct;
6203 if (do_not_record)
6204 return;
6206 do_not_record = 0;
6207 hash_arg_in_memory = 0;
6208 hash_arg_in_struct = 0;
6209 op1_hash = HASH (op1, mode);
6210 op1_in_memory = hash_arg_in_memory;
6211 op1_in_struct = hash_arg_in_struct;
6213 if (do_not_record)
6214 return;
6216 /* Look up both operands. */
6217 op0_elt = lookup (op0, op0_hash, mode);
6218 op1_elt = lookup (op1, op1_hash, mode);
6220 /* If both operands are already equivalent or if they are not in the
6221 table but are identical, do nothing. */
6222 if ((op0_elt != 0 && op1_elt != 0
6223 && op0_elt->first_same_value == op1_elt->first_same_value)
6224 || op0 == op1 || rtx_equal_p (op0, op1))
6225 return;
6227 /* If we aren't setting two things equal all we can do is save this
6228 comparison. Similarly if this is floating-point. In the latter
6229 case, OP1 might be zero and both -0.0 and 0.0 are equal to it.
6230 If we record the equality, we might inadvertently delete code
6231 whose intent was to change -0 to +0. */
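/* E.g., after a taken (gt (reg 70) (const_int 5)) branch we record
GT and (const_int 5) for reg 70's quantity; fold_rtx can then use
comparison_dominates_p to fold a later (ge (reg 70) (const_int 5))
to true. */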
6233 if (code != EQ || FLOAT_MODE_P (GET_MODE (op0)))
6235 /* If we reversed a floating-point comparison, if OP0 is not a
6236 register, or if OP1 is neither a register nor a constant, we can't
6237 do anything. */
6239 if (GET_CODE (op1) != REG)
6240 op1 = equiv_constant (op1);
6242 if ((reversed_nonequality && FLOAT_MODE_P (mode))
6243 || GET_CODE (op0) != REG || op1 == 0)
6244 return;
6246 /* Put OP0 in the hash table if it isn't already. This gives it a
6247 new quantity number. */
6248 if (op0_elt == 0)
6250 if (insert_regs (op0, NULL_PTR, 0))
6252 rehash_using_reg (op0);
6253 op0_hash = HASH (op0, mode);
6255 /* If OP0 is contained in OP1, this changes its hash code
6256 as well. Faster to rehash than to check, except
6257 for the simple case of a constant. */
6258 if (! CONSTANT_P (op1))
6259 op1_hash = HASH (op1, mode);
6262 op0_elt = insert (op0, NULL_PTR, op0_hash, mode);
6263 op0_elt->in_memory = op0_in_memory;
6264 op0_elt->in_struct = op0_in_struct;
6267 qty_comparison_code[REG_QTY (REGNO (op0))] = code;
6268 if (GET_CODE (op1) == REG)
6270 /* Look it up again--in case op0 and op1 are the same. */
6271 op1_elt = lookup (op1, op1_hash, mode);
6273 /* Put OP1 in the hash table so it gets a new quantity number. */
6274 if (op1_elt == 0)
6276 if (insert_regs (op1, NULL_PTR, 0))
6278 rehash_using_reg (op1);
6279 op1_hash = HASH (op1, mode);
6282 op1_elt = insert (op1, NULL_PTR, op1_hash, mode);
6283 op1_elt->in_memory = op1_in_memory;
6284 op1_elt->in_struct = op1_in_struct;
6287 qty_comparison_qty[REG_QTY (REGNO (op0))] = REG_QTY (REGNO (op1));
6288 qty_comparison_const[REG_QTY (REGNO (op0))] = 0;
6290 else
6292 qty_comparison_qty[REG_QTY (REGNO (op0))] = -1;
6293 qty_comparison_const[REG_QTY (REGNO (op0))] = op1;
6296 return;
6299 /* If either side is still missing an equivalence, make it now,
6300 then merge the equivalences. */
6302 if (op0_elt == 0)
6304 if (insert_regs (op0, NULL_PTR, 0))
6306 rehash_using_reg (op0);
6307 op0_hash = HASH (op0, mode);
6310 op0_elt = insert (op0, NULL_PTR, op0_hash, mode);
6311 op0_elt->in_memory = op0_in_memory;
6312 op0_elt->in_struct = op0_in_struct;
6315 if (op1_elt == 0)
6317 if (insert_regs (op1, NULL_PTR, 0))
6319 rehash_using_reg (op1);
6320 op1_hash = HASH (op1, mode);
6323 op1_elt = insert (op1, NULL_PTR, op1_hash, mode);
6324 op1_elt->in_memory = op1_in_memory;
6325 op1_elt->in_struct = op1_in_struct;
6328 merge_equiv_classes (op0_elt, op1_elt);
6329 last_jump_equiv_class = op0_elt;
6332 /* CSE processing for one instruction.
6333 First simplify sources and addresses of all assignments
6334 in the instruction, using previously-computed equivalent values.
6335 Then install the new sources and destinations in the table
6336 of available values.
6338 If LIBCALL_INSN is nonzero, don't record any equivalence made in
6339 the insn. It means that INSN is inside a libcall block. In this
6340 case LIBCALL_INSN is the corresponding insn with REG_LIBCALL. */
6342 /* Data on one SET contained in the instruction. */
6344 struct set
6346 /* The SET rtx itself. */
6347 rtx rtl;
6348 /* The SET_SRC of the rtx (the original value, if it is changing). */
6349 rtx src;
6350 /* The hash-table element for the SET_SRC of the SET. */
6351 struct table_elt *src_elt;
6352 /* Hash value for the SET_SRC. */
6353 unsigned src_hash;
6354 /* Hash value for the SET_DEST. */
6355 unsigned dest_hash;
6356 /* The SET_DEST, with SUBREG, etc., stripped. */
6357 rtx inner_dest;
6358 /* Place where the pointer to the INNER_DEST was found. */
6359 rtx *inner_dest_loc;
6360 /* Nonzero if the SET_SRC is in memory. */
6361 char src_in_memory;
6362 /* Nonzero if the SET_SRC is in a structure. */
6363 char src_in_struct;
6364 /* Nonzero if the SET_SRC contains something
6365 whose value cannot be predicted and understood. */
6366 char src_volatile;
6367 /* Original machine mode, in case it becomes a CONST_INT. */
6368 enum machine_mode mode;
6369 /* A constant equivalent for SET_SRC, if any. */
6370 rtx src_const;
6371 /* Hash value of constant equivalent for SET_SRC. */
6372 unsigned src_const_hash;
6373 /* Table entry for constant equivalent for SET_SRC, if any. */
6374 struct table_elt *src_const_elt;
6377 static void
6378 cse_insn (insn, libcall_insn)
6379 rtx insn;
6380 rtx libcall_insn;
6382 register rtx x = PATTERN (insn);
6383 register int i;
6384 rtx tem;
6385 register int n_sets = 0;
6387 #ifdef HAVE_cc0
6388 /* Records what this insn does to set CC0. */
6389 rtx this_insn_cc0 = 0;
6390 enum machine_mode this_insn_cc0_mode = VOIDmode;
6391 #endif
6393 rtx src_eqv = 0;
6394 struct table_elt *src_eqv_elt = 0;
6395 int src_eqv_volatile = 0;
6396 int src_eqv_in_memory = 0;
6397 int src_eqv_in_struct = 0;
6398 unsigned src_eqv_hash = 0;
6400 struct set *sets = NULL_PTR;
6402 this_insn = insn;
6404 /* Find all the SETs and CLOBBERs in this instruction.
6405 Record all the SETs in the array `set' and count them.
6406 Also determine whether there is a CLOBBER that invalidates
6407 all memory references, or all references at varying addresses. */
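/* A CALL_INSN may also carry (clobber (mem ...)) entries in its
CALL_INSN_FUNCTION_USAGE list for memory the callee may modify;
invalidate those targets first. */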
6409 if (GET_CODE (insn) == CALL_INSN)
6411 for (tem = CALL_INSN_FUNCTION_USAGE (insn); tem; tem = XEXP (tem, 1))
6412 if (GET_CODE (XEXP (tem, 0)) == CLOBBER)
6413 invalidate (SET_DEST (XEXP (tem, 0)), VOIDmode);
6416 if (GET_CODE (x) == SET)
6418 sets = (struct set *) alloca (sizeof (struct set));
6419 sets[0].rtl = x;
6421 /* Ignore SETs that are unconditional jumps.
6422 They never need cse processing, so this does not hurt.
6423 The reason is not efficiency but rather
6424 so that we can test at the end for instructions
6425 that have been simplified to unconditional jumps
6426 and not be misled by unchanged instructions
6427 that were unconditional jumps to begin with. */
6428 if (SET_DEST (x) == pc_rtx
6429 && GET_CODE (SET_SRC (x)) == LABEL_REF)
6430 ;
6432 /* Don't count call-insns, (set (reg 0) (call ...)), as a set.
6433 The hard function value register is used only once, to copy to
6434 someplace else, so it isn't worth cse'ing (and on 80386 is unsafe)!
6435 Ensure we invalidate the destination register. On the 80386 no
6436 other code would invalidate it since it is a fixed_reg.
6437 We need not check the return of apply_change_group; see canon_reg. */
6439 else if (GET_CODE (SET_SRC (x)) == CALL)
6441 canon_reg (SET_SRC (x), insn);
6442 apply_change_group ();
6443 fold_rtx (SET_SRC (x), insn);
6444 invalidate (SET_DEST (x), VOIDmode);
6446 else
6447 n_sets = 1;
6449 else if (GET_CODE (x) == PARALLEL)
6451 register int lim = XVECLEN (x, 0);
6453 sets = (struct set *) alloca (lim * sizeof (struct set));
6455 /* Find all regs explicitly clobbered in this insn,
6456 and ensure they are not replaced with any other regs
6457 elsewhere in this insn.
6458 When a reg that is clobbered is also used for input,
6459 we should presume that that is for a reason,
6460 and we should not substitute some other register
6461 which is not supposed to be clobbered.
6462 Therefore, this loop cannot be merged into the one below
6463 because a CALL may precede a CLOBBER and refer to the
6464 value clobbered. We must not let a canonicalization do
6465 anything in that case. */
6466 for (i = 0; i < lim; i++)
6468 register rtx y = XVECEXP (x, 0, i);
6469 if (GET_CODE (y) == CLOBBER)
6471 rtx clobbered = XEXP (y, 0);
6473 if (GET_CODE (clobbered) == REG
6474 || GET_CODE (clobbered) == SUBREG)
6475 invalidate (clobbered, VOIDmode);
6476 else if (GET_CODE (clobbered) == STRICT_LOW_PART
6477 || GET_CODE (clobbered) == ZERO_EXTRACT)
6478 invalidate (XEXP (clobbered, 0), GET_MODE (clobbered));
6482 for (i = 0; i < lim; i++)
6484 register rtx y = XVECEXP (x, 0, i);
6485 if (GET_CODE (y) == SET)
6487 /* As above, we ignore unconditional jumps and call-insns and
6488 ignore the result of apply_change_group. */
6489 if (GET_CODE (SET_SRC (y)) == CALL)
6491 canon_reg (SET_SRC (y), insn);
6492 apply_change_group ();
6493 fold_rtx (SET_SRC (y), insn);
6494 invalidate (SET_DEST (y), VOIDmode);
6496 else if (SET_DEST (y) == pc_rtx
6497 && GET_CODE (SET_SRC (y)) == LABEL_REF)
6498 ;
6499 else
6500 sets[n_sets++].rtl = y;
6502 else if (GET_CODE (y) == CLOBBER)
6504 /* If we clobber memory, canon the address.
6505 This does nothing when a register is clobbered
6506 because we have already invalidated the reg. */
6507 if (GET_CODE (XEXP (y, 0)) == MEM)
6508 canon_reg (XEXP (y, 0), NULL_RTX);
6510 else if (GET_CODE (y) == USE
6511 && ! (GET_CODE (XEXP (y, 0)) == REG
6512 && REGNO (XEXP (y, 0)) < FIRST_PSEUDO_REGISTER))
6513 canon_reg (y, NULL_RTX);
6514 else if (GET_CODE (y) == CALL)
6516 /* The result of apply_change_group can be ignored; see
6517 canon_reg. */
6518 canon_reg (y, insn);
6519 apply_change_group ();
6520 fold_rtx (y, insn);
6524 else if (GET_CODE (x) == CLOBBER)
6526 if (GET_CODE (XEXP (x, 0)) == MEM)
6527 canon_reg (XEXP (x, 0), NULL_RTX);
6530 /* Canonicalize a USE of a pseudo register or memory location. */
6531 else if (GET_CODE (x) == USE
6532 && ! (GET_CODE (XEXP (x, 0)) == REG
6533 && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER))
6534 canon_reg (XEXP (x, 0), NULL_RTX);
6535 else if (GET_CODE (x) == CALL)
6537 /* The result of apply_change_group can be ignored; see canon_reg. */
6538 canon_reg (x, insn);
6539 apply_change_group ();
6540 fold_rtx (x, insn);
6543 /* Store the equivalent value in SRC_EQV, if different, or if the DEST
6544 is a STRICT_LOW_PART. The latter condition is necessary because SRC_EQV
6545 is handled specially for this case, and if it isn't set, then there will
6546 be no equivalence for the destination. */
6547 if (n_sets == 1 && REG_NOTES (insn) != 0
6548 && (tem = find_reg_note (insn, REG_EQUAL, NULL_RTX)) != 0
6549 && (! rtx_equal_p (XEXP (tem, 0), SET_SRC (sets[0].rtl))
6550 || GET_CODE (SET_DEST (sets[0].rtl)) == STRICT_LOW_PART))
6551 src_eqv = canon_reg (XEXP (tem, 0), NULL_RTX);
6553 /* Canonicalize sources and addresses of destinations.
6554 We do this in a separate pass to avoid problems when a MATCH_DUP is
6555 present in the insn pattern. In that case, we want to ensure that
6556 we don't break the duplicate nature of the pattern. So we will replace
6557 both operands at the same time. Otherwise, we would fail to find an
6558 equivalent substitution in the loop calling validate_change below.
6560 We used to suppress canonicalization of DEST if it appears in SRC,
6561 but we don't do this any more. */
6563 for (i = 0; i < n_sets; i++)
6565 rtx dest = SET_DEST (sets[i].rtl);
6566 rtx src = SET_SRC (sets[i].rtl);
6567 rtx new = canon_reg (src, insn);
6568 int insn_code;
6570 if ((GET_CODE (new) == REG && GET_CODE (src) == REG
6571 && ((REGNO (new) < FIRST_PSEUDO_REGISTER)
6572 != (REGNO (src) < FIRST_PSEUDO_REGISTER)))
6573 || (insn_code = recog_memoized (insn)) < 0
6574 || insn_data[insn_code].n_dups > 0)
6575 validate_change (insn, &SET_SRC (sets[i].rtl), new, 1);
6576 else
6577 SET_SRC (sets[i].rtl) = new;
6579 if (GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SIGN_EXTRACT)
6581 validate_change (insn, &XEXP (dest, 1),
6582 canon_reg (XEXP (dest, 1), insn), 1);
6583 validate_change (insn, &XEXP (dest, 2),
6584 canon_reg (XEXP (dest, 2), insn), 1);
6587 while (GET_CODE (dest) == SUBREG || GET_CODE (dest) == STRICT_LOW_PART
6588 || GET_CODE (dest) == ZERO_EXTRACT
6589 || GET_CODE (dest) == SIGN_EXTRACT)
6590 dest = XEXP (dest, 0);
6592 if (GET_CODE (dest) == MEM)
6593 canon_reg (dest, insn);
6596 /* Now that we have done all the replacements, we can apply the change
6597 group and see if they all work. Note that this will cause some
6598 canonicalizations that would have worked individually not to be applied
6599 because some other canonicalization didn't work, but this should not
6600 occur often.
6602 The result of apply_change_group can be ignored; see canon_reg. */
6604 apply_change_group ();
6606 /* Set sets[i].src_elt to the class each source belongs to.
6607 Detect assignments from or to volatile things
6608 and set set[i] to zero so they will be ignored
6609 in the rest of this function.
6611 Nothing in this loop changes the hash table or the register chains. */
6613 for (i = 0; i < n_sets; i++)
6615 register rtx src, dest;
6616 register rtx src_folded;
6617 register struct table_elt *elt = 0, *p;
6618 enum machine_mode mode;
6619 rtx src_eqv_here;
6620 rtx src_const = 0;
6621 rtx src_related = 0;
6622 struct table_elt *src_const_elt = 0;
6623 int src_cost = 10000, src_eqv_cost = 10000, src_folded_cost = 10000;
6624 int src_related_cost = 10000, src_elt_cost = 10000;
6625 /* Set non-zero if we need to call force_const_mem on the
6626 contents of src_folded before using it. */
6627 int src_folded_force_flag = 0;
6629 dest = SET_DEST (sets[i].rtl);
6630 src = SET_SRC (sets[i].rtl);
6632 /* If SRC is a constant that has no machine mode,
6633 hash it with the destination's machine mode.
6634 This way we can keep different modes separate. */
6636 mode = GET_MODE (src) == VOIDmode ? GET_MODE (dest) : GET_MODE (src);
6637 sets[i].mode = mode;
6639 if (src_eqv)
6641 enum machine_mode eqvmode = mode;
6642 if (GET_CODE (dest) == STRICT_LOW_PART)
6643 eqvmode = GET_MODE (SUBREG_REG (XEXP (dest, 0)));
6644 do_not_record = 0;
6645 hash_arg_in_memory = 0;
6646 hash_arg_in_struct = 0;
6647 src_eqv = fold_rtx (src_eqv, insn);
6648 src_eqv_hash = HASH (src_eqv, eqvmode);
6650 /* Find the equivalence class for the equivalent expression. */
6652 if (!do_not_record)
6653 src_eqv_elt = lookup (src_eqv, src_eqv_hash, eqvmode);
6655 src_eqv_volatile = do_not_record;
6656 src_eqv_in_memory = hash_arg_in_memory;
6657 src_eqv_in_struct = hash_arg_in_struct;
6660 /* If this is a STRICT_LOW_PART assignment, src_eqv corresponds to the
6661 value of the INNER register, not the destination. So it is not
6662 a valid substitution for the source. But save it for later. */
6663 if (GET_CODE (dest) == STRICT_LOW_PART)
6664 src_eqv_here = 0;
6665 else
6666 src_eqv_here = src_eqv;
6668 /* Simplify any foldable subexpressions in SRC. Then get the fully-
6669 simplified result, which may not necessarily be valid. */
6670 src_folded = fold_rtx (src, insn);
6672 #if 0
6673 /* ??? This caused bad code to be generated for the m68k port with -O2.
6674 Suppose src is (CONST_INT -1), and that after truncation src_folded
6675 is (CONST_INT 3). Suppose src_folded is then used for src_const.
6676 At the end we will add src and src_const to the same equivalence
6677 class. We now have 3 and -1 on the same equivalence class. This
6678 causes later instructions to be mis-optimized. */
6679 /* If storing a constant in a bitfield, pre-truncate the constant
6680 so we will be able to record it later. */
6681 if (GET_CODE (SET_DEST (sets[i].rtl)) == ZERO_EXTRACT
6682 || GET_CODE (SET_DEST (sets[i].rtl)) == SIGN_EXTRACT)
6684 rtx width = XEXP (SET_DEST (sets[i].rtl), 1);
6686 if (GET_CODE (src) == CONST_INT
6687 && GET_CODE (width) == CONST_INT
6688 && INTVAL (width) < HOST_BITS_PER_WIDE_INT
6689 && (INTVAL (src) & ((HOST_WIDE_INT) (-1) << INTVAL (width))))
6690 src_folded
6691 = GEN_INT (INTVAL (src) & (((HOST_WIDE_INT) 1
6692 << INTVAL (width)) - 1));
6694 #endif
6696 /* Compute SRC's hash code, and also notice if it
6697 should not be recorded at all. In that case,
6698 prevent any further processing of this assignment. */
6699 do_not_record = 0;
6700 hash_arg_in_memory = 0;
6701 hash_arg_in_struct = 0;
6703 sets[i].src = src;
6704 sets[i].src_hash = HASH (src, mode);
6705 sets[i].src_volatile = do_not_record;
6706 sets[i].src_in_memory = hash_arg_in_memory;
6707 sets[i].src_in_struct = hash_arg_in_struct;
6709 /* If SRC is a MEM, there is a REG_EQUIV note for SRC, and DEST is
6710 a pseudo that is set more than once, do not record SRC. Using
6711 SRC as a replacement for anything else will be incorrect in that
6712 situation. Note that this usually occurs only for stack slots,
6713 in which case all the RTL would be referring to SRC, so we don't
6714 lose any optimization opportunities by not having SRC in the
6715 hash table. */
6717 if (GET_CODE (src) == MEM
6718 && find_reg_note (insn, REG_EQUIV, src) != 0
6719 && GET_CODE (dest) == REG
6720 && REGNO (dest) >= FIRST_PSEUDO_REGISTER
6721 && REG_N_SETS (REGNO (dest)) != 1)
6722 sets[i].src_volatile = 1;
6724 #if 0
6725 /* It is no longer clear why we used to do this, but it doesn't
6726 appear to still be needed. So let's try without it since this
6727 code hurts cse'ing widened ops. */
6728 /* If source is a perverse subreg (such as QI treated as an SI),
6729 treat it as volatile. It may do the work of an SI in one context
6730 where the extra bits are not being used, but cannot replace an SI
6731 in general. */
6732 if (GET_CODE (src) == SUBREG
6733 && (GET_MODE_SIZE (GET_MODE (src))
6734 > GET_MODE_SIZE (GET_MODE (SUBREG_REG (src)))))
6735 sets[i].src_volatile = 1;
6736 #endif
6738 /* Locate all possible equivalent forms for SRC. Try to replace
6739 SRC in the insn with each cheaper equivalent.
6741 We have the following types of equivalents: SRC itself, a folded
6742 version, a value given in a REG_EQUAL note, or a value related
6743 to a constant.
6745 Each of these equivalents may be part of an additional class
6746 of equivalents (if more than one is in the table, they must be in
6747 the same class; we check for this).
6749 If the source is volatile, we don't do any table lookups.
6751 We note any constant equivalent for possible later use in a
6752 REG_NOTE. */
6754 if (!sets[i].src_volatile)
6755 elt = lookup (src, sets[i].src_hash, mode);
6757 sets[i].src_elt = elt;
6759 if (elt && src_eqv_here && src_eqv_elt)
6761 if (elt->first_same_value != src_eqv_elt->first_same_value)
6763 /* The REG_EQUAL is indicating that two formerly distinct
6764 classes are now equivalent. So merge them. */
6765 merge_equiv_classes (elt, src_eqv_elt);
6766 src_eqv_hash = HASH (src_eqv, elt->mode);
6767 src_eqv_elt = lookup (src_eqv, src_eqv_hash, elt->mode);
6770 src_eqv_here = 0;
6773 else if (src_eqv_elt)
6774 elt = src_eqv_elt;
6776 /* Try to find a constant somewhere and record it in `src_const'.
6777 Record its table element, if any, in `src_const_elt'. Look in
6778 any known equivalences first. (If the constant is not in the
6779 table, also set `sets[i].src_const_hash'). */
6780 if (elt)
6781 for (p = elt->first_same_value; p; p = p->next_same_value)
6782 if (p->is_const)
6784 src_const = p->exp;
6785 src_const_elt = elt;
6786 break;
6789 if (src_const == 0
6790 && (CONSTANT_P (src_folded)
6791 /* Consider (minus (label_ref L1) (label_ref L2)) as
6792 "constant" here so we will record it. This allows us
6793 to fold switch statements when an ADDR_DIFF_VEC is used. */
6794 || (GET_CODE (src_folded) == MINUS
6795 && GET_CODE (XEXP (src_folded, 0)) == LABEL_REF
6796 && GET_CODE (XEXP (src_folded, 1)) == LABEL_REF)))
6797 src_const = src_folded, src_const_elt = elt;
6798 else if (src_const == 0 && src_eqv_here && CONSTANT_P (src_eqv_here))
6799 src_const = src_eqv_here, src_const_elt = src_eqv_elt;
6801 /* If we don't know if the constant is in the table, get its
6802 hash code and look it up. */
6803 if (src_const && src_const_elt == 0)
6805 sets[i].src_const_hash = HASH (src_const, mode);
6806 src_const_elt = lookup (src_const, sets[i].src_const_hash, mode);
6809 sets[i].src_const = src_const;
6810 sets[i].src_const_elt = src_const_elt;
6812 /* If the constant and our source are both in the table, mark them as
6813 equivalent. Otherwise, if a constant is in the table but the source
6814 isn't, set ELT to it. */
6815 if (src_const_elt && elt
6816 && src_const_elt->first_same_value != elt->first_same_value)
6817 merge_equiv_classes (elt, src_const_elt);
6818 else if (src_const_elt && elt == 0)
6819 elt = src_const_elt;
6821 /* See if there is a register linearly related to a constant
6822 equivalent of SRC. */
6823 if (src_const
6824 && (GET_CODE (src_const) == CONST
6825 || (src_const_elt && src_const_elt->related_value != 0)))
6827 src_related = use_related_value (src_const, src_const_elt);
6828 if (src_related)
6830 struct table_elt *src_related_elt
6831 = lookup (src_related, HASH (src_related, mode), mode);
6832 if (src_related_elt && elt)
6834 if (elt->first_same_value
6835 != src_related_elt->first_same_value)
6836 /* This can occur when we previously saw a CONST
6837 involving a SYMBOL_REF and then see the SYMBOL_REF
6838 twice. Merge the involved classes. */
6839 merge_equiv_classes (elt, src_related_elt);
6841 src_related = 0;
6842 src_related_elt = 0;
6844 else if (src_related_elt && elt == 0)
6845 elt = src_related_elt;
6849 /* See if we have a CONST_INT that is already in a register in a
6850 wider mode. */
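/* E.g., if (reg:SI 70) is known to hold (const_int 3), an HImode
use of the constant 3 can become (subreg:HI (reg:SI 70) 0) instead
of a separate constant load. */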
6852 if (src_const && src_related == 0 && GET_CODE (src_const) == CONST_INT
6853 && GET_MODE_CLASS (mode) == MODE_INT
6854 && GET_MODE_BITSIZE (mode) < BITS_PER_WORD)
6856 enum machine_mode wider_mode;
6858 for (wider_mode = GET_MODE_WIDER_MODE (mode);
6859 GET_MODE_BITSIZE (wider_mode) <= BITS_PER_WORD
6860 && src_related == 0;
6861 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
6863 struct table_elt *const_elt
6864 = lookup (src_const, HASH (src_const, wider_mode), wider_mode);
6866 if (const_elt == 0)
6867 continue;
6869 for (const_elt = const_elt->first_same_value;
6870 const_elt; const_elt = const_elt->next_same_value)
6871 if (GET_CODE (const_elt->exp) == REG)
6873 src_related = gen_lowpart_if_possible (mode,
6874 const_elt->exp);
6875 break;
6880 /* Another possibility is that we have an AND with a constant in
6881 a mode narrower than a word. If so, it might have been generated
6882 as part of an "if" which would narrow the AND. If we already
6883 have done the AND in a wider mode, we can use a SUBREG of that
6884 value. */
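/* E.g., if (and:SI (reg:SI 70) (const_int 255)) is already
available in (reg:SI 71), a QImode AND of the same value with 255
can use (subreg:QI (reg:SI 71) 0). */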
6886 if (flag_expensive_optimizations && ! src_related
6887 && GET_CODE (src) == AND && GET_CODE (XEXP (src, 1)) == CONST_INT
6888 && GET_MODE_SIZE (mode) < UNITS_PER_WORD)
6890 enum machine_mode tmode;
6891 rtx new_and = gen_rtx_AND (VOIDmode, NULL_RTX, XEXP (src, 1));
6893 for (tmode = GET_MODE_WIDER_MODE (mode);
6894 GET_MODE_SIZE (tmode) <= UNITS_PER_WORD;
6895 tmode = GET_MODE_WIDER_MODE (tmode))
6897 rtx inner = gen_lowpart_if_possible (tmode, XEXP (src, 0));
6898 struct table_elt *larger_elt;
6900 if (inner)
6902 PUT_MODE (new_and, tmode);
6903 XEXP (new_and, 0) = inner;
6904 larger_elt = lookup (new_and, HASH (new_and, tmode), tmode);
6905 if (larger_elt == 0)
6906 continue;
6908 for (larger_elt = larger_elt->first_same_value;
6909 larger_elt; larger_elt = larger_elt->next_same_value)
6910 if (GET_CODE (larger_elt->exp) == REG)
6912 src_related
6913 = gen_lowpart_if_possible (mode, larger_elt->exp);
6914 break;
6917 if (src_related)
6918 break;
6923 #ifdef LOAD_EXTEND_OP
6924 /* See if a MEM has already been loaded with a widening operation;
6925 if it has, we can use a subreg of that. Many CISC machines
6926 also have such operations, but this is only likely to be
6927 beneficial on these machines. */
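/* E.g., if (zero_extend:SI (mem:QI addr)) was loaded earlier into
(reg:SI 70), a new QImode read of the same location can become
(subreg:QI (reg:SI 70) 0). */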
6929 if (flag_expensive_optimizations && src_related == 0
6930 && (GET_MODE_SIZE (mode) < UNITS_PER_WORD)
6931 && GET_MODE_CLASS (mode) == MODE_INT
6932 && GET_CODE (src) == MEM && ! do_not_record
6933 && LOAD_EXTEND_OP (mode) != NIL)
6935 enum machine_mode tmode;
6937 /* Set what we are trying to extend and the operation it might
6938 have been extended with. */
6939 PUT_CODE (memory_extend_rtx, LOAD_EXTEND_OP (mode));
6940 XEXP (memory_extend_rtx, 0) = src;
6942 for (tmode = GET_MODE_WIDER_MODE (mode);
6943 GET_MODE_SIZE (tmode) <= UNITS_PER_WORD;
6944 tmode = GET_MODE_WIDER_MODE (tmode))
6946 struct table_elt *larger_elt;
6948 PUT_MODE (memory_extend_rtx, tmode);
6949 larger_elt = lookup (memory_extend_rtx,
6950 HASH (memory_extend_rtx, tmode), tmode);
6951 if (larger_elt == 0)
6952 continue;
6954 for (larger_elt = larger_elt->first_same_value;
6955 larger_elt; larger_elt = larger_elt->next_same_value)
6956 if (GET_CODE (larger_elt->exp) == REG)
6958 src_related = gen_lowpart_if_possible (mode,
6959 larger_elt->exp);
6960 break;
6963 if (src_related)
6964 break;
6967 #endif /* LOAD_EXTEND_OP */
6969 if (src == src_folded)
6970 src_folded = 0;
6972 /* At this point, ELT, if non-zero, points to a class of expressions
6973 equivalent to the source of this SET, and SRC, SRC_EQV, SRC_FOLDED,
6974 and SRC_RELATED, if non-zero, each contain additional equivalent
6975 expressions. Prune these latter expressions by deleting expressions
6976 already in the equivalence class.
6978 Check for an equivalent identical to the destination. If found,
6979 this is the preferred equivalent since it will likely lead to
6980 elimination of the insn. Indicate this by placing it in
6981 `src_related'. */
6983 if (elt) elt = elt->first_same_value;
6984 for (p = elt; p; p = p->next_same_value)
6986 enum rtx_code code = GET_CODE (p->exp);
6988 /* If the expression is not valid, ignore it. Then we do not
6989 have to check for validity below. In most cases, we can use
6990 `rtx_equal_p', since canonicalization has already been done. */
6991 if (code != REG && ! exp_equiv_p (p->exp, p->exp, 1, 0))
6992 continue;
6994 /* Also skip paradoxical subregs, unless that's what we're
6995 looking for. */
6996 if (code == SUBREG
6997 && (GET_MODE_SIZE (GET_MODE (p->exp))
6998 > GET_MODE_SIZE (GET_MODE (SUBREG_REG (p->exp))))
6999 && ! (src != 0
7000 && GET_CODE (src) == SUBREG
7001 && GET_MODE (src) == GET_MODE (p->exp)
7002 && (GET_MODE_SIZE (GET_MODE (SUBREG_REG (src)))
7003 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (p->exp))))))
7004 continue;
7006 if (src && GET_CODE (src) == code && rtx_equal_p (src, p->exp))
7007 src = 0;
7008 else if (src_folded && GET_CODE (src_folded) == code
7009 && rtx_equal_p (src_folded, p->exp))
7010 src_folded = 0;
7011 else if (src_eqv_here && GET_CODE (src_eqv_here) == code
7012 && rtx_equal_p (src_eqv_here, p->exp))
7013 src_eqv_here = 0;
7014 else if (src_related && GET_CODE (src_related) == code
7015 && rtx_equal_p (src_related, p->exp))
7016 src_related = 0;
7018 /* If this is the same as the destination of the insn, we want
7019 to prefer it. Copy it to src_related. The code below will
7020 then give it a negative cost. */
7021 if (GET_CODE (dest) == code && rtx_equal_p (p->exp, dest))
7022 src_related = dest;
7026 /* Find the cheapest valid equivalent, trying all the available
7027 possibilities. Prefer items not in the hash table to ones
7028 that are when they are equal cost. Note that we can never
7029 worsen an insn as the current contents will also succeed.
7030 If we find an equivalent identical to the destination, use it as best,
7031 since this insn will probably be eliminated in that case. */
7032 if (src)
7034 if (rtx_equal_p (src, dest))
7035 src_cost = -1;
7036 else
7037 src_cost = COST (src);
7040 if (src_eqv_here)
7042 if (rtx_equal_p (src_eqv_here, dest))
7043 src_eqv_cost = -1;
7044 else
7045 src_eqv_cost = COST (src_eqv_here);
7048 if (src_folded)
7050 if (rtx_equal_p (src_folded, dest))
7051 src_folded_cost = -1;
7052 else
7053 src_folded_cost = COST (src_folded);
7056 if (src_related)
7058 if (rtx_equal_p (src_related, dest))
7059 src_related_cost = -1;
7060 else
7061 src_related_cost = COST (src_related);
7064 /* If this was an indirect jump insn, a known label will really be
7065 cheaper even though it looks more expensive. */
7066 if (dest == pc_rtx && src_const && GET_CODE (src_const) == LABEL_REF)
7067 src_folded = src_const, src_folded_cost = -1;
7069 /* Terminate loop when replacement made. This must terminate since
7070 the current contents will be tested and will always be valid. */
7071 while (1)
7073 rtx trial, old_src;
7075 /* Skip invalid entries. */
7076 while (elt && GET_CODE (elt->exp) != REG
7077 && ! exp_equiv_p (elt->exp, elt->exp, 1, 0))
7078 elt = elt->next_same_value;
7080 /* A paradoxical subreg would be bad here: it'll be the right
7081 size, but later may be adjusted so that the upper bits aren't
7082 what we want. So reject it. */
7083 if (elt != 0
7084 && GET_CODE (elt->exp) == SUBREG
7085 && (GET_MODE_SIZE (GET_MODE (elt->exp))
7086 > GET_MODE_SIZE (GET_MODE (SUBREG_REG (elt->exp))))
7087 /* It is okay, though, if the rtx we're trying to match
7088 will ignore any of the bits we can't predict. */
7089 && ! (src != 0
7090 && GET_CODE (src) == SUBREG
7091 && GET_MODE (src) == GET_MODE (elt->exp)
7092 && (GET_MODE_SIZE (GET_MODE (SUBREG_REG (src)))
7093 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (elt->exp))))))
7095 elt = elt->next_same_value;
7096 continue;
7099 if (elt) src_elt_cost = elt->cost;
7101 /* Find cheapest and skip it for the next time. For items
7102 of equal cost, use this order:
7103 src_folded, src, src_eqv, src_related and hash table entry. */
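/* A cost of 10000 marks a candidate as absent; when a candidate is
tried, its cost is reset to 10000 so that the next iteration falls
through to the next-cheapest alternative. */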
7104 if (src_folded_cost <= src_cost
7105 && src_folded_cost <= src_eqv_cost
7106 && src_folded_cost <= src_related_cost
7107 && src_folded_cost <= src_elt_cost)
7109 trial = src_folded, src_folded_cost = 10000;
7110 if (src_folded_force_flag)
7111 trial = force_const_mem (mode, trial);
7113 else if (src_cost <= src_eqv_cost
7114 && src_cost <= src_related_cost
7115 && src_cost <= src_elt_cost)
7116 trial = src, src_cost = 10000;
7117 else if (src_eqv_cost <= src_related_cost
7118 && src_eqv_cost <= src_elt_cost)
7119 trial = copy_rtx (src_eqv_here), src_eqv_cost = 10000;
7120 else if (src_related_cost <= src_elt_cost)
7121 trial = copy_rtx (src_related), src_related_cost = 10000;
7122 else
7124 trial = copy_rtx (elt->exp);
7125 elt = elt->next_same_value;
7126 src_elt_cost = 10000;
7129 /* We don't normally have an insn matching (set (pc) (pc)), so
7130 check for this separately here. We will delete such an
7131 insn below.
7133 Tablejump insns contain a USE of the table, so simply replacing
7134 the operand with the constant won't match. This is simply an
7135 unconditional branch, however, and is therefore valid. Just
7136 insert the substitution here and we will delete and re-emit
7137 the insn later. */
7139 /* Keep track of the original SET_SRC so that we can fix notes
7140 on libcall instructions. */
7141 old_src = SET_SRC (sets[i].rtl);
7143 if (n_sets == 1 && dest == pc_rtx
7144 && (trial == pc_rtx
7145 || (GET_CODE (trial) == LABEL_REF
7146 && ! condjump_p (insn))))
7148 /* If TRIAL is a label in front of a jump table, we are
7149 really falling through the switch (this is how casesi
7150 insns work), so we must branch around the table. */
7151 if (GET_CODE (trial) == CODE_LABEL
7152 && NEXT_INSN (trial) != 0
7153 && GET_CODE (NEXT_INSN (trial)) == JUMP_INSN
7154 && (GET_CODE (PATTERN (NEXT_INSN (trial))) == ADDR_DIFF_VEC
7155 || GET_CODE (PATTERN (NEXT_INSN (trial))) == ADDR_VEC))
7157 trial = gen_rtx_LABEL_REF (Pmode, get_label_after (trial));
7159 SET_SRC (sets[i].rtl) = trial;
7160 cse_jumps_altered = 1;
7161 break;
7164 /* Look for a substitution that makes a valid insn. */
7165 else if (validate_change (insn, &SET_SRC (sets[i].rtl), trial, 0))
7167 /* If we just made a substitution inside a libcall, then we
7168 need to make the same substitution in any notes attached
7169 to the RETVAL insn. */
7170 if (libcall_insn
7171 && (GET_CODE (old_src) == REG
7172 || GET_CODE (old_src) == SUBREG
7173 || GET_CODE (old_src) == MEM))
7174 replace_rtx (REG_NOTES (libcall_insn), old_src,
7175 canon_reg (SET_SRC (sets[i].rtl), insn));
7177 /* The result of apply_change_group can be ignored; see
7178 canon_reg. */
7180 validate_change (insn, &SET_SRC (sets[i].rtl),
7181 canon_reg (SET_SRC (sets[i].rtl), insn),
7182 1);
7183 apply_change_group ();
7184 break;
7187 /* If we previously found constant pool entries for
7188 constants and this is a constant, try making a
7189 pool entry. Put it in src_folded unless we already have done
7190 this since that is where it likely came from. */
7192 else if (constant_pool_entries_cost
7193 && CONSTANT_P (trial)
7194 && ! (GET_CODE (trial) == CONST
7195 && GET_CODE (XEXP (trial, 0)) == TRUNCATE)
7196 && (src_folded == 0
7197 || (GET_CODE (src_folded) != MEM
7198 && ! src_folded_force_flag))
7199 && GET_MODE_CLASS (mode) != MODE_CC
7200 && mode != VOIDmode)
7202 src_folded_force_flag = 1;
7203 src_folded = trial;
7204 src_folded_cost = constant_pool_entries_cost;
7208 src = SET_SRC (sets[i].rtl);
7210 /* In general, it is good to have a SET with SET_SRC == SET_DEST.
7211 However, there is an important exception: If both are registers
7212 that are not the head of their equivalence class, replace SET_SRC
7213 with the head of the class. If we do not do this, we will have
7214 both registers live over a portion of the basic block. This way,
7215 their lifetimes will likely abut instead of overlapping. */
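/* E.g., if (reg 80) and (reg 90) are in one quantity whose first
register is (reg 80), rewrite (set (reg 90) (reg 90)) as
(set (reg 90) (reg 80)) so only the class head stays live. */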
7216 if (GET_CODE (dest) == REG
7217 && REGNO_QTY_VALID_P (REGNO (dest))
7218 && qty_mode[REG_QTY (REGNO (dest))] == GET_MODE (dest)
7219 && qty_first_reg[REG_QTY (REGNO (dest))] != REGNO (dest)
7220 && GET_CODE (src) == REG && REGNO (src) == REGNO (dest)
7221 /* Don't do this if the original insn had a hard reg as
7222 SET_SRC or SET_DEST. */
7223 && (GET_CODE (sets[i].src) != REG
7224 || REGNO (sets[i].src) >= FIRST_PSEUDO_REGISTER)
7225 && (GET_CODE (dest) != REG || REGNO (dest) >= FIRST_PSEUDO_REGISTER))
7226 /* We can't call canon_reg here because it won't do anything if
7227 SRC is a hard register. */
7229 int first = qty_first_reg[REG_QTY (REGNO (src))];
7230 rtx new_src
7231 = (first >= FIRST_PSEUDO_REGISTER
7232 ? regno_reg_rtx[first] : gen_rtx_REG (GET_MODE (src), first));
7234 /* We must use validate_change even for this, because this
7235 might be a special no-op instruction, suitable only to
7236 tag notes onto. */
7237 if (validate_change (insn, &SET_SRC (sets[i].rtl), new_src, 0))
7239 src = new_src;
7240 /* If we had a constant that is cheaper than what we are now
7241 setting SRC to, use that constant. We ignored it when we
7242 thought we could make this into a no-op. */
7243 if (src_const && COST (src_const) < COST (src)
7244 && validate_change (insn, &SET_SRC (sets[i].rtl), src_const,
7245 0))
7246 src = src_const;
7250 /* If we made a change, recompute SRC values. */
7251 if (src != sets[i].src)
7253 do_not_record = 0;
7254 hash_arg_in_memory = 0;
7255 hash_arg_in_struct = 0;
7256 sets[i].src = src;
7257 sets[i].src_hash = HASH (src, mode);
7258 sets[i].src_volatile = do_not_record;
7259 sets[i].src_in_memory = hash_arg_in_memory;
7260 sets[i].src_in_struct = hash_arg_in_struct;
7261 sets[i].src_elt = lookup (src, sets[i].src_hash, mode);
7264 /* If this is a single SET, we are setting a register, and we have an
7265 equivalent constant, we want to add a REG_NOTE. We don't want
7266 to write a REG_EQUAL note for a constant pseudo since verifying that
7267 that pseudo hasn't been eliminated is a pain. Such a note also
7268 won't help anything.
7270 Avoid a REG_EQUAL note for (CONST (MINUS (LABEL_REF) (LABEL_REF)))
7271 which can be created for a reference to a compile time computable
7272 entry in a jump table. */
7274 if (n_sets == 1 && src_const && GET_CODE (dest) == REG
7275 && GET_CODE (src_const) != REG
7276 && ! (GET_CODE (src_const) == CONST
7277 && GET_CODE (XEXP (src_const, 0)) == MINUS
7278 && GET_CODE (XEXP (XEXP (src_const, 0), 0)) == LABEL_REF
7279 && GET_CODE (XEXP (XEXP (src_const, 0), 1)) == LABEL_REF))
7281 tem = find_reg_note (insn, REG_EQUAL, NULL_RTX);
7283 /* Make sure that the rtx is not shared with any other insn. */
7284 src_const = copy_rtx (src_const);
7286 /* Record the actual constant value in a REG_EQUAL note, making
7287 a new one if one does not already exist. */
7288 if (tem)
7289 XEXP (tem, 0) = src_const;
7290 else
7291 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_EQUAL,
7292 src_const, REG_NOTES (insn));
7294 /* If storing a constant value in a register that
7295 previously held the constant value 0,
7296 record this fact with a REG_WAS_0 note on this insn.
7298 Note that the *register* is required to have previously held 0,
7299 not just any register in the quantity and we must point to the
7300 insn that set that register to zero.
7302 Rather than track each register individually, we just see if
7303 the last set for this quantity was for this register. */
7305 if (REGNO_QTY_VALID_P (REGNO (dest))
7306 && qty_const[REG_QTY (REGNO (dest))] == const0_rtx)
7308 /* See if we previously had a REG_WAS_0 note. */
7309 rtx note = find_reg_note (insn, REG_WAS_0, NULL_RTX);
7310 rtx const_insn = qty_const_insn[REG_QTY (REGNO (dest))];
7312 if ((tem = single_set (const_insn)) != 0
7313 && rtx_equal_p (SET_DEST (tem), dest))
7315 if (note)
7316 XEXP (note, 0) = const_insn;
7317 else
7318 REG_NOTES (insn)
7319 = gen_rtx_INSN_LIST (REG_WAS_0, const_insn,
7320 REG_NOTES (insn));
7325 /* Now deal with the destination. */
7326 do_not_record = 0;
7327 sets[i].inner_dest_loc = &SET_DEST (sets[i].rtl);
7329 /* Look within any SIGN_EXTRACT or ZERO_EXTRACT
7330 to the MEM or REG within it. */
7331 while (GET_CODE (dest) == SIGN_EXTRACT
7332 || GET_CODE (dest) == ZERO_EXTRACT
7333 || GET_CODE (dest) == SUBREG
7334 || GET_CODE (dest) == STRICT_LOW_PART)
7336 sets[i].inner_dest_loc = &XEXP (dest, 0);
7337 dest = XEXP (dest, 0);
7340 sets[i].inner_dest = dest;
7342 if (GET_CODE (dest) == MEM)
7344 #ifdef PUSH_ROUNDING
7345 /* Stack pushes invalidate the stack pointer. */
7346 rtx addr = XEXP (dest, 0);
7347 if ((GET_CODE (addr) == PRE_DEC || GET_CODE (addr) == PRE_INC
7348 || GET_CODE (addr) == POST_DEC || GET_CODE (addr) == POST_INC)
7349 && XEXP (addr, 0) == stack_pointer_rtx)
7350 invalidate (stack_pointer_rtx, Pmode);
7351 #endif
7352 dest = fold_rtx (dest, insn);
7355 /* Compute the hash code of the destination now,
7356 before the effects of this instruction are recorded,
7357 since the register values used in the address computation
7358 are those before this instruction. */
7359 sets[i].dest_hash = HASH (dest, mode);
7361 /* Don't enter a bit-field in the hash table
7362 because the value in it after the store
7363 may not equal what was stored, due to truncation. */
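/* E.g., storing (const_int 5) in a 2-bit field leaves the field
holding 1, so recording the value 5 for it would be wrong. */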
7365 if (GET_CODE (SET_DEST (sets[i].rtl)) == ZERO_EXTRACT
7366 || GET_CODE (SET_DEST (sets[i].rtl)) == SIGN_EXTRACT)
7368 rtx width = XEXP (SET_DEST (sets[i].rtl), 1);
7370 if (src_const != 0 && GET_CODE (src_const) == CONST_INT
7371 && GET_CODE (width) == CONST_INT
7372 && INTVAL (width) < HOST_BITS_PER_WIDE_INT
7373 && ! (INTVAL (src_const)
7374 & ((HOST_WIDE_INT) (-1) << INTVAL (width))))
7375 /* Exception: if the value is constant,
7376 and it won't be truncated, record it. */
7377 ;
7380 else
7380 /* This is chosen so that the destination will be invalidated
7381 but no new value will be recorded.
7382 We must invalidate because sometimes constant
7383 values can be recorded for bitfields. */
7384 sets[i].src_elt = 0;
7385 sets[i].src_volatile = 1;
7386 src_eqv = 0;
7387 src_eqv_elt = 0;
7391 /* If only one set in a JUMP_INSN and it is now a no-op, we can delete
7392 the insn. */
7393 else if (n_sets == 1 && dest == pc_rtx && src == pc_rtx)
7395 /* One less use of the label this insn used to jump to. */
7396 if (JUMP_LABEL (insn) != 0)
7397 --LABEL_NUSES (JUMP_LABEL (insn));
7398 PUT_CODE (insn, NOTE);
7399 NOTE_LINE_NUMBER (insn) = NOTE_INSN_DELETED;
7400 NOTE_SOURCE_FILE (insn) = 0;
7401 cse_jumps_altered = 1;
7402 /* No more processing for this set. */
7403 sets[i].rtl = 0;
7406 /* If this SET is now setting PC to a label, we know it used to
7407 be a conditional or computed branch. So we see if we can follow
7408 it. If it was a computed branch, delete it and re-emit. */
7409 else if (dest == pc_rtx && GET_CODE (src) == LABEL_REF)
7411 rtx p;
7413 /* If this is not in the format for a simple branch and
7414 we are the only SET in it, re-emit it. */
7415 if (! simplejump_p (insn) && n_sets == 1)
7417 rtx new = emit_jump_insn_before (gen_jump (XEXP (src, 0)), insn);
7418 JUMP_LABEL (new) = XEXP (src, 0);
7419 LABEL_NUSES (XEXP (src, 0))++;
7420 delete_insn (insn);
7421 insn = new;
7423 else
7424 /* Otherwise, force rerecognition, since it probably had
7425 a different pattern before.
7426 This shouldn't really be necessary, since whatever
7427 changed the source value above should have done this.
7428 Until the right place is found, might as well do this here. */
7429 INSN_CODE (insn) = -1;
7431 /* Now that we've converted this jump to an unconditional jump,
7432 there is dead code after it. Delete the dead code until we
7433 reach a BARRIER, the end of the function, or a label. Do
7434 not delete NOTEs except for NOTE_INSN_DELETED since later
7435 phases assume these notes are retained. */
7437 never_reached_warning (insn);
7439 p = insn;
7441 while (NEXT_INSN (p) != 0
7442 && GET_CODE (NEXT_INSN (p)) != BARRIER
7443 && GET_CODE (NEXT_INSN (p)) != CODE_LABEL)
7445 /* Note, we must update P with the return value from
7446 delete_insn, otherwise we could get an infinite loop
7447 if NEXT_INSN (p) had INSN_DELETED_P set. */
7448 if (GET_CODE (NEXT_INSN (p)) != NOTE
7449 || NOTE_LINE_NUMBER (NEXT_INSN (p)) == NOTE_INSN_DELETED)
7450 p = PREV_INSN (delete_insn (NEXT_INSN (p)));
7451 else
7452 p = NEXT_INSN (p);
7455 /* If we don't have a BARRIER immediately after INSN, put one there.
7456 Much code assumes that there are no NOTEs between a JUMP_INSN and
7457 BARRIER. */
7459 if (NEXT_INSN (insn) == 0
7460 || GET_CODE (NEXT_INSN (insn)) != BARRIER)
7461 emit_barrier_before (NEXT_INSN (insn));
7463 /* We might have two BARRIERs separated by notes. Delete the second
7464 one if so. */
7466 if (p != insn && NEXT_INSN (p) != 0
7467 && GET_CODE (NEXT_INSN (p)) == BARRIER)
7468 delete_insn (NEXT_INSN (p));
7470 cse_jumps_altered = 1;
7471 sets[i].rtl = 0;
7474 /* If destination is volatile, invalidate it and then do no further
7475 processing for this assignment. */
7477 else if (do_not_record)
7479 if (GET_CODE (dest) == REG || GET_CODE (dest) == SUBREG
7480 || GET_CODE (dest) == MEM)
7481 invalidate (dest, VOIDmode);
7482 else if (GET_CODE (dest) == STRICT_LOW_PART
7483 || GET_CODE (dest) == ZERO_EXTRACT)
7484 invalidate (XEXP (dest, 0), GET_MODE (dest));
7485 sets[i].rtl = 0;
7488 if (sets[i].rtl != 0 && dest != SET_DEST (sets[i].rtl))
7489 sets[i].dest_hash = HASH (SET_DEST (sets[i].rtl), mode);
7491 #ifdef HAVE_cc0
7492 /* If setting CC0, record what it was set to, or a constant, if it
7493 is equivalent to a constant. If it is being set to a floating-point
7494 value, make a COMPARE with the appropriate constant of 0. If we
7495 don't do this, later code can interpret this as a test against
7496 const0_rtx, which can cause problems if we try to put it into an
7497 insn as a floating-point operand. */
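/* Illustrative example (annotation, not from the original source):
   for (set (cc0) (reg:SF 40)) we record the equivalent
   (compare (reg:SF 40) (const_double:SF 0.0)) rather than the bare
   register, so later substitution cannot degenerate into a test
   against the integer const0_rtx.  */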
7498 if (dest == cc0_rtx)
7500 this_insn_cc0 = src_const && mode != VOIDmode ? src_const : src;
7501 this_insn_cc0_mode = mode;
7502 if (FLOAT_MODE_P (mode))
7503 this_insn_cc0 = gen_rtx_COMPARE (VOIDmode, this_insn_cc0,
7504 CONST0_RTX (mode));
7506 #endif
7509 /* Now enter all non-volatile source expressions in the hash table
7510 if they are not already present.
7511 Record their equivalence classes in src_elt.
7512 This way we can insert the corresponding destinations into
7513 the same classes even if the actual sources are no longer in them
7514 (having been invalidated). */
7516 if (src_eqv && src_eqv_elt == 0 && sets[0].rtl != 0 && ! src_eqv_volatile
7517 && ! rtx_equal_p (src_eqv, SET_DEST (sets[0].rtl)))
7519 register struct table_elt *elt;
7520 register struct table_elt *classp = sets[0].src_elt;
7521 rtx dest = SET_DEST (sets[0].rtl);
7522 enum machine_mode eqvmode = GET_MODE (dest);
7524 if (GET_CODE (dest) == STRICT_LOW_PART)
7526 eqvmode = GET_MODE (SUBREG_REG (XEXP (dest, 0)));
7527 classp = 0;
7529 if (insert_regs (src_eqv, classp, 0))
7531 rehash_using_reg (src_eqv);
7532 src_eqv_hash = HASH (src_eqv, eqvmode);
7534 elt = insert (src_eqv, classp, src_eqv_hash, eqvmode);
7535 elt->in_memory = src_eqv_in_memory;
7536 elt->in_struct = src_eqv_in_struct;
7537 src_eqv_elt = elt;
7539 /* Check to see if src_eqv_elt is the same as a set source which
7540 does not yet have an elt, and if so set the elt of the set source
7541 to src_eqv_elt. */
7542 for (i = 0; i < n_sets; i++)
7543 if (sets[i].rtl && sets[i].src_elt == 0
7544 && rtx_equal_p (SET_SRC (sets[i].rtl), src_eqv))
7545 sets[i].src_elt = src_eqv_elt;
7548 for (i = 0; i < n_sets; i++)
7549 if (sets[i].rtl && ! sets[i].src_volatile
7550 && ! rtx_equal_p (SET_SRC (sets[i].rtl), SET_DEST (sets[i].rtl)))
7552 if (GET_CODE (SET_DEST (sets[i].rtl)) == STRICT_LOW_PART)
7554 /* REG_EQUAL in setting a STRICT_LOW_PART
7555 gives an equivalent for the entire destination register,
7556 not just for the subreg being stored in now.
7557 This is a more interesting equivalence, so we arrange later
7558 to treat the entire reg as the destination. */
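/* Schematic example (annotation, not from the original source):
   for (set (strict_low_part (subreg:QI (reg:SI 70) 0)) ...) a
   REG_EQUAL note describes the value of all of (reg:SI 70), not
   just the low byte being stored, so the note's equivalence class
   is adopted for the whole register.  */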
7559 sets[i].src_elt = src_eqv_elt;
7560 sets[i].src_hash = src_eqv_hash;
7562 else
7564 /* Insert source and constant equivalent into hash table, if not
7565 already present. */
7566 register struct table_elt *classp = src_eqv_elt;
7567 register rtx src = sets[i].src;
7568 register rtx dest = SET_DEST (sets[i].rtl);
7569 enum machine_mode mode
7570 = GET_MODE (src) == VOIDmode ? GET_MODE (dest) : GET_MODE (src);
7572 /* Don't put a hard register source into the table if this is
7573 the last insn of a libcall. */
7574 if (sets[i].src_elt == 0
7575 && (GET_CODE (src) != REG
7576 || REGNO (src) >= FIRST_PSEUDO_REGISTER
7577 || ! find_reg_note (insn, REG_RETVAL, NULL_RTX)))
7579 register struct table_elt *elt;
7581 /* Note that these insert_regs calls cannot remove
7582 any of the src_elt's, because they would have failed to
7583 match if not still valid. */
7584 if (insert_regs (src, classp, 0))
7586 rehash_using_reg (src);
7587 sets[i].src_hash = HASH (src, mode);
7589 elt = insert (src, classp, sets[i].src_hash, mode);
7590 elt->in_memory = sets[i].src_in_memory;
7591 elt->in_struct = sets[i].src_in_struct;
7592 sets[i].src_elt = classp = elt;
7595 if (sets[i].src_const && sets[i].src_const_elt == 0
7596 && src != sets[i].src_const
7597 && ! rtx_equal_p (sets[i].src_const, src))
7598 sets[i].src_elt = insert (sets[i].src_const, classp,
7599 sets[i].src_const_hash, mode);
7602 else if (sets[i].src_elt == 0)
7603 /* If we did not insert the source into the hash table (e.g., it was
7604 volatile), note the equivalence class for the REG_EQUAL value, if any,
7605 so that the destination goes into that class. */
7606 sets[i].src_elt = src_eqv_elt;
7608 invalidate_from_clobbers (x);
7610 /* Some registers are invalidated by subroutine calls. Memory is
7611 invalidated by non-constant calls. */
7613 if (GET_CODE (insn) == CALL_INSN)
7615 if (! CONST_CALL_P (insn))
7616 invalidate_memory ();
7617 invalidate_for_call ();
7620 /* Now invalidate everything set by this instruction.
7621 If a SUBREG or other funny destination is being set,
7622 sets[i].rtl is still nonzero, so here we invalidate the reg
7623 a part of which is being set. */
7625 for (i = 0; i < n_sets; i++)
7626 if (sets[i].rtl)
7628 /* We can't use the inner dest, because the mode associated with
7629 a ZERO_EXTRACT is significant. */
7630 register rtx dest = SET_DEST (sets[i].rtl);
7632 /* Needed for registers to remove the register from its
7633 previous quantity's chain.
7634 Needed for memory if this is a nonvarying address, unless
7635 we have just done an invalidate_memory that covers even those. */
7636 if (GET_CODE (dest) == REG || GET_CODE (dest) == SUBREG
7637 || GET_CODE (dest) == MEM)
7638 invalidate (dest, VOIDmode);
7639 else if (GET_CODE (dest) == STRICT_LOW_PART
7640 || GET_CODE (dest) == ZERO_EXTRACT)
7641 invalidate (XEXP (dest, 0), GET_MODE (dest));
7644 /* A volatile ASM invalidates everything. */
7645 if (GET_CODE (insn) == INSN
7646 && GET_CODE (PATTERN (insn)) == ASM_OPERANDS
7647 && MEM_VOLATILE_P (PATTERN (insn)))
7648 flush_hash_table ();
7650 /* Make sure registers mentioned in destinations
7651 are safe for use in an expression to be inserted.
7652 This removes from the hash table
7653 any invalid entry that refers to one of these registers.
7655 We don't care about the return value from mention_regs because
7656 we are going to hash the SET_DEST values unconditionally. */
7658 for (i = 0; i < n_sets; i++)
7660 if (sets[i].rtl)
7662 rtx x = SET_DEST (sets[i].rtl);
7664 if (GET_CODE (x) != REG)
7665 mention_regs (x);
7666 else
7668 /* We used to rely on all references to a register becoming
7669 inaccessible when a register changes to a new quantity,
7670 since that changes the hash code. However, that is not
7671 safe, since after NBUCKETS new quantities we get a
7672 hash 'collision' of a register with its own invalid
7673 entries. And since SUBREGs have been changed not to
7674 change their hash code with the hash code of the register,
7675 it wouldn't work any longer at all. So we have to check
7676 for any invalid references lying around now.
7677 This code is similar to the REG case in mention_regs,
7678 but it knows that reg_tick has been incremented, and
7679 it leaves reg_in_table as -1. */
7680 register int regno = REGNO (x);
7681 register int endregno
7682 = regno + (regno >= FIRST_PSEUDO_REGISTER ? 1
7683 : HARD_REGNO_NREGS (regno, GET_MODE (x)));
7684 int i;
7686 for (i = regno; i < endregno; i++)
7688 if (REG_IN_TABLE (i) >= 0)
7690 remove_invalid_refs (i);
7691 REG_IN_TABLE (i) = -1;
7698 /* We may have just removed some of the src_elt's from the hash table.
7699 So replace each one with the current head of the same class. */
7701 for (i = 0; i < n_sets; i++)
7702 if (sets[i].rtl)
7704 if (sets[i].src_elt && sets[i].src_elt->first_same_value == 0)
7705 /* If elt was removed, find current head of same class,
7706 or 0 if nothing remains of that class. */
7708 register struct table_elt *elt = sets[i].src_elt;
7710 while (elt && elt->prev_same_value)
7711 elt = elt->prev_same_value;
7713 while (elt && elt->first_same_value == 0)
7714 elt = elt->next_same_value;
7715 sets[i].src_elt = elt ? elt->first_same_value : 0;
7719 /* Now insert the destinations into their equivalence classes. */
7721 for (i = 0; i < n_sets; i++)
7722 if (sets[i].rtl)
7724 register rtx dest = SET_DEST (sets[i].rtl);
7725 rtx inner_dest = sets[i].inner_dest;
7726 register struct table_elt *elt;
7728 /* Don't record value if we are not supposed to risk allocating
7729 floating-point values in registers that might be wider than
7730 memory. */
7731 if ((flag_float_store
7732 && GET_CODE (dest) == MEM
7733 && FLOAT_MODE_P (GET_MODE (dest)))
7734 /* Don't record BLKmode values, because we don't know their
7735 size, and can't be sure that other BLKmode values have the
7736 same or smaller size. */
7737 || GET_MODE (dest) == BLKmode
7738 /* Don't record values of destinations set inside a libcall block
7739 since we might delete the libcall. Things should have been set
7740 up so we won't want to reuse such a value, but we play it safe
7741 here. */
7742 || libcall_insn
7743 /* If we didn't put a REG_EQUAL value or a source into the hash
7744 table, there is no point in recording DEST. */
7745 || sets[i].src_elt == 0
7746 /* If DEST is a paradoxical SUBREG and SRC is a ZERO_EXTEND
7747 or SIGN_EXTEND, don't record DEST since it can cause
7748 some tracking to be wrong.
7750 ??? Think about this more later. */
7751 || (GET_CODE (dest) == SUBREG
7752 && (GET_MODE_SIZE (GET_MODE (dest))
7753 > GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest))))
7754 && (GET_CODE (sets[i].src) == SIGN_EXTEND
7755 || GET_CODE (sets[i].src) == ZERO_EXTEND)))
7756 continue;
7758 /* STRICT_LOW_PART isn't part of the value BEING set,
7759 and neither is the SUBREG inside it.
7760 Note that in this case SETS[I].SRC_ELT is really SRC_EQV_ELT. */
7761 if (GET_CODE (dest) == STRICT_LOW_PART)
7762 dest = SUBREG_REG (XEXP (dest, 0));
7764 if (GET_CODE (dest) == REG || GET_CODE (dest) == SUBREG)
7765 /* Registers must also be inserted into chains for quantities. */
7766 if (insert_regs (dest, sets[i].src_elt, 1))
7768 /* If `insert_regs' changes something, the hash code must be
7769 recalculated. */
7770 rehash_using_reg (dest);
7771 sets[i].dest_hash = HASH (dest, GET_MODE (dest));
7774 if (GET_CODE (inner_dest) == MEM
7775 && GET_CODE (XEXP (inner_dest, 0)) == ADDRESSOF)
7776 /* Given (SET (MEM (ADDRESSOF (X))) Y) we don't want to say
7777 that (MEM (ADDRESSOF (X))) is equivalent to Y.
7778 Consider the case in which the address of the MEM is
7779 passed to a function, which alters the MEM. Then, if we
7780 later use Y instead of the MEM we'll miss the update. */
7781 elt = insert (dest, 0, sets[i].dest_hash, GET_MODE (dest));
7782 else
7783 elt = insert (dest, sets[i].src_elt,
7784 sets[i].dest_hash, GET_MODE (dest));
7786 elt->in_memory = (GET_CODE (sets[i].inner_dest) == MEM
7787 && (! RTX_UNCHANGING_P (sets[i].inner_dest)
7788 || FIXED_BASE_PLUS_P (XEXP (sets[i].inner_dest,
7789 0))));
7791 if (elt->in_memory)
7793 /* This implicitly assumes a whole struct
7794 need not have MEM_IN_STRUCT_P.
7795 But a whole struct is *supposed* to have MEM_IN_STRUCT_P. */
7796 elt->in_struct = (MEM_IN_STRUCT_P (sets[i].inner_dest)
7797 || sets[i].inner_dest != SET_DEST (sets[i].rtl));
7800 /* If we have (set (subreg:m1 (reg:m2 foo) 0) (bar:m1)), M1 is no
7801 narrower than M2, and both M1 and M2 are the same number of words,
7802 we are also doing (set (reg:m2 foo) (subreg:m2 (bar:m1) 0)) so
7803 make that equivalence as well.
7805 However, BAR may have equivalences for which gen_lowpart_if_possible
7806 will produce a simpler value than gen_lowpart_if_possible applied to
7807 BAR (e.g., if BAR was ZERO_EXTENDed from M2), so we will scan all
7808 BAR's equivalences. If we don't get a simplified form, make
7809 the SUBREG. It will not be used in an equivalence, but will
7810 cause two similar assignments to be detected.
7812 Note the loop below will find SUBREG_REG (DEST) since we have
7813 already entered SRC and DEST of the SET in the table. */
7815 if (GET_CODE (dest) == SUBREG
7816 && (((GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest))) - 1)
7817 / UNITS_PER_WORD)
7818 == (GET_MODE_SIZE (GET_MODE (dest)) - 1) / UNITS_PER_WORD)
7819 && (GET_MODE_SIZE (GET_MODE (dest))
7820 >= GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest))))
7821 && sets[i].src_elt != 0)
7823 enum machine_mode new_mode = GET_MODE (SUBREG_REG (dest));
7824 struct table_elt *elt, *classp = 0;
7826 for (elt = sets[i].src_elt->first_same_value; elt;
7827 elt = elt->next_same_value)
7829 rtx new_src = 0;
7830 unsigned src_hash;
7831 struct table_elt *src_elt;
7833 /* Ignore invalid entries. */
7834 if (GET_CODE (elt->exp) != REG
7835 && ! exp_equiv_p (elt->exp, elt->exp, 1, 0))
7836 continue;
7838 new_src = gen_lowpart_if_possible (new_mode, elt->exp);
7839 if (new_src == 0)
7840 new_src = gen_rtx_SUBREG (new_mode, elt->exp, 0);
7842 src_hash = HASH (new_src, new_mode);
7843 src_elt = lookup (new_src, src_hash, new_mode);
7845 /* Put the new source in the hash table if it isn't
7846 already. */
7847 if (src_elt == 0)
7849 if (insert_regs (new_src, classp, 0))
7851 rehash_using_reg (new_src);
7852 src_hash = HASH (new_src, new_mode);
7854 src_elt = insert (new_src, classp, src_hash, new_mode);
7855 src_elt->in_memory = elt->in_memory;
7856 src_elt->in_struct = elt->in_struct;
7858 else if (classp && classp != src_elt->first_same_value)
7859 /* Show that two things that we've seen before are
7860 actually the same. */
7861 merge_equiv_classes (src_elt, classp);
7863 classp = src_elt->first_same_value;
7864 /* Ignore invalid entries. */
7865 while (classp
7866 && GET_CODE (classp->exp) != REG
7867 && ! exp_equiv_p (classp->exp, classp->exp, 1, 0))
7868 classp = classp->next_same_value;
7873 /* Special handling for (set REG0 REG1)
7874 where REG0 is the "cheapest", cheaper than REG1.
7875 After cse, REG1 will probably not be used in the sequel,
7876 so (if easily done) change this insn to (set REG1 REG0) and
7877 replace REG1 with REG0 in the previous insn that computed their value.
7878 Then REG1 will become a dead store and won't cloud the situation
7879 for later optimizations.
7881 Do not make this change if REG1 is a hard register, because it will
7882 then be used in the sequel and we may be changing a two-operand insn
7883 into a three-operand insn.
7885 Also do not do this if we are operating on a copy of INSN.
7887 Also don't do this if INSN ends a libcall; this would cause an unrelated
7888 register to be set in the middle of a libcall, and we then get bad code
7889 if the libcall is deleted. */
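/* Sketch of the transformation (annotation, not from the original
   source; register numbers are invented):

       (set (reg 101) (plus (reg 102) (const_int 4)))
       (set (reg 100) (reg 101))

   becomes

       (set (reg 100) (plus (reg 102) (const_int 4)))
       (set (reg 101) (reg 100))

   leaving the copy into reg 101 as a probable dead store.  */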
7891 if (n_sets == 1 && sets[0].rtl && GET_CODE (SET_DEST (sets[0].rtl)) == REG
7892 && NEXT_INSN (PREV_INSN (insn)) == insn
7893 && GET_CODE (SET_SRC (sets[0].rtl)) == REG
7894 && REGNO (SET_SRC (sets[0].rtl)) >= FIRST_PSEUDO_REGISTER
7895 && REGNO_QTY_VALID_P (REGNO (SET_SRC (sets[0].rtl)))
7896 && (qty_first_reg[REG_QTY (REGNO (SET_SRC (sets[0].rtl)))]
7897 == REGNO (SET_DEST (sets[0].rtl)))
7898 && ! find_reg_note (insn, REG_RETVAL, NULL_RTX))
7900 rtx prev = PREV_INSN (insn);
7901 while (prev && GET_CODE (prev) == NOTE)
7902 prev = PREV_INSN (prev);
7904 if (prev && GET_CODE (prev) == INSN && GET_CODE (PATTERN (prev)) == SET
7905 && SET_DEST (PATTERN (prev)) == SET_SRC (sets[0].rtl))
7907 rtx dest = SET_DEST (sets[0].rtl);
7908 rtx note = find_reg_note (prev, REG_EQUIV, NULL_RTX);
7910 validate_change (prev, & SET_DEST (PATTERN (prev)), dest, 1);
7911 validate_change (insn, & SET_DEST (sets[0].rtl),
7912 SET_SRC (sets[0].rtl), 1);
7913 validate_change (insn, & SET_SRC (sets[0].rtl), dest, 1);
7914 apply_change_group ();
7916 /* If REG1 was equivalent to a constant, REG0 is not. */
7917 if (note)
7918 PUT_REG_NOTE_KIND (note, REG_EQUAL);
7920 /* If there was a REG_WAS_0 note on PREV, remove it. Move
7921 any REG_WAS_0 note on INSN to PREV. */
7922 note = find_reg_note (prev, REG_WAS_0, NULL_RTX);
7923 if (note)
7924 remove_note (prev, note);
7926 note = find_reg_note (insn, REG_WAS_0, NULL_RTX);
7927 if (note)
7929 remove_note (insn, note);
7930 XEXP (note, 1) = REG_NOTES (prev);
7931 REG_NOTES (prev) = note;
7934 /* If INSN has a REG_EQUAL note, and this note mentions REG0,
7935 then we must delete it, because the value in REG0 has changed. */
7936 note = find_reg_note (insn, REG_EQUAL, NULL_RTX);
7937 if (note && reg_mentioned_p (dest, XEXP (note, 0)))
7938 remove_note (insn, note);
7942 /* If this is a conditional jump insn, record any known equivalences due to
7943 the condition being tested. */
7945 last_jump_equiv_class = 0;
7946 if (GET_CODE (insn) == JUMP_INSN
7947 && n_sets == 1 && GET_CODE (x) == SET
7948 && GET_CODE (SET_SRC (x)) == IF_THEN_ELSE)
7949 record_jump_equiv (insn, 0);
7951 #ifdef HAVE_cc0
7952 /* If the previous insn set CC0 and this insn no longer references CC0,
7953 delete the previous insn. Here we use the fact that nothing expects CC0
7954 to be valid over an insn, which is true until the final pass. */
7955 if (prev_insn && GET_CODE (prev_insn) == INSN
7956 && (tem = single_set (prev_insn)) != 0
7957 && SET_DEST (tem) == cc0_rtx
7958 && ! reg_mentioned_p (cc0_rtx, x))
7960 PUT_CODE (prev_insn, NOTE);
7961 NOTE_LINE_NUMBER (prev_insn) = NOTE_INSN_DELETED;
7962 NOTE_SOURCE_FILE (prev_insn) = 0;
7965 prev_insn_cc0 = this_insn_cc0;
7966 prev_insn_cc0_mode = this_insn_cc0_mode;
7967 #endif
7969 prev_insn = insn;
7972 /* Remove from the hash table all expressions that reference memory. */
7973 static void
7974 invalidate_memory ()
7976 register int i;
7977 register struct table_elt *p, *next;
7979 for (i = 0; i < NBUCKETS; i++)
7980 for (p = table[i]; p; p = next)
7982 next = p->next_same_hash;
7983 if (p->in_memory)
7984 remove_from_table (p, i);
7988 /* XXX ??? The name of this function bears little resemblance to
7989 what this function actually does. FIXME. */
7990 static int
7991 note_mem_written (addr)
7992 register rtx addr;
7994 /* Pushing or popping the stack invalidates just the stack pointer. */
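/* E.g. a push of (reg:SI 0) typically looks like
   (set (mem:SI (pre_dec:SI (reg:SI sp))) (reg:SI 0));
   the PRE_DEC/POST_INC address forms are what is matched below.
   (Annotation; the exact RTL is target-dependent.)  */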
7995 if ((GET_CODE (addr) == PRE_DEC || GET_CODE (addr) == PRE_INC
7996 || GET_CODE (addr) == POST_DEC || GET_CODE (addr) == POST_INC)
7997 && GET_CODE (XEXP (addr, 0)) == REG
7998 && REGNO (XEXP (addr, 0)) == STACK_POINTER_REGNUM)
8000 if (REG_TICK (STACK_POINTER_REGNUM) >= 0)
8001 REG_TICK (STACK_POINTER_REGNUM)++;
8003 /* This should be *very* rare. */
8004 if (TEST_HARD_REG_BIT (hard_regs_in_table, STACK_POINTER_REGNUM))
8005 invalidate (stack_pointer_rtx, VOIDmode);
8006 return 1;
8008 return 0;
8011 /* Perform invalidation on the basis of everything about an insn
8012 except for invalidating the actual places that are SET in it.
8013 This includes the places CLOBBERed, and anything that might
8014 alias with something that is SET or CLOBBERed.
8016 X is the pattern of the insn. */
8018 static void
8019 invalidate_from_clobbers (x)
8020 rtx x;
8022 if (GET_CODE (x) == CLOBBER)
8024 rtx ref = XEXP (x, 0);
8025 if (ref)
8027 if (GET_CODE (ref) == REG || GET_CODE (ref) == SUBREG
8028 || GET_CODE (ref) == MEM)
8029 invalidate (ref, VOIDmode);
8030 else if (GET_CODE (ref) == STRICT_LOW_PART
8031 || GET_CODE (ref) == ZERO_EXTRACT)
8032 invalidate (XEXP (ref, 0), GET_MODE (ref));
8035 else if (GET_CODE (x) == PARALLEL)
8037 register int i;
8038 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
8040 register rtx y = XVECEXP (x, 0, i);
8041 if (GET_CODE (y) == CLOBBER)
8043 rtx ref = XEXP (y, 0);
8044 if (GET_CODE (ref) == REG || GET_CODE (ref) == SUBREG
8045 || GET_CODE (ref) == MEM)
8046 invalidate (ref, VOIDmode);
8047 else if (GET_CODE (ref) == STRICT_LOW_PART
8048 || GET_CODE (ref) == ZERO_EXTRACT)
8049 invalidate (XEXP (ref, 0), GET_MODE (ref));
8055 /* Process X, part of the REG_NOTES of an insn. Look at any REG_EQUAL notes
8056 and replace any registers in them with either an equivalent constant
8057 or the canonical form of the register. If we are inside an address,
8058 only do this if the address remains valid.
8060 OBJECT is 0 except when within a MEM in which case it is the MEM.
8062 Return the replacement for X. */
8064 static rtx
8065 cse_process_notes (x, object)
8066 rtx x;
8067 rtx object;
8069 enum rtx_code code = GET_CODE (x);
8070 const char *fmt = GET_RTX_FORMAT (code);
8071 int i;
8073 switch (code)
8075 case CONST_INT:
8076 case CONST:
8077 case SYMBOL_REF:
8078 case LABEL_REF:
8079 case CONST_DOUBLE:
8080 case PC:
8081 case CC0:
8082 case LO_SUM:
8083 return x;
8085 case MEM:
8086 XEXP (x, 0) = cse_process_notes (XEXP (x, 0), x);
8087 return x;
8089 case EXPR_LIST:
8090 case INSN_LIST:
8091 if (REG_NOTE_KIND (x) == REG_EQUAL)
8092 XEXP (x, 0) = cse_process_notes (XEXP (x, 0), NULL_RTX);
8093 if (XEXP (x, 1))
8094 XEXP (x, 1) = cse_process_notes (XEXP (x, 1), NULL_RTX);
8095 return x;
8097 case SIGN_EXTEND:
8098 case ZERO_EXTEND:
8099 case SUBREG:
8101 rtx new = cse_process_notes (XEXP (x, 0), object);
8102 /* We don't substitute VOIDmode constants into these rtx,
8103 since they would impede folding. */
8104 if (GET_MODE (new) != VOIDmode)
8105 validate_change (object, &XEXP (x, 0), new, 0);
8106 return x;
8109 case REG:
8110 i = REG_QTY (REGNO (x));
8112 /* Return a constant or a constant register. */
8113 if (REGNO_QTY_VALID_P (REGNO (x))
8114 && qty_const[i] != 0
8115 && (CONSTANT_P (qty_const[i])
8116 || GET_CODE (qty_const[i]) == REG))
8118 rtx new = gen_lowpart_if_possible (GET_MODE (x), qty_const[i]);
8119 if (new)
8120 return new;
8123 /* Otherwise, canonicalize this register. */
8124 return canon_reg (x, NULL_RTX);
8126 default:
8127 break;
8130 for (i = 0; i < GET_RTX_LENGTH (code); i++)
8131 if (fmt[i] == 'e')
8132 validate_change (object, &XEXP (x, i),
8133 cse_process_notes (XEXP (x, i), object), 0);
8135 return x;
8138 /* Find common subexpressions between the end test of a loop and the beginning
8139 of the loop. LOOP_START is the CODE_LABEL at the start of a loop.
8141 Often we have a loop where an expression in the exit test is used
8142 in the body of the loop. For example "while (*p) *q++ = *p++;".
8143 Because of the way we duplicate the loop exit test in front of the loop,
8144 however, we don't detect that common subexpression. This will be caught
8145 when global cse is implemented, but this is quite a common case.
8147 This function handles the most common cases of these common expressions.
8148 It is called after we have processed the basic block ending with the
8149 NOTE_INSN_LOOP_END note that ends a loop and the previous JUMP_INSN
8150 jumps to a label used only once. */
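/* Schematic layout of such a loop (annotation, not from the
   original source):

       if (! *p) goto end;	;; duplicated exit test
   start:
       *q++ = *p++;		;; body recomputes *p
       if (*p) goto start;	;; exit test at the bottom
   end:

   The *p loaded by the bottom test is the value the next
   iteration's body reloads; that is the common subexpression this
   function tries to reuse.  */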
8152 static void
8153 cse_around_loop (loop_start)
8154 rtx loop_start;
8156 rtx insn;
8157 int i;
8158 struct table_elt *p;
8160 /* If the jump at the end of the loop doesn't go to the start, we don't
8161 do anything. */
8162 for (insn = PREV_INSN (loop_start);
8163 insn && (GET_CODE (insn) == NOTE && NOTE_LINE_NUMBER (insn) >= 0);
8164 insn = PREV_INSN (insn))
8167 if (insn == 0
8168 || GET_CODE (insn) != NOTE
8169 || NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG)
8170 return;
8172 /* If the last insn of the loop (the end test) was an NE comparison,
8173 we will interpret it as an EQ comparison, since we fell through
8174 the loop. Any equivalences resulting from that comparison are
8175 therefore not valid and must be invalidated. */
8176 if (last_jump_equiv_class)
8177 for (p = last_jump_equiv_class->first_same_value; p;
8178 p = p->next_same_value)
8180 if (GET_CODE (p->exp) == MEM || GET_CODE (p->exp) == REG
8181 || (GET_CODE (p->exp) == SUBREG
8182 && GET_CODE (SUBREG_REG (p->exp)) == REG))
8183 invalidate (p->exp, VOIDmode);
8184 else if (GET_CODE (p->exp) == STRICT_LOW_PART
8185 || GET_CODE (p->exp) == ZERO_EXTRACT)
8186 invalidate (XEXP (p->exp, 0), GET_MODE (p->exp));
8189 /* Process insns starting after LOOP_START until we hit a CALL_INSN or
8190 a CODE_LABEL (we could handle a CALL_INSN, but it isn't worth it).
8192 The only thing we do with SET_DEST is invalidate entries, so we
8193 can safely process each SET in order. It is slightly less efficient
8194 to do so, but we only want to handle the most common cases.
8196 The gen_move_insn call in cse_set_around_loop may create new pseudos.
8197 These pseudos won't have valid entries in any of the tables indexed
8198 by register number, such as reg_qty. We avoid out-of-range array
8199 accesses by not processing any instructions created after cse started. */
8201 for (insn = NEXT_INSN (loop_start);
8202 GET_CODE (insn) != CALL_INSN && GET_CODE (insn) != CODE_LABEL
8203 && INSN_UID (insn) < max_insn_uid
8204 && ! (GET_CODE (insn) == NOTE
8205 && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END);
8206 insn = NEXT_INSN (insn))
8208 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
8209 && (GET_CODE (PATTERN (insn)) == SET
8210 || GET_CODE (PATTERN (insn)) == CLOBBER))
8211 cse_set_around_loop (PATTERN (insn), insn, loop_start);
8212 else if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
8213 && GET_CODE (PATTERN (insn)) == PARALLEL)
8214 for (i = XVECLEN (PATTERN (insn), 0) - 1; i >= 0; i--)
8215 if (GET_CODE (XVECEXP (PATTERN (insn), 0, i)) == SET
8216 || GET_CODE (XVECEXP (PATTERN (insn), 0, i)) == CLOBBER)
8217 cse_set_around_loop (XVECEXP (PATTERN (insn), 0, i), insn,
8218 loop_start);
8222 /* Process one SET of an insn that was skipped. We ignore CLOBBERs
8223 since they are done elsewhere. This function is called via note_stores. */
8225 static void
8226 invalidate_skipped_set (dest, set)
8227 rtx set;
8228 rtx dest;
8230 enum rtx_code code = GET_CODE (dest);
8232 if (code == MEM
8233 && ! note_mem_written (dest) /* If this is not a stack push ... */
8234 /* There are times when an address can appear varying and be a PLUS
8235 during this scan when it would be a fixed address were we to know
8236 the proper equivalences. So invalidate all memory if there is
8237 a BLKmode or nonscalar memory reference or a reference to a
8238 variable address. */
8239 && (MEM_IN_STRUCT_P (dest) || GET_MODE (dest) == BLKmode
8240 || cse_rtx_varies_p (XEXP (dest, 0))))
8242 invalidate_memory ();
8243 return;
8246 if (GET_CODE (set) == CLOBBER
8247 #ifdef HAVE_cc0
8248 || dest == cc0_rtx
8249 #endif
8250 || dest == pc_rtx)
8251 return;
8253 if (code == STRICT_LOW_PART || code == ZERO_EXTRACT)
8254 invalidate (XEXP (dest, 0), GET_MODE (dest));
8255 else if (code == REG || code == SUBREG || code == MEM)
8256 invalidate (dest, VOIDmode);
8259 /* Invalidate all insns from START up to the end of the function or the
8260 next label. This is called when we wish to CSE around a block that is
8261 conditionally executed. */
8263 static void
8264 invalidate_skipped_block (start)
8265 rtx start;
8267 rtx insn;
8269 for (insn = start; insn && GET_CODE (insn) != CODE_LABEL;
8270 insn = NEXT_INSN (insn))
8272 if (GET_RTX_CLASS (GET_CODE (insn)) != 'i')
8273 continue;
8275 if (GET_CODE (insn) == CALL_INSN)
8277 if (! CONST_CALL_P (insn))
8278 invalidate_memory ();
8279 invalidate_for_call ();
8282 invalidate_from_clobbers (PATTERN (insn));
8283 note_stores (PATTERN (insn), invalidate_skipped_set);
8287 /* Used for communication between the following two routines; contains a
8288 value to be checked for modification. */
8290 static rtx cse_check_loop_start_value;
8292 /* If modifying X will modify the value in CSE_CHECK_LOOP_START_VALUE,
8293 indicate that fact by setting CSE_CHECK_LOOP_START_VALUE to 0. */
8295 static void
8296 cse_check_loop_start (x, set)
8297 rtx x;
8298 rtx set ATTRIBUTE_UNUSED;
8300 if (cse_check_loop_start_value == 0
8301 || GET_CODE (x) == CC0 || GET_CODE (x) == PC)
8302 return;
8304 if ((GET_CODE (x) == MEM && GET_CODE (cse_check_loop_start_value) == MEM)
8305 || reg_overlap_mentioned_p (x, cse_check_loop_start_value))
8306 cse_check_loop_start_value = 0;
8309 /* X is a SET or CLOBBER contained in INSN that was found near the start of
8310 a loop that starts with the label at LOOP_START.
8312 If X is a SET, we see if its SET_SRC is currently in our hash table.
8313 If so, we see if it has a value equal to some register used only in the
8314 loop exit code (as marked by jump.c).
8316 If those two conditions are true, we search backwards from the start of
8317 the loop to see if that same value was loaded into a register that still
8318 retains its value at the start of the loop.
8320 If so, we insert an insn after the load to copy the destination of that
8321 load into the equivalent register and (try to) replace our SET_SRC with that
8322 register.
8324 In any event, we invalidate whatever this SET or CLOBBER modifies. */
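/* Rough picture (annotation, not from the original source;
   pseudo-register numbers are invented):

       p:    (set (reg 80) EXPR)	;; found before the loop
	     (set (reg 60) (reg 80))	;; copy emitted after P, where
					;; reg 60 is the cheap register
					;; from the loop exit test
       top:  ...
       insn: (set (reg 90) (reg 60))	;; was (set (reg 90) EXPR)  */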
8326 static void
8327 cse_set_around_loop (x, insn, loop_start)
8328 rtx x;
8329 rtx insn;
8330 rtx loop_start;
8332 struct table_elt *src_elt;
8334 /* If this is a SET, see if we can replace SET_SRC, but ignore SETs that
8335 are setting PC or CC0 or whose SET_SRC is already a register. */
8336 if (GET_CODE (x) == SET
8337 && GET_CODE (SET_DEST (x)) != PC && GET_CODE (SET_DEST (x)) != CC0
8338 && GET_CODE (SET_SRC (x)) != REG)
8340 src_elt = lookup (SET_SRC (x),
8341 HASH (SET_SRC (x), GET_MODE (SET_DEST (x))),
8342 GET_MODE (SET_DEST (x)));
8344 if (src_elt)
8345 for (src_elt = src_elt->first_same_value; src_elt;
8346 src_elt = src_elt->next_same_value)
8347 if (GET_CODE (src_elt->exp) == REG && REG_LOOP_TEST_P (src_elt->exp)
8348 && COST (src_elt->exp) < COST (SET_SRC (x)))
8350 rtx p, set;
8352 /* Look for an insn in front of LOOP_START that sets
8353 something in the desired mode to SET_SRC (x) before we hit
8354 a label or CALL_INSN. */
8356 for (p = prev_nonnote_insn (loop_start);
8357 p && GET_CODE (p) != CALL_INSN
8358 && GET_CODE (p) != CODE_LABEL;
8359 p = prev_nonnote_insn (p))
8360 if ((set = single_set (p)) != 0
8361 && GET_CODE (SET_DEST (set)) == REG
8362 && GET_MODE (SET_DEST (set)) == src_elt->mode
8363 && rtx_equal_p (SET_SRC (set), SET_SRC (x)))
8365 /* We now have to ensure that nothing between P
8366 and LOOP_START modified anything referenced in
8367 SET_SRC (x). We know that nothing within the loop
8368 can modify it, or we would have invalidated it in
8369 the hash table. */
8370 rtx q;
8372 cse_check_loop_start_value = SET_SRC (x);
8373 for (q = p; q != loop_start; q = NEXT_INSN (q))
8374 if (GET_RTX_CLASS (GET_CODE (q)) == 'i')
8375 note_stores (PATTERN (q), cse_check_loop_start);
8377 /* If nothing was changed and we can replace our
8378 SET_SRC, add an insn after P to copy its destination
8379 to what we will be replacing SET_SRC with. */
8380 if (cse_check_loop_start_value
8381 && validate_change (insn, &SET_SRC (x),
8382 src_elt->exp, 0))
8384 /* If this creates new pseudos, this is unsafe,
8385 because the regno of a new pseudo is unsuitable
8386 to index into reg_qty when cse_insn processes
8387 the new insn. Therefore, if a new pseudo was
8388 created, discard this optimization. */
8389 int nregs = max_reg_num ();
8390 rtx move
8391 = gen_move_insn (src_elt->exp, SET_DEST (set));
8392 if (nregs != max_reg_num ())
8394 if (! validate_change (insn, &SET_SRC (x),
8395 SET_SRC (set), 0))
8396 abort ();
8398 else
8399 emit_insn_after (move, p);
8401 break;
8406 /* Now invalidate anything modified by X. */
8407 note_mem_written (SET_DEST (x));
8409 /* See comment on similar code in cse_insn for explanation of these tests. */
8410 if (GET_CODE (SET_DEST (x)) == REG || GET_CODE (SET_DEST (x)) == SUBREG
8411 || GET_CODE (SET_DEST (x)) == MEM)
8412 invalidate (SET_DEST (x), VOIDmode);
8413 else if (GET_CODE (SET_DEST (x)) == STRICT_LOW_PART
8414 || GET_CODE (SET_DEST (x)) == ZERO_EXTRACT)
8415 invalidate (XEXP (SET_DEST (x), 0), GET_MODE (SET_DEST (x)));
8418 /* Find the end of INSN's basic block and return its range,
8419 the total number of SETs in all the insns of the block, the last insn of the
8420 block, and the branch path.
8422 The branch path indicates which branches should be followed. If a non-zero
8423 path size is specified, the block should be rescanned and a different set
8424 of branches will be taken. The branch path is only used if
8425 FLAG_CSE_FOLLOW_JUMPS or FLAG_CSE_SKIP_BLOCKS is non-zero.
8427 DATA is a pointer to a struct cse_basic_block_data, defined below, that is
8428 used to describe the block. It is filled in with the information about
8429 the current block. The incoming structure's branch path, if any, is used
8430 to construct the output branch path. */
8432 void
8433 cse_end_of_basic_block (insn, data, follow_jumps, after_loop, skip_blocks)
8434 rtx insn;
8435 struct cse_basic_block_data *data;
8436 int follow_jumps;
8437 int after_loop;
8438 int skip_blocks;
8440 rtx p = insn, q;
8441 int nsets = 0;
8442 int low_cuid = INSN_CUID (insn), high_cuid = INSN_CUID (insn);
8443 rtx next = GET_RTX_CLASS (GET_CODE (insn)) == 'i' ? insn : next_real_insn (insn);
8444 int path_size = data->path_size;
8445 int path_entry = 0;
8446 int i;
8448 /* Update the previous branch path, if any. If the last branch was
8449 previously TAKEN, mark it NOT_TAKEN. If it was previously NOT_TAKEN,
8450 shorten the path by one and look at the previous branch. We know that
8451 at least one branch must have been taken if PATH_SIZE is non-zero. */
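/* E.g. (annotation): a previous path [TAKEN, TAKEN, NOT_TAKEN]
   first drops the trailing NOT_TAKEN entry, then flips the new
   last branch, yielding [TAKEN, NOT_TAKEN]; repeated calls thus
   enumerate the alternative paths in a fixed order.  */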
8452 while (path_size > 0)
8454 if (data->path[path_size - 1].status != NOT_TAKEN)
8456 data->path[path_size - 1].status = NOT_TAKEN;
8457 break;
8459 else
8460 path_size--;
8463 /* Scan to end of this basic block. */
8464 while (p && GET_CODE (p) != CODE_LABEL)
8466 /* Don't cse out the end of a loop. This makes a difference
8467 only for the unusual loops that always execute at least once;
8468 all other loops have labels there so we will stop in any case.
8469 Cse'ing out the end of the loop is dangerous because it
8470 might cause an invariant expression inside the loop
8471 to be reused after the end of the loop. This would make it
8472 hard to move the expression out of the loop in loop.c,
8473 especially if it is one of several equivalent expressions
8474 and loop.c would like to eliminate it.
8476 If we are running after loop.c has finished, we can ignore
8477 the NOTE_INSN_LOOP_END. */
8479 if (! after_loop && GET_CODE (p) == NOTE
8480 && NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_END)
8481 break;
8483 /* Don't cse over a call to setjmp; on some machines (e.g. vax)
8484 the regs restored by the longjmp come from
8485 a later time than the setjmp. */
8486 if (GET_CODE (p) == NOTE
8487 && NOTE_LINE_NUMBER (p) == NOTE_INSN_SETJMP)
8488 break;
8490 /* A PARALLEL can have lots of SETs in it,
8491 especially if it is really an ASM_OPERANDS. */
8492 if (GET_RTX_CLASS (GET_CODE (p)) == 'i'
8493 && GET_CODE (PATTERN (p)) == PARALLEL)
8494 nsets += XVECLEN (PATTERN (p), 0);
8495 else if (GET_CODE (p) != NOTE)
8496 nsets += 1;
8498 /* Ignore insns made by CSE; they cannot affect the boundaries of
8499 the basic block. */
8501 if (INSN_UID (p) <= max_uid && INSN_CUID (p) > high_cuid)
8502 high_cuid = INSN_CUID (p);
8503 if (INSN_UID (p) <= max_uid && INSN_CUID (p) < low_cuid)
8504 low_cuid = INSN_CUID (p);
8506 /* See if this insn is in our branch path. If it is and we are to
8507 take it, do so. */
8508 if (path_entry < path_size && data->path[path_entry].branch == p)
8510 if (data->path[path_entry].status != NOT_TAKEN)
8511 p = JUMP_LABEL (p);
8513 /* Point to next entry in path, if any. */
8514 path_entry++;
8517 /* If this is a conditional jump, we can follow it if -fcse-follow-jumps
8518 was specified, we haven't reached our maximum path length, there are
8519 insns following the target of the jump, this is the only use of the
8520 jump label, and the target label is preceded by a BARRIER.
8522 Alternatively, we can follow the jump if it branches around a
8523 block of code and there are no other branches into the block.
8524 In this case invalidate_skipped_block will be called to invalidate any
8525 registers set in the block when following the jump. */
8527 else if ((follow_jumps || skip_blocks) && path_size < PATHLENGTH - 1
8528 && GET_CODE (p) == JUMP_INSN
8529 && GET_CODE (PATTERN (p)) == SET
8530 && GET_CODE (SET_SRC (PATTERN (p))) == IF_THEN_ELSE
8531 && JUMP_LABEL (p) != 0
8532 && LABEL_NUSES (JUMP_LABEL (p)) == 1
8533 && NEXT_INSN (JUMP_LABEL (p)) != 0)
8535 for (q = PREV_INSN (JUMP_LABEL (p)); q; q = PREV_INSN (q))
8536 if ((GET_CODE (q) != NOTE
8537 || NOTE_LINE_NUMBER (q) == NOTE_INSN_LOOP_END
8538 || NOTE_LINE_NUMBER (q) == NOTE_INSN_SETJMP)
8539 && (GET_CODE (q) != CODE_LABEL || LABEL_NUSES (q) != 0))
8540 break;
8542 /* If we ran into a BARRIER, this code is an extension of the
8543 basic block when the branch is taken. */
8544 if (follow_jumps && q != 0 && GET_CODE (q) == BARRIER)
8546 /* Don't allow ourselves to keep walking around an
8547 always-executed loop. */
8548 if (next_real_insn (q) == next)
8550 p = NEXT_INSN (p);
8551 continue;
8554 /* Similarly, don't put a branch in our path more than once. */
8555 for (i = 0; i < path_entry; i++)
8556 if (data->path[i].branch == p)
8557 break;
8559 if (i != path_entry)
8560 break;
8562 data->path[path_entry].branch = p;
8563 data->path[path_entry++].status = TAKEN;
8565 /* This branch now ends our path. It was possible that we
8566 didn't see this branch the last time around (when the
8567 insn in front of the target was a JUMP_INSN that was
8568 turned into a no-op). */
8569 path_size = path_entry;
8571 p = JUMP_LABEL (p);
8572 /* Mark block so we won't scan it again later. */
8573 PUT_MODE (NEXT_INSN (p), QImode);
8575 /* Detect a branch around a block of code. */
8576 else if (skip_blocks && q != 0 && GET_CODE (q) != CODE_LABEL)
8578 register rtx tmp;
8580 if (next_real_insn (q) == next)
8582 p = NEXT_INSN (p);
8583 continue;
8586 for (i = 0; i < path_entry; i++)
8587 if (data->path[i].branch == p)
8588 break;
8590 if (i != path_entry)
8591 break;
8593 /* This is no_labels_between_p (p, q) with an added check for
8594 reaching the end of a function (in case Q precedes P). */
8595 for (tmp = NEXT_INSN (p); tmp && tmp != q; tmp = NEXT_INSN (tmp))
8596 if (GET_CODE (tmp) == CODE_LABEL)
8597 break;
8599 if (tmp == q)
8601 data->path[path_entry].branch = p;
8602 data->path[path_entry++].status = AROUND;
8604 path_size = path_entry;
8606 p = JUMP_LABEL (p);
8607 /* Mark block so we won't scan it again later. */
8608 PUT_MODE (NEXT_INSN (p), QImode);
8612 p = NEXT_INSN (p);
8615 data->low_cuid = low_cuid;
8616 data->high_cuid = high_cuid;
8617 data->nsets = nsets;
8618 data->last = p;
8620 /* If all jumps in the path are not taken, set our path length to zero
8621 so a rescan won't be done. */
8622 for (i = path_size - 1; i >= 0; i--)
8623 if (data->path[i].status != NOT_TAKEN)
8624 break;
8626 if (i == -1)
8627 data->path_size = 0;
8628 else
8629 data->path_size = path_size;
8631 /* End the current branch path. */
8632 data->path[path_size].branch = 0;
8635 /* Perform cse on the instructions of a function.
8636 F is the first instruction.
8637 NREGS is one plus the highest pseudo-reg number used in the function.
8639 AFTER_LOOP is 1 if this is the cse call done after loop optimization
8640 (only if -frerun-cse-after-loop).
8642 Returns 1 if jump_optimize should be redone due to simplifications
8643 in conditional jump instructions. */
8645 int
8646 cse_main (f, nregs, after_loop, file)
8647 rtx f;
8648 int nregs;
8649 int after_loop;
8650 FILE *file;
8652 struct cse_basic_block_data val;
8653 register rtx insn = f;
8654 register int i;
8656 cse_jumps_altered = 0;
8657 recorded_label_ref = 0;
8658 constant_pool_entries_cost = 0;
8659 val.path_size = 0;
8661 init_recog ();
8662 init_alias_analysis ();
8664 max_reg = nregs;
8666 max_insn_uid = get_max_uid ();
8668 reg_next_eqv = (int *) alloca (nregs * sizeof (int));
8669 reg_prev_eqv = (int *) alloca (nregs * sizeof (int));
8671 #ifdef LOAD_EXTEND_OP
8673 /* Allocate scratch rtl here. cse_insn will fill in the memory reference
8674 and change the code and mode as appropriate. */
8675 memory_extend_rtx = gen_rtx_ZERO_EXTEND (VOIDmode, NULL_RTX);
8676 #endif
8678 /* Discard all the free elements of the previous function
8679 since they are allocated in the temporary obstack. */
8680 bzero ((char *) table, sizeof table);
8681 free_element_chain = 0;
8682 n_elements_made = 0;
8684 /* Find the largest uid. */
8686 max_uid = get_max_uid ();
8687 uid_cuid = (int *) alloca ((max_uid + 1) * sizeof (int));
8688 bzero ((char *) uid_cuid, (max_uid + 1) * sizeof (int));
8690 /* Compute the mapping from uids to cuids.
8691 CUIDs are numbers assigned to insns, like uids,
8692 except that cuids increase monotonically through the code.
8693 Don't assign cuids to line-number NOTEs, so that the distance in cuids
8694 between two insns is not affected by -g. */
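/* E.g. (annotation): for the sequence insn A, line-number note,
   insn B, the cuids assigned are 1, 1, 2; the note reuses the
   cuid of the preceding insn, so cuid distances are identical
   with and without -g.  */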
8696 for (insn = f, i = 0; insn; insn = NEXT_INSN (insn))
8698 if (GET_CODE (insn) != NOTE
8699 || NOTE_LINE_NUMBER (insn) < 0)
8700 INSN_CUID (insn) = ++i;
8701 else
8702 /* Give a line number note the same cuid as preceding insn. */
8703 INSN_CUID (insn) = i;
8706 /* Initialize which registers are clobbered by calls. */
8708 CLEAR_HARD_REG_SET (regs_invalidated_by_call);
8710 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
8711 if ((call_used_regs[i]
8712 /* Used to check !fixed_regs[i] here, but that isn't safe;
8713 fixed regs are still call-clobbered, and sched can get
8714 confused if they can "live across calls".
8716 The frame pointer is always preserved across calls. The arg
8717 pointer is if it is fixed. The stack pointer usually is, unless
8718 RETURN_POPS_ARGS, in which case an explicit CLOBBER
8719 will be present. If we are generating PIC code, the PIC offset
8720 table register is preserved across calls. */
8722 && i != STACK_POINTER_REGNUM
8723 && i != FRAME_POINTER_REGNUM
8724 #if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
8725 && i != HARD_FRAME_POINTER_REGNUM
8726 #endif
8727 #if ARG_POINTER_REGNUM != FRAME_POINTER_REGNUM
8728 && ! (i == ARG_POINTER_REGNUM && fixed_regs[i])
8729 #endif
8730 #if defined (PIC_OFFSET_TABLE_REGNUM) && !defined (PIC_OFFSET_TABLE_REG_CALL_CLOBBERED)
8731 && ! (i == PIC_OFFSET_TABLE_REGNUM && flag_pic)
8732 #endif
8734 || global_regs[i])
8735 SET_HARD_REG_BIT (regs_invalidated_by_call, i);
8737 if (ggc_p)
8738 ggc_push_context ();
8740 /* Loop over basic blocks.
8741 Compute the maximum number of qty's needed for each basic block
8742 (which is 2 for each SET). */
8743 insn = f;
8744 while (insn)
8746 cse_end_of_basic_block (insn, &val, flag_cse_follow_jumps, after_loop,
8747 flag_cse_skip_blocks);
8749 /* If this basic block was already processed or has no sets, skip it. */
8750 if (val.nsets == 0 || GET_MODE (insn) == QImode)
8752 PUT_MODE (insn, VOIDmode);
8753 insn = (val.last ? NEXT_INSN (val.last) : 0);
8754 val.path_size = 0;
8755 continue;
8758 cse_basic_block_start = val.low_cuid;
8759 cse_basic_block_end = val.high_cuid;
8760 max_qty = val.nsets * 2;
8762 if (file)
8763 fnotice (file, ";; Processing block from %d to %d, %d sets.\n",
8764 INSN_UID (insn), val.last ? INSN_UID (val.last) : 0,
8765 val.nsets);
8767 /* Make MAX_QTY bigger to give us room to optimize
8768 past the end of this basic block, if that should prove useful. */
8769 if (max_qty < 500)
8770 max_qty = 500;
8772 max_qty += max_reg;
8774 /* If this basic block is being extended by following certain jumps,
8775 (see `cse_end_of_basic_block'), we reprocess the code from the start.
8776 Otherwise, we start after this basic block. */
8777 if (val.path_size > 0)
8778 cse_basic_block (insn, val.last, val.path, 0);
8779 else
8781 int old_cse_jumps_altered = cse_jumps_altered;
8782 rtx temp;
8784 /* When cse changes a conditional jump to an unconditional
8785 jump, we want to reprocess the block, since it will give
8786 us a new branch path to investigate. */
8787 cse_jumps_altered = 0;
8788 temp = cse_basic_block (insn, val.last, val.path, ! after_loop);
8789 if (cse_jumps_altered == 0
8790 || (flag_cse_follow_jumps == 0 && flag_cse_skip_blocks == 0))
8791 insn = temp;
8793 cse_jumps_altered |= old_cse_jumps_altered;
8796 if (ggc_p)
8797 ggc_collect ();
8799 #ifdef USE_C_ALLOCA
8800 alloca (0);
8801 #endif
8804 if (ggc_p)
8805 ggc_pop_context ();
8807 /* Tell refers_to_mem_p that qty_const info is not available. */
8808 qty_const = 0;
8810 if (max_elements_made < n_elements_made)
8811 max_elements_made = n_elements_made;
8813 return cse_jumps_altered || recorded_label_ref;
8816 /* Process a single basic block. FROM and TO are the limits of the basic
8817 block. NEXT_BRANCH points to the branch path when following jumps or
8818 a null path when not following jumps.
8820 AROUND_LOOP is non-zero if we are to try to cse around to the start of a
8821 loop. This is true when we are being called for the last time on a
8822 block and this CSE pass is before loop.c. */
8824 static rtx
8825 cse_basic_block (from, to, next_branch, around_loop)
8826 register rtx from, to;
8827 struct branch_path *next_branch;
8828 int around_loop;
8830 register rtx insn;
8831 int to_usage = 0;
8832 rtx libcall_insn = NULL_RTX;
8833 int num_insns = 0;
8835 /* Each of these arrays is undefined before max_reg, so only allocate
8836 the space actually needed and adjust the start below. */
8838 qty_first_reg = (int *) alloca ((max_qty - max_reg) * sizeof (int));
8839 qty_last_reg = (int *) alloca ((max_qty - max_reg) * sizeof (int));
8840 qty_mode = (enum machine_mode *) alloca ((max_qty - max_reg)
8841 * sizeof (enum machine_mode));
8842 qty_const = (rtx *) alloca ((max_qty - max_reg) * sizeof (rtx));
8843 qty_const_insn = (rtx *) alloca ((max_qty - max_reg) * sizeof (rtx));
8844 qty_comparison_code
8845 = (enum rtx_code *) alloca ((max_qty - max_reg) * sizeof (enum rtx_code));
8846 qty_comparison_qty = (int *) alloca ((max_qty - max_reg) * sizeof (int));
8847 qty_comparison_const = (rtx *) alloca ((max_qty - max_reg) * sizeof (rtx));
8849 qty_first_reg -= max_reg;
8850 qty_last_reg -= max_reg;
8851 qty_mode -= max_reg;
8852 qty_const -= max_reg;
8853 qty_const_insn -= max_reg;
8854 qty_comparison_code -= max_reg;
8855 qty_comparison_qty -= max_reg;
8856 qty_comparison_const -= max_reg;
8858 new_basic_block ();
8860 /* TO might be a label. If so, protect it from being deleted. */
8861 if (to != 0 && GET_CODE (to) == CODE_LABEL)
8862 ++LABEL_NUSES (to);
8864 for (insn = from; insn != to; insn = NEXT_INSN (insn))
8866 register enum rtx_code code = GET_CODE (insn);
8868 /* If we have processed 1,000 insns, flush the hash table to
8869 avoid extreme quadratic behavior. We must not include NOTEs
8870 in the count since there may be more of them when generating
8871 debugging information. If we clear the table at different
8872 times, code generated with -g -O might be different than code
8873 generated with -O but not -g.
8875 ??? This is a real kludge and needs to be done some other way.
8876 Perhaps for 2.9. */
8877 if (code != NOTE && num_insns++ > 1000)
8879 flush_hash_table ();
8880 num_insns = 0;
8883 /* See if this is a branch that is part of the path. If so, and it is
8884 to be taken, do so. */
8885 if (next_branch->branch == insn)
8887 enum taken status = next_branch++->status;
8888 if (status != NOT_TAKEN)
8890 if (status == TAKEN)
8891 record_jump_equiv (insn, 1);
8892 else
8893 invalidate_skipped_block (NEXT_INSN (insn));
8895 /* Set the last insn as the jump insn; it doesn't affect cc0.
8896 Then follow this branch. */
8897 #ifdef HAVE_cc0
8898 prev_insn_cc0 = 0;
8899 #endif
8900 prev_insn = insn;
8901 insn = JUMP_LABEL (insn);
8902 continue;
8906 if (GET_MODE (insn) == QImode)
8907 PUT_MODE (insn, VOIDmode);
8909 if (GET_RTX_CLASS (code) == 'i')
8911 rtx p;
8913 /* Process notes first so we have all notes in canonical forms when
8914 looking for duplicate operations. */
8916 if (REG_NOTES (insn))
8917 REG_NOTES (insn) = cse_process_notes (REG_NOTES (insn), NULL_RTX);
8919 /* Track when we are inside a LIBCALL block. Inside such a block,
8920 we do not want to record destinations. The last insn of a
8921 LIBCALL block is not considered to be part of the block, since
8922 its destination is the result of the block and hence should be
8923 recorded. */
8925 if ((p = find_reg_note (insn, REG_LIBCALL, NULL_RTX)))
8926 libcall_insn = XEXP (p, 0);
8927 else if (find_reg_note (insn, REG_RETVAL, NULL_RTX))
8928 libcall_insn = NULL_RTX;
8930 cse_insn (insn, libcall_insn);
8933 /* If INSN is now an unconditional jump, skip to the end of our
8934 basic block by pretending that we just did the last insn in the
8935 basic block. If we are jumping to the end of our block, show
8936 that we can have one usage of TO. */
8938 if (simplejump_p (insn))
8940 if (to == 0)
8941 return 0;
8943 if (JUMP_LABEL (insn) == to)
8944 to_usage = 1;
8946 /* Maybe TO was deleted because the jump is unconditional.
8947 If so, there is nothing left in this basic block. */
8948 /* ??? Perhaps it would be smarter to set TO
8949 to whatever follows this insn,
8950 and pretend the basic block had always ended here. */
8951 if (INSN_DELETED_P (to))
8952 break;
8954 insn = PREV_INSN (to);
8957 /* See if it is ok to keep on going past the label
8958 which used to end our basic block. Remember that we incremented
8959 the count of that label, so we decrement it here. If we made
8960 a jump unconditional, TO_USAGE will be one; in that case, we don't
8961 want to count the use in that jump. */
8963 if (to != 0 && NEXT_INSN (insn) == to
8964 && GET_CODE (to) == CODE_LABEL && --LABEL_NUSES (to) == to_usage)
8966 struct cse_basic_block_data val;
8967 rtx prev;
8969 insn = NEXT_INSN (to);
8971 if (LABEL_NUSES (to) == 0)
8972 insn = delete_insn (to);
8974 /* If TO was the last insn in the function, we are done. */
8975 if (insn == 0)
8976 return 0;
8978 /* If TO was preceded by a BARRIER we are done with this block
8979 because it has no continuation. */
8980 prev = prev_nonnote_insn (to);
8981 if (prev && GET_CODE (prev) == BARRIER)
8982 return insn;
8984 /* Find the end of the following block. Note that we won't be
8985 following branches in this case. */
8986 to_usage = 0;
8987 val.path_size = 0;
8988 cse_end_of_basic_block (insn, &val, 0, 0, 0);
8990 /* If the tables we allocated have enough space left
8991 to handle all the SETs in the next basic block,
8992 continue through it. Otherwise, return,
8993 and that block will be scanned individually. */
8994 if (val.nsets * 2 + next_qty > max_qty)
8995 break;
8997 cse_basic_block_start = val.low_cuid;
8998 cse_basic_block_end = val.high_cuid;
8999 to = val.last;
9001 /* Prevent TO from being deleted if it is a label. */
9002 if (to != 0 && GET_CODE (to) == CODE_LABEL)
9003 ++LABEL_NUSES (to);
9005 /* Back up so we process the first insn in the extension. */
9006 insn = PREV_INSN (insn);
9010 if (next_qty > max_qty)
9011 abort ();
9013 /* If we are running before loop.c, we stopped on a NOTE_INSN_LOOP_END, and
9014 the previous insn is the only insn that branches to the head of a loop,
9015 we can cse into the loop. Don't do this if we changed the jump
9016 structure of a loop unless we aren't going to be following jumps. */
9018 if ((cse_jumps_altered == 0
9019 || (flag_cse_follow_jumps == 0 && flag_cse_skip_blocks == 0))
9020 && around_loop && to != 0
9021 && GET_CODE (to) == NOTE && NOTE_LINE_NUMBER (to) == NOTE_INSN_LOOP_END
9022 && GET_CODE (PREV_INSN (to)) == JUMP_INSN
9023 && JUMP_LABEL (PREV_INSN (to)) != 0
9024 && LABEL_NUSES (JUMP_LABEL (PREV_INSN (to))) == 1)
9025 cse_around_loop (JUMP_LABEL (PREV_INSN (to)));
9027 return to ? NEXT_INSN (to) : 0;
9030 /* Count the number of times registers are used (not set) in X.
9031 COUNTS is an array in which we accumulate the count, INCR is how much
9032 we count each register usage.
9034 Don't count a usage of DEST, which is the SET_DEST of a SET which
9035 contains X in its SET_SRC. This is because such a SET does not
9036 modify the liveness of DEST. */
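/* E.g. (annotation): when counting
   (set (reg 60) (plus (reg 60) (const_int 1))), the reg 60 inside
   the source is skipped because DEST is reg 60; the insn keeps
   reg 60 live only for its own benefit, so if nothing else uses
   reg 60 the insn is trivially dead.  */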
9038 static void
9039 count_reg_usage (x, counts, dest, incr)
9040 rtx x;
9041 int *counts;
9042 rtx dest;
9043 int incr;
9045 enum rtx_code code;
9046 const char *fmt;
9047 int i, j;
9049 if (x == 0)
9050 return;
9052 switch (code = GET_CODE (x))
9054 case REG:
9055 if (x != dest)
9056 counts[REGNO (x)] += incr;
9057 return;
9059 case PC:
9060 case CC0:
9061 case CONST:
9062 case CONST_INT:
9063 case CONST_DOUBLE:
9064 case SYMBOL_REF:
9065 case LABEL_REF:
9066 return;
9068 case CLOBBER:
9069 /* If we are clobbering a MEM, mark any registers inside the address
9070 as being used. */
9071 if (GET_CODE (XEXP (x, 0)) == MEM)
9072 count_reg_usage (XEXP (XEXP (x, 0), 0), counts, NULL_RTX, incr);
9073 return;
9075 case SET:
9076 /* Unless we are setting a REG, count everything in SET_DEST. */
9077 if (GET_CODE (SET_DEST (x)) != REG)
9078 count_reg_usage (SET_DEST (x), counts, NULL_RTX, incr);
9080 /* If SRC has side-effects, then we can't delete this insn, so the
9081 usage of SET_DEST inside SRC counts.
9083 ??? Strictly-speaking, we might be preserving this insn
9084 because some other SET has side-effects, but that's hard
9085 to do and can't happen now. */
9086 count_reg_usage (SET_SRC (x), counts,
9087 side_effects_p (SET_SRC (x)) ? NULL_RTX : SET_DEST (x),
9088 incr);
9089 return;
9091 case CALL_INSN:
9092 count_reg_usage (CALL_INSN_FUNCTION_USAGE (x), counts, NULL_RTX, incr);
9094 /* ... falls through ... */
9095 case INSN:
9096 case JUMP_INSN:
9097 count_reg_usage (PATTERN (x), counts, NULL_RTX, incr);
9099 /* Things used in a REG_EQUAL note aren't dead since loop may try to
9100 use them. */
9102 count_reg_usage (REG_NOTES (x), counts, NULL_RTX, incr);
9103 return;
9105 case EXPR_LIST:
9106 case INSN_LIST:
9107 if (REG_NOTE_KIND (x) == REG_EQUAL
9108 || (REG_NOTE_KIND (x) != REG_NONNEG && GET_CODE (XEXP (x, 0)) == USE))
9109 count_reg_usage (XEXP (x, 0), counts, NULL_RTX, incr);
9110 count_reg_usage (XEXP (x, 1), counts, NULL_RTX, incr);
9111 return;
9113 default:
9114 break;
9117 fmt = GET_RTX_FORMAT (code);
9118 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
9120 if (fmt[i] == 'e')
9121 count_reg_usage (XEXP (x, i), counts, dest, incr);
9122 else if (fmt[i] == 'E')
9123 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
9124 count_reg_usage (XVECEXP (x, i, j), counts, dest, incr);
9128 /* Scan all the insns and delete any that are dead; i.e., they store a register
9129 that is never used or they copy a register to itself.
9131 This is used to remove insns made obviously dead by cse, loop or other
9132 optimizations. It improves the heuristics in loop since it won't try to
9133 move dead invariants out of loops or make givs for dead quantities. The
9134 remaining passes of the compilation are also sped up. */
9136 void
9137 delete_trivially_dead_insns (insns, nreg)
9138 rtx insns;
9139 int nreg;
9141 int *counts = (int *) alloca (nreg * sizeof (int));
9142 rtx insn, prev;
9143 #ifdef HAVE_cc0
9144 rtx tem;
9145 #endif
9146 int i;
9147 int in_libcall = 0, dead_libcall = 0;
9149 /* First count the number of times each register is used. */
9150 bzero ((char *) counts, sizeof (int) * nreg);
9151 for (insn = next_real_insn (insns); insn; insn = next_real_insn (insn))
9152 count_reg_usage (insn, counts, NULL_RTX, 1);
9154 /* Go from the last insn to the first and delete insns that only set unused
9155 registers or copy a register to itself. As we delete an insn, remove
9156 usage counts for registers it uses.
9158 The first jump optimization pass may leave a real insn as the last
9159 insn in the function. We must not skip that insn or we may end
9160 up deleting code that is not really dead. */
9161 insn = get_last_insn ();
9162 if (GET_RTX_CLASS (GET_CODE (insn)) != 'i')
9163 insn = prev_real_insn (insn);
9165 for ( ; insn; insn = prev)
9167 int live_insn = 0;
9168 rtx note;
9170 prev = prev_real_insn (insn);
9172 /* Don't delete any insns that are part of a libcall block unless
9173 we can delete the whole libcall block.
9175 Flow or loop might get confused if we did that. Remember
9176 that we are scanning backwards. */
9177 if (find_reg_note (insn, REG_RETVAL, NULL_RTX))
9179 in_libcall = 1;
9180 live_insn = 1;
9181 dead_libcall = 0;
9183 /* See if there's a REG_EQUAL note on this insn and try to
9184 replace the source with the REG_EQUAL expression.
9186 We assume that insns with REG_RETVALs can only be reg->reg
9187 copies at this point. */
9188 note = find_reg_note (insn, REG_EQUAL, NULL_RTX);
9189 if (note)
9191 rtx set = single_set (insn);
9192 if (set
9193 && validate_change (insn, &SET_SRC (set), XEXP (note, 0), 0))
9195 remove_note (insn,
9196 find_reg_note (insn, REG_RETVAL, NULL_RTX));
9197 dead_libcall = 1;
9201 else if (in_libcall)
9202 live_insn = ! dead_libcall;
9203 else if (GET_CODE (PATTERN (insn)) == SET)
9205 if (GET_CODE (SET_DEST (PATTERN (insn))) == REG
9206 && SET_DEST (PATTERN (insn)) == SET_SRC (PATTERN (insn)))
9209 #ifdef HAVE_cc0
9210 else if (GET_CODE (SET_DEST (PATTERN (insn))) == CC0
9211 && ! side_effects_p (SET_SRC (PATTERN (insn)))
9212 && ((tem = next_nonnote_insn (insn)) == 0
9213 || GET_RTX_CLASS (GET_CODE (tem)) != 'i'
9214 || ! reg_referenced_p (cc0_rtx, PATTERN (tem))))
9216 #endif
9217 else if (GET_CODE (SET_DEST (PATTERN (insn))) != REG
9218 || REGNO (SET_DEST (PATTERN (insn))) < FIRST_PSEUDO_REGISTER
9219 || counts[REGNO (SET_DEST (PATTERN (insn)))] != 0
9220 || side_effects_p (SET_SRC (PATTERN (insn)))
9221 /* An ADDRESSOF expression can turn into a use of the
9222 internal arg pointer, so always consider the
9223 internal arg pointer live. If it is truly dead,
9224 flow will delete the initializing insn. */
9225 || (SET_DEST (PATTERN (insn))
9226 == current_function_internal_arg_pointer))
9227 live_insn = 1;
9229 else if (GET_CODE (PATTERN (insn)) == PARALLEL)
9230 for (i = XVECLEN (PATTERN (insn), 0) - 1; i >= 0; i--)
9232 rtx elt = XVECEXP (PATTERN (insn), 0, i);
9234 if (GET_CODE (elt) == SET)
9236 if (GET_CODE (SET_DEST (elt)) == REG
9237 && SET_DEST (elt) == SET_SRC (elt))
9240 #ifdef HAVE_cc0
9241 else if (GET_CODE (SET_DEST (elt)) == CC0
9242 && ! side_effects_p (SET_SRC (elt))
9243 && ((tem = next_nonnote_insn (insn)) == 0
9244 || GET_RTX_CLASS (GET_CODE (tem)) != 'i'
9245 || ! reg_referenced_p (cc0_rtx, PATTERN (tem))))
9247 #endif
9248 else if (GET_CODE (SET_DEST (elt)) != REG
9249 || REGNO (SET_DEST (elt)) < FIRST_PSEUDO_REGISTER
9250 || counts[REGNO (SET_DEST (elt))] != 0
9251 || side_effects_p (SET_SRC (elt))
9252 /* An ADDRESSOF expression can turn into a use of the
9253 internal arg pointer, so always consider the
9254 internal arg pointer live. If it is truly dead,
9255 flow will delete the initializing insn. */
9256 || (SET_DEST (elt)
9257 == current_function_internal_arg_pointer))
9258 live_insn = 1;
9260 else if (GET_CODE (elt) != CLOBBER && GET_CODE (elt) != USE)
9261 live_insn = 1;
9263 else
9264 live_insn = 1;
9266 /* If this is a dead insn, delete it and show registers in it aren't
9267 being used. */
9269 if (! live_insn)
9271 count_reg_usage (insn, counts, NULL_RTX, -1);
9272 delete_insn (insn);
9275 if (find_reg_note (insn, REG_LIBCALL, NULL_RTX))
9277 in_libcall = 0;
9278 dead_libcall = 0;