(expand_inline_function): If called function calls alloca, save and
[official-gcc.git] / gcc / cse.c
blob487cc3f33c3ec627463d2c20bc356edcb0b0366d
1 /* Common subexpression elimination for GNU compiler.
2 Copyright (C) 1987, 88, 89, 92, 93, 1994 Free Software Foundation, Inc.
4 This file is part of GNU CC.
6 GNU CC is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2, or (at your option)
9 any later version.
11 GNU CC is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with GNU CC; see the file COPYING. If not, write to
18 the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
21 #include "config.h"
22 /* Must precede rtl.h for FFS. */
23 #include <stdio.h>
25 #include "rtl.h"
26 #include "regs.h"
27 #include "hard-reg-set.h"
28 #include "flags.h"
29 #include "real.h"
30 #include "insn-config.h"
31 #include "recog.h"
33 #include <setjmp.h>
35 /* The basic idea of common subexpression elimination is to go
36 through the code, keeping a record of expressions that would
37 have the same value at the current scan point, and replacing
38 expressions encountered with the cheapest equivalent expression.
40 It is too complicated to keep track of the different possibilities
41 when control paths merge; so, at each label, we forget all that is
42 known and start fresh. This can be described as processing each
43 basic block separately. Note, however, that these are not quite
44 the same as the basic blocks found by a later pass and used for
45 data flow analysis and register packing. We do not need to start fresh
46 after a conditional jump instruction if there is no label there.
48 We use two data structures to record the equivalent expressions:
49 a hash table for most expressions, and several vectors together
50 with "quantity numbers" to record equivalent (pseudo) registers.
52 The use of the special data structure for registers is desirable
53 because it is faster. It is possible because registers references
54 contain a fairly small number, the register number, taken from
55 a contiguously allocated series, and two register references are
56 identical if they have the same number. General expressions
57 do not have any such thing, so the only way to retrieve the
58 information recorded on an expression other than a register
59 is to keep it in a hash table.
61 Registers and "quantity numbers":
63 At the start of each basic block, all of the (hardware and pseudo)
64 registers used in the function are given distinct quantity
65 numbers to indicate their contents. During scan, when the code
66 copies one register into another, we copy the quantity number.
67 When a register is loaded in any other way, we allocate a new
68 quantity number to describe the value generated by this operation.
69 `reg_qty' records what quantity a register is currently thought
70 of as containing.
72 All real quantity numbers are greater than or equal to `max_reg'.
73 If register N has not been assigned a quantity, reg_qty[N] will equal N.
75 Quantity numbers below `max_reg' do not exist and none of the `qty_...'
76 variables should be referenced with an index below `max_reg'.
78 We also maintain a bidirectional chain of registers for each
79 quantity number. `qty_first_reg', `qty_last_reg',
80 `reg_next_eqv' and `reg_prev_eqv' hold these chains.
82 The first register in a chain is the one whose lifespan is least local.
83 Among equals, it is the one that was seen first.
84 We replace any equivalent register with that one.
86 If two registers have the same quantity number, it must be true that
87 REG expressions with `qty_mode' must be in the hash table for both
88 registers and must be in the same class.
90 The converse is not true. Since hard registers may be referenced in
91 any mode, two REG expressions might be equivalent in the hash table
92 but not have the same quantity number if the quantity number of one
93 of the registers is not the same mode as those expressions.
95 Constants and quantity numbers
97 When a quantity has a known constant value, that value is stored
98 in the appropriate element of qty_const. This is in addition to
99 putting the constant in the hash table as is usual for non-regs.
101 Whether a reg or a constant is preferred is determined by the configuration
102 macro CONST_COSTS and will often depend on the constant value. In any
103 event, expressions containing constants can be simplified, by fold_rtx.
105 When a quantity has a known nearly constant value (such as an address
106 of a stack slot), that value is stored in the appropriate element
107 of qty_const.
109 Integer constants don't have a machine mode. However, cse
110 determines the intended machine mode from the destination
111 of the instruction that moves the constant. The machine mode
112 is recorded in the hash table along with the actual RTL
113 constant expression so that different modes are kept separate.
115 Other expressions:
117 To record known equivalences among expressions in general
118 we use a hash table called `table'. It has a fixed number of buckets
119 that contain chains of `struct table_elt' elements for expressions.
120 These chains connect the elements whose expressions have the same
121 hash codes.
123 Other chains through the same elements connect the elements which
124 currently have equivalent values.
126 Register references in an expression are canonicalized before hashing
127 the expression. This is done using `reg_qty' and `qty_first_reg'.
128 The hash code of a register reference is computed using the quantity
129 number, not the register number.
131 When the value of an expression changes, it is necessary to remove from the
132 hash table not just that expression but all expressions whose values
133 could be different as a result.
135 1. If the value changing is in memory, except in special cases
136 ANYTHING referring to memory could be changed. That is because
137 nobody knows where a pointer does not point.
138 The function `invalidate_memory' removes what is necessary.
140 The special cases are when the address is constant or is
141 a constant plus a fixed register such as the frame pointer
142 or a static chain pointer. When such addresses are stored in,
143 we can tell exactly which other such addresses must be invalidated
144 due to overlap. `invalidate' does this.
145 All expressions that refer to non-constant
146 memory addresses are also invalidated. `invalidate_memory' does this.
148 2. If the value changing is a register, all expressions
149 containing references to that register, and only those,
150 must be removed.
152 Because searching the entire hash table for expressions that contain
153 a register is very slow, we try to figure out when it isn't necessary.
154 Precisely, this is necessary only when expressions have been
155 entered in the hash table using this register, and then the value has
156 changed, and then another expression wants to be added to refer to
157 the register's new value. This sequence of circumstances is rare
158 within any one basic block.
160 The vectors `reg_tick' and `reg_in_table' are used to detect this case.
161 reg_tick[i] is incremented whenever a value is stored in register i.
162 reg_in_table[i] holds -1 if no references to register i have been
163 entered in the table; otherwise, it contains the value reg_tick[i] had
164 when the references were entered. If we want to enter a reference
165 and reg_in_table[i] != reg_tick[i], we must scan and remove old references.
166 Until we want to enter a new entry, the mere fact that the two vectors
167 don't match makes the entries be ignored if anyone tries to match them.
169 Registers themselves are entered in the hash table as well as in
170 the equivalent-register chains. However, the vectors `reg_tick'
171 and `reg_in_table' do not apply to expressions which are simple
172 register references. These expressions are removed from the table
173 immediately when they become invalid, and this can be done even if
174 we do not immediately search for all the expressions that refer to
175 the register.
177 A CLOBBER rtx in an instruction invalidates its operand for further
178 reuse. A CLOBBER or SET rtx whose operand is a MEM:BLK
179 invalidates everything that resides in memory.
181 Related expressions:
183 Constant expressions that differ only by an additive integer
184 are called related. When a constant expression is put in
185 the table, the related expression with no constant term
186 is also entered. These are made to point at each other
187 so that it is possible to find out if there exists any
188 register equivalent to an expression related to a given expression. */
190 /* One plus largest register number used in this function. */
192 static int max_reg;
194 /* Length of vectors indexed by quantity number.
195 We know in advance we will not need a quantity number this big. */
197 static int max_qty;
199 /* Next quantity number to be allocated.
200 This is 1 + the largest number needed so far. */
202 static int next_qty;
204 /* Indexed by quantity number, gives the first (or last) (pseudo) register
205 in the chain of registers that currently contain this quantity. */
207 static int *qty_first_reg;
208 static int *qty_last_reg;
210 /* Index by quantity number, gives the mode of the quantity. */
212 static enum machine_mode *qty_mode;
214 /* Indexed by quantity number, gives the rtx of the constant value of the
215 quantity, or zero if it does not have a known value.
216 A sum of the frame pointer (or arg pointer) plus a constant
217 can also be entered here. */
219 static rtx *qty_const;
221 /* Indexed by qty number, gives the insn that stored the constant value
222 recorded in `qty_const'. */
224 static rtx *qty_const_insn;
226 /* The next three variables are used to track when a comparison between a
227 quantity and some constant or register has been passed. In that case, we
228 know the results of the comparison in case we see it again. These variables
229 record a comparison that is known to be true. */
231 /* Indexed by qty number, gives the rtx code of a comparison with a known
232 result involving this quantity. If none, it is UNKNOWN. */
233 static enum rtx_code *qty_comparison_code;
235 /* Indexed by qty number, gives the constant being compared against in a
236 comparison of known result. If no such comparison, it is undefined.
237 If the comparison is not with a constant, it is zero. */
239 static rtx *qty_comparison_const;
241 /* Indexed by qty number, gives the quantity being compared against in a
242 comparison of known result. If no such comparison, if it undefined.
243 If the comparison is not with a register, it is -1. */
245 static int *qty_comparison_qty;
247 #ifdef HAVE_cc0
248 /* For machines that have a CC0, we do not record its value in the hash
249 table since its use is guaranteed to be the insn immediately following
250 its definition and any other insn is presumed to invalidate it.
252 Instead, we store below the value last assigned to CC0. If it should
253 happen to be a constant, it is stored in preference to the actual
254 assigned value. In case it is a constant, we store the mode in which
255 the constant should be interpreted. */
257 static rtx prev_insn_cc0;
258 static enum machine_mode prev_insn_cc0_mode;
259 #endif
261 /* Previous actual insn. 0 if at first insn of basic block. */
263 static rtx prev_insn;
265 /* Insn being scanned. */
267 static rtx this_insn;
269 /* Index by (pseudo) register number, gives the quantity number
270 of the register's current contents. */
272 static int *reg_qty;
274 /* Index by (pseudo) register number, gives the number of the next (or
275 previous) (pseudo) register in the chain of registers sharing the same
276 value.
278 Or -1 if this register is at the end of the chain.
280 If reg_qty[N] == N, reg_next_eqv[N] is undefined. */
282 static int *reg_next_eqv;
283 static int *reg_prev_eqv;
285 /* Index by (pseudo) register number, gives the number of times
286 that register has been altered in the current basic block. */
288 static int *reg_tick;
290 /* Index by (pseudo) register number, gives the reg_tick value at which
291 rtx's containing this register are valid in the hash table.
292 If this does not equal the current reg_tick value, such expressions
293 existing in the hash table are invalid.
294 If this is -1, no expressions containing this register have been
295 entered in the table. */
297 static int *reg_in_table;
299 /* A HARD_REG_SET containing all the hard registers for which there is
300 currently a REG expression in the hash table. Note the difference
301 from the above variables, which indicate if the REG is mentioned in some
302 expression in the table. */
304 static HARD_REG_SET hard_regs_in_table;
306 /* A HARD_REG_SET containing all the hard registers that are invalidated
307 by a CALL_INSN. */
309 static HARD_REG_SET regs_invalidated_by_call;
311 /* Two vectors of ints:
312 one containing max_reg -1's; the other max_reg + 500 (an approximation
313 for max_qty) elements where element i contains i.
314 These are used to initialize various other vectors fast. */
316 static int *all_minus_one;
317 static int *consec_ints;
319 /* CUID of insn that starts the basic block currently being cse-processed. */
321 static int cse_basic_block_start;
323 /* CUID of insn that ends the basic block currently being cse-processed. */
325 static int cse_basic_block_end;
327 /* Vector mapping INSN_UIDs to cuids.
328 The cuids are like uids but increase monotonically always.
329 We use them to see whether a reg is used outside a given basic block. */
331 static int *uid_cuid;
333 /* Highest UID in UID_CUID. */
334 static int max_uid;
336 /* Get the cuid of an insn. */
338 #define INSN_CUID(INSN) (uid_cuid[INSN_UID (INSN)])
340 /* Nonzero if cse has altered conditional jump insns
341 in such a way that jump optimization should be redone. */
343 static int cse_jumps_altered;
345 /* canon_hash stores 1 in do_not_record
346 if it notices a reference to CC0, PC, or some other volatile
347 subexpression. */
349 static int do_not_record;
351 #ifdef LOAD_EXTEND_OP
353 /* Scratch rtl used when looking for load-extended copy of a MEM. */
354 static rtx memory_extend_rtx;
355 #endif
357 /* canon_hash stores 1 in hash_arg_in_memory
358 if it notices a reference to memory within the expression being hashed. */
360 static int hash_arg_in_memory;
362 /* canon_hash stores 1 in hash_arg_in_struct
363 if it notices a reference to memory that's part of a structure. */
365 static int hash_arg_in_struct;
367 /* The hash table contains buckets which are chains of `struct table_elt's,
368 each recording one expression's information.
369 That expression is in the `exp' field.
371 Those elements with the same hash code are chained in both directions
372 through the `next_same_hash' and `prev_same_hash' fields.
374 Each set of expressions with equivalent values
375 are on a two-way chain through the `next_same_value'
376 and `prev_same_value' fields, and all point with
377 the `first_same_value' field at the first element in
378 that chain. The chain is in order of increasing cost.
379 Each element's cost value is in its `cost' field.
381 The `in_memory' field is nonzero for elements that
382 involve any reference to memory. These elements are removed
383 whenever a write is done to an unidentified location in memory.
384 To be safe, we assume that a memory address is unidentified unless
385 the address is either a symbol constant or a constant plus
386 the frame pointer or argument pointer.
388 The `in_struct' field is nonzero for elements that
389 involve any reference to memory inside a structure or array.
391 The `related_value' field is used to connect related expressions
392 (that differ by adding an integer).
393 The related expressions are chained in a circular fashion.
394 `related_value' is zero for expressions for which this
395 chain is not useful.
397 The `cost' field stores the cost of this element's expression.
399 The `is_const' flag is set if the element is a constant (including
400 a fixed address).
402 The `flag' field is used as a temporary during some search routines.
404 The `mode' field is usually the same as GET_MODE (`exp'), but
405 if `exp' is a CONST_INT and has no machine mode then the `mode'
406 field is the mode it was being used as. Each constant is
407 recorded separately for each mode it is used with. */
410 struct table_elt
412 rtx exp;
413 struct table_elt *next_same_hash;
414 struct table_elt *prev_same_hash;
415 struct table_elt *next_same_value;
416 struct table_elt *prev_same_value;
417 struct table_elt *first_same_value;
418 struct table_elt *related_value;
419 int cost;
420 enum machine_mode mode;
421 char in_memory;
422 char in_struct;
423 char is_const;
424 char flag;
427 /* We don't want a lot of buckets, because we rarely have very many
428 things stored in the hash table, and a lot of buckets slows
429 down a lot of loops that happen frequently. */
430 #define NBUCKETS 31
432 /* Compute hash code of X in mode M. Special-case case where X is a pseudo
433 register (hard registers may require `do_not_record' to be set). */
435 #define HASH(X, M) \
436 (GET_CODE (X) == REG && REGNO (X) >= FIRST_PSEUDO_REGISTER \
437 ? (((unsigned) REG << 7) + (unsigned) reg_qty[REGNO (X)]) % NBUCKETS \
438 : canon_hash (X, M) % NBUCKETS)
440 /* Determine whether register number N is considered a fixed register for CSE.
441 It is desirable to replace other regs with fixed regs, to reduce need for
442 non-fixed hard regs.
443 A reg wins if it is either the frame pointer or designated as fixed,
444 but not if it is an overlapping register. */
445 #ifdef OVERLAPPING_REGNO_P
446 #define FIXED_REGNO_P(N) \
447 (((N) == FRAME_POINTER_REGNUM || (N) == HARD_FRAME_POINTER_REGNUM \
448 || fixed_regs[N] || global_regs[N]) \
449 && ! OVERLAPPING_REGNO_P ((N)))
450 #else
451 #define FIXED_REGNO_P(N) \
452 ((N) == FRAME_POINTER_REGNUM || (N) == HARD_FRAME_POINTER_REGNUM \
453 || fixed_regs[N] || global_regs[N])
454 #endif
456 /* Compute cost of X, as stored in the `cost' field of a table_elt. Fixed
457 hard registers and pointers into the frame are the cheapest with a cost
458 of 0. Next come pseudos with a cost of one and other hard registers with
459 a cost of 2. Aside from these special cases, call `rtx_cost'. */
461 #define CHEAP_REGNO(N) \
462 ((N) == FRAME_POINTER_REGNUM || (N) == HARD_FRAME_POINTER_REGNUM \
463 || (N) == STACK_POINTER_REGNUM || (N) == ARG_POINTER_REGNUM \
464 || ((N) >= FIRST_VIRTUAL_REGISTER && (N) <= LAST_VIRTUAL_REGISTER) \
465 || ((N) < FIRST_PSEUDO_REGISTER \
466 && FIXED_REGNO_P (N) && REGNO_REG_CLASS (N) != NO_REGS))
468 /* A register is cheap if it is a user variable assigned to the register
469 or if its register number always corresponds to a cheap register. */
471 #define CHEAP_REG(N) \
472 ((REG_USERVAR_P (N) && REGNO (N) < FIRST_PSEUDO_REGISTER) \
473 || CHEAP_REGNO (REGNO (N)))
475 #define COST(X) \
476 (GET_CODE (X) == REG \
477 ? (CHEAP_REG (X) ? 0 \
478 : REGNO (X) >= FIRST_PSEUDO_REGISTER ? 1 \
479 : 2) \
480 : rtx_cost (X, SET) * 2)
482 /* Determine if the quantity number for register X represents a valid index
483 into the `qty_...' variables. */
485 #define REGNO_QTY_VALID_P(N) (reg_qty[N] != (N))
487 static struct table_elt *table[NBUCKETS];
489 /* Chain of `struct table_elt's made so far for this function
490 but currently removed from the table. */
492 static struct table_elt *free_element_chain;
494 /* Number of `struct table_elt' structures made so far for this function. */
496 static int n_elements_made;
498 /* Maximum value `n_elements_made' has had so far in this compilation
499 for functions previously processed. */
501 static int max_elements_made;
503 /* Surviving equivalence class when two equivalence classes are merged
504 by recording the effects of a jump in the last insn. Zero if the
505 last insn was not a conditional jump. */
507 static struct table_elt *last_jump_equiv_class;
509 /* Set to the cost of a constant pool reference if one was found for a
510 symbolic constant. If this was found, it means we should try to
511 convert constants into constant pool entries if they don't fit in
512 the insn. */
514 static int constant_pool_entries_cost;
516 /* Bits describing what kind of values in memory must be invalidated
517 for a particular instruction. If all three bits are zero,
518 no memory refs need to be invalidated. Each bit is more powerful
519 than the preceding ones, and if a bit is set then the preceding
520 bits are also set.
522 Here is how the bits are set:
523 Pushing onto the stack invalidates only the stack pointer,
524 writing at a fixed address invalidates only variable addresses,
525 writing in a structure element at variable address
526 invalidates all but scalar variables,
527 and writing in anything else at variable address invalidates everything. */
529 struct write_data
531 int sp : 1; /* Invalidate stack pointer. */
532 int var : 1; /* Invalidate variable addresses. */
533 int nonscalar : 1; /* Invalidate all but scalar variables. */
534 int all : 1; /* Invalidate all memory refs. */
537 /* Define maximum length of a branch path. */
539 #define PATHLENGTH 10
541 /* This data describes a block that will be processed by cse_basic_block. */
543 struct cse_basic_block_data {
544 /* Lowest CUID value of insns in block. */
545 int low_cuid;
546 /* Highest CUID value of insns in block. */
547 int high_cuid;
548 /* Total number of SETs in block. */
549 int nsets;
550 /* Last insn in the block. */
551 rtx last;
552 /* Size of current branch path, if any. */
553 int path_size;
554 /* Current branch path, indicating which branches will be taken. */
555 struct branch_path {
556 /* The branch insn. */
557 rtx branch;
558 /* Whether it should be taken or not. AROUND is the same as taken
559 except that it is used when the destination label is not preceded
560 by a BARRIER. */
561 enum taken {TAKEN, NOT_TAKEN, AROUND} status;
562 } path[PATHLENGTH];
565 /* Nonzero if X has the form (PLUS frame-pointer integer). We check for
566 virtual regs here because the simplify_*_operation routines are called
567 by integrate.c, which is called before virtual register instantiation. */
569 #define FIXED_BASE_PLUS_P(X) \
570 ((X) == frame_pointer_rtx || (X) == hard_frame_pointer_rtx \
571 || (X) == arg_pointer_rtx \
572 || (X) == virtual_stack_vars_rtx \
573 || (X) == virtual_incoming_args_rtx \
574 || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
575 && (XEXP (X, 0) == frame_pointer_rtx \
576 || XEXP (X, 0) == hard_frame_pointer_rtx \
577 || XEXP (X, 0) == arg_pointer_rtx \
578 || XEXP (X, 0) == virtual_stack_vars_rtx \
579 || XEXP (X, 0) == virtual_incoming_args_rtx)))
581 /* Similar, but also allows reference to the stack pointer.
583 This used to include FIXED_BASE_PLUS_P, however, we can't assume that
584 arg_pointer_rtx by itself is nonzero, because on at least one machine,
585 the i960, the arg pointer is zero when it is unused. */
587 #define NONZERO_BASE_PLUS_P(X) \
588 ((X) == frame_pointer_rtx || (X) == hard_frame_pointer_rtx \
589 || (X) == virtual_stack_vars_rtx \
590 || (X) == virtual_incoming_args_rtx \
591 || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
592 && (XEXP (X, 0) == frame_pointer_rtx \
593 || XEXP (X, 0) == hard_frame_pointer_rtx \
594 || XEXP (X, 0) == arg_pointer_rtx \
595 || XEXP (X, 0) == virtual_stack_vars_rtx \
596 || XEXP (X, 0) == virtual_incoming_args_rtx)) \
597 || (X) == stack_pointer_rtx \
598 || (X) == virtual_stack_dynamic_rtx \
599 || (X) == virtual_outgoing_args_rtx \
600 || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
601 && (XEXP (X, 0) == stack_pointer_rtx \
602 || XEXP (X, 0) == virtual_stack_dynamic_rtx \
603 || XEXP (X, 0) == virtual_outgoing_args_rtx)))
605 static void new_basic_block PROTO((void));
606 static void make_new_qty PROTO((int));
607 static void make_regs_eqv PROTO((int, int));
608 static void delete_reg_equiv PROTO((int));
609 static int mention_regs PROTO((rtx));
610 static int insert_regs PROTO((rtx, struct table_elt *, int));
611 static void free_element PROTO((struct table_elt *));
612 static void remove_from_table PROTO((struct table_elt *, unsigned));
613 static struct table_elt *get_element PROTO((void));
614 static struct table_elt *lookup PROTO((rtx, unsigned, enum machine_mode)),
615 *lookup_for_remove PROTO((rtx, unsigned, enum machine_mode));
616 static rtx lookup_as_function PROTO((rtx, enum rtx_code));
617 static struct table_elt *insert PROTO((rtx, struct table_elt *, unsigned,
618 enum machine_mode));
619 static void merge_equiv_classes PROTO((struct table_elt *,
620 struct table_elt *));
621 static void invalidate PROTO((rtx, enum machine_mode));
622 static void remove_invalid_refs PROTO((int));
623 static void rehash_using_reg PROTO((rtx));
624 static void invalidate_memory PROTO((struct write_data *));
625 static void invalidate_for_call PROTO((void));
626 static rtx use_related_value PROTO((rtx, struct table_elt *));
627 static unsigned canon_hash PROTO((rtx, enum machine_mode));
628 static unsigned safe_hash PROTO((rtx, enum machine_mode));
629 static int exp_equiv_p PROTO((rtx, rtx, int, int));
630 static void set_nonvarying_address_components PROTO((rtx, int, rtx *,
631 HOST_WIDE_INT *,
632 HOST_WIDE_INT *));
633 static int refers_to_p PROTO((rtx, rtx));
634 static int refers_to_mem_p PROTO((rtx, rtx, HOST_WIDE_INT,
635 HOST_WIDE_INT));
636 static int cse_rtx_addr_varies_p PROTO((rtx));
637 static rtx canon_reg PROTO((rtx, rtx));
638 static void find_best_addr PROTO((rtx, rtx *));
639 static enum rtx_code find_comparison_args PROTO((enum rtx_code, rtx *, rtx *,
640 enum machine_mode *,
641 enum machine_mode *));
642 static rtx cse_gen_binary PROTO((enum rtx_code, enum machine_mode,
643 rtx, rtx));
644 static rtx simplify_plus_minus PROTO((enum rtx_code, enum machine_mode,
645 rtx, rtx));
646 static rtx fold_rtx PROTO((rtx, rtx));
647 static rtx equiv_constant PROTO((rtx));
648 static void record_jump_equiv PROTO((rtx, int));
649 static void record_jump_cond PROTO((enum rtx_code, enum machine_mode,
650 rtx, rtx, int));
651 static void cse_insn PROTO((rtx, int));
652 static void note_mem_written PROTO((rtx, struct write_data *));
653 static void invalidate_from_clobbers PROTO((struct write_data *, rtx));
654 static rtx cse_process_notes PROTO((rtx, rtx));
655 static void cse_around_loop PROTO((rtx));
656 static void invalidate_skipped_set PROTO((rtx, rtx));
657 static void invalidate_skipped_block PROTO((rtx));
658 static void cse_check_loop_start PROTO((rtx, rtx));
659 static void cse_set_around_loop PROTO((rtx, rtx, rtx));
660 static rtx cse_basic_block PROTO((rtx, rtx, struct branch_path *, int));
661 static void count_reg_usage PROTO((rtx, int *, rtx, int));
663 extern int rtx_equal_function_value_matters;
665 /* Return an estimate of the cost of computing rtx X.
666 One use is in cse, to decide which expression to keep in the hash table.
667 Another is in rtl generation, to pick the cheapest way to multiply.
668 Other uses like the latter are expected in the future. */
670 /* Return the right cost to give to an operation
671 to make the cost of the corresponding register-to-register instruction
672 N times that of a fast register-to-register instruction. */
674 #define COSTS_N_INSNS(N) ((N) * 4 - 2)
677 rtx_cost (x, outer_code)
678 rtx x;
679 enum rtx_code outer_code;
681 register int i, j;
682 register enum rtx_code code;
683 register char *fmt;
684 register int total;
686 if (x == 0)
687 return 0;
689 /* Compute the default costs of certain things.
690 Note that RTX_COSTS can override the defaults. */
692 code = GET_CODE (x);
693 switch (code)
695 case MULT:
696 /* Count multiplication by 2**n as a shift,
697 because if we are considering it, we would output it as a shift. */
698 if (GET_CODE (XEXP (x, 1)) == CONST_INT
699 && exact_log2 (INTVAL (XEXP (x, 1))) >= 0)
700 total = 2;
701 else
702 total = COSTS_N_INSNS (5);
703 break;
704 case DIV:
705 case UDIV:
706 case MOD:
707 case UMOD:
708 total = COSTS_N_INSNS (7);
709 break;
710 case USE:
711 /* Used in loop.c and combine.c as a marker. */
712 total = 0;
713 break;
714 case ASM_OPERANDS:
715 /* We don't want these to be used in substitutions because
716 we have no way of validating the resulting insn. So assign
717 anything containing an ASM_OPERANDS a very high cost. */
718 total = 1000;
719 break;
720 default:
721 total = 2;
724 switch (code)
726 case REG:
727 return ! CHEAP_REG (x);
729 case SUBREG:
730 /* If we can't tie these modes, make this expensive. The larger
731 the mode, the more expensive it is. */
732 if (! MODES_TIEABLE_P (GET_MODE (x), GET_MODE (SUBREG_REG (x))))
733 return COSTS_N_INSNS (2
734 + GET_MODE_SIZE (GET_MODE (x)) / UNITS_PER_WORD);
735 return 2;
736 #ifdef RTX_COSTS
737 RTX_COSTS (x, code, outer_code);
738 #endif
739 CONST_COSTS (x, code, outer_code);
742 /* Sum the costs of the sub-rtx's, plus cost of this operation,
743 which is already in total. */
745 fmt = GET_RTX_FORMAT (code);
746 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
747 if (fmt[i] == 'e')
748 total += rtx_cost (XEXP (x, i), code);
749 else if (fmt[i] == 'E')
750 for (j = 0; j < XVECLEN (x, i); j++)
751 total += rtx_cost (XVECEXP (x, i, j), code);
753 return total;
756 /* Clear the hash table and initialize each register with its own quantity,
757 for a new basic block. */
759 static void
760 new_basic_block ()
762 register int i;
764 next_qty = max_reg;
766 bzero ((char *) reg_tick, max_reg * sizeof (int));
768 bcopy ((char *) all_minus_one, (char *) reg_in_table,
769 max_reg * sizeof (int));
770 bcopy ((char *) consec_ints, (char *) reg_qty, max_reg * sizeof (int));
771 CLEAR_HARD_REG_SET (hard_regs_in_table);
773 /* The per-quantity values used to be initialized here, but it is
774 much faster to initialize each as it is made in `make_new_qty'. */
776 for (i = 0; i < NBUCKETS; i++)
778 register struct table_elt *this, *next;
779 for (this = table[i]; this; this = next)
781 next = this->next_same_hash;
782 free_element (this);
786 bzero ((char *) table, sizeof table);
788 prev_insn = 0;
790 #ifdef HAVE_cc0
791 prev_insn_cc0 = 0;
792 #endif
795 /* Say that register REG contains a quantity not in any register before
796 and initialize that quantity. */
798 static void
799 make_new_qty (reg)
800 register int reg;
802 register int q;
804 if (next_qty >= max_qty)
805 abort ();
807 q = reg_qty[reg] = next_qty++;
808 qty_first_reg[q] = reg;
809 qty_last_reg[q] = reg;
810 qty_const[q] = qty_const_insn[q] = 0;
811 qty_comparison_code[q] = UNKNOWN;
813 reg_next_eqv[reg] = reg_prev_eqv[reg] = -1;
816 /* Make reg NEW equivalent to reg OLD.
817 OLD is not changing; NEW is. */
819 static void
820 make_regs_eqv (new, old)
821 register int new, old;
823 register int lastr, firstr;
824 register int q = reg_qty[old];
826 /* Nothing should become eqv until it has a "non-invalid" qty number. */
827 if (! REGNO_QTY_VALID_P (old))
828 abort ();
830 reg_qty[new] = q;
831 firstr = qty_first_reg[q];
832 lastr = qty_last_reg[q];
834 /* Prefer fixed hard registers to anything. Prefer pseudo regs to other
835 hard regs. Among pseudos, if NEW will live longer than any other reg
836 of the same qty, and that is beyond the current basic block,
837 make it the new canonical replacement for this qty. */
838 if (! (firstr < FIRST_PSEUDO_REGISTER && FIXED_REGNO_P (firstr))
839 /* Certain fixed registers might be of the class NO_REGS. This means
840 that not only can they not be allocated by the compiler, but
841 they cannot be used in substitutions or canonicalizations
842 either. */
843 && (new >= FIRST_PSEUDO_REGISTER || REGNO_REG_CLASS (new) != NO_REGS)
844 && ((new < FIRST_PSEUDO_REGISTER && FIXED_REGNO_P (new))
845 || (new >= FIRST_PSEUDO_REGISTER
846 && (firstr < FIRST_PSEUDO_REGISTER
847 || ((uid_cuid[regno_last_uid[new]] > cse_basic_block_end
848 || (uid_cuid[regno_first_uid[new]]
849 < cse_basic_block_start))
850 && (uid_cuid[regno_last_uid[new]]
851 > uid_cuid[regno_last_uid[firstr]]))))))
853 reg_prev_eqv[firstr] = new;
854 reg_next_eqv[new] = firstr;
855 reg_prev_eqv[new] = -1;
856 qty_first_reg[q] = new;
858 else
860 /* If NEW is a hard reg (known to be non-fixed), insert at end.
861 Otherwise, insert before any non-fixed hard regs that are at the
862 end. Registers of class NO_REGS cannot be used as an
863 equivalent for anything. */
864 while (lastr < FIRST_PSEUDO_REGISTER && reg_prev_eqv[lastr] >= 0
865 && (REGNO_REG_CLASS (lastr) == NO_REGS || ! FIXED_REGNO_P (lastr))
866 && new >= FIRST_PSEUDO_REGISTER)
867 lastr = reg_prev_eqv[lastr];
868 reg_next_eqv[new] = reg_next_eqv[lastr];
869 if (reg_next_eqv[lastr] >= 0)
870 reg_prev_eqv[reg_next_eqv[lastr]] = new;
871 else
872 qty_last_reg[q] = new;
873 reg_next_eqv[lastr] = new;
874 reg_prev_eqv[new] = lastr;
878 /* Remove REG from its equivalence class. */
880 static void
881 delete_reg_equiv (reg)
882 register int reg;
884 register int q = reg_qty[reg];
885 register int p, n;
887 /* If invalid, do nothing. */
888 if (q == reg)
889 return;
891 p = reg_prev_eqv[reg];
892 n = reg_next_eqv[reg];
894 if (n != -1)
895 reg_prev_eqv[n] = p;
896 else
897 qty_last_reg[q] = p;
898 if (p != -1)
899 reg_next_eqv[p] = n;
900 else
901 qty_first_reg[q] = n;
903 reg_qty[reg] = reg;
906 /* Remove any invalid expressions from the hash table
907 that refer to any of the registers contained in expression X.
909 Make sure that newly inserted references to those registers
910 as subexpressions will be considered valid.
912 mention_regs is not called when a register itself
913 is being stored in the table.
915 Return 1 if we have done something that may have changed the hash code
916 of X. */
918 static int
919 mention_regs (x)
920 rtx x;
922 register enum rtx_code code;
923 register int i, j;
924 register char *fmt;
925 register int changed = 0;
927 if (x == 0)
928 return 0;
930 code = GET_CODE (x);
931 if (code == REG)
933 register int regno = REGNO (x);
934 register int endregno
935 = regno + (regno >= FIRST_PSEUDO_REGISTER ? 1
936 : HARD_REGNO_NREGS (regno, GET_MODE (x)));
937 int i;
939 for (i = regno; i < endregno; i++)
941 if (reg_in_table[i] >= 0 && reg_in_table[i] != reg_tick[i])
942 remove_invalid_refs (i);
944 reg_in_table[i] = reg_tick[i];
947 return 0;
950 /* If X is a comparison or a COMPARE and either operand is a register
951 that does not have a quantity, give it one. This is so that a later
952 call to record_jump_equiv won't cause X to be assigned a different
953 hash code and not found in the table after that call.
955 It is not necessary to do this here, since rehash_using_reg can
956 fix up the table later, but doing this here eliminates the need to
957 call that expensive function in the most common case where the only
958 use of the register is in the comparison. */
960 if (code == COMPARE || GET_RTX_CLASS (code) == '<')
962 if (GET_CODE (XEXP (x, 0)) == REG
963 && ! REGNO_QTY_VALID_P (REGNO (XEXP (x, 0))))
964 if (insert_regs (XEXP (x, 0), NULL_PTR, 0))
966 rehash_using_reg (XEXP (x, 0));
967 changed = 1;
970 if (GET_CODE (XEXP (x, 1)) == REG
971 && ! REGNO_QTY_VALID_P (REGNO (XEXP (x, 1))))
972 if (insert_regs (XEXP (x, 1), NULL_PTR, 0))
974 rehash_using_reg (XEXP (x, 1));
975 changed = 1;
979 fmt = GET_RTX_FORMAT (code);
980 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
981 if (fmt[i] == 'e')
982 changed |= mention_regs (XEXP (x, i));
983 else if (fmt[i] == 'E')
984 for (j = 0; j < XVECLEN (x, i); j++)
985 changed |= mention_regs (XVECEXP (x, i, j));
987 return changed;
990 /* Update the register quantities for inserting X into the hash table
991 with a value equivalent to CLASSP.
992 (If the class does not contain a REG, it is irrelevant.)
993 If MODIFIED is nonzero, X is a destination; it is being modified.
994 Note that delete_reg_equiv should be called on a register
995 before insert_regs is done on that register with MODIFIED != 0.
997 Nonzero value means that elements of reg_qty have changed
998 so X's hash code may be different. */
1000 static int
1001 insert_regs (x, classp, modified)
1002 rtx x;
1003 struct table_elt *classp;
1004 int modified;
1006 if (GET_CODE (x) == REG)
1008 register int regno = REGNO (x);
1010 /* If REGNO is in the equivalence table already but is of the
1011 wrong mode for that equivalence, don't do anything here. */
1013 if (REGNO_QTY_VALID_P (regno)
1014 && qty_mode[reg_qty[regno]] != GET_MODE (x))
1015 return 0;
1017 if (modified || ! REGNO_QTY_VALID_P (regno))
1019 if (classp)
1020 for (classp = classp->first_same_value;
1021 classp != 0;
1022 classp = classp->next_same_value)
1023 if (GET_CODE (classp->exp) == REG
1024 && GET_MODE (classp->exp) == GET_MODE (x))
1026 make_regs_eqv (regno, REGNO (classp->exp));
1027 return 1;
1030 make_new_qty (regno);
1031 qty_mode[reg_qty[regno]] = GET_MODE (x);
1032 return 1;
1035 return 0;
1038 /* If X is a SUBREG, we will likely be inserting the inner register in the
1039 table. If that register doesn't have an assigned quantity number at
1040 this point but does later, the insertion that we will be doing now will
1041 not be accessible because its hash code will have changed. So assign
1042 a quantity number now. */
1044 else if (GET_CODE (x) == SUBREG && GET_CODE (SUBREG_REG (x)) == REG
1045 && ! REGNO_QTY_VALID_P (REGNO (SUBREG_REG (x))))
1047 insert_regs (SUBREG_REG (x), NULL_PTR, 0);
1048 mention_regs (SUBREG_REG (x));
1049 return 1;
1051 else
1052 return mention_regs (x);
1055 /* Look in or update the hash table. */
1057 /* Put the element ELT on the list of free elements. */
1059 static void
1060 free_element (elt)
1061 struct table_elt *elt;
1063 elt->next_same_hash = free_element_chain;
1064 free_element_chain = elt;
1067 /* Return an element that is free for use. */
1069 static struct table_elt *
1070 get_element ()
1072 struct table_elt *elt = free_element_chain;
1073 if (elt)
1075 free_element_chain = elt->next_same_hash;
1076 return elt;
1078 n_elements_made++;
1079 return (struct table_elt *) oballoc (sizeof (struct table_elt));
1082 /* Remove table element ELT from use in the table.
1083 HASH is its hash code, made using the HASH macro.
1084 It's an argument because often that is known in advance
1085 and we save much time not recomputing it. */
1087 static void
1088 remove_from_table (elt, hash)
1089 register struct table_elt *elt;
1090 unsigned hash;
1092 if (elt == 0)
1093 return;
1095 /* Mark this element as removed. See cse_insn. */
1096 elt->first_same_value = 0;
1098 /* Remove the table element from its equivalence class. */
1101 register struct table_elt *prev = elt->prev_same_value;
1102 register struct table_elt *next = elt->next_same_value;
1104 if (next) next->prev_same_value = prev;
1106 if (prev)
1107 prev->next_same_value = next;
1108 else
1110 register struct table_elt *newfirst = next;
1111 while (next)
1113 next->first_same_value = newfirst;
1114 next = next->next_same_value;
1119 /* Remove the table element from its hash bucket. */
1122 register struct table_elt *prev = elt->prev_same_hash;
1123 register struct table_elt *next = elt->next_same_hash;
1125 if (next) next->prev_same_hash = prev;
1127 if (prev)
1128 prev->next_same_hash = next;
1129 else if (table[hash] == elt)
1130 table[hash] = next;
1131 else
1133 /* This entry is not in the proper hash bucket. This can happen
1134 when two classes were merged by `merge_equiv_classes'. Search
1135 for the hash bucket that it heads. This happens only very
1136 rarely, so the cost is acceptable. */
1137 for (hash = 0; hash < NBUCKETS; hash++)
1138 if (table[hash] == elt)
1139 table[hash] = next;
1143 /* Remove the table element from its related-value circular chain. */
1145 if (elt->related_value != 0 && elt->related_value != elt)
1147 register struct table_elt *p = elt->related_value;
1148 while (p->related_value != elt)
1149 p = p->related_value;
1150 p->related_value = elt->related_value;
1151 if (p->related_value == p)
1152 p->related_value = 0;
1155 free_element (elt);
1158 /* Look up X in the hash table and return its table element,
1159 or 0 if X is not in the table.
1161 MODE is the machine-mode of X, or if X is an integer constant
1162 with VOIDmode then MODE is the mode with which X will be used.
1164 Here we are satisfied to find an expression whose tree structure
1165 looks like X. */
1167 static struct table_elt *
1168 lookup (x, hash, mode)
1169 rtx x;
1170 unsigned hash;
1171 enum machine_mode mode;
1173 register struct table_elt *p;
1175 for (p = table[hash]; p; p = p->next_same_hash)
1176 if (mode == p->mode && ((x == p->exp && GET_CODE (x) == REG)
1177 || exp_equiv_p (x, p->exp, GET_CODE (x) != REG, 0)))
1178 return p;
1180 return 0;
1183 /* Like `lookup' but don't care whether the table element uses invalid regs.
1184 Also ignore discrepancies in the machine mode of a register. */
1186 static struct table_elt *
1187 lookup_for_remove (x, hash, mode)
1188 rtx x;
1189 unsigned hash;
1190 enum machine_mode mode;
1192 register struct table_elt *p;
1194 if (GET_CODE (x) == REG)
1196 int regno = REGNO (x);
1197 /* Don't check the machine mode when comparing registers;
1198 invalidating (REG:SI 0) also invalidates (REG:DF 0). */
1199 for (p = table[hash]; p; p = p->next_same_hash)
1200 if (GET_CODE (p->exp) == REG
1201 && REGNO (p->exp) == regno)
1202 return p;
1204 else
1206 for (p = table[hash]; p; p = p->next_same_hash)
1207 if (mode == p->mode && (x == p->exp || exp_equiv_p (x, p->exp, 0, 0)))
1208 return p;
1211 return 0;
1214 /* Look for an expression equivalent to X and with code CODE.
1215 If one is found, return that expression. */
1217 static rtx
1218 lookup_as_function (x, code)
1219 rtx x;
1220 enum rtx_code code;
1222 register struct table_elt *p = lookup (x, safe_hash (x, VOIDmode) % NBUCKETS,
1223 GET_MODE (x));
1224 if (p == 0)
1225 return 0;
1227 for (p = p->first_same_value; p; p = p->next_same_value)
1229 if (GET_CODE (p->exp) == code
1230 /* Make sure this is a valid entry in the table. */
1231 && exp_equiv_p (p->exp, p->exp, 1, 0))
1232 return p->exp;
1235 return 0;
1238 /* Insert X in the hash table, assuming HASH is its hash code
1239 and CLASSP is an element of the class it should go in
1240 (or 0 if a new class should be made).
1241 It is inserted at the proper position to keep the class in
1242 the order cheapest first.
1244 MODE is the machine-mode of X, or if X is an integer constant
1245 with VOIDmode then MODE is the mode with which X will be used.
1247 For elements of equal cheapness, the most recent one
1248 goes in front, except that the first element in the list
1249 remains first unless a cheaper element is added. The order of
1250 pseudo-registers does not matter, as canon_reg will be called to
1251 find the cheapest when a register is retrieved from the table.
1253 The in_memory field in the hash table element is set to 0.
1254 The caller must set it nonzero if appropriate.
1256 You should call insert_regs (X, CLASSP, MODIFY) before calling here,
1257 and if insert_regs returns a nonzero value
1258 you must then recompute its hash code before calling here.
1260 If necessary, update table showing constant values of quantities. */
1262 #define CHEAPER(X,Y) ((X)->cost < (Y)->cost)
1264 static struct table_elt *
1265 insert (x, classp, hash, mode)
1266 register rtx x;
1267 register struct table_elt *classp;
1268 unsigned hash;
1269 enum machine_mode mode;
1271 register struct table_elt *elt;
1273 /* If X is a register and we haven't made a quantity for it,
1274 something is wrong. */
1275 if (GET_CODE (x) == REG && ! REGNO_QTY_VALID_P (REGNO (x)))
1276 abort ();
1278 /* If X is a hard register, show it is being put in the table. */
1279 if (GET_CODE (x) == REG && REGNO (x) < FIRST_PSEUDO_REGISTER)
1281 int regno = REGNO (x);
1282 int endregno = regno + HARD_REGNO_NREGS (regno, GET_MODE (x));
1283 int i;
1285 for (i = regno; i < endregno; i++)
1286 SET_HARD_REG_BIT (hard_regs_in_table, i);
1290 /* Put an element for X into the right hash bucket. */
1292 elt = get_element ();
1293 elt->exp = x;
1294 elt->cost = COST (x);
1295 elt->next_same_value = 0;
1296 elt->prev_same_value = 0;
1297 elt->next_same_hash = table[hash];
1298 elt->prev_same_hash = 0;
1299 elt->related_value = 0;
1300 elt->in_memory = 0;
1301 elt->mode = mode;
1302 elt->is_const = (CONSTANT_P (x)
1303 /* GNU C++ takes advantage of this for `this'
1304 (and other const values). */
1305 || (RTX_UNCHANGING_P (x)
1306 && GET_CODE (x) == REG
1307 && REGNO (x) >= FIRST_PSEUDO_REGISTER)
1308 || FIXED_BASE_PLUS_P (x));
1310 if (table[hash])
1311 table[hash]->prev_same_hash = elt;
1312 table[hash] = elt;
1314 /* Put it into the proper value-class. */
1315 if (classp)
1317 classp = classp->first_same_value;
1318 if (CHEAPER (elt, classp))
1319 /* Insert at the head of the class */
1321 register struct table_elt *p;
1322 elt->next_same_value = classp;
1323 classp->prev_same_value = elt;
1324 elt->first_same_value = elt;
1326 for (p = classp; p; p = p->next_same_value)
1327 p->first_same_value = elt;
1329 else
1331 /* Insert not at head of the class. */
1332 /* Put it after the last element cheaper than X. */
1333 register struct table_elt *p, *next;
1334 for (p = classp; (next = p->next_same_value) && CHEAPER (next, elt);
1335 p = next);
1336 /* Put it after P and before NEXT. */
1337 elt->next_same_value = next;
1338 if (next)
1339 next->prev_same_value = elt;
1340 elt->prev_same_value = p;
1341 p->next_same_value = elt;
1342 elt->first_same_value = classp;
1345 else
1346 elt->first_same_value = elt;
1348 /* If this is a constant being set equivalent to a register or a register
1349 being set equivalent to a constant, note the constant equivalence.
1351 If this is a constant, it cannot be equivalent to a different constant,
1352 and a constant is the only thing that can be cheaper than a register. So
1353 we know the register is the head of the class (before the constant was
1354 inserted).
1356 If this is a register that is not already known equivalent to a
1357 constant, we must check the entire class.
1359 If this is a register that is already known equivalent to an insn,
1360 update `qty_const_insn' to show that `this_insn' is the latest
1361 insn making that quantity equivalent to the constant. */
1363 if (elt->is_const && classp && GET_CODE (classp->exp) == REG)
1365 qty_const[reg_qty[REGNO (classp->exp)]]
1366 = gen_lowpart_if_possible (qty_mode[reg_qty[REGNO (classp->exp)]], x);
1367 qty_const_insn[reg_qty[REGNO (classp->exp)]] = this_insn;
1370 else if (GET_CODE (x) == REG && classp && ! qty_const[reg_qty[REGNO (x)]])
1372 register struct table_elt *p;
1374 for (p = classp; p != 0; p = p->next_same_value)
1376 if (p->is_const)
1378 qty_const[reg_qty[REGNO (x)]]
1379 = gen_lowpart_if_possible (GET_MODE (x), p->exp);
1380 qty_const_insn[reg_qty[REGNO (x)]] = this_insn;
1381 break;
1386 else if (GET_CODE (x) == REG && qty_const[reg_qty[REGNO (x)]]
1387 && GET_MODE (x) == qty_mode[reg_qty[REGNO (x)]])
1388 qty_const_insn[reg_qty[REGNO (x)]] = this_insn;
1390 /* If this is a constant with symbolic value,
1391 and it has a term with an explicit integer value,
1392 link it up with related expressions. */
1393 if (GET_CODE (x) == CONST)
1395 rtx subexp = get_related_value (x);
1396 unsigned subhash;
1397 struct table_elt *subelt, *subelt_prev;
1399 if (subexp != 0)
1401 /* Get the integer-free subexpression in the hash table. */
1402 subhash = safe_hash (subexp, mode) % NBUCKETS;
1403 subelt = lookup (subexp, subhash, mode);
1404 if (subelt == 0)
1405 subelt = insert (subexp, NULL_PTR, subhash, mode);
1406 /* Initialize SUBELT's circular chain if it has none. */
1407 if (subelt->related_value == 0)
1408 subelt->related_value = subelt;
1409 /* Find the element in the circular chain that precedes SUBELT. */
1410 subelt_prev = subelt;
1411 while (subelt_prev->related_value != subelt)
1412 subelt_prev = subelt_prev->related_value;
1413 /* Put new ELT into SUBELT's circular chain just before SUBELT.
1414 This way the element that follows SUBELT is the oldest one. */
1415 elt->related_value = subelt_prev->related_value;
1416 subelt_prev->related_value = elt;
1420 return elt;
1423 /* Given two equivalence classes, CLASS1 and CLASS2, put all the entries from
1424 CLASS2 into CLASS1. This is done when we have reached an insn which makes
1425 the two classes equivalent.
1427 CLASS1 will be the surviving class; CLASS2 should not be used after this
1428 call.
1430 Any invalid entries in CLASS2 will not be copied. */
1432 static void
1433 merge_equiv_classes (class1, class2)
1434 struct table_elt *class1, *class2;
1436 struct table_elt *elt, *next, *new;
1438 /* Ensure we start with the head of the classes. */
1439 class1 = class1->first_same_value;
1440 class2 = class2->first_same_value;
1442 /* If they were already equal, forget it. */
1443 if (class1 == class2)
1444 return;
1446 for (elt = class2; elt; elt = next)
1448 unsigned hash;
1449 rtx exp = elt->exp;
1450 enum machine_mode mode = elt->mode;
1452 next = elt->next_same_value;
1454 /* Remove old entry, make a new one in CLASS1's class.
1455 Don't do this for invalid entries as we cannot find their
1456 hash code (it also isn't necessary). */
1457 if (GET_CODE (exp) == REG || exp_equiv_p (exp, exp, 1, 0))
1459 hash_arg_in_memory = 0;
1460 hash_arg_in_struct = 0;
1461 hash = HASH (exp, mode);
1463 if (GET_CODE (exp) == REG)
1464 delete_reg_equiv (REGNO (exp));
1466 remove_from_table (elt, hash);
1468 if (insert_regs (exp, class1, 0))
1470 rehash_using_reg (exp);
1471 hash = HASH (exp, mode);
1473 new = insert (exp, class1, hash, mode);
1474 new->in_memory = hash_arg_in_memory;
1475 new->in_struct = hash_arg_in_struct;
1480 /* Remove from the hash table, or mark as invalid,
1481 all expressions whose values could be altered by storing in X.
1482 X is a register, a subreg, or a memory reference with nonvarying address
1483 (because, when a memory reference with a varying address is stored in,
1484 all memory references are removed by invalidate_memory
1485 so specific invalidation is superfluous).
1486 FULL_MODE, if not VOIDmode, indicates that this much should be invalidated
1487 instead of just the amount indicated by the mode of X. This is only used
1488 for bitfield stores into memory.
1490 A nonvarying address may be just a register or just
1491 a symbol reference, or it may be either of those plus
1492 a numeric offset. */
1494 static void
1495 invalidate (x, full_mode)
1496 rtx x;
1497 enum machine_mode full_mode;
1499 register int i;
1500 register struct table_elt *p;
1501 rtx base;
1502 HOST_WIDE_INT start, end;
1504 /* If X is a register, dependencies on its contents
1505 are recorded through the qty number mechanism.
1506 Just change the qty number of the register,
1507 mark it as invalid for expressions that refer to it,
1508 and remove it itself. */
1510 if (GET_CODE (x) == REG)
1512 register int regno = REGNO (x);
1513 register unsigned hash = HASH (x, GET_MODE (x));
1515 /* Remove REGNO from any quantity list it might be on and indicate
1516 that it's value might have changed. If it is a pseudo, remove its
1517 entry from the hash table.
1519 For a hard register, we do the first two actions above for any
1520 additional hard registers corresponding to X. Then, if any of these
1521 registers are in the table, we must remove any REG entries that
1522 overlap these registers. */
1524 delete_reg_equiv (regno);
1525 reg_tick[regno]++;
1527 if (regno >= FIRST_PSEUDO_REGISTER)
1528 remove_from_table (lookup_for_remove (x, hash, GET_MODE (x)), hash);
1529 else
1531 HOST_WIDE_INT in_table
1532 = TEST_HARD_REG_BIT (hard_regs_in_table, regno);
1533 int endregno = regno + HARD_REGNO_NREGS (regno, GET_MODE (x));
1534 int tregno, tendregno;
1535 register struct table_elt *p, *next;
1537 CLEAR_HARD_REG_BIT (hard_regs_in_table, regno);
1539 for (i = regno + 1; i < endregno; i++)
1541 in_table |= TEST_HARD_REG_BIT (hard_regs_in_table, i);
1542 CLEAR_HARD_REG_BIT (hard_regs_in_table, i);
1543 delete_reg_equiv (i);
1544 reg_tick[i]++;
1547 if (in_table)
1548 for (hash = 0; hash < NBUCKETS; hash++)
1549 for (p = table[hash]; p; p = next)
1551 next = p->next_same_hash;
1553 if (GET_CODE (p->exp) != REG
1554 || REGNO (p->exp) >= FIRST_PSEUDO_REGISTER)
1555 continue;
1557 tregno = REGNO (p->exp);
1558 tendregno
1559 = tregno + HARD_REGNO_NREGS (tregno, GET_MODE (p->exp));
1560 if (tendregno > regno && tregno < endregno)
1561 remove_from_table (p, hash);
1565 return;
1568 if (GET_CODE (x) == SUBREG)
1570 if (GET_CODE (SUBREG_REG (x)) != REG)
1571 abort ();
1572 invalidate (SUBREG_REG (x), VOIDmode);
1573 return;
1576 /* X is not a register; it must be a memory reference with
1577 a nonvarying address. Remove all hash table elements
1578 that refer to overlapping pieces of memory. */
1580 if (GET_CODE (x) != MEM)
1581 abort ();
1583 if (full_mode == VOIDmode)
1584 full_mode = GET_MODE (x);
1586 set_nonvarying_address_components (XEXP (x, 0), GET_MODE_SIZE (full_mode),
1587 &base, &start, &end);
1589 for (i = 0; i < NBUCKETS; i++)
1591 register struct table_elt *next;
1592 for (p = table[i]; p; p = next)
1594 next = p->next_same_hash;
1595 if (refers_to_mem_p (p->exp, base, start, end))
1596 remove_from_table (p, i);
1601 /* Remove all expressions that refer to register REGNO,
1602 since they are already invalid, and we are about to
1603 mark that register valid again and don't want the old
1604 expressions to reappear as valid. */
1606 static void
1607 remove_invalid_refs (regno)
1608 int regno;
1610 register int i;
1611 register struct table_elt *p, *next;
1613 for (i = 0; i < NBUCKETS; i++)
1614 for (p = table[i]; p; p = next)
1616 next = p->next_same_hash;
1617 if (GET_CODE (p->exp) != REG
1618 && refers_to_regno_p (regno, regno + 1, p->exp, NULL_PTR))
1619 remove_from_table (p, i);
1623 /* Recompute the hash codes of any valid entries in the hash table that
1624 reference X, if X is a register, or SUBREG_REG (X) if X is a SUBREG.
1626 This is called when we make a jump equivalence. */
1628 static void
1629 rehash_using_reg (x)
1630 rtx x;
1632 int i;
1633 struct table_elt *p, *next;
1634 unsigned hash;
1636 if (GET_CODE (x) == SUBREG)
1637 x = SUBREG_REG (x);
1639 /* If X is not a register or if the register is known not to be in any
1640 valid entries in the table, we have no work to do. */
1642 if (GET_CODE (x) != REG
1643 || reg_in_table[REGNO (x)] < 0
1644 || reg_in_table[REGNO (x)] != reg_tick[REGNO (x)])
1645 return;
1647 /* Scan all hash chains looking for valid entries that mention X.
1648 If we find one and it is in the wrong hash chain, move it. We can skip
1649 objects that are registers, since they are handled specially. */
1651 for (i = 0; i < NBUCKETS; i++)
1652 for (p = table[i]; p; p = next)
1654 next = p->next_same_hash;
1655 if (GET_CODE (p->exp) != REG && reg_mentioned_p (x, p->exp)
1656 && exp_equiv_p (p->exp, p->exp, 1, 0)
1657 && i != (hash = safe_hash (p->exp, p->mode) % NBUCKETS))
1659 if (p->next_same_hash)
1660 p->next_same_hash->prev_same_hash = p->prev_same_hash;
1662 if (p->prev_same_hash)
1663 p->prev_same_hash->next_same_hash = p->next_same_hash;
1664 else
1665 table[i] = p->next_same_hash;
1667 p->next_same_hash = table[hash];
1668 p->prev_same_hash = 0;
1669 if (table[hash])
1670 table[hash]->prev_same_hash = p;
1671 table[hash] = p;
1676 /* Remove from the hash table all expressions that reference memory,
1677 or some of them as specified by *WRITES. */
1679 static void
1680 invalidate_memory (writes)
1681 struct write_data *writes;
1683 register int i;
1684 register struct table_elt *p, *next;
1685 int all = writes->all;
1686 int nonscalar = writes->nonscalar;
1688 for (i = 0; i < NBUCKETS; i++)
1689 for (p = table[i]; p; p = next)
1691 next = p->next_same_hash;
1692 if (p->in_memory
1693 && (all
1694 || (nonscalar && p->in_struct)
1695 || cse_rtx_addr_varies_p (p->exp)))
1696 remove_from_table (p, i);
1700 /* Remove from the hash table any expression that is a call-clobbered
1701 register. Also update their TICK values. */
1703 static void
1704 invalidate_for_call ()
1706 int regno, endregno;
1707 int i;
1708 unsigned hash;
1709 struct table_elt *p, *next;
1710 int in_table = 0;
1712 /* Go through all the hard registers. For each that is clobbered in
1713 a CALL_INSN, remove the register from quantity chains and update
1714 reg_tick if defined. Also see if any of these registers is currently
1715 in the table. */
1717 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
1718 if (TEST_HARD_REG_BIT (regs_invalidated_by_call, regno))
1720 delete_reg_equiv (regno);
1721 if (reg_tick[regno] >= 0)
1722 reg_tick[regno]++;
1724 in_table |= (TEST_HARD_REG_BIT (hard_regs_in_table, regno) != 0);
1727 /* In the case where we have no call-clobbered hard registers in the
1728 table, we are done. Otherwise, scan the table and remove any
1729 entry that overlaps a call-clobbered register. */
1731 if (in_table)
1732 for (hash = 0; hash < NBUCKETS; hash++)
1733 for (p = table[hash]; p; p = next)
1735 next = p->next_same_hash;
1737 if (GET_CODE (p->exp) != REG
1738 || REGNO (p->exp) >= FIRST_PSEUDO_REGISTER)
1739 continue;
1741 regno = REGNO (p->exp);
1742 endregno = regno + HARD_REGNO_NREGS (regno, GET_MODE (p->exp));
1744 for (i = regno; i < endregno; i++)
1745 if (TEST_HARD_REG_BIT (regs_invalidated_by_call, i))
1747 remove_from_table (p, hash);
1748 break;
1753 /* Given an expression X of type CONST,
1754 and ELT which is its table entry (or 0 if it
1755 is not in the hash table),
1756 return an alternate expression for X as a register plus integer.
1757 If none can be found, return 0. */
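/* Illustration (register number and symbol name are hypothetical):
   suppose the table holds (const (plus (symbol_ref "tbl") (const_int 4)))
   in a class that also contains (reg 64), and we now encounter
   X = (const (plus (symbol_ref "tbl") (const_int 12))).  The two CONSTs
   are each other's related values, so the search below finds (reg 64)
   and returns

       (plus (reg 64) (const_int 8))

   letting us compute X by an addition instead of loading a new
   constant.  */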
1759 static rtx
1760 use_related_value (x, elt)
1761 rtx x;
1762 struct table_elt *elt;
1764 register struct table_elt *relt = 0;
1765 register struct table_elt *p, *q;
1766 HOST_WIDE_INT offset;
1768 /* First, is there anything related known?
1769 If we have a table element, we can tell from that.
1770 Otherwise, must look it up. */
1772 if (elt != 0 && elt->related_value != 0)
1773 relt = elt;
1774 else if (elt == 0 && GET_CODE (x) == CONST)
1776 rtx subexp = get_related_value (x);
1777 if (subexp != 0)
1778 relt = lookup (subexp,
1779 safe_hash (subexp, GET_MODE (subexp)) % NBUCKETS,
1780 GET_MODE (subexp));
1783 if (relt == 0)
1784 return 0;
1786 /* Search all related table entries for one that has an
1787 equivalent register. */
1789 p = relt;
1790 while (1)
1792 /* This loop is strange in that it is executed in two different cases.
1793 The first is when X is already in the table. Then it is searching
1794 the RELATED_VALUE list of X's class (RELT). The second case is when
1795 X is not in the table. Then RELT points to a class for the related
1796 value.
1798 Ensure that, whatever case we are in, we ignore classes that have
1799 the same value as X. */
1801 if (rtx_equal_p (x, p->exp))
1802 q = 0;
1803 else
1804 for (q = p->first_same_value; q; q = q->next_same_value)
1805 if (GET_CODE (q->exp) == REG)
1806 break;
1808 if (q)
1809 break;
1811 p = p->related_value;
1813 /* We went all the way around, so there is nothing to be found.
1814 Alternatively, perhaps RELT was in the table for some other reason
1815 and it has no related values recorded. */
1816 if (p == relt || p == 0)
1817 break;
1820 if (q == 0)
1821 return 0;
1823 offset = (get_integer_term (x) - get_integer_term (p->exp));
1824 /* Note: OFFSET may be 0 if P->exp and X are related by commutativity. */
1825 return plus_constant (q->exp, offset);
1828 /* Hash an rtx. We are careful to make sure the value is never negative.
1829 Equivalent registers hash identically.
1830 MODE is used in hashing for CONST_INTs only;
1831 otherwise the mode of X is used.
1833 Store 1 in do_not_record if any subexpression is volatile.
1835 Store 1 in hash_arg_in_memory if X contains a MEM rtx
1836 which does not have the RTX_UNCHANGING_P bit set.
1837 In this case, also store 1 in hash_arg_in_struct
1838 if there is a MEM rtx which has the MEM_IN_STRUCT_P bit set.
1840 Note that cse_insn knows that the hash code of a MEM expression
1841 is just (int) MEM plus the hash code of the address. */
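/* Illustration: the invariant just mentioned falls out of the MEM case
   below, which adds (unsigned) MEM and then restarts on the address, so
   (for a non-volatile MEM)

       canon_hash ((mem:SI (reg 65)), 0)
         == (unsigned) MEM + canon_hash ((reg 65), 0)

   which is what allows cse_insn to compose the hash of a MEM from the
   hash of an address it has already computed.  The register number is
   hypothetical.  */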
1843 static unsigned
1844 canon_hash (x, mode)
1845 rtx x;
1846 enum machine_mode mode;
1848 register int i, j;
1849 register unsigned hash = 0;
1850 register enum rtx_code code;
1851 register char *fmt;
1853 /* repeat is used to turn tail-recursion into iteration. */
1854 repeat:
1855 if (x == 0)
1856 return hash;
1858 code = GET_CODE (x);
1859 switch (code)
1861 case REG:
1863 register int regno = REGNO (x);
1865 /* On some machines, we can't record any non-fixed hard register,
1866 because extending its life will cause reload problems. We
1867 consider ap, fp, and sp to be fixed for this purpose.
1868 On all machines, we can't record any global registers. */
1870 if (regno < FIRST_PSEUDO_REGISTER
1871 && (global_regs[regno]
1872 #ifdef SMALL_REGISTER_CLASSES
1873 || (! fixed_regs[regno]
1874 && regno != FRAME_POINTER_REGNUM
1875 && regno != HARD_FRAME_POINTER_REGNUM
1876 && regno != ARG_POINTER_REGNUM
1877 && regno != STACK_POINTER_REGNUM)
1878 #endif
1881 do_not_record = 1;
1882 return 0;
1884 hash += ((unsigned) REG << 7) + (unsigned) reg_qty[regno];
1885 return hash;
1888 case CONST_INT:
1890 unsigned HOST_WIDE_INT tem = INTVAL (x);
1891 hash += ((unsigned) CONST_INT << 7) + (unsigned) mode + tem;
1892 return hash;
1895 case CONST_DOUBLE:
1896 /* This is like the general case, except that it only counts
1897 the integers representing the constant. */
1898 hash += (unsigned) code + (unsigned) GET_MODE (x);
1899 for (i = 2; i < GET_RTX_LENGTH (CONST_DOUBLE); i++)
1901 unsigned tem = XINT (x, i);
1902 hash += tem;
1904 return hash;
1906 /* Assume there is only one rtx object for any given label. */
1907 case LABEL_REF:
1908 hash
1909 += ((unsigned) LABEL_REF << 7) + (unsigned HOST_WIDE_INT) XEXP (x, 0);
1910 return hash;
1912 case SYMBOL_REF:
1913 hash
1914 += ((unsigned) SYMBOL_REF << 7) + (unsigned HOST_WIDE_INT) XSTR (x, 0);
1915 return hash;
1917 case MEM:
1918 if (MEM_VOLATILE_P (x))
1920 do_not_record = 1;
1921 return 0;
1923 if (! RTX_UNCHANGING_P (x))
1925 hash_arg_in_memory = 1;
1926 if (MEM_IN_STRUCT_P (x)) hash_arg_in_struct = 1;
1928 /* Now that we have already found this special case,
1929 might as well speed it up as much as possible. */
1930 hash += (unsigned) MEM;
1931 x = XEXP (x, 0);
1932 goto repeat;
1934 case PRE_DEC:
1935 case PRE_INC:
1936 case POST_DEC:
1937 case POST_INC:
1938 case PC:
1939 case CC0:
1940 case CALL:
1941 case UNSPEC_VOLATILE:
1942 do_not_record = 1;
1943 return 0;
1945 case ASM_OPERANDS:
1946 if (MEM_VOLATILE_P (x))
1948 do_not_record = 1;
1949 return 0;
1953 i = GET_RTX_LENGTH (code) - 1;
1954 hash += (unsigned) code + (unsigned) GET_MODE (x);
1955 fmt = GET_RTX_FORMAT (code);
1956 for (; i >= 0; i--)
1958 if (fmt[i] == 'e')
1960 rtx tem = XEXP (x, i);
1962 /* If we are about to do the last recursive call
1963 needed at this level, change it into iteration.
1964 This function is called enough to be worth it. */
1965 if (i == 0)
1967 x = tem;
1968 goto repeat;
1970 hash += canon_hash (tem, 0);
1972 else if (fmt[i] == 'E')
1973 for (j = 0; j < XVECLEN (x, i); j++)
1974 hash += canon_hash (XVECEXP (x, i, j), 0);
1975 else if (fmt[i] == 's')
1977 register unsigned char *p = (unsigned char *) XSTR (x, i);
1978 if (p)
1979 while (*p)
1980 hash += *p++;
1982 else if (fmt[i] == 'i')
1984 register unsigned tem = XINT (x, i);
1985 hash += tem;
1987 else
1988 abort ();
1990 return hash;
1993 /* Like canon_hash but with no side effects. */
1995 static unsigned
1996 safe_hash (x, mode)
1997 rtx x;
1998 enum machine_mode mode;
2000 int save_do_not_record = do_not_record;
2001 int save_hash_arg_in_memory = hash_arg_in_memory;
2002 int save_hash_arg_in_struct = hash_arg_in_struct;
2003 unsigned hash = canon_hash (x, mode);
2004 hash_arg_in_memory = save_hash_arg_in_memory;
2005 hash_arg_in_struct = save_hash_arg_in_struct;
2006 do_not_record = save_do_not_record;
2007 return hash;
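/* Usage sketch: safe_hash is the probe to use when the caller must not
   disturb do_not_record and the hash_arg_in_* flags that an enclosing
   hash computation may still need, as in

       elt = lookup (subexp,
                     safe_hash (subexp, GET_MODE (subexp)) % NBUCKETS,
                     GET_MODE (subexp));

   the pattern already used by use_related_value above.  */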
2010 /* Return 1 iff X and Y would canonicalize into the same thing,
2011 without actually constructing the canonicalization of either one.
2012 If VALIDATE is nonzero,
2013 we assume X is an expression being processed from the rtl
2014 and Y was found in the hash table. We check register refs
2015 in Y for being marked as valid.
2017 If EQUAL_VALUES is nonzero, we allow a register to match a constant value
2018 that is known to be in the register. Ordinarily, we don't allow them
2019 to match, because letting them match would cause unpredictable results
2020 in all the places that search a hash table chain for an equivalent
2021 for a given value. A possible equivalent that has different structure
2022 has its hash code computed from different data. Whether the hash code
2023 is the same as that of the given value is pure luck. */
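/* Illustration (register and quantity numbers are hypothetical): with
   EQUAL_VALUES nonzero, exp_equiv_p ((const_int 4), (reg 70), 1, 1)
   returns 1 when reg 70's quantity records (const_int 4) as its
   qty_const in reg 70's own mode and reg 70's table entries are still
   valid; with EQUAL_VALUES zero, the differing rtx codes make the
   comparison fail immediately.  */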
2025 static int
2026 exp_equiv_p (x, y, validate, equal_values)
2027 rtx x, y;
2028 int validate;
2029 int equal_values;
2031 register int i, j;
2032 register enum rtx_code code;
2033 register char *fmt;
2035 /* Note: it is incorrect to assume an expression is equivalent to itself
2036 if VALIDATE is nonzero. */
2037 if (x == y && !validate)
2038 return 1;
2039 if (x == 0 || y == 0)
2040 return x == y;
2042 code = GET_CODE (x);
2043 if (code != GET_CODE (y))
2045 if (!equal_values)
2046 return 0;
2048 /* If X is a constant and Y is a register or vice versa, they may be
2049 equivalent. We only have to validate if Y is a register. */
2050 if (CONSTANT_P (x) && GET_CODE (y) == REG
2051 && REGNO_QTY_VALID_P (REGNO (y))
2052 && GET_MODE (y) == qty_mode[reg_qty[REGNO (y)]]
2053 && rtx_equal_p (x, qty_const[reg_qty[REGNO (y)]])
2054 && (! validate || reg_in_table[REGNO (y)] == reg_tick[REGNO (y)]))
2055 return 1;
2057 if (CONSTANT_P (y) && code == REG
2058 && REGNO_QTY_VALID_P (REGNO (x))
2059 && GET_MODE (x) == qty_mode[reg_qty[REGNO (x)]]
2060 && rtx_equal_p (y, qty_const[reg_qty[REGNO (x)]]))
2061 return 1;
2063 return 0;
2066 /* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent. */
2067 if (GET_MODE (x) != GET_MODE (y))
2068 return 0;
2070 switch (code)
2072 case PC:
2073 case CC0:
2074 return x == y;
2076 case CONST_INT:
2077 return INTVAL (x) == INTVAL (y);
2079 case LABEL_REF:
2080 return XEXP (x, 0) == XEXP (y, 0);
2082 case SYMBOL_REF:
2083 return XSTR (x, 0) == XSTR (y, 0);
2085 case REG:
2087 int regno = REGNO (y);
2088 int endregno
2089 = regno + (regno >= FIRST_PSEUDO_REGISTER ? 1
2090 : HARD_REGNO_NREGS (regno, GET_MODE (y)));
2091 int i;
2093 /* If the quantities are not the same, the expressions are not
2094 equivalent. If they are and we are not to validate, they
2095 are equivalent. Otherwise, ensure all regs are up-to-date. */
2097 if (reg_qty[REGNO (x)] != reg_qty[regno])
2098 return 0;
2100 if (! validate)
2101 return 1;
2103 for (i = regno; i < endregno; i++)
2104 if (reg_in_table[i] != reg_tick[i])
2105 return 0;
2107 return 1;
2110 /* For commutative operations, check both orders. */
2111 case PLUS:
2112 case MULT:
2113 case AND:
2114 case IOR:
2115 case XOR:
2116 case NE:
2117 case EQ:
2118 return ((exp_equiv_p (XEXP (x, 0), XEXP (y, 0), validate, equal_values)
2119 && exp_equiv_p (XEXP (x, 1), XEXP (y, 1),
2120 validate, equal_values))
2121 || (exp_equiv_p (XEXP (x, 0), XEXP (y, 1),
2122 validate, equal_values)
2123 && exp_equiv_p (XEXP (x, 1), XEXP (y, 0),
2124 validate, equal_values)));
2127 /* Compare the elements. If any pair of corresponding elements
2128 fails to match, return 0 for the whole thing. */
2130 fmt = GET_RTX_FORMAT (code);
2131 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2133 switch (fmt[i])
2135 case 'e':
2136 if (! exp_equiv_p (XEXP (x, i), XEXP (y, i), validate, equal_values))
2137 return 0;
2138 break;
2140 case 'E':
2141 if (XVECLEN (x, i) != XVECLEN (y, i))
2142 return 0;
2143 for (j = 0; j < XVECLEN (x, i); j++)
2144 if (! exp_equiv_p (XVECEXP (x, i, j), XVECEXP (y, i, j),
2145 validate, equal_values))
2146 return 0;
2147 break;
2149 case 's':
2150 if (strcmp (XSTR (x, i), XSTR (y, i)))
2151 return 0;
2152 break;
2154 case 'i':
2155 if (XINT (x, i) != XINT (y, i))
2156 return 0;
2157 break;
2159 case 'w':
2160 if (XWINT (x, i) != XWINT (y, i))
2161 return 0;
2162 break;
2164 case '0':
2165 break;
2167 default:
2168 abort ();
2172 return 1;
2175 /* Return 1 iff any subexpression of X matches Y.
2176 Here we do not require that X or Y be valid (that is, that the
2177 registers they refer to be up to date) for being in the hash table. */
2179 static int
2180 refers_to_p (x, y)
2181 rtx x, y;
2183 register int i;
2184 register enum rtx_code code;
2185 register char *fmt;
2187 repeat:
2188 if (x == y)
2189 return 1;
2190 if (x == 0 || y == 0)
2191 return 0;
2193 code = GET_CODE (x);
2194 /* If X as a whole has the same code as Y, they may match.
2195 If so, return 1. */
2196 if (code == GET_CODE (y))
2198 if (exp_equiv_p (x, y, 0, 1))
2199 return 1;
2202 /* X does not match, so try its subexpressions. */
2204 fmt = GET_RTX_FORMAT (code);
2205 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2206 if (fmt[i] == 'e')
2208 if (i == 0)
2210 x = XEXP (x, 0);
2211 goto repeat;
2213 else
2214 if (refers_to_p (XEXP (x, i), y))
2215 return 1;
2217 else if (fmt[i] == 'E')
2219 int j;
2220 for (j = 0; j < XVECLEN (x, i); j++)
2221 if (refers_to_p (XVECEXP (x, i, j), y))
2222 return 1;
2225 return 0;
2228 /* Given ADDR and SIZE (a memory address, and the size of the memory reference),
2229 set PBASE, PSTART, and PEND which correspond to the base of the address,
2230 the starting offset, and ending offset respectively.
2232 ADDR is known to be a nonvarying address. */
2234 /* ??? Despite what the comments say, this function is in fact frequently
2235 passed varying addresses. This does not appear to cause any problems. */
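/* Worked example (offsets are hypothetical): for
   ADDR = (plus (reg fp) (const_int 8)) and SIZE = 4, with no constant
   equivalent recorded for the register, the loop below strips the PLUS,
   leaving *PBASE = (reg fp), *PSTART = 8, *PEND = 12.  A second
   reference of size 4 at offset 12 then gets the disjoint range
   [12, 16) and is correctly seen as non-conflicting.  */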
2237 static void
2238 set_nonvarying_address_components (addr, size, pbase, pstart, pend)
2239 rtx addr;
2240 int size;
2241 rtx *pbase;
2242 HOST_WIDE_INT *pstart, *pend;
2244 rtx base;
2245 HOST_WIDE_INT start, end;
2247 base = addr;
2248 start = 0;
2249 end = 0;
2251 /* Registers with nonvarying addresses usually have constant equivalents;
2252 but the frame pointer register is also possible. */
2253 if (GET_CODE (base) == REG
2254 && qty_const != 0
2255 && REGNO_QTY_VALID_P (REGNO (base))
2256 && qty_mode[reg_qty[REGNO (base)]] == GET_MODE (base)
2257 && qty_const[reg_qty[REGNO (base)]] != 0)
2258 base = qty_const[reg_qty[REGNO (base)]];
2259 else if (GET_CODE (base) == PLUS
2260 && GET_CODE (XEXP (base, 1)) == CONST_INT
2261 && GET_CODE (XEXP (base, 0)) == REG
2262 && qty_const != 0
2263 && REGNO_QTY_VALID_P (REGNO (XEXP (base, 0)))
2264 && (qty_mode[reg_qty[REGNO (XEXP (base, 0))]]
2265 == GET_MODE (XEXP (base, 0)))
2266 && qty_const[reg_qty[REGNO (XEXP (base, 0))]])
2268 start = INTVAL (XEXP (base, 1));
2269 base = qty_const[reg_qty[REGNO (XEXP (base, 0))]];
2272 /* Handle everything that we can find inside an address that has been
2273 viewed as constant. */
2275 while (1)
2277 /* If no part of this switch does a "continue", the code outside
2278 will exit this loop. */
2280 switch (GET_CODE (base))
2282 case LO_SUM:
2283 /* By definition, operand1 of a LO_SUM is the associated constant
2284 address. Use the associated constant address as the base
2285 instead. */
2286 base = XEXP (base, 1);
2287 continue;
2289 case CONST:
2290 /* Strip off CONST. */
2291 base = XEXP (base, 0);
2292 continue;
2294 case PLUS:
2295 if (GET_CODE (XEXP (base, 1)) == CONST_INT)
2297 start += INTVAL (XEXP (base, 1));
2298 base = XEXP (base, 0);
2299 continue;
2301 break;
2303 case AND:
2304 /* Handle the case of an AND which is the negative of a power of
2305 two. This is used to represent unaligned memory operations. */
2306 if (GET_CODE (XEXP (base, 1)) == CONST_INT
2307 && exact_log2 (- INTVAL (XEXP (base, 1))) > 0)
2309 set_nonvarying_address_components (XEXP (base, 0), size,
2310 pbase, pstart, pend);
2312 /* Assume the worst misalignment. START is affected, but not
2313 END, so compensate by adjusting SIZE. Don't lose any
2314 constant we already had. */
2316 size = *pend - *pstart - INTVAL (XEXP (base, 1)) - 1;
2317 start += *pstart - INTVAL (XEXP (base, 1)) - 1;
2318 base = *pbase;
2320 break;
2323 break;
2326 if (GET_CODE (base) == CONST_INT)
2328 start += INTVAL (base);
2329 base = const0_rtx;
2332 end = start + size;
2334 /* Set the return values. */
2335 *pbase = base;
2336 *pstart = start;
2337 *pend = end;
2340 /* Return 1 iff any subexpression of X refers to memory
2341 at an address of BASE plus some offset
2342 such that any of the bytes' offsets fall between START (inclusive)
2343 and END (exclusive).
2345 The value is undefined if X is a varying address (as determined by
2346 cse_rtx_addr_varies_p). This function is not used in such cases.
2348 When used in the cse pass, `qty_const' is nonzero, and it is used
2349 to treat an address that is a register with a known constant value
2350 as if it were that constant value.
2351 In the loop pass, `qty_const' is zero, so this is not done. */
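/* Illustration: the MEM case below reduces to the half-open interval
   test MYEND > START && MYSTART < END, applied only when the two bases
   are rtx_equal_p.  With hypothetical offsets, a reference covering
   [8, 12) does not conflict with an invalidated range [12, 16), since
   12 > 12 fails, while one covering [10, 14) does, since 14 > 12 and
   10 < 16.  */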
2353 static int
2354 refers_to_mem_p (x, base, start, end)
2355 rtx x, base;
2356 HOST_WIDE_INT start, end;
2358 register HOST_WIDE_INT i;
2359 register enum rtx_code code;
2360 register char *fmt;
2362 repeat:
2363 if (x == 0)
2364 return 0;
2366 code = GET_CODE (x);
2367 if (code == MEM)
2369 register rtx addr = XEXP (x, 0); /* Get the address. */
2370 rtx mybase;
2371 HOST_WIDE_INT mystart, myend;
2373 set_nonvarying_address_components (addr, GET_MODE_SIZE (GET_MODE (x)),
2374 &mybase, &mystart, &myend);
2377 /* refers_to_mem_p is never called with varying addresses.
2378 If the base addresses are not equal, there is no chance
2379 of the memory addresses conflicting. */
2380 if (! rtx_equal_p (mybase, base))
2381 return 0;
2383 return myend > start && mystart < end;
2386 /* X does not match, so try its subexpressions. */
2388 fmt = GET_RTX_FORMAT (code);
2389 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2390 if (fmt[i] == 'e')
2392 if (i == 0)
2394 x = XEXP (x, 0);
2395 goto repeat;
2397 else
2398 if (refers_to_mem_p (XEXP (x, i), base, start, end))
2399 return 1;
2401 else if (fmt[i] == 'E')
2403 int j;
2404 for (j = 0; j < XVECLEN (x, i); j++)
2405 if (refers_to_mem_p (XVECEXP (x, i, j), base, start, end))
2406 return 1;
2409 return 0;
2412 /* Nonzero if X refers to memory at a varying address;
2413 except that a register which has at the moment a known constant value
2414 isn't considered variable. */
2416 static int
2417 cse_rtx_addr_varies_p (x)
2418 rtx x;
2420 /* We need not check for X and the equivalence class being of the same
2421 mode because if X is equivalent to a constant in some mode, it
2422 doesn't vary in any mode. */
2424 if (GET_CODE (x) == MEM
2425 && GET_CODE (XEXP (x, 0)) == REG
2426 && REGNO_QTY_VALID_P (REGNO (XEXP (x, 0)))
2427 && GET_MODE (XEXP (x, 0)) == qty_mode[reg_qty[REGNO (XEXP (x, 0))]]
2428 && qty_const[reg_qty[REGNO (XEXP (x, 0))]] != 0)
2429 return 0;
2431 if (GET_CODE (x) == MEM
2432 && GET_CODE (XEXP (x, 0)) == PLUS
2433 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
2434 && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
2435 && REGNO_QTY_VALID_P (REGNO (XEXP (XEXP (x, 0), 0)))
2436 && (GET_MODE (XEXP (XEXP (x, 0), 0))
2437 == qty_mode[reg_qty[REGNO (XEXP (XEXP (x, 0), 0))]])
2438 && qty_const[reg_qty[REGNO (XEXP (XEXP (x, 0), 0))]])
2439 return 0;
2441 return rtx_addr_varies_p (x);
2444 /* Canonicalize an expression:
2445 replace each register reference inside it
2446 with the "oldest" equivalent register.
2448 If INSN is non-zero and we are replacing a pseudo with a hard register
2449 or vice versa, validate_change is used to ensure that INSN remains valid
2450 after we make our substitution. The calls are made with IN_GROUP non-zero
2451 so apply_change_group must be called upon the outermost return from this
2452 function (unless INSN is zero). The result of apply_change_group can
2453 generally be discarded since the changes we are making are optional. */
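/* Illustration (register numbers are hypothetical): if (reg 68) was
   copied from (reg 66) earlier in the block, both share a quantity with
   qty_first_reg == 66, so (plus:SI (reg 68) (const_int 4)) is rewritten
   as (plus:SI (reg 66) (const_int 4)).  The validate_change path below
   is taken only for pseudo/hard-register swaps or insns with MATCH_DUPs;
   other replacements are made directly.  */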
2455 static rtx
2456 canon_reg (x, insn)
2457 rtx x;
2458 rtx insn;
2460 register int i;
2461 register enum rtx_code code;
2462 register char *fmt;
2464 if (x == 0)
2465 return x;
2467 code = GET_CODE (x);
2468 switch (code)
2470 case PC:
2471 case CC0:
2472 case CONST:
2473 case CONST_INT:
2474 case CONST_DOUBLE:
2475 case SYMBOL_REF:
2476 case LABEL_REF:
2477 case ADDR_VEC:
2478 case ADDR_DIFF_VEC:
2479 return x;
2481 case REG:
2483 register int first;
2485 /* Never replace a hard reg, because hard regs can appear
2486 in more than one machine mode, and we must preserve the mode
2487 of each occurrence. Also, some hard regs appear in
2488 MEMs that are shared and mustn't be altered. Don't try to
2489 replace any reg that maps to a reg of class NO_REGS. */
2490 if (REGNO (x) < FIRST_PSEUDO_REGISTER
2491 || ! REGNO_QTY_VALID_P (REGNO (x)))
2492 return x;
2494 first = qty_first_reg[reg_qty[REGNO (x)]];
2495 return (first >= FIRST_PSEUDO_REGISTER ? regno_reg_rtx[first]
2496 : REGNO_REG_CLASS (first) == NO_REGS ? x
2497 : gen_rtx (REG, qty_mode[reg_qty[REGNO (x)]], first));
2501 fmt = GET_RTX_FORMAT (code);
2502 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2504 register int j;
2506 if (fmt[i] == 'e')
2508 rtx new = canon_reg (XEXP (x, i), insn);
2510 /* If replacing pseudo with hard reg or vice versa, ensure the
2511 insn remains valid. Likewise if the insn has MATCH_DUPs. */
2512 if (insn != 0 && new != 0
2513 && GET_CODE (new) == REG && GET_CODE (XEXP (x, i)) == REG
2514 && (((REGNO (new) < FIRST_PSEUDO_REGISTER)
2515 != (REGNO (XEXP (x, i)) < FIRST_PSEUDO_REGISTER))
2516 || insn_n_dups[recog_memoized (insn)] > 0))
2517 validate_change (insn, &XEXP (x, i), new, 1);
2518 else
2519 XEXP (x, i) = new;
2521 else if (fmt[i] == 'E')
2522 for (j = 0; j < XVECLEN (x, i); j++)
2523 XVECEXP (x, i, j) = canon_reg (XVECEXP (x, i, j), insn);
2526 return x;
2529 /* LOC is a location with INSN that is an operand address (the contents of
2530 a MEM). Find the best equivalent address to use that is valid for this
2531 insn.
2533 On most CISC machines, complicated address modes are costly, and rtx_cost
2534 is a good approximation for that cost. However, most RISC machines have
2535 only a few (usually only one) memory reference formats. If an address is
2536 valid at all, it is often just as cheap as any other address. Hence, for
2537 RISC machines, we use the configuration macro `ADDRESS_COST' to compare the
2538 costs of various addresses. For two addresses of equal cost, choose the one
2539 with the highest `rtx_cost' value as that has the potential of eliminating
2540 the most insns. For equal costs, we choose the first in the equivalence
2541 class. Note that we ignore the fact that pseudo registers are cheaper
2542 than hard registers here because we would also prefer the pseudo registers. */
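/* Illustration (hypothetical class contents): on an ADDRESS_COST machine,
   if the class for the current address holds (reg 70) and
   (plus (reg 66) (const_int 4)) with equal ADDRESS_COST, the scan below
   prefers the PLUS because its rtx_cost is higher, on the theory stated
   above that replacing the costlier expression can eliminate the insns
   that computed it.  */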
2545 static void
2546 find_best_addr (insn, loc)
2547 rtx insn;
2548 rtx *loc;
2550 struct table_elt *elt, *p;
2551 rtx addr = *loc;
2552 int our_cost;
2553 int found_better = 1;
2554 int save_do_not_record = do_not_record;
2555 int save_hash_arg_in_memory = hash_arg_in_memory;
2556 int save_hash_arg_in_struct = hash_arg_in_struct;
2557 int addr_volatile;
2558 int regno;
2559 unsigned hash;
2561 /* Do not try to replace constant addresses or addresses of local and
2562 argument slots. These MEM expressions are made only once and inserted
2563 in many instructions, as well as being used to control symbol table
2564 output. It is not safe to clobber them.
2566 There are some uncommon cases where the address is already in a register
2567 for some reason, but we cannot take advantage of that because we have
2568 no easy way to unshare the MEM. In addition, looking up all stack
2569 addresses is costly. */
2570 if ((GET_CODE (addr) == PLUS
2571 && GET_CODE (XEXP (addr, 0)) == REG
2572 && GET_CODE (XEXP (addr, 1)) == CONST_INT
2573 && (regno = REGNO (XEXP (addr, 0)),
2574 regno == FRAME_POINTER_REGNUM || regno == HARD_FRAME_POINTER_REGNUM
2575 || regno == ARG_POINTER_REGNUM))
2576 || (GET_CODE (addr) == REG
2577 && (regno = REGNO (addr), regno == FRAME_POINTER_REGNUM
2578 || regno == HARD_FRAME_POINTER_REGNUM
2579 || regno == ARG_POINTER_REGNUM))
2580 || CONSTANT_ADDRESS_P (addr))
2581 return;
2583 /* If this address is not simply a register, try to fold it. This will
2584 sometimes simplify the expression. Many simplifications
2585 will not be valid, but some, usually applying the associative rule, will
2586 be valid and produce better code. */
2587 if (GET_CODE (addr) != REG
2588 && validate_change (insn, loc, fold_rtx (addr, insn), 0))
2589 addr = *loc;
2591 /* If this address is not in the hash table, we can't look for equivalences
2592 of the whole address. Also, ignore if volatile. */
2594 do_not_record = 0;
2595 hash = HASH (addr, Pmode);
2596 addr_volatile = do_not_record;
2597 do_not_record = save_do_not_record;
2598 hash_arg_in_memory = save_hash_arg_in_memory;
2599 hash_arg_in_struct = save_hash_arg_in_struct;
2601 if (addr_volatile)
2602 return;
2604 elt = lookup (addr, hash, Pmode);
2606 #ifndef ADDRESS_COST
2607 if (elt)
2609 our_cost = elt->cost;
2611 /* Find the lowest cost below ours that works. */
2612 for (elt = elt->first_same_value; elt; elt = elt->next_same_value)
2613 if (elt->cost < our_cost
2614 && (GET_CODE (elt->exp) == REG
2615 || exp_equiv_p (elt->exp, elt->exp, 1, 0))
2616 && validate_change (insn, loc,
2617 canon_reg (copy_rtx (elt->exp), NULL_RTX), 0))
2618 return;
2620 #else
2622 if (elt)
2624 /* We need to find the best (under the criteria documented above) entry
2625 in the class that is valid. We use the `flag' field to indicate
2626 choices that were invalid and iterate until we can't find a better
2627 one that hasn't already been tried. */
2629 for (p = elt->first_same_value; p; p = p->next_same_value)
2630 p->flag = 0;
2632 while (found_better)
2634 int best_addr_cost = ADDRESS_COST (*loc);
2635 int best_rtx_cost = (elt->cost + 1) >> 1;
2636 struct table_elt *best_elt = elt;
2638 found_better = 0;
2639 for (p = elt->first_same_value; p; p = p->next_same_value)
2640 if (! p->flag
2641 && (GET_CODE (p->exp) == REG
2642 || exp_equiv_p (p->exp, p->exp, 1, 0))
2643 && (ADDRESS_COST (p->exp) < best_addr_cost
2644 || (ADDRESS_COST (p->exp) == best_addr_cost
2645 && (p->cost + 1) >> 1 > best_rtx_cost)))
2647 found_better = 1;
2648 best_addr_cost = ADDRESS_COST (p->exp);
2649 best_rtx_cost = (p->cost + 1) >> 1;
2650 best_elt = p;
2653 if (found_better)
2655 if (validate_change (insn, loc,
2656 canon_reg (copy_rtx (best_elt->exp),
2657 NULL_RTX), 0))
2658 return;
2659 else
2660 best_elt->flag = 1;
2665 /* If the address is a binary operation with the first operand a register
2666 and the second a constant, do the same as above, but looking for
2667 equivalences of the register. Then try to simplify before checking for
2668 the best address to use. This catches a few cases: First is when we
2669 have REG+const and the register is another REG+const. We can often merge
2670 the constants and eliminate one insn and one register. It may also be
2671 that a machine has a cheap REG+REG+const. Finally, this improves the
2672 code on the Alpha for unaligned byte stores. */
2674 if (flag_expensive_optimizations
2675 && (GET_RTX_CLASS (GET_CODE (*loc)) == '2'
2676 || GET_RTX_CLASS (GET_CODE (*loc)) == 'c')
2677 && GET_CODE (XEXP (*loc, 0)) == REG
2678 && GET_CODE (XEXP (*loc, 1)) == CONST_INT)
2680 rtx c = XEXP (*loc, 1);
2682 do_not_record = 0;
2683 hash = HASH (XEXP (*loc, 0), Pmode);
2684 do_not_record = save_do_not_record;
2685 hash_arg_in_memory = save_hash_arg_in_memory;
2686 hash_arg_in_struct = save_hash_arg_in_struct;
2688 elt = lookup (XEXP (*loc, 0), hash, Pmode);
2689 if (elt == 0)
2690 return;
2692 /* We need to find the best (under the criteria documented above) entry
2693 in the class that is valid. We use the `flag' field to indicate
2694 choices that were invalid and iterate until we can't find a better
2695 one that hasn't already been tried. */
2697 for (p = elt->first_same_value; p; p = p->next_same_value)
2698 p->flag = 0;
2700 while (found_better)
2702 int best_addr_cost = ADDRESS_COST (*loc);
2703 int best_rtx_cost = (COST (*loc) + 1) >> 1;
2704 struct table_elt *best_elt = elt;
2705 rtx best_rtx = *loc;
2706 int count;
2708 /* This is, in the worst case, an O(n^2) algorithm, so limit our search
2709 to the first 32 elements on the list. This avoids trouble
2710 compiling code with very long basic blocks that can easily
2711 call cse_gen_binary so many times that we run out of memory. */
2713 found_better = 0;
2714 for (p = elt->first_same_value, count = 0;
2715 p && count < 32;
2716 p = p->next_same_value, count++)
2717 if (! p->flag
2718 && (GET_CODE (p->exp) == REG
2719 || exp_equiv_p (p->exp, p->exp, 1, 0)))
2721 rtx new = cse_gen_binary (GET_CODE (*loc), Pmode, p->exp, c);
2723 if ((ADDRESS_COST (new) < best_addr_cost
2724 || (ADDRESS_COST (new) == best_addr_cost
2725 && (COST (new) + 1) >> 1 > best_rtx_cost)))
2727 found_better = 1;
2728 best_addr_cost = ADDRESS_COST (new);
2729 best_rtx_cost = (COST (new) + 1) >> 1;
2730 best_elt = p;
2731 best_rtx = new;
2735 if (found_better)
2737 if (validate_change (insn, loc,
2738 canon_reg (copy_rtx (best_rtx),
2739 NULL_RTX), 0))
2740 return;
2741 else
2742 best_elt->flag = 1;
2746 #endif
2749 /* Given an operation (CODE, *PARG1, *PARG2), where code is a comparison
2750 operation (EQ, NE, GT, etc.), follow it back through the hash table to
2751 find what values are actually being compared.
2753 *PARG1 and *PARG2 are updated to contain the rtx representing the values
2754 actually being compared. For example, if *PARG1 was (cc0) and *PARG2
2755 was (const_int 0), *PARG1 and *PARG2 will be set to the objects that were
2756 compared to produce cc0.
2758 The return value is the comparison operator: either CODE itself or the
2759 code corresponding to the inverse of the comparison. */
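/* Illustration: called with (CODE, *PARG1, *PARG2)
   = (NE, (compare (reg 66) (reg 67)), (const_int 0)), the COMPARE is
   unpacked directly and the function returns NE with *PARG1 = (reg 66)
   and *PARG2 = (reg 67).  On non-CC0 machines the same COMPARE may
   instead be found through the hash table as an equivalence for ARG1;
   when only an EQ-style store-flag equivalence is found, the reversed
   condition is returned.  The register numbers are hypothetical.  */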
2761 static enum rtx_code
2762 find_comparison_args (code, parg1, parg2, pmode1, pmode2)
2763 enum rtx_code code;
2764 rtx *parg1, *parg2;
2765 enum machine_mode *pmode1, *pmode2;
2767 rtx arg1, arg2;
2769 arg1 = *parg1, arg2 = *parg2;
2771 /* If ARG2 is const0_rtx, see what ARG1 is equivalent to. */
2773 while (arg2 == CONST0_RTX (GET_MODE (arg1)))
2775 /* Set non-zero when we find something of interest. */
2776 rtx x = 0;
2777 int reverse_code = 0;
2778 struct table_elt *p = 0;
2780 /* If arg1 is a COMPARE, extract the comparison arguments from it.
2781 On machines with CC0, this is the only case that can occur, since
2782 fold_rtx will return the COMPARE or item being compared with zero
2783 when given CC0. */
2785 if (GET_CODE (arg1) == COMPARE && arg2 == const0_rtx)
2786 x = arg1;
2788 /* If ARG1 is a comparison operator and CODE is testing for
2789 STORE_FLAG_VALUE, get the inner arguments. */
2791 else if (GET_RTX_CLASS (GET_CODE (arg1)) == '<')
2793 if (code == NE
2794 || (GET_MODE_CLASS (GET_MODE (arg1)) == MODE_INT
2795 && code == LT && STORE_FLAG_VALUE == -1)
2796 #ifdef FLOAT_STORE_FLAG_VALUE
2797 || (GET_MODE_CLASS (GET_MODE (arg1)) == MODE_FLOAT
2798 && FLOAT_STORE_FLAG_VALUE < 0)
2799 #endif
2801 x = arg1;
2802 else if (code == EQ
2803 || (GET_MODE_CLASS (GET_MODE (arg1)) == MODE_INT
2804 && code == GE && STORE_FLAG_VALUE == -1)
2805 #ifdef FLOAT_STORE_FLAG_VALUE
2806 || (GET_MODE_CLASS (GET_MODE (arg1)) == MODE_FLOAT
2807 && FLOAT_STORE_FLAG_VALUE < 0)
2808 #endif
2810 x = arg1, reverse_code = 1;
2813 /* ??? We could also check for
2815 (ne (and (eq (...) (const_int 1))) (const_int 0))
2817 and related forms, but let's wait until we see them occurring. */
2819 if (x == 0)
2820 /* Look up ARG1 in the hash table and see if it has an equivalence
2821 that lets us see what is being compared. */
2822 p = lookup (arg1, safe_hash (arg1, GET_MODE (arg1)) % NBUCKETS,
2823 GET_MODE (arg1));
2824 if (p) p = p->first_same_value;
2826 for (; p; p = p->next_same_value)
2828 enum machine_mode inner_mode = GET_MODE (p->exp);
2830 /* If the entry isn't valid, skip it. */
2831 if (! exp_equiv_p (p->exp, p->exp, 1, 0))
2832 continue;
2834 if (GET_CODE (p->exp) == COMPARE
2835 /* Another possibility is that this machine has a compare insn
2836 that includes the comparison code. In that case, ARG1 would
2837 be equivalent to a comparison operation that would set ARG1 to
2838 either STORE_FLAG_VALUE or zero. If this is an NE operation,
2839 ORIG_CODE is the actual comparison being done; if it is an EQ,
2840 we must reverse ORIG_CODE. On machines with a negative value
2841 for STORE_FLAG_VALUE, also look at LT and GE operations. */
2842 || ((code == NE
2843 || (code == LT
2844 && GET_MODE_CLASS (inner_mode) == MODE_INT
2845 && (GET_MODE_BITSIZE (inner_mode)
2846 <= HOST_BITS_PER_WIDE_INT)
2847 && (STORE_FLAG_VALUE
2848 & ((HOST_WIDE_INT) 1
2849 << (GET_MODE_BITSIZE (inner_mode) - 1))))
2850 #ifdef FLOAT_STORE_FLAG_VALUE
2851 || (code == LT
2852 && GET_MODE_CLASS (inner_mode) == MODE_FLOAT
2853 && FLOAT_STORE_FLAG_VALUE < 0)
2854 #endif
2856 && GET_RTX_CLASS (GET_CODE (p->exp)) == '<'))
2858 x = p->exp;
2859 break;
2861 else if ((code == EQ
2862 || (code == GE
2863 && GET_MODE_CLASS (inner_mode) == MODE_INT
2864 && (GET_MODE_BITSIZE (inner_mode)
2865 <= HOST_BITS_PER_WIDE_INT)
2866 && (STORE_FLAG_VALUE
2867 & ((HOST_WIDE_INT) 1
2868 << (GET_MODE_BITSIZE (inner_mode) - 1))))
2869 #ifdef FLOAT_STORE_FLAG_VALUE
2870 || (code == GE
2871 && GET_MODE_CLASS (inner_mode) == MODE_FLOAT
2872 && FLOAT_STORE_FLAG_VALUE < 0)
2873 #endif
2875 && GET_RTX_CLASS (GET_CODE (p->exp)) == '<')
2877 reverse_code = 1;
2878 x = p->exp;
2879 break;
2882 /* If this is fp + constant, the equivalent is a better operand since
2883 it may let us predict the value of the comparison. */
2884 else if (NONZERO_BASE_PLUS_P (p->exp))
2886 arg1 = p->exp;
2887 continue;
2891 /* If we didn't find a useful equivalence for ARG1, we are done.
2892 Otherwise, set up for the next iteration. */
2893 if (x == 0)
2894 break;
2896 arg1 = XEXP (x, 0), arg2 = XEXP (x, 1);
2897 if (GET_RTX_CLASS (GET_CODE (x)) == '<')
2898 code = GET_CODE (x);
2900 if (reverse_code)
2901 code = reverse_condition (code);
2904 /* Return our results. Return the modes from before fold_rtx
2905 because fold_rtx might produce const_int, and then it's too late. */
2906 *pmode1 = GET_MODE (arg1), *pmode2 = GET_MODE (arg2);
2907 *parg1 = fold_rtx (arg1, 0), *parg2 = fold_rtx (arg2, 0);
2909 return code;
2912 /* Try to simplify a unary operation CODE whose output mode is to be
2913 MODE with input operand OP whose mode was originally OP_MODE.
2914 Return zero if no simplification can be made. */
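/* Illustration (hypothetical operands): with a CONST_INT operand the
   switch below folds, e.g.,

       simplify_unary_operation (NEG, SImode, GEN_INT (5), SImode)
         ==> (const_int -5)

   and (zero_extend:SI (const_int -1)) with OP_MODE == QImode yields
   (const_int 255) after the low eight bits are masked in.  */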
2916 rtx
2917 simplify_unary_operation (code, mode, op, op_mode)
2918 enum rtx_code code;
2919 enum machine_mode mode;
2920 rtx op;
2921 enum machine_mode op_mode;
2923 register int width = GET_MODE_BITSIZE (mode);
2925 /* The order of these tests is critical so that, for example, we don't
2926 check the wrong mode (input vs. output) for a conversion operation,
2927 such as FIX. At some point, this should be simplified. */
2929 #if !defined(REAL_IS_NOT_DOUBLE) || defined(REAL_ARITHMETIC)
2931 if (code == FLOAT && GET_MODE (op) == VOIDmode
2932 && (GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT))
2934 HOST_WIDE_INT hv, lv;
2935 REAL_VALUE_TYPE d;
2937 if (GET_CODE (op) == CONST_INT)
2938 lv = INTVAL (op), hv = INTVAL (op) < 0 ? -1 : 0;
2939 else
2940 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
2942 #ifdef REAL_ARITHMETIC
2943 REAL_VALUE_FROM_INT (d, lv, hv);
2944 #else
2945 if (hv < 0)
2947 d = (double) (~ hv);
2948 d *= ((double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2))
2949 * (double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2)));
2950 d += (double) (unsigned HOST_WIDE_INT) (~ lv);
2951 d = (- d - 1.0);
2953 else
2955 d = (double) hv;
2956 d *= ((double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2))
2957 * (double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2)));
2958 d += (double) (unsigned HOST_WIDE_INT) lv;
2960 #endif /* REAL_ARITHMETIC */
2962 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
2964 else if (code == UNSIGNED_FLOAT && GET_MODE (op) == VOIDmode
2965 && (GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT))
2967 HOST_WIDE_INT hv, lv;
2968 REAL_VALUE_TYPE d;
2970 if (GET_CODE (op) == CONST_INT)
2971 lv = INTVAL (op), hv = INTVAL (op) < 0 ? -1 : 0;
2972 else
2973 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
2975 if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
2976 ;
2977 else
2978 hv = 0, lv &= GET_MODE_MASK (op_mode);
2980 #ifdef REAL_ARITHMETIC
2981 REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv);
2982 #else
2984 d = (double) (unsigned HOST_WIDE_INT) hv;
2985 d *= ((double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2))
2986 * (double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2)));
2987 d += (double) (unsigned HOST_WIDE_INT) lv;
2988 #endif /* REAL_ARITHMETIC */
2990 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
2992 #endif
2994 if (GET_CODE (op) == CONST_INT
2995 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
2997 register HOST_WIDE_INT arg0 = INTVAL (op);
2998 register HOST_WIDE_INT val;
3000 switch (code)
3002 case NOT:
3003 val = ~ arg0;
3004 break;
3006 case NEG:
3007 val = - arg0;
3008 break;
3010 case ABS:
3011 val = (arg0 >= 0 ? arg0 : - arg0);
3012 break;
3014 case FFS:
3015 /* Don't use ffs here. Instead, get low order bit and then its
3016 number. If arg0 is zero, this will return 0, as desired. */
3017 arg0 &= GET_MODE_MASK (mode);
3018 val = exact_log2 (arg0 & (- arg0)) + 1;
3019 break;
3021 case TRUNCATE:
3022 val = arg0;
3023 break;
3025 case ZERO_EXTEND:
3026 if (op_mode == VOIDmode)
3027 op_mode = mode;
3028 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
3030 /* If we were really extending the mode,
3031 we would have to distinguish between zero-extension
3032 and sign-extension. */
3033 if (width != GET_MODE_BITSIZE (op_mode))
3034 abort ();
3035 val = arg0;
3037 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
3038 val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
3039 else
3040 return 0;
3041 break;
3043 case SIGN_EXTEND:
3044 if (op_mode == VOIDmode)
3045 op_mode = mode;
3046 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
3048 /* If we were really extending the mode,
3049 we would have to distinguish between zero-extension
3050 and sign-extension. */
3051 if (width != GET_MODE_BITSIZE (op_mode))
3052 abort ();
3053 val = arg0;
3055 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
3057 val
3058 = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
3059 if (val
3060 & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
3061 val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
3063 else
3064 return 0;
3065 break;
3067 case SQRT:
3068 return 0;
3070 default:
3071 abort ();
3074 /* Clear the bits that don't belong in our mode,
3075 unless they and our sign bit are all one.
3076 So we get either a reasonable negative value or a reasonable
3077 unsigned value for this mode. */
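/* Worked example (assuming a 32-bit HOST_WIDE_INT and width == 8):
   the mask (-1) << 7 is 0xffffff80.  val == -16 (0xfffffff0) keeps all
   of those bits set, so it is left alone as a reasonable negative value;
   val == 0x130 does not, so it is truncated to 0x30.  */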
3078 if (width < HOST_BITS_PER_WIDE_INT
3079 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
3080 != ((HOST_WIDE_INT) (-1) << (width - 1))))
3081 val &= ((HOST_WIDE_INT) 1 << width) - 1;
3083 return GEN_INT (val);
3086 /* We can do some operations on integer CONST_DOUBLEs. Also allow
3087 for a DImode operation on a CONST_INT. */
3088 else if (GET_MODE (op) == VOIDmode && width <= HOST_BITS_PER_INT * 2
3089 && (GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT))
3091 HOST_WIDE_INT l1, h1, lv, hv;
3093 if (GET_CODE (op) == CONST_DOUBLE)
3094 l1 = CONST_DOUBLE_LOW (op), h1 = CONST_DOUBLE_HIGH (op);
3095 else
3096 l1 = INTVAL (op), h1 = l1 < 0 ? -1 : 0;
3098 switch (code)
3100 case NOT:
3101 lv = ~ l1;
3102 hv = ~ h1;
3103 break;
3105 case NEG:
3106 neg_double (l1, h1, &lv, &hv);
3107 break;
3109 case ABS:
3110 if (h1 < 0)
3111 neg_double (l1, h1, &lv, &hv);
3112 else
3113 lv = l1, hv = h1;
3114 break;
3116 case FFS:
3117 hv = 0;
3118 if (l1 == 0)
3119 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & (-h1)) + 1;
3120 else
3121 lv = exact_log2 (l1 & (-l1)) + 1;
3122 break;
3124 case TRUNCATE:
3125 /* This is just a change-of-mode, so do nothing. */
3126 lv = l1, hv = h1;
3127 break;
3129 case ZERO_EXTEND:
3130 if (op_mode == VOIDmode
3131 || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
3132 return 0;
3134 hv = 0;
3135 lv = l1 & GET_MODE_MASK (op_mode);
3136 break;
3138 case SIGN_EXTEND:
3139 if (op_mode == VOIDmode
3140 || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
3141 return 0;
3142 else
3144 lv = l1 & GET_MODE_MASK (op_mode);
3145 if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
3146 && (lv & ((HOST_WIDE_INT) 1
3147 << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
3148 lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
3150 hv = (lv < 0) ? ~ (HOST_WIDE_INT) 0 : 0;
3152 break;
3154 case SQRT:
3155 return 0;
3157 default:
3158 return 0;
3161 return immed_double_const (lv, hv, mode);
3164 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
3165 else if (GET_CODE (op) == CONST_DOUBLE
3166 && GET_MODE_CLASS (mode) == MODE_FLOAT)
3168 REAL_VALUE_TYPE d;
3169 jmp_buf handler;
3170 rtx x;
3172 if (setjmp (handler))
3173 /* There used to be a warning here, but that is inadvisable.
3174 People may want to cause traps, and the natural way
3175 to do it should not get a warning. */
3176 return 0;
3178 set_float_handler (handler);
3180 REAL_VALUE_FROM_CONST_DOUBLE (d, op);
3182 switch (code)
3184 case NEG:
3185 d = REAL_VALUE_NEGATE (d);
3186 break;
3188 case ABS:
3189 if (REAL_VALUE_NEGATIVE (d))
3190 d = REAL_VALUE_NEGATE (d);
3191 break;
3193 case FLOAT_TRUNCATE:
3194 d = real_value_truncate (mode, d);
3195 break;
3197 case FLOAT_EXTEND:
3198 /* All this does is change the mode. */
3199 break;
3201 case FIX:
3202 d = REAL_VALUE_RNDZINT (d);
3203 break;
3205 case UNSIGNED_FIX:
3206 d = REAL_VALUE_UNSIGNED_RNDZINT (d);
3207 break;
3209 case SQRT:
3210 return 0;
3212 default:
3213 abort ();
3216 x = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
3217 set_float_handler (NULL_PTR);
3218 return x;
3221 else if (GET_CODE (op) == CONST_DOUBLE
3222 && GET_MODE_CLASS (GET_MODE (op)) == MODE_FLOAT
3223 && GET_MODE_CLASS (mode) == MODE_INT
3224 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
3226 REAL_VALUE_TYPE d;
3227 jmp_buf handler;
3228 HOST_WIDE_INT val;
3230 if (setjmp (handler))
3231 return 0;
3233 set_float_handler (handler);
3235 REAL_VALUE_FROM_CONST_DOUBLE (d, op);
3237 switch (code)
3239 case FIX:
3240 val = REAL_VALUE_FIX (d);
3241 break;
3243 case UNSIGNED_FIX:
3244 val = REAL_VALUE_UNSIGNED_FIX (d);
3245 break;
3247 default:
3248 abort ();
3251 set_float_handler (NULL_PTR);
3253 /* Clear the bits that don't belong in our mode,
3254 unless they and our sign bit are all one.
3255 So we get either a reasonable negative value or a reasonable
3256 unsigned value for this mode. */
3257 if (width < HOST_BITS_PER_WIDE_INT
3258 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
3259 != ((HOST_WIDE_INT) (-1) << (width - 1))))
3260 val &= ((HOST_WIDE_INT) 1 << width) - 1;
3262 return GEN_INT (val);
3264 #endif
3265 /* This was formerly used only for non-IEEE float.
3266 eggert@twinsun.com says it is safe for IEEE also. */
3267 else
3269 /* There are some simplifications we can do even if the operands
3270 aren't constant. */
3271 switch (code)
3273 case NEG:
3274 case NOT:
3275 /* (not (not X)) == X, similarly for NEG. */
3276 if (GET_CODE (op) == code)
3277 return XEXP (op, 0);
3278 break;
3280 case SIGN_EXTEND:
3281 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
3282 becomes just the MINUS if its mode is MODE. This allows
3283 folding switch statements on machines using casesi (such as
3284 the Vax). */
3285 if (GET_CODE (op) == TRUNCATE
3286 && GET_MODE (XEXP (op, 0)) == mode
3287 && GET_CODE (XEXP (op, 0)) == MINUS
3288 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
3289 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
3290 return XEXP (op, 0);
3291 break;
3294 return 0;
3298 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
3299 and OP1. Return 0 if no simplification is possible.
3301 Don't use this for relational operations such as EQ or LT.
3302 Use simplify_relational_operation instead. */
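/* Illustration: simplify_binary_operation (PLUS, SImode, GEN_INT (6),
   GEN_INT (7)) folds to (const_int 13) in the CONST_INT arithmetic
   further on, while a PLUS of two unrelated registers simply returns 0;
   failing to simplify is the common and harmless outcome.  */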
3304 rtx
3305 simplify_binary_operation (code, mode, op0, op1)
3306 enum rtx_code code;
3307 enum machine_mode mode;
3308 rtx op0, op1;
3310 register HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
3311 HOST_WIDE_INT val;
3312 int width = GET_MODE_BITSIZE (mode);
3313 rtx tem;
3315 /* Relational operations don't work here. We must know the mode
3316 of the operands in order to do the comparison correctly.
3317 Assuming a full word can give incorrect results.
3318 Consider comparing 128 with -128 in QImode. */
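/* (Both have the QImode bit pattern 0x80: as full host words 128 > -128,
   but as QImode values they are equal, so the operands' real mode is
   indispensable.) */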
3320 if (GET_RTX_CLASS (code) == '<')
3321 abort ();
3323 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
3324 if (GET_MODE_CLASS (mode) == MODE_FLOAT
3325 && GET_CODE (op0) == CONST_DOUBLE && GET_CODE (op1) == CONST_DOUBLE
3326 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
3328 REAL_VALUE_TYPE f0, f1, value;
3329 jmp_buf handler;
3331 if (setjmp (handler))
3332 return 0;
3334 set_float_handler (handler);
3336 REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
3337 REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
3338 f0 = real_value_truncate (mode, f0);
3339 f1 = real_value_truncate (mode, f1);
3341 #ifdef REAL_ARITHMETIC
3342 REAL_ARITHMETIC (value, rtx_to_tree_code (code), f0, f1);
3343 #else
3344 switch (code)
3346 case PLUS:
3347 value = f0 + f1;
3348 break;
3349 case MINUS:
3350 value = f0 - f1;
3351 break;
3352 case MULT:
3353 value = f0 * f1;
3354 break;
3355 case DIV:
3356 #ifndef REAL_INFINITY
3357 if (f1 == 0)
3358 return 0;
3359 #endif
3360 value = f0 / f1;
3361 break;
3362 case SMIN:
3363 value = MIN (f0, f1);
3364 break;
3365 case SMAX:
3366 value = MAX (f0, f1);
3367 break;
3368 default:
3369 abort ();
3371 #endif
3373 value = real_value_truncate (mode, value);
3374 set_float_handler (NULL_PTR);
3375 return CONST_DOUBLE_FROM_REAL_VALUE (value, mode);
3377 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
3379 /* We can fold some multi-word operations. */
3380 if (GET_MODE_CLASS (mode) == MODE_INT
3381 && width == HOST_BITS_PER_WIDE_INT * 2
3382 && (GET_CODE (op0) == CONST_DOUBLE || GET_CODE (op0) == CONST_INT)
3383 && (GET_CODE (op1) == CONST_DOUBLE || GET_CODE (op1) == CONST_INT))
3385 HOST_WIDE_INT l1, l2, h1, h2, lv, hv;
3387 if (GET_CODE (op0) == CONST_DOUBLE)
3388 l1 = CONST_DOUBLE_LOW (op0), h1 = CONST_DOUBLE_HIGH (op0);
3389 else
3390 l1 = INTVAL (op0), h1 = l1 < 0 ? -1 : 0;
3392 if (GET_CODE (op1) == CONST_DOUBLE)
3393 l2 = CONST_DOUBLE_LOW (op1), h2 = CONST_DOUBLE_HIGH (op1);
3394 else
3395 l2 = INTVAL (op1), h2 = l2 < 0 ? -1 : 0;
3397 switch (code)
3399 case MINUS:
3400 /* A - B == A + (-B). */
3401 neg_double (l2, h2, &lv, &hv);
3402 l2 = lv, h2 = hv;
3404 /* .. fall through ... */
3406 case PLUS:
3407 add_double (l1, h1, l2, h2, &lv, &hv);
3408 break;
3410 case MULT:
3411 mul_double (l1, h1, l2, h2, &lv, &hv);
3412 break;
3414 case DIV: case MOD: case UDIV: case UMOD:
3415 /* We'd need to include tree.h to do this and it doesn't seem worth
3416 it. */
3417 return 0;
3419 case AND:
3420 lv = l1 & l2, hv = h1 & h2;
3421 break;
3423 case IOR:
3424 lv = l1 | l2, hv = h1 | h2;
3425 break;
3427 case XOR:
3428 lv = l1 ^ l2, hv = h1 ^ h2;
3429 break;
3431 case SMIN:
3432 if (h1 < h2
3433 || (h1 == h2
3434 && ((unsigned HOST_WIDE_INT) l1
3435 < (unsigned HOST_WIDE_INT) l2)))
3436 lv = l1, hv = h1;
3437 else
3438 lv = l2, hv = h2;
3439 break;
3441 case SMAX:
3442 if (h1 > h2
3443 || (h1 == h2
3444 && ((unsigned HOST_WIDE_INT) l1
3445 > (unsigned HOST_WIDE_INT) l2)))
3446 lv = l1, hv = h1;
3447 else
3448 lv = l2, hv = h2;
3449 break;
3451 case UMIN:
3452 if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
3453 || (h1 == h2
3454 && ((unsigned HOST_WIDE_INT) l1
3455 < (unsigned HOST_WIDE_INT) l2)))
3456 lv = l1, hv = h1;
3457 else
3458 lv = l2, hv = h2;
3459 break;
3461 case UMAX:
3462 if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
3463 || (h1 == h2
3464 && ((unsigned HOST_WIDE_INT) l1
3465 > (unsigned HOST_WIDE_INT) l2)))
3466 lv = l1, hv = h1;
3467 else
3468 lv = l2, hv = h2;
3469 break;
3471 case LSHIFTRT: case ASHIFTRT:
3472 case ASHIFT:
3473 case ROTATE: case ROTATERT:
3474 #ifdef SHIFT_COUNT_TRUNCATED
3475 if (SHIFT_COUNT_TRUNCATED)
3476 l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
3477 #endif
3479 if (h2 != 0 || l2 < 0 || l2 >= GET_MODE_BITSIZE (mode))
3480 return 0;
3482 if (code == LSHIFTRT || code == ASHIFTRT)
3483 rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
3484 code == ASHIFTRT);
3485 else if (code == ASHIFT)
3486 lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
3487 else if (code == ROTATE)
3488 lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
3489 else /* code == ROTATERT */
3490 rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
3491 break;
3493 default:
3494 return 0;
3497 return immed_double_const (lv, hv, mode);
3500 if (GET_CODE (op0) != CONST_INT || GET_CODE (op1) != CONST_INT
3501 || width > HOST_BITS_PER_WIDE_INT || width == 0)
3503 /* Even if we can't compute a constant result,
3504 there are some cases worth simplifying. */
3506 switch (code)
3508 case PLUS:
3509 /* In IEEE floating point, x+0 is not the same as x. Similarly
3510 for the other optimizations below. */
3511 if (TARGET_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
3512 && FLOAT_MODE_P (mode) && ! flag_fast_math)
3513 break;
3515 if (op1 == CONST0_RTX (mode))
3516 return op0;
3518 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)) */
3519 if (GET_CODE (op0) == NEG)
3520 return cse_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
3521 else if (GET_CODE (op1) == NEG)
3522 return cse_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
3524 /* Handle both-operands-constant cases. We can only add
3525 CONST_INTs to constants since the sum of relocatable symbols
3526 can't be handled by most assemblers. Don't add CONST_INT
3527 to CONST_INT since overflow won't be computed properly if wider
3528 than HOST_BITS_PER_WIDE_INT. */
3530 if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
3531 && GET_CODE (op1) == CONST_INT)
3532 return plus_constant (op0, INTVAL (op1));
3533 else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
3534 && GET_CODE (op0) == CONST_INT)
3535 return plus_constant (op1, INTVAL (op0));
3537 /* See if this is something like X * C - X or vice versa or
3538 if the multiplication is written as a shift. If so, we can
3539 distribute and make a new multiply, shift, or maybe just
3540 have X (if C is 2 in the example above). But don't make
3541 real multiply if we didn't have one before. */
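/* Illustration: (plus (mult X (const_int 2)) X) gets coeff0 == 2,
   coeff1 == 1 and folds to (mult X (const_int 3)), permitted because a
   real MULT was already present.  Had the doubling been written as a
   shift, HAD_MULT would stay zero and a MULT result would be refused
   just below.  */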
3543 if (! FLOAT_MODE_P (mode))
3545 HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
3546 rtx lhs = op0, rhs = op1;
3547 int had_mult = 0;
3549 if (GET_CODE (lhs) == NEG)
3550 coeff0 = -1, lhs = XEXP (lhs, 0);
3551 else if (GET_CODE (lhs) == MULT
3552 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
3554 coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
3555 had_mult = 1;
3557 else if (GET_CODE (lhs) == ASHIFT
3558 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
3559 && INTVAL (XEXP (lhs, 1)) >= 0
3560 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
3562 coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
3563 lhs = XEXP (lhs, 0);
3566 if (GET_CODE (rhs) == NEG)
3567 coeff1 = -1, rhs = XEXP (rhs, 0);
3568 else if (GET_CODE (rhs) == MULT
3569 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
3571 coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
3572 had_mult = 1;
3574 else if (GET_CODE (rhs) == ASHIFT
3575 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
3576 && INTVAL (XEXP (rhs, 1)) >= 0
3577 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
3579 coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
3580 rhs = XEXP (rhs, 0);
3583 if (rtx_equal_p (lhs, rhs))
3585 tem = cse_gen_binary (MULT, mode, lhs,
3586 GEN_INT (coeff0 + coeff1));
3587 return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
3591 /* If one of the operands is a PLUS or a MINUS, see if we can
3592 simplify this by the associative law.
3593 Don't use the associative law for floating point.
3594 The inaccuracy makes it nonassociative,
3595 and subtle programs can break if operations are associated. */
3597 if (INTEGRAL_MODE_P (mode)
3598 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
3599 || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS)
3600 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
3601 return tem;
3602 break;
3604 case COMPARE:
3605 #ifdef HAVE_cc0
3606 /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
3607 using cc0, in which case we want to leave it as a COMPARE
3608 so we can distinguish it from a register-register-copy.
3610 In IEEE floating point, x-0 is not the same as x. */
3612 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
3613 || ! FLOAT_MODE_P (mode) || flag_fast_math)
3614 && op1 == CONST0_RTX (mode))
3615 return op0;
3616 #else
3617 /* Do nothing here. */
3618 #endif
3619 break;
3621 case MINUS:
3622 /* None of these optimizations can be done for IEEE
3623 floating point. */
3624 if (TARGET_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
3625 && FLOAT_MODE_P (mode) && ! flag_fast_math)
3626 break;
3628 /* We can't assume x-x is 0 even with non-IEEE floating point,
3629 but since it is zero except in very strange circumstances, we
3630 will treat it as zero with -ffast-math. */
3631 if (rtx_equal_p (op0, op1)
3632 && ! side_effects_p (op0)
3633 && (! FLOAT_MODE_P (mode) || flag_fast_math))
3634 return CONST0_RTX (mode);
3636 /* Change subtraction from zero into negation. */
3637 if (op0 == CONST0_RTX (mode))
3638 return gen_rtx (NEG, mode, op1);
3640 /* (-1 - a) is ~a. */
3641 if (op0 == constm1_rtx)
3642 return gen_rtx (NOT, mode, op1);
3644 /* Subtracting 0 has no effect. */
3645 if (op1 == CONST0_RTX (mode))
3646 return op0;
3648 /* See if this is something like X * C - X or vice versa or
3649 if the multiplication is written as a shift. If so, we can
3650 distribute and make a new multiply, shift, or maybe just
3651 have X (if C is 2 in the example above). But don't make
3652 real multiply if we didn't have one before. */
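/* Illustration: here C == 2 is the case named above:
   (minus (mult X (const_int 2)) X) gets coeff0 == 2, coeff1 == 1, and
   cse_gen_binary (MULT, mode, X, const1_rtx) collapses to just X via
   the x*1 rule in the MULT case below.  */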
3654 if (! FLOAT_MODE_P (mode))
3656 HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
3657 rtx lhs = op0, rhs = op1;
3658 int had_mult = 0;
3660 if (GET_CODE (lhs) == NEG)
3661 coeff0 = -1, lhs = XEXP (lhs, 0);
3662 else if (GET_CODE (lhs) == MULT
3663 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
3665 coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
3666 had_mult = 1;
3668 else if (GET_CODE (lhs) == ASHIFT
3669 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
3670 && INTVAL (XEXP (lhs, 1)) >= 0
3671 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
3673 coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
3674 lhs = XEXP (lhs, 0);
3677 if (GET_CODE (rhs) == NEG)
3678 coeff1 = - 1, rhs = XEXP (rhs, 0);
3679 else if (GET_CODE (rhs) == MULT
3680 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
3682 coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
3683 had_mult = 1;
3685 else if (GET_CODE (rhs) == ASHIFT
3686 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
3687 && INTVAL (XEXP (rhs, 1)) >= 0
3688 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
3690 coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
3691 rhs = XEXP (rhs, 0);
3694 if (rtx_equal_p (lhs, rhs))
3696 tem = cse_gen_binary (MULT, mode, lhs,
3697 GEN_INT (coeff0 - coeff1));
3698 return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
3702 /* (a - (-b)) -> (a + b). */
3703 if (GET_CODE (op1) == NEG)
3704 return cse_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
3706 /* If one of the operands is a PLUS or a MINUS, see if we can
3707 simplify this by the associative law.
3708 Don't use the associative law for floating point.
3709 The inaccuracy makes it nonassociative,
3710 and subtle programs can break if operations are associated. */
3712 if (INTEGRAL_MODE_P (mode)
3713 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
3714 || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS)
3715 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
3716 return tem;
3718 /* Don't let a relocatable value get a negative coeff. */
3719 if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
3720 return plus_constant (op0, - INTVAL (op1));
3722 /* (x - (x & y)) -> (x & ~y) */
3723 if (GET_CODE (op1) == AND)
3725 if (rtx_equal_p (op0, XEXP (op1, 0)))
3726 return cse_gen_binary (AND, mode, op0, gen_rtx (NOT, mode, XEXP (op1, 1)));
3727 if (rtx_equal_p (op0, XEXP (op1, 1)))
3728 return cse_gen_binary (AND, mode, op0, gen_rtx (NOT, mode, XEXP (op1, 0)));
3730 break;
3732 case MULT:
3733 if (op1 == constm1_rtx)
3735 tem = simplify_unary_operation (NEG, mode, op0, mode);
3737 return tem ? tem : gen_rtx (NEG, mode, op0);
3740 /* In IEEE floating point, x*0 is not always 0. */
3741 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
3742 || ! FLOAT_MODE_P (mode) || flag_fast_math)
3743 && op1 == CONST0_RTX (mode)
3744 && ! side_effects_p (op0))
3745 return op1;
3747 /* In IEEE floating point, x*1 is not equivalent to x for nans.
3748 However, ANSI says we can drop signals,
3749 so we can do this anyway. */
3750 if (op1 == CONST1_RTX (mode))
3751 return op0;
3753 /* Convert multiply by constant power of two into shift unless
3754 we are still generating RTL. This test is a kludge. */
3755 if (GET_CODE (op1) == CONST_INT
3756 && (val = exact_log2 (INTVAL (op1))) >= 0
3757 && ! rtx_equal_function_value_matters)
3758 return gen_rtx (ASHIFT, mode, op0, GEN_INT (val));
3760 if (GET_CODE (op1) == CONST_DOUBLE
3761 && GET_MODE_CLASS (GET_MODE (op1)) == MODE_FLOAT)
3763 REAL_VALUE_TYPE d;
3764 jmp_buf handler;
3765 int op1is2, op1ism1;
3767 if (setjmp (handler))
3768 return 0;
3770 set_float_handler (handler);
3771 REAL_VALUE_FROM_CONST_DOUBLE (d, op1);
3772 op1is2 = REAL_VALUES_EQUAL (d, dconst2);
3773 op1ism1 = REAL_VALUES_EQUAL (d, dconstm1);
3774 set_float_handler (NULL_PTR);
3776 /* x*2 is x+x and x*(-1) is -x */
3777 if (op1is2 && GET_MODE (op0) == mode)
3778 return gen_rtx (PLUS, mode, op0, copy_rtx (op0));
3780 else if (op1ism1 && GET_MODE (op0) == mode)
3781 return gen_rtx (NEG, mode, op0);
3783 break;
3785 case IOR:
3786 if (op1 == const0_rtx)
3787 return op0;
3788 if (GET_CODE (op1) == CONST_INT
3789 && (INTVAL (op1) & GET_MODE_MASK (mode)) == GET_MODE_MASK (mode))
3790 return op1;
3791 if (rtx_equal_p (op0, op1) && ! side_effects_p (op0))
3792 return op0;
3793 /* A | (~A) -> -1 */
3794 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
3795 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
3796 && ! side_effects_p (op0)
3797 && GET_MODE_CLASS (mode) != MODE_CC)
3798 return constm1_rtx;
3799 break;
3801 case XOR:
3802 if (op1 == const0_rtx)
3803 return op0;
3804 if (GET_CODE (op1) == CONST_INT
3805 && (INTVAL (op1) & GET_MODE_MASK (mode)) == GET_MODE_MASK (mode))
3806 return gen_rtx (NOT, mode, op0);
3807 if (op0 == op1 && ! side_effects_p (op0)
3808 && GET_MODE_CLASS (mode) != MODE_CC)
3809 return const0_rtx;
3810 break;
3812 case AND:
3813 if (op1 == const0_rtx && ! side_effects_p (op0))
3814 return const0_rtx;
3815 if (GET_CODE (op1) == CONST_INT
3816 && (INTVAL (op1) & GET_MODE_MASK (mode)) == GET_MODE_MASK (mode))
3817 return op0;
3818 if (op0 == op1 && ! side_effects_p (op0)
3819 && GET_MODE_CLASS (mode) != MODE_CC)
3820 return op0;
3821 /* A & (~A) -> 0 */
3822 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
3823 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
3824 && ! side_effects_p (op0)
3825 && GET_MODE_CLASS (mode) != MODE_CC)
3826 return const0_rtx;
3827 break;
3829 case UDIV:
3830 /* Convert divide by power of two into shift (divide by 1 handled
3831 below). */
3832 if (GET_CODE (op1) == CONST_INT
3833 && (arg1 = exact_log2 (INTVAL (op1))) > 0)
3834 return gen_rtx (LSHIFTRT, mode, op0, GEN_INT (arg1));
3836 /* ... fall through ... */
3838 case DIV:
3839 if (op1 == CONST1_RTX (mode))
3840 return op0;
3842 /* In IEEE floating point, 0/x is not always 0. */
3843 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
3844 || ! FLOAT_MODE_P (mode) || flag_fast_math)
3845 && op0 == CONST0_RTX (mode)
3846 && ! side_effects_p (op1))
3847 return op0;
3849 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
3850 /* Change division by a constant into multiplication. Only do
3851 this with -ffast-math until an expert says it is safe in
3852 general. */
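	 /* The reciprocal is rarely exact: x / 3.0 and x * (1.0/3.0) can
	    differ in the last bit, which is why this is guarded by
	    flag_fast_math.  */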
3853 else if (GET_CODE (op1) == CONST_DOUBLE
3854 && GET_MODE_CLASS (GET_MODE (op1)) == MODE_FLOAT
3855 && op1 != CONST0_RTX (mode)
3856 && flag_fast_math)
3858 REAL_VALUE_TYPE d;
3859 REAL_VALUE_FROM_CONST_DOUBLE (d, op1);
3861 if (! REAL_VALUES_EQUAL (d, dconst0))
3863 #if defined (REAL_ARITHMETIC)
3864 REAL_ARITHMETIC (d, rtx_to_tree_code (DIV), dconst1, d);
3865 return gen_rtx (MULT, mode, op0,
3866 CONST_DOUBLE_FROM_REAL_VALUE (d, mode));
3867 #else
3868 return gen_rtx (MULT, mode, op0,
3869 CONST_DOUBLE_FROM_REAL_VALUE (1./d, mode));
3870 #endif
3873 #endif
3874 break;
3876 case UMOD:
3877 /* Handle modulus by power of two (mod with 1 handled below). */
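      /* E.g. (umod x (const_int 8)) becomes (and x (const_int 7)):
	 13 % 8 and 13 & 7 are both 5.  */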
3878 if (GET_CODE (op1) == CONST_INT
3879 && exact_log2 (INTVAL (op1)) > 0)
3880 return gen_rtx (AND, mode, op0, GEN_INT (INTVAL (op1) - 1));
3882 /* ... fall through ... */
3884 case MOD:
3885 if ((op0 == const0_rtx || op1 == const1_rtx)
3886 && ! side_effects_p (op0) && ! side_effects_p (op1))
3887 return const0_rtx;
3888 break;
3890 case ROTATERT:
3891 case ROTATE:
3892 /* Rotating ~0 always results in ~0. */
3893 if (GET_CODE (op0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
3894 && INTVAL (op0) == GET_MODE_MASK (mode)
3895 && ! side_effects_p (op1))
3896 return op0;
3898 /* ... fall through ... */
3900 case ASHIFT:
3901 case ASHIFTRT:
3902 case LSHIFTRT:
3903 if (op1 == const0_rtx)
3904 return op0;
3905 if (op0 == const0_rtx && ! side_effects_p (op1))
3906 return op0;
3907 break;
3909 case SMIN:
3910 if (width <= HOST_BITS_PER_WIDE_INT && GET_CODE (op1) == CONST_INT
3911 && INTVAL (op1) == (HOST_WIDE_INT) 1 << (width - 1)
3912 && ! side_effects_p (op0))
3913 return op1;
3914 else if (rtx_equal_p (op0, op1) && ! side_effects_p (op0))
3915 return op0;
3916 break;
3918 case SMAX:
3919 if (width <= HOST_BITS_PER_WIDE_INT && GET_CODE (op1) == CONST_INT
3920 && (INTVAL (op1)
3921 == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
3922 && ! side_effects_p (op0))
3923 return op1;
3924 else if (rtx_equal_p (op0, op1) && ! side_effects_p (op0))
3925 return op0;
3926 break;
3928 case UMIN:
3929 if (op1 == const0_rtx && ! side_effects_p (op0))
3930 return op1;
3931 else if (rtx_equal_p (op0, op1) && ! side_effects_p (op0))
3932 return op0;
3933 break;
3935 case UMAX:
3936 if (op1 == constm1_rtx && ! side_effects_p (op0))
3937 return op1;
3938 else if (rtx_equal_p (op0, op1) && ! side_effects_p (op0))
3939 return op0;
3940 break;
3942 default:
3943 abort ();
3946 return 0;
3949 /* Get the integer argument values in two forms:
3950 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
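  /* E.g. for an 8-bit mode, an op0 of (const_int -1) yields
     arg0 == 0xff after the masking below but arg0s == -1 after
     the sign extension.  */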
3952 arg0 = INTVAL (op0);
3953 arg1 = INTVAL (op1);
3955 if (width < HOST_BITS_PER_WIDE_INT)
3957 arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
3958 arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;
3960 arg0s = arg0;
3961 if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
3962 arg0s |= ((HOST_WIDE_INT) (-1) << width);
3964 arg1s = arg1;
3965 if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
3966 arg1s |= ((HOST_WIDE_INT) (-1) << width);
3968 else
3970 arg0s = arg0;
3971 arg1s = arg1;
3974 /* Compute the value of the arithmetic. */
3976 switch (code)
3978 case PLUS:
3979 val = arg0s + arg1s;
3980 break;
3982 case MINUS:
3983 val = arg0s - arg1s;
3984 break;
3986 case MULT:
3987 val = arg0s * arg1s;
3988 break;
3990 case DIV:
3991 if (arg1s == 0)
3992 return 0;
3993 val = arg0s / arg1s;
3994 break;
3996 case MOD:
3997 if (arg1s == 0)
3998 return 0;
3999 val = arg0s % arg1s;
4000 break;
4002 case UDIV:
4003 if (arg1 == 0)
4004 return 0;
4005 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
4006 break;
4008 case UMOD:
4009 if (arg1 == 0)
4010 return 0;
4011 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
4012 break;
4014 case AND:
4015 val = arg0 & arg1;
4016 break;
4018 case IOR:
4019 val = arg0 | arg1;
4020 break;
4022 case XOR:
4023 val = arg0 ^ arg1;
4024 break;
4026 case LSHIFTRT:
4027 /* If shift count is undefined, don't fold it; let the machine do
4028 what it wants. But truncate it if the machine will do that. */
4029 if (arg1 < 0)
4030 return 0;
4032 #ifdef SHIFT_COUNT_TRUNCATED
4033 if (SHIFT_COUNT_TRUNCATED)
4034 arg1 %= width;
4035 #endif
4037 val = ((unsigned HOST_WIDE_INT) arg0) >> arg1;
4038 break;
4040 case ASHIFT:
4041 if (arg1 < 0)
4042 return 0;
4044 #ifdef SHIFT_COUNT_TRUNCATED
4045 if (SHIFT_COUNT_TRUNCATED)
4046 arg1 %= width;
4047 #endif
4049 val = ((unsigned HOST_WIDE_INT) arg0) << arg1;
4050 break;
4052 case ASHIFTRT:
4053 if (arg1 < 0)
4054 return 0;
4056 #ifdef SHIFT_COUNT_TRUNCATED
4057 if (SHIFT_COUNT_TRUNCATED)
4058 arg1 %= width;
4059 #endif
4061 val = arg0s >> arg1;
4063 /* The bootstrap compiler may not have sign-extended the right shift.
4064 Manually extend the sign to ensure the bootstrap cc matches gcc. */
4065 if (arg0s < 0 && arg1 > 0)
4066 val |= ((HOST_WIDE_INT) -1) << (HOST_BITS_PER_WIDE_INT - arg1);
4068 break;
4070 case ROTATERT:
4071 if (arg1 < 0)
4072 return 0;
4074 arg1 %= width;
4075 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
4076 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
4077 break;
4079 case ROTATE:
4080 if (arg1 < 0)
4081 return 0;
4083 arg1 %= width;
4084 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
4085 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
4086 break;
4088 case COMPARE:
4089 /* Do nothing here. */
4090 return 0;
4092 case SMIN:
4093 val = arg0s <= arg1s ? arg0s : arg1s;
4094 break;
4096 case UMIN:
4097 val = ((unsigned HOST_WIDE_INT) arg0
4098 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
4099 break;
4101 case SMAX:
4102 val = arg0s > arg1s ? arg0s : arg1s;
4103 break;
4105 case UMAX:
4106 val = ((unsigned HOST_WIDE_INT) arg0
4107 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
4108 break;
4110 default:
4111 abort ();
4114 /* Clear the bits that don't belong in our mode, unless they and our sign
4115 bit are all one. So we get either a reasonable negative value or a
4116 reasonable unsigned value for this mode. */
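  /* E.g. in an 8-bit mode, a computed VAL of 0x100 is masked down to 0,
     while a VAL with the sign bit and all higher bits set (-128) is
     left alone.  */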
4117 if (width < HOST_BITS_PER_WIDE_INT
4118 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
4119 != ((HOST_WIDE_INT) (-1) << (width - 1))))
4120 val &= ((HOST_WIDE_INT) 1 << width) - 1;
4122 return GEN_INT (val);
4125 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
4126 PLUS or MINUS.
4128 Rather than test for specific cases, we do this by a brute-force method
4129 and do all possible simplifications until no more changes occur. Then
4130 we rebuild the operation. */
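/* For example, (plus (minus a b) (neg c)) is first broken down into the
   signed operands +a, -b and -c; pairs from that list are then simplified
   against each other before the result is rebuilt.  */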
4132 static rtx
4133 simplify_plus_minus (code, mode, op0, op1)
4134 enum rtx_code code;
4135 enum machine_mode mode;
4136 rtx op0, op1;
4138 rtx ops[8];
4139 int negs[8];
4140 rtx result, tem;
4141 int n_ops = 2, input_ops = 2, input_consts = 0, n_consts = 0;
4142 int first = 1, negate = 0, changed;
4143 int i, j;
4145 bzero ((char *) ops, sizeof ops);
4147 /* Set up the two operands and then expand them until nothing has been
4148 changed. If we run out of room in our array, give up; this should
4149 almost never happen. */
4151 ops[0] = op0, ops[1] = op1, negs[0] = 0, negs[1] = (code == MINUS);
4153 changed = 1;
4154 while (changed)
4156 changed = 0;
4158 for (i = 0; i < n_ops; i++)
4159 switch (GET_CODE (ops[i]))
4161 case PLUS:
4162 case MINUS:
4163 if (n_ops == 7)
4164 return 0;
4166 ops[n_ops] = XEXP (ops[i], 1);
4167 negs[n_ops++] = GET_CODE (ops[i]) == MINUS ? !negs[i] : negs[i];
4168 ops[i] = XEXP (ops[i], 0);
4169 input_ops++;
4170 changed = 1;
4171 break;
4173 case NEG:
4174 ops[i] = XEXP (ops[i], 0);
4175 negs[i] = ! negs[i];
4176 changed = 1;
4177 break;
4179 case CONST:
4180 ops[i] = XEXP (ops[i], 0);
4181 input_consts++;
4182 changed = 1;
4183 break;
4185 case NOT:
4186 /* ~a -> (-a - 1) */
4187 if (n_ops != 7)
4189 ops[n_ops] = constm1_rtx;
4190 negs[n_ops++] = negs[i];
4191 ops[i] = XEXP (ops[i], 0);
4192 negs[i] = ! negs[i];
4193 changed = 1;
4195 break;
4197 case CONST_INT:
4198 if (negs[i])
4199 ops[i] = GEN_INT (- INTVAL (ops[i])), negs[i] = 0, changed = 1;
4200 break;
4204 /* If we only have two operands, we can't do anything. */
4205 if (n_ops <= 2)
4206 return 0;
4208 /* Now simplify each pair of operands until nothing changes. The first
4209 time through just simplify constants against each other. */
4211 changed = 1;
4212 while (changed)
4214 changed = first;
4216 for (i = 0; i < n_ops - 1; i++)
4217 for (j = i + 1; j < n_ops; j++)
4218 if (ops[i] != 0 && ops[j] != 0
4219 && (! first || (CONSTANT_P (ops[i]) && CONSTANT_P (ops[j]))))
4221 rtx lhs = ops[i], rhs = ops[j];
4222 enum rtx_code ncode = PLUS;
4224 if (negs[i] && ! negs[j])
4225 lhs = ops[j], rhs = ops[i], ncode = MINUS;
4226 else if (! negs[i] && negs[j])
4227 ncode = MINUS;
4229 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
4230 if (tem)
4232 ops[i] = tem, ops[j] = 0;
4233 negs[i] = negs[i] && negs[j];
4234 if (GET_CODE (tem) == NEG)
4235 ops[i] = XEXP (tem, 0), negs[i] = ! negs[i];
4237 if (GET_CODE (ops[i]) == CONST_INT && negs[i])
4238 ops[i] = GEN_INT (- INTVAL (ops[i])), negs[i] = 0;
4239 changed = 1;
4243 first = 0;
4246 /* Pack all the operands to the lower-numbered entries and give up if
4247 we didn't reduce the number of operands we had. Make sure we
4248 count a CONST as two operands. If we have the same number of
4249 operands, but have made more CONSTs than we had, this is also
4250 an improvement, so accept it. */
4252 for (i = 0, j = 0; j < n_ops; j++)
4253 if (ops[j] != 0)
4255 ops[i] = ops[j], negs[i++] = negs[j];
4256 if (GET_CODE (ops[j]) == CONST)
4257 n_consts++;
4260 if (i + n_consts > input_ops
4261 || (i + n_consts == input_ops && n_consts <= input_consts))
4262 return 0;
4264 n_ops = i;
4266 /* If we have a CONST_INT, put it last. */
4267 for (i = 0; i < n_ops - 1; i++)
4268 if (GET_CODE (ops[i]) == CONST_INT)
4270 tem = ops[n_ops - 1], ops[n_ops - 1] = ops[i], ops[i] = tem;
4271 j = negs[n_ops - 1], negs[n_ops - 1] = negs[i], negs[i] = j;
4274 /* Put a non-negated operand first. If there aren't any, make all
4275 operands positive and negate the whole thing later. */
4276 for (i = 0; i < n_ops && negs[i]; i++)
4279 if (i == n_ops)
4281 for (i = 0; i < n_ops; i++)
4282 negs[i] = 0;
4283 negate = 1;
4285 else if (i != 0)
4287 tem = ops[0], ops[0] = ops[i], ops[i] = tem;
4288 j = negs[0], negs[0] = negs[i], negs[i] = j;
4291 /* Now make the result by performing the requested operations. */
4292 result = ops[0];
4293 for (i = 1; i < n_ops; i++)
4294 result = cse_gen_binary (negs[i] ? MINUS : PLUS, mode, result, ops[i]);
4296 return negate ? gen_rtx (NEG, mode, result) : result;
4299 /* Make a binary operation by properly ordering the operands and
4300 seeing if the expression folds. */
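/* E.g. given PLUS with op0 == (const_int 4) and op1 a register, the
   operands are swapped and, since nothing folds, plus_constant yields
   (plus (reg) (const_int 4)).  */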
4302 static rtx
4303 cse_gen_binary (code, mode, op0, op1)
4304 enum rtx_code code;
4305 enum machine_mode mode;
4306 rtx op0, op1;
4308 rtx tem;
4310 /* Put complex operands first and constants second if commutative. */
4311 if (GET_RTX_CLASS (code) == 'c'
4312 && ((CONSTANT_P (op0) && GET_CODE (op1) != CONST_INT)
4313 || (GET_RTX_CLASS (GET_CODE (op0)) == 'o'
4314 && GET_RTX_CLASS (GET_CODE (op1)) != 'o')
4315 || (GET_CODE (op0) == SUBREG
4316 && GET_RTX_CLASS (GET_CODE (SUBREG_REG (op0))) == 'o'
4317 && GET_RTX_CLASS (GET_CODE (op1)) != 'o')))
4318 tem = op0, op0 = op1, op1 = tem;
4320 /* If this simplifies, do it. */
4321 tem = simplify_binary_operation (code, mode, op0, op1);
4323 if (tem)
4324 return tem;
4326 /* Handle addition and subtraction of CONST_INT specially. Otherwise,
4327 just form the operation. */
4329 if (code == PLUS && GET_CODE (op1) == CONST_INT
4330 && GET_MODE (op0) != VOIDmode)
4331 return plus_constant (op0, INTVAL (op1));
4332 else if (code == MINUS && GET_CODE (op1) == CONST_INT
4333 && GET_MODE (op0) != VOIDmode)
4334 return plus_constant (op0, - INTVAL (op1));
4335 else
4336 return gen_rtx (code, mode, op0, op1);
4339 /* Like simplify_binary_operation except used for relational operators.
4340 MODE is the mode of the operands, not that of the result. If MODE
4341 is VOIDmode, both operands must also be VOIDmode and we compare the
4342 operands in "infinite precision".
4344 If no simplification is possible, this function returns zero. Otherwise,
4345 it returns either const_true_rtx or const0_rtx. */
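/* For example, GT applied to (const_int 4) and (const_int 3) simplifies
   to const_true_rtx, while comparing two unrelated registers normally
   yields 0, since nothing is known about their values.  */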
4348 simplify_relational_operation (code, mode, op0, op1)
4349 enum rtx_code code;
4350 enum machine_mode mode;
4351 rtx op0, op1;
4353 int equal, op0lt, op0ltu, op1lt, op1ltu;
4354 rtx tem;
4356 /* If op0 is a compare, extract the comparison arguments from it. */
4357 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4358 op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);
4360 /* We can't simplify MODE_CC values since we don't know what the
4361 actual comparison is. */
4362 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC
4363 #ifdef HAVE_cc0
4364 || op0 == cc0_rtx
4365 #endif
4367 return 0;
4369 /* For integer comparisons of A and B maybe we can simplify A - B and can
4370 then simplify a comparison of that with zero. If A and B are both either
4371 a register or a CONST_INT, this can't help; testing for these cases will
4372 prevent infinite recursion here and speed things up.
4374 If CODE is an unsigned comparison, we can only do this if A - B is a
4375 constant integer, and then we have to compare that integer with zero as a
4376 signed comparison. Note that this will give an incorrect result for
4377 comparisons that overflow. Since these are undefined, this is probably
4378 OK. If it causes a problem, we can check for A or B being an address
4379 (fp + const or SYMBOL_REF) and only do it in that case. */
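  /* E.g. comparing (plus x (const_int 4)) with (plus x (const_int 1)):
     the MINUS folds to (const_int 3), so the original comparison reduces
     to a signed comparison of 3 with zero.  */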
4381 if (INTEGRAL_MODE_P (mode) && op1 != const0_rtx
4382 && ! ((GET_CODE (op0) == REG || GET_CODE (op0) == CONST_INT)
4383 && (GET_CODE (op1) == REG || GET_CODE (op1) == CONST_INT))
4384 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
4385 && (GET_CODE (tem) == CONST_INT
4386 || (code != GTU && code != GEU &&
4387 code != LTU && code != LEU)))
4388 return simplify_relational_operation (signed_condition (code),
4389 mode, tem, const0_rtx);
4391 /* For non-IEEE floating-point, if the two operands are equal, we know the
4392 result. */
4393 if (rtx_equal_p (op0, op1)
4394 && (TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
4395 || ! FLOAT_MODE_P (GET_MODE (op0)) || flag_fast_math))
4396 equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;
4398 /* If the operands are floating-point constants, see if we can fold
4399 the result. */
4400 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
4401 else if (GET_CODE (op0) == CONST_DOUBLE && GET_CODE (op1) == CONST_DOUBLE
4402 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_FLOAT)
4404 REAL_VALUE_TYPE d0, d1;
4405 jmp_buf handler;
4407 if (setjmp (handler))
4408 return 0;
4410 set_float_handler (handler);
4411 REAL_VALUE_FROM_CONST_DOUBLE (d0, op0);
4412 REAL_VALUE_FROM_CONST_DOUBLE (d1, op1);
4413 equal = REAL_VALUES_EQUAL (d0, d1);
4414 op0lt = op0ltu = REAL_VALUES_LESS (d0, d1);
4415 op1lt = op1ltu = REAL_VALUES_LESS (d1, d0);
4416 set_float_handler (NULL_PTR);
4418 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
4420 /* Otherwise, see if the operands are both integers. */
4421 else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
4422 && (GET_CODE (op0) == CONST_DOUBLE || GET_CODE (op0) == CONST_INT)
4423 && (GET_CODE (op1) == CONST_DOUBLE || GET_CODE (op1) == CONST_INT))
4425 int width = GET_MODE_BITSIZE (mode);
4426 HOST_WIDE_INT l0s, h0s, l1s, h1s;
4427 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
4429 /* Get the two words comprising each integer constant. */
4430 if (GET_CODE (op0) == CONST_DOUBLE)
4432 l0u = l0s = CONST_DOUBLE_LOW (op0);
4433 h0u = h0s = CONST_DOUBLE_HIGH (op0);
4435 else
4437 l0u = l0s = INTVAL (op0);
4438 h0u = 0, h0s = l0s < 0 ? -1 : 0;
4441 if (GET_CODE (op1) == CONST_DOUBLE)
4443 l1u = l1s = CONST_DOUBLE_LOW (op1);
4444 h1u = h1s = CONST_DOUBLE_HIGH (op1);
4446 else
4448 l1u = l1s = INTVAL (op1);
4449 h1u = 0, h1s = l1s < 0 ? -1 : 0;
4452 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
4453 we have to sign or zero-extend the values. */
4454 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
4455 h0u = h1u = 0, h0s = l0s < 0 ? -1 : 0, h1s = l1s < 0 ? -1 : 0;
4457 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
4459 l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
4460 l1u &= ((HOST_WIDE_INT) 1 << width) - 1;
4462 if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
4463 l0s |= ((HOST_WIDE_INT) (-1) << width);
4465 if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
4466 l1s |= ((HOST_WIDE_INT) (-1) << width);
4469 equal = (h0u == h1u && l0u == l1u);
4470 op0lt = (h0s < h1s || (h0s == h1s && l0s < l1s));
4471 op1lt = (h1s < h0s || (h1s == h0s && l1s < l0s));
4472 op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
4473 op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
4476 /* Otherwise, there are some code-specific tests we can make. */
4477 else
4479 switch (code)
4481 case EQ:
4482 /* References to the frame plus a constant or labels cannot
4483 be zero, but a SYMBOL_REF can due to #pragma weak. */
4484 if (((NONZERO_BASE_PLUS_P (op0) && op1 == const0_rtx)
4485 || GET_CODE (op0) == LABEL_REF)
4486 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
4487 /* On some machines, the ap reg can be 0 sometimes. */
4488 && op0 != arg_pointer_rtx
4489 #endif
4491 return const0_rtx;
4492 break;
4494 case NE:
4495 if (((NONZERO_BASE_PLUS_P (op0) && op1 == const0_rtx)
4496 || GET_CODE (op0) == LABEL_REF)
4497 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
4498 && op0 != arg_pointer_rtx
4499 #endif
4501 return const_true_rtx;
4502 break;
4504 case GEU:
4505 /* Unsigned values are never negative. */
4506 if (op1 == const0_rtx)
4507 return const_true_rtx;
4508 break;
4510 case LTU:
4511 if (op1 == const0_rtx)
4512 return const0_rtx;
4513 break;
4515 case LEU:
4516 /* Unsigned values are never greater than the largest
4517 unsigned value. */
4518 if (GET_CODE (op1) == CONST_INT
4519 && INTVAL (op1) == GET_MODE_MASK (mode)
4520 && INTEGRAL_MODE_P (mode))
4521 return const_true_rtx;
4522 break;
4524 case GTU:
4525 if (GET_CODE (op1) == CONST_INT
4526 && INTVAL (op1) == GET_MODE_MASK (mode)
4527 && INTEGRAL_MODE_P (mode))
4528 return const0_rtx;
4529 break;
4532 return 0;
4535 /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
4536 as appropriate. */
4537 switch (code)
4539 case EQ:
4540 return equal ? const_true_rtx : const0_rtx;
4541 case NE:
4542 return ! equal ? const_true_rtx : const0_rtx;
4543 case LT:
4544 return op0lt ? const_true_rtx : const0_rtx;
4545 case GT:
4546 return op1lt ? const_true_rtx : const0_rtx;
4547 case LTU:
4548 return op0ltu ? const_true_rtx : const0_rtx;
4549 case GTU:
4550 return op1ltu ? const_true_rtx : const0_rtx;
4551 case LE:
4552 return equal || op0lt ? const_true_rtx : const0_rtx;
4553 case GE:
4554 return equal || op1lt ? const_true_rtx : const0_rtx;
4555 case LEU:
4556 return equal || op0ltu ? const_true_rtx : const0_rtx;
4557 case GEU:
4558 return equal || op1ltu ? const_true_rtx : const0_rtx;
4561 abort ();
4564 /* Simplify CODE, an operation with result mode MODE and three operands,
4565 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
4566 a constant. Return 0 if no simplification is possible. */
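/* E.g. with BITS_BIG_ENDIAN clear, (zero_extract (const_int 0xa5)
   (const_int 4) (const_int 0)) extracts the low four bits and
   simplifies to (const_int 5).  */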
4569 simplify_ternary_operation (code, mode, op0_mode, op0, op1, op2)
4570 enum rtx_code code;
4571 enum machine_mode mode, op0_mode;
4572 rtx op0, op1, op2;
4574 int width = GET_MODE_BITSIZE (mode);
4576 /* VOIDmode means "infinite" precision. */
4577 if (width == 0)
4578 width = HOST_BITS_PER_WIDE_INT;
4580 switch (code)
4582 case SIGN_EXTRACT:
4583 case ZERO_EXTRACT:
4584 if (GET_CODE (op0) == CONST_INT
4585 && GET_CODE (op1) == CONST_INT
4586 && GET_CODE (op2) == CONST_INT
4587 && INTVAL (op1) + INTVAL (op2) <= GET_MODE_BITSIZE (op0_mode)
4588 && width <= HOST_BITS_PER_WIDE_INT)
4590 /* Extracting a bit-field from a constant */
4591 HOST_WIDE_INT val = INTVAL (op0);
4593 if (BITS_BIG_ENDIAN)
4594 val >>= (GET_MODE_BITSIZE (op0_mode)
4595 - INTVAL (op2) - INTVAL (op1));
4596 else
4597 val >>= INTVAL (op2);
4599 if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
4601 /* First zero-extend. */
4602 val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
4603 /* If desired, propagate sign bit. */
4604 if (code == SIGN_EXTRACT
4605 && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
4606 val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
4609 /* Clear the bits that don't belong in our mode,
4610 unless they and our sign bit are all one.
4611 So we get either a reasonable negative value or a reasonable
4612 unsigned value for this mode. */
4613 if (width < HOST_BITS_PER_WIDE_INT
4614 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
4615 != ((HOST_WIDE_INT) (-1) << (width - 1))))
4616 val &= ((HOST_WIDE_INT) 1 << width) - 1;
4618 return GEN_INT (val);
4620 break;
4622 case IF_THEN_ELSE:
4623 if (GET_CODE (op0) == CONST_INT)
4624 return op0 != const0_rtx ? op1 : op2;
4625 break;
4627 default:
4628 abort ();
4631 return 0;
4634 /* If X is a nontrivial arithmetic operation on an argument
4635 for which a constant value can be determined, return
4636 the result of operating on that value, as a constant.
4637 Otherwise, return X, possibly with one or more operands
4638 modified by recursive calls to this function.
4640 If X is a register whose contents are known, we do NOT
4641 return those contents here. equiv_constant is called to
4642 perform that task.
4644 INSN is the insn that we may be modifying. If it is 0, make a copy
4645 of X before modifying it. */
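/* E.g. if (reg 42) is known to hold (const_int 40), folding
   (plus:SI (reg 42) (const_int 2)) yields (const_int 42).  */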
4647 static rtx
4648 fold_rtx (x, insn)
4649 rtx x;
4650 rtx insn;
4652 register enum rtx_code code;
4653 register enum machine_mode mode;
4654 register char *fmt;
4655 register int i;
4656 rtx new = 0;
4657 int copied = 0;
4658 int must_swap = 0;
4660 /* Folded equivalents of first two operands of X. */
4661 rtx folded_arg0;
4662 rtx folded_arg1;
4664 /* Constant equivalents of first three operands of X;
4665 0 when no such equivalent is known. */
4666 rtx const_arg0;
4667 rtx const_arg1;
4668 rtx const_arg2;
4670 /* The mode of the first operand of X. We need this for sign and zero
4671 extends. */
4672 enum machine_mode mode_arg0;
4674 if (x == 0)
4675 return x;
4677 mode = GET_MODE (x);
4678 code = GET_CODE (x);
4679 switch (code)
4681 case CONST:
4682 case CONST_INT:
4683 case CONST_DOUBLE:
4684 case SYMBOL_REF:
4685 case LABEL_REF:
4686 case REG:
4687 /* No use simplifying an EXPR_LIST
4688 since they are used only for lists of args
4689 in a function call's REG_EQUAL note. */
4690 case EXPR_LIST:
4691 return x;
4693 #ifdef HAVE_cc0
4694 case CC0:
4695 return prev_insn_cc0;
4696 #endif
4698 case PC:
4699 /* If the next insn is a CODE_LABEL followed by a jump table,
4700 PC's value is a LABEL_REF pointing to that label. That
4701 lets us fold switch statements on the VAX. */
4702 if (insn && GET_CODE (insn) == JUMP_INSN)
4704 rtx next = next_nonnote_insn (insn);
4706 if (next && GET_CODE (next) == CODE_LABEL
4707 && NEXT_INSN (next) != 0
4708 && GET_CODE (NEXT_INSN (next)) == JUMP_INSN
4709 && (GET_CODE (PATTERN (NEXT_INSN (next))) == ADDR_VEC
4710 || GET_CODE (PATTERN (NEXT_INSN (next))) == ADDR_DIFF_VEC))
4711 return gen_rtx (LABEL_REF, Pmode, next);
4713 break;
4715 case SUBREG:
4716 /* See if we previously assigned a constant value to this SUBREG. */
4717 if ((new = lookup_as_function (x, CONST_INT)) != 0
4718 || (new = lookup_as_function (x, CONST_DOUBLE)) != 0)
4719 return new;
4721 /* If this is a paradoxical SUBREG, we have no idea what value the
4722 extra bits would have. However, if the operand is equivalent
4723 to a SUBREG whose operand is the same as our mode, and all the
4724 modes are within a word, we can just use the inner operand
4725 because these SUBREGs just say how to treat the register.
4727 Similarly if we find an integer constant. */
4729 if (GET_MODE_SIZE (mode) > GET_MODE_SIZE (GET_MODE (SUBREG_REG (x))))
4731 enum machine_mode imode = GET_MODE (SUBREG_REG (x));
4732 struct table_elt *elt;
4734 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
4735 && GET_MODE_SIZE (imode) <= UNITS_PER_WORD
4736 && (elt = lookup (SUBREG_REG (x), HASH (SUBREG_REG (x), imode),
4737 imode)) != 0)
4738 for (elt = elt->first_same_value;
4739 elt; elt = elt->next_same_value)
4741 if (CONSTANT_P (elt->exp)
4742 && GET_MODE (elt->exp) == VOIDmode)
4743 return elt->exp;
4745 if (GET_CODE (elt->exp) == SUBREG
4746 && GET_MODE (SUBREG_REG (elt->exp)) == mode
4747 && exp_equiv_p (elt->exp, elt->exp, 1, 0))
4748 return copy_rtx (SUBREG_REG (elt->exp));
4751 return x;
4754 /* Fold SUBREG_REG. If it changed, see if we can simplify the SUBREG.
4755 We might be able to if the SUBREG is extracting a single word in an
4756 integral mode or extracting the low part. */
4758 folded_arg0 = fold_rtx (SUBREG_REG (x), insn);
4759 const_arg0 = equiv_constant (folded_arg0);
4760 if (const_arg0)
4761 folded_arg0 = const_arg0;
4763 if (folded_arg0 != SUBREG_REG (x))
4765 new = 0;
4767 if (GET_MODE_CLASS (mode) == MODE_INT
4768 && GET_MODE_SIZE (mode) == UNITS_PER_WORD
4769 && GET_MODE (SUBREG_REG (x)) != VOIDmode)
4770 new = operand_subword (folded_arg0, SUBREG_WORD (x), 0,
4771 GET_MODE (SUBREG_REG (x)));
4772 if (new == 0 && subreg_lowpart_p (x))
4773 new = gen_lowpart_if_possible (mode, folded_arg0);
4774 if (new)
4775 return new;
4778 /* If this is a narrowing SUBREG and our operand is a REG, see if
4779 we can find an equivalence for REG that is an arithmetic operation
4780 in a wider mode where both operands are paradoxical SUBREGs
4781 from objects of our result mode. In that case, we couldn't report
4782 an equivalent value for that operation, since we don't know what the
4783 extra bits will be. But we can find an equivalence for this SUBREG
4784 by folding that operation in the narrow mode. This allows us to
4785 fold arithmetic in narrow modes when the machine only supports
4786 word-sized arithmetic.
4788 Also look for a case where we have a SUBREG whose operand is the
4789 same as our result. If both modes are smaller than a word, we
4790 are simply interpreting a register in different modes and we
4791 can use the inner value. */
4793 if (GET_CODE (folded_arg0) == REG
4794 && GET_MODE_SIZE (mode) < GET_MODE_SIZE (GET_MODE (folded_arg0))
4795 && subreg_lowpart_p (x))
4797 struct table_elt *elt;
4799 /* We can use HASH here since we know that canon_hash won't be
4800 called. */
4801 elt = lookup (folded_arg0,
4802 HASH (folded_arg0, GET_MODE (folded_arg0)),
4803 GET_MODE (folded_arg0));
4805 if (elt)
4806 elt = elt->first_same_value;
4808 for (; elt; elt = elt->next_same_value)
4810 enum rtx_code eltcode = GET_CODE (elt->exp);
4812 /* Just check for unary and binary operations. */
4813 if (GET_RTX_CLASS (GET_CODE (elt->exp)) == '1'
4814 && GET_CODE (elt->exp) != SIGN_EXTEND
4815 && GET_CODE (elt->exp) != ZERO_EXTEND
4816 && GET_CODE (XEXP (elt->exp, 0)) == SUBREG
4817 && GET_MODE (SUBREG_REG (XEXP (elt->exp, 0))) == mode)
4819 rtx op0 = SUBREG_REG (XEXP (elt->exp, 0));
4821 if (GET_CODE (op0) != REG && ! CONSTANT_P (op0))
4822 op0 = fold_rtx (op0, NULL_RTX);
4824 op0 = equiv_constant (op0);
4825 if (op0)
4826 new = simplify_unary_operation (GET_CODE (elt->exp), mode,
4827 op0, mode);
4829 else if ((GET_RTX_CLASS (GET_CODE (elt->exp)) == '2'
4830 || GET_RTX_CLASS (GET_CODE (elt->exp)) == 'c')
4831 && eltcode != DIV && eltcode != MOD
4832 && eltcode != UDIV && eltcode != UMOD
4833 && eltcode != ASHIFTRT && eltcode != LSHIFTRT
4834 && eltcode != ROTATE && eltcode != ROTATERT
4835 && ((GET_CODE (XEXP (elt->exp, 0)) == SUBREG
4836 && (GET_MODE (SUBREG_REG (XEXP (elt->exp, 0)))
4837 == mode))
4838 || CONSTANT_P (XEXP (elt->exp, 0)))
4839 && ((GET_CODE (XEXP (elt->exp, 1)) == SUBREG
4840 && (GET_MODE (SUBREG_REG (XEXP (elt->exp, 1)))
4841 == mode))
4842 || CONSTANT_P (XEXP (elt->exp, 1))))
4844 rtx op0 = gen_lowpart_common (mode, XEXP (elt->exp, 0));
4845 rtx op1 = gen_lowpart_common (mode, XEXP (elt->exp, 1));
4847 if (op0 && GET_CODE (op0) != REG && ! CONSTANT_P (op0))
4848 op0 = fold_rtx (op0, NULL_RTX);
4850 if (op0)
4851 op0 = equiv_constant (op0);
4853 if (op1 && GET_CODE (op1) != REG && ! CONSTANT_P (op1))
4854 op1 = fold_rtx (op1, NULL_RTX);
4856 if (op1)
4857 op1 = equiv_constant (op1);
4859 /* If we are looking for the low SImode part of
4860 (ashift:DI c (const_int 32)), it doesn't work
4861 to compute that in SImode, because a 32-bit shift
4862 in SImode is unpredictable. We know the value is 0. */
4863 if (op0 && op1
4864 && GET_CODE (elt->exp) == ASHIFT
4865 && GET_CODE (op1) == CONST_INT
4866 && INTVAL (op1) >= GET_MODE_BITSIZE (mode))
4868 if (INTVAL (op1) < GET_MODE_BITSIZE (GET_MODE (elt->exp)))
4870 /* If the count fits in the inner mode's width,
4871 but exceeds the outer mode's width,
4872 the value will get truncated to 0
4873 by the subreg. */
4874 new = const0_rtx;
4875 else
4876 /* If the count exceeds even the inner mode's width,
4877 don't fold this expression. */
4878 new = 0;
4880 else if (op0 && op1)
4881 new = simplify_binary_operation (GET_CODE (elt->exp), mode,
4882 op0, op1);
4885 else if (GET_CODE (elt->exp) == SUBREG
4886 && GET_MODE (SUBREG_REG (elt->exp)) == mode
4887 && (GET_MODE_SIZE (GET_MODE (folded_arg0))
4888 <= UNITS_PER_WORD)
4889 && exp_equiv_p (elt->exp, elt->exp, 1, 0))
4890 new = copy_rtx (SUBREG_REG (elt->exp));
4892 if (new)
4893 return new;
4897 return x;
4899 case NOT:
4900 case NEG:
4901 /* If we have (NOT Y), see if Y is known to be (NOT Z).
4902 If so, (NOT Y) simplifies to Z. Similarly for NEG. */
4903 new = lookup_as_function (XEXP (x, 0), code);
4904 if (new)
4905 return fold_rtx (copy_rtx (XEXP (new, 0)), insn);
4906 break;
4908 case MEM:
4909 /* If we are not actually processing an insn, don't try to find the
4910 best address. Not only don't we care, but we could modify the
4911 MEM in an invalid way since we have no insn to validate against. */
4912 if (insn != 0)
4913 find_best_addr (insn, &XEXP (x, 0));
4916 /* Even if we don't fold in the insn itself,
4917 we can safely do so here, in hopes of getting a constant. */
4918 rtx addr = fold_rtx (XEXP (x, 0), NULL_RTX);
4919 rtx base = 0;
4920 HOST_WIDE_INT offset = 0;
4922 if (GET_CODE (addr) == REG
4923 && REGNO_QTY_VALID_P (REGNO (addr))
4924 && GET_MODE (addr) == qty_mode[reg_qty[REGNO (addr)]]
4925 && qty_const[reg_qty[REGNO (addr)]] != 0)
4926 addr = qty_const[reg_qty[REGNO (addr)]];
4928 /* If address is constant, split it into a base and integer offset. */
4929 if (GET_CODE (addr) == SYMBOL_REF || GET_CODE (addr) == LABEL_REF)
4930 base = addr;
4931 else if (GET_CODE (addr) == CONST && GET_CODE (XEXP (addr, 0)) == PLUS
4932 && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST_INT)
4934 base = XEXP (XEXP (addr, 0), 0);
4935 offset = INTVAL (XEXP (XEXP (addr, 0), 1));
4937 else if (GET_CODE (addr) == LO_SUM
4938 && GET_CODE (XEXP (addr, 1)) == SYMBOL_REF)
4939 base = XEXP (addr, 1);
4941 /* If this is a constant pool reference, we can fold it into its
4942 constant to allow better value tracking. */
4943 if (base && GET_CODE (base) == SYMBOL_REF
4944 && CONSTANT_POOL_ADDRESS_P (base))
4946 rtx constant = get_pool_constant (base);
4947 enum machine_mode const_mode = get_pool_mode (base);
4948 rtx new;
4950 if (CONSTANT_P (constant) && GET_CODE (constant) != CONST_INT)
4951 constant_pool_entries_cost = COST (constant);
4953 /* If we are loading the full constant, we have an equivalence. */
4954 if (offset == 0 && mode == const_mode)
4955 return constant;
4957 /* If this actually isn't a constant (weird!), we can't do
4958 anything. Otherwise, handle the two most common cases:
4959 extracting a word from a multi-word constant, and extracting
4960 the low-order bits. Other cases don't seem common enough to
4961 worry about. */
4962 if (! CONSTANT_P (constant))
4963 return x;
4965 if (GET_MODE_CLASS (mode) == MODE_INT
4966 && GET_MODE_SIZE (mode) == UNITS_PER_WORD
4967 && offset % UNITS_PER_WORD == 0
4968 && (new = operand_subword (constant,
4969 offset / UNITS_PER_WORD,
4970 0, const_mode)) != 0)
4971 return new;
4973 if (((BYTES_BIG_ENDIAN
4974 && offset == GET_MODE_SIZE (GET_MODE (constant)) - 1)
4975 || (! BYTES_BIG_ENDIAN && offset == 0))
4976 && (new = gen_lowpart_if_possible (mode, constant)) != 0)
4977 return new;
4980 /* If this is a reference to a label at a known position in a jump
4981 table, we also know its value. */
4982 if (base && GET_CODE (base) == LABEL_REF)
4984 rtx label = XEXP (base, 0);
4985 rtx table_insn = NEXT_INSN (label);
4987 if (table_insn && GET_CODE (table_insn) == JUMP_INSN
4988 && GET_CODE (PATTERN (table_insn)) == ADDR_VEC)
4990 rtx table = PATTERN (table_insn);
4992 if (offset >= 0
4993 && (offset / GET_MODE_SIZE (GET_MODE (table))
4994 < XVECLEN (table, 0)))
4995 return XVECEXP (table, 0,
4996 offset / GET_MODE_SIZE (GET_MODE (table)));
4998 if (table_insn && GET_CODE (table_insn) == JUMP_INSN
4999 && GET_CODE (PATTERN (table_insn)) == ADDR_DIFF_VEC)
5001 rtx table = PATTERN (table_insn);
5003 if (offset >= 0
5004 && (offset / GET_MODE_SIZE (GET_MODE (table))
5005 < XVECLEN (table, 1)))
5007 offset /= GET_MODE_SIZE (GET_MODE (table));
5008 new = gen_rtx (MINUS, Pmode, XVECEXP (table, 1, offset),
5009 XEXP (table, 0));
5011 if (GET_MODE (table) != Pmode)
5012 new = gen_rtx (TRUNCATE, GET_MODE (table), new);
5014 /* Indicate this is a constant. This isn't a
5015 valid form of CONST, but it will only be used
5016 to fold the next insns and then discarded, so
5017 it should be safe. */
5018 return gen_rtx (CONST, GET_MODE (new), new);
5023 return x;
5027 const_arg0 = 0;
5028 const_arg1 = 0;
5029 const_arg2 = 0;
5030 mode_arg0 = VOIDmode;
5032 /* Try folding our operands.
5033 Then see which ones have constant values known. */
5035 fmt = GET_RTX_FORMAT (code);
5036 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
5037 if (fmt[i] == 'e')
5039 rtx arg = XEXP (x, i);
5040 rtx folded_arg = arg, const_arg = 0;
5041 enum machine_mode mode_arg = GET_MODE (arg);
5042 rtx cheap_arg, expensive_arg;
5043 rtx replacements[2];
5044 int j;
5046 /* Most arguments are cheap, so handle them specially. */
5047 switch (GET_CODE (arg))
5049 case REG:
5050 /* This is the same as calling equiv_constant; it is duplicated
5051 here for speed. */
5052 if (REGNO_QTY_VALID_P (REGNO (arg))
5053 && qty_const[reg_qty[REGNO (arg)]] != 0
5054 && GET_CODE (qty_const[reg_qty[REGNO (arg)]]) != REG
5055 && GET_CODE (qty_const[reg_qty[REGNO (arg)]]) != PLUS)
5056 const_arg
5057 = gen_lowpart_if_possible (GET_MODE (arg),
5058 qty_const[reg_qty[REGNO (arg)]]);
5059 break;
5061 case CONST:
5062 case CONST_INT:
5063 case SYMBOL_REF:
5064 case LABEL_REF:
5065 case CONST_DOUBLE:
5066 const_arg = arg;
5067 break;
5069 #ifdef HAVE_cc0
5070 case CC0:
5071 folded_arg = prev_insn_cc0;
5072 mode_arg = prev_insn_cc0_mode;
5073 const_arg = equiv_constant (folded_arg);
5074 break;
5075 #endif
5077 default:
5078 folded_arg = fold_rtx (arg, insn);
5079 const_arg = equiv_constant (folded_arg);
5082 /* For the first three operands, see if the operand
5083 is constant or equivalent to a constant. */
5084 switch (i)
5086 case 0:
5087 folded_arg0 = folded_arg;
5088 const_arg0 = const_arg;
5089 mode_arg0 = mode_arg;
5090 break;
5091 case 1:
5092 folded_arg1 = folded_arg;
5093 const_arg1 = const_arg;
5094 break;
5095 case 2:
5096 const_arg2 = const_arg;
5097 break;
5100 /* Pick the least expensive of the folded argument and an
5101 equivalent constant argument. */
5102 if (const_arg == 0 || const_arg == folded_arg
5103 || COST (const_arg) > COST (folded_arg))
5104 cheap_arg = folded_arg, expensive_arg = const_arg;
5105 else
5106 cheap_arg = const_arg, expensive_arg = folded_arg;
5108 /* Try to replace the operand with the cheapest of the two
5109 possibilities. If it doesn't work and this is either of the first
5110 two operands of a commutative operation, try swapping them.
5111 If THAT fails, try the more expensive, provided it is cheaper
5112 than what is already there. */
5114 if (cheap_arg == XEXP (x, i))
5115 continue;
5117 if (insn == 0 && ! copied)
5119 x = copy_rtx (x);
5120 copied = 1;
5123 replacements[0] = cheap_arg, replacements[1] = expensive_arg;
5124 for (j = 0;
5125 j < 2 && replacements[j]
5126 && COST (replacements[j]) < COST (XEXP (x, i));
5127 j++)
5129 if (validate_change (insn, &XEXP (x, i), replacements[j], 0))
5130 break;
5132 if (code == NE || code == EQ || GET_RTX_CLASS (code) == 'c')
5134 validate_change (insn, &XEXP (x, i), XEXP (x, 1 - i), 1);
5135 validate_change (insn, &XEXP (x, 1 - i), replacements[j], 1);
5137 if (apply_change_group ())
5139 /* Swap them back to be invalid so that this loop can
5140 continue and flag them to be swapped back later. */
5141 rtx tem;
5143 tem = XEXP (x, 0); XEXP (x, 0) = XEXP (x, 1);
5144 XEXP (x, 1) = tem;
5145 must_swap = 1;
5146 break;
5152 else if (fmt[i] == 'E')
5153 /* Don't try to fold inside of a vector of expressions.
5154 Doing nothing is harmless. */
5157 /* If a commutative operation, place a constant integer as the second
5158 operand unless the first operand is also a constant integer. Otherwise,
5159 place any constant second unless the first operand is also a constant. */
5161 if (code == EQ || code == NE || GET_RTX_CLASS (code) == 'c')
5163 if (must_swap || (const_arg0
5164 && (const_arg1 == 0
5165 || (GET_CODE (const_arg0) == CONST_INT
5166 && GET_CODE (const_arg1) != CONST_INT))))
5168 register rtx tem = XEXP (x, 0);
5170 if (insn == 0 && ! copied)
5172 x = copy_rtx (x);
5173 copied = 1;
5176 validate_change (insn, &XEXP (x, 0), XEXP (x, 1), 1);
5177 validate_change (insn, &XEXP (x, 1), tem, 1);
5178 if (apply_change_group ())
5180 tem = const_arg0, const_arg0 = const_arg1, const_arg1 = tem;
5181 tem = folded_arg0, folded_arg0 = folded_arg1, folded_arg1 = tem;
5186 /* If X is an arithmetic operation, see if we can simplify it. */
5188 switch (GET_RTX_CLASS (code))
5190 case '1':
5192 int is_const = 0;
5194 /* We can't simplify extension ops unless we know the
5195 original mode. */
5196 if ((code == ZERO_EXTEND || code == SIGN_EXTEND)
5197 && mode_arg0 == VOIDmode)
5198 break;
5200 /* If we had a CONST, strip it off and put it back later if we
5201 fold. */
5202 if (const_arg0 != 0 && GET_CODE (const_arg0) == CONST)
5203 is_const = 1, const_arg0 = XEXP (const_arg0, 0);
5205 new = simplify_unary_operation (code, mode,
5206 const_arg0 ? const_arg0 : folded_arg0,
5207 mode_arg0);
5208 if (new != 0 && is_const)
5209 new = gen_rtx (CONST, mode, new);
5211 break;
5213 case '<':
5214 /* See what items are actually being compared and set FOLDED_ARG[01]
5215 to those values and CODE to the actual comparison code. If any are
5216 constant, set CONST_ARG0 and CONST_ARG1 appropriately. We needn't
5217 do anything if both operands are already known to be constant. */
5219 if (const_arg0 == 0 || const_arg1 == 0)
5221 struct table_elt *p0, *p1;
5222 rtx true = const_true_rtx, false = const0_rtx;
5223 enum machine_mode mode_arg1;
5225 #ifdef FLOAT_STORE_FLAG_VALUE
5226 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
5228 true = CONST_DOUBLE_FROM_REAL_VALUE (FLOAT_STORE_FLAG_VALUE,
5229 mode);
5230 false = CONST0_RTX (mode);
5232 #endif
5234 code = find_comparison_args (code, &folded_arg0, &folded_arg1,
5235 &mode_arg0, &mode_arg1);
5236 const_arg0 = equiv_constant (folded_arg0);
5237 const_arg1 = equiv_constant (folded_arg1);
5239 /* If the mode is VOIDmode or a MODE_CC mode, we don't know
5240 what kinds of things are being compared, so we can't do
5241 anything with this comparison. */
5243 if (mode_arg0 == VOIDmode || GET_MODE_CLASS (mode_arg0) == MODE_CC)
5244 break;
5246 /* If we do not now have two constants being compared, see if we
5247 can nevertheless deduce some things about the comparison. */
5248 if (const_arg0 == 0 || const_arg1 == 0)
5250 /* Is FOLDED_ARG0 frame-pointer plus a constant? Or non-explicit
5251 constant? These aren't zero, but we don't know their sign. */
5252 if (const_arg1 == const0_rtx
5253 && (NONZERO_BASE_PLUS_P (folded_arg0)
5254 #if 0 /* Sad to say, on sysvr4, #pragma weak can make a symbol address
5255 come out as 0. */
5256 || GET_CODE (folded_arg0) == SYMBOL_REF
5257 #endif
5258 || GET_CODE (folded_arg0) == LABEL_REF
5259 || GET_CODE (folded_arg0) == CONST))
5261 if (code == EQ)
5262 return false;
5263 else if (code == NE)
5264 return true;
5267 /* See if the two operands are the same. We don't do this
5268 for IEEE floating-point since we can't assume x == x
5269 since x might be a NaN. */
5271 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
5272 || ! FLOAT_MODE_P (mode_arg0) || flag_fast_math)
5273 && (folded_arg0 == folded_arg1
5274 || (GET_CODE (folded_arg0) == REG
5275 && GET_CODE (folded_arg1) == REG
5276 && (reg_qty[REGNO (folded_arg0)]
5277 == reg_qty[REGNO (folded_arg1)]))
5278 || ((p0 = lookup (folded_arg0,
5279 (safe_hash (folded_arg0, mode_arg0)
5280 % NBUCKETS), mode_arg0))
5281 && (p1 = lookup (folded_arg1,
5282 (safe_hash (folded_arg1, mode_arg0)
5283 % NBUCKETS), mode_arg0))
5284 && p0->first_same_value == p1->first_same_value)))
5285 return ((code == EQ || code == LE || code == GE
5286 || code == LEU || code == GEU)
5287 ? true : false);
5289 /* If FOLDED_ARG0 is a register, see if the comparison we are
5290 doing now is either the same as we did before or the reverse
5291 (we only check the reverse if not floating-point). */
5292 else if (GET_CODE (folded_arg0) == REG)
5294 int qty = reg_qty[REGNO (folded_arg0)];
5296 if (REGNO_QTY_VALID_P (REGNO (folded_arg0))
5297 && (comparison_dominates_p (qty_comparison_code[qty], code)
5298 || (comparison_dominates_p (qty_comparison_code[qty],
5299 reverse_condition (code))
5300 && ! FLOAT_MODE_P (mode_arg0)))
5301 && (rtx_equal_p (qty_comparison_const[qty], folded_arg1)
5302 || (const_arg1
5303 && rtx_equal_p (qty_comparison_const[qty],
5304 const_arg1))
5305 || (GET_CODE (folded_arg1) == REG
5306 && (reg_qty[REGNO (folded_arg1)]
5307 == qty_comparison_qty[qty]))))
5308 return (comparison_dominates_p (qty_comparison_code[qty],
5309 code)
5310 ? true : false);
5315 /* If we are comparing against zero, see if the first operand is
5316 equivalent to an IOR with a constant. If so, we may be able to
5317 determine the result of this comparison. */
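	  /* E.g. if FOLDED_ARG0 is known to be (ior y (const_int 4)),
	     some bit of it is always set, so (eq FOLDED_ARG0 0) is false;
	     if the known-set bit is the sign bit, (lt FOLDED_ARG0 0) is
	     true as well.  */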
5319 if (const_arg1 == const0_rtx)
5321 rtx y = lookup_as_function (folded_arg0, IOR);
5322 rtx inner_const;
5324 if (y != 0
5325 && (inner_const = equiv_constant (XEXP (y, 1))) != 0
5326 && GET_CODE (inner_const) == CONST_INT
5327 && INTVAL (inner_const) != 0)
5329 int sign_bitnum = GET_MODE_BITSIZE (mode_arg0) - 1;
5330 int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
5331 && (INTVAL (inner_const)
5332 & ((HOST_WIDE_INT) 1 << sign_bitnum)));
5333 rtx true = const_true_rtx, false = const0_rtx;
5335 #ifdef FLOAT_STORE_FLAG_VALUE
5336 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
5338 true = CONST_DOUBLE_FROM_REAL_VALUE (FLOAT_STORE_FLAG_VALUE,
5339 mode);
5340 false = CONST0_RTX (mode);
5342 #endif
5344 switch (code)
5346 case EQ:
5347 return false;
5348 case NE:
5349 return true;
5350 case LT: case LE:
5351 if (has_sign)
5352 return true;
5353 break;
5354 case GT: case GE:
5355 if (has_sign)
5356 return false;
5357 break;
5362 new = simplify_relational_operation (code, mode_arg0,
5363 const_arg0 ? const_arg0 : folded_arg0,
5364 const_arg1 ? const_arg1 : folded_arg1);
5365 #ifdef FLOAT_STORE_FLAG_VALUE
5366 if (new != 0 && GET_MODE_CLASS (mode) == MODE_FLOAT)
5367 new = ((new == const0_rtx) ? CONST0_RTX (mode)
5368 : CONST_DOUBLE_FROM_REAL_VALUE (FLOAT_STORE_FLAG_VALUE, mode));
5369 #endif
5370 break;
5372 case '2':
5373 case 'c':
5374 switch (code)
5376 case PLUS:
5377 /* If the second operand is a LABEL_REF, see if the first is a MINUS
5378 with that LABEL_REF as its second operand. If so, the result is
5379 the first operand of that MINUS. This handles switches with an
5380 ADDR_DIFF_VEC table. */
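	  /* I.e. (plus (minus z (label_ref L)) (label_ref L)) folds
	     back to just z.  */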
5381 if (const_arg1 && GET_CODE (const_arg1) == LABEL_REF)
5383 rtx y
5384 = GET_CODE (folded_arg0) == MINUS ? folded_arg0
5385 : lookup_as_function (folded_arg0, MINUS);
5387 if (y != 0 && GET_CODE (XEXP (y, 1)) == LABEL_REF
5388 && XEXP (XEXP (y, 1), 0) == XEXP (const_arg1, 0))
5389 return XEXP (y, 0);
5391 /* Now try for a CONST of a MINUS like the above. */
5392 if ((y = (GET_CODE (folded_arg0) == CONST ? folded_arg0
5393 : lookup_as_function (folded_arg0, CONST))) != 0
5394 && GET_CODE (XEXP (y, 0)) == MINUS
5395 && GET_CODE (XEXP (XEXP (y, 0), 1)) == LABEL_REF
5396 && XEXP (XEXP (XEXP (y, 0),1), 0) == XEXP (const_arg1, 0))
5397 return XEXP (XEXP (y, 0), 0);
5400 /* Likewise if the operands are in the other order. */
5401 if (const_arg0 && GET_CODE (const_arg0) == LABEL_REF)
5403 rtx y
5404 = GET_CODE (folded_arg1) == MINUS ? folded_arg1
5405 : lookup_as_function (folded_arg1, MINUS);
5407 if (y != 0 && GET_CODE (XEXP (y, 1)) == LABEL_REF
5408 && XEXP (XEXP (y, 1), 0) == XEXP (const_arg0, 0))
5409 return XEXP (y, 0);
5411 /* Now try for a CONST of a MINUS like the above. */
5412 if ((y = (GET_CODE (folded_arg1) == CONST ? folded_arg1
5413 : lookup_as_function (folded_arg1, CONST))) != 0
5414 && GET_CODE (XEXP (y, 0)) == MINUS
5415 && GET_CODE (XEXP (XEXP (y, 0), 1)) == LABEL_REF
5416 && XEXP (XEXP (XEXP (y, 0),1), 0) == XEXP (const_arg0, 0))
5417 return XEXP (XEXP (y, 0), 0);
5420 /* If second operand is a register equivalent to a negative
5421 CONST_INT, see if we can find a register equivalent to the
5422 positive constant. Make a MINUS if so. Don't do this for
5423 a negative constant since we might then alternate between
5424 choosing positive and negative constants. Having the positive
5425 constant previously-used is the more common case. */
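	  /* E.g. for (plus x r) where R is equivalent to (const_int -4),
	     if some register Q is known to hold (const_int 4) we rewrite
	     the sum as (minus x Q).  */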
5426 if (const_arg1 && GET_CODE (const_arg1) == CONST_INT
5427 && INTVAL (const_arg1) < 0 && GET_CODE (folded_arg1) == REG)
5429 rtx new_const = GEN_INT (- INTVAL (const_arg1));
5430 struct table_elt *p
5431 = lookup (new_const, safe_hash (new_const, mode) % NBUCKETS,
5432 mode);
5434 if (p)
5435 for (p = p->first_same_value; p; p = p->next_same_value)
5436 if (GET_CODE (p->exp) == REG)
5437 return cse_gen_binary (MINUS, mode, folded_arg0,
5438 canon_reg (p->exp, NULL_RTX));
5440 goto from_plus;
5442 case MINUS:
5443 /* If we have (MINUS Y C), see if Y is known to be (PLUS Z C2).
5444 If so, produce (PLUS Z C2-C). */
5445 if (const_arg1 != 0 && GET_CODE (const_arg1) == CONST_INT)
5447 rtx y = lookup_as_function (XEXP (x, 0), PLUS);
5448 if (y && GET_CODE (XEXP (y, 1)) == CONST_INT)
5449 return fold_rtx (plus_constant (copy_rtx (y),
5450 -INTVAL (const_arg1)),
5451 NULL_RTX);
5454 /* ... fall through ... */
5456 from_plus:
5457 case SMIN: case SMAX: case UMIN: case UMAX:
5458 case IOR: case AND: case XOR:
5459 case MULT: case DIV: case UDIV:
5460 case ASHIFT: case LSHIFTRT: case ASHIFTRT:
5461 /* If we have (<op> <reg> <const_int>) for an associative OP and REG
5462 is known to be of similar form, we may be able to replace the
5463 operation with a combined operation. This may eliminate the
5464 intermediate operation if every use is simplified in this way.
5465 Note that the similar optimization done by combine.c only works
5466 if the intermediate operation's result has only one reference. */
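	  /* E.g. if REG is known to be (plus q (const_int 2)), then
	     (plus REG (const_int 3)) can be replaced outright by
	     (plus q (const_int 5)).  */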
5468 if (GET_CODE (folded_arg0) == REG
5469 && const_arg1 && GET_CODE (const_arg1) == CONST_INT)
5471 int is_shift
5472 = (code == ASHIFT || code == ASHIFTRT || code == LSHIFTRT);
5473 rtx y = lookup_as_function (folded_arg0, code);
5474 rtx inner_const;
5475 enum rtx_code associate_code;
5476 rtx new_const;
5478 if (y == 0
5479 || 0 == (inner_const
5480 = equiv_constant (fold_rtx (XEXP (y, 1), 0)))
5481 || GET_CODE (inner_const) != CONST_INT
5482 /* If we have compiled a statement like
5483 "if (x == (x & mask1))", and now are looking at
5484 "x & mask2", we will have a case where the first operand
5485 of Y is the same as our first operand. Unless we detect
5486 this case, an infinite loop will result. */
5487 || XEXP (y, 0) == folded_arg0)
5488 break;
5490 /* Don't associate these operations if they are a PLUS with the
5491 same constant and it is a power of two. These might be doable
5492 with a pre- or post-increment. Similarly for two subtracts of
5493 identical powers of two with post-decrement. */
5495 if (code == PLUS && INTVAL (const_arg1) == INTVAL (inner_const)
5496 && (0
5497 #if defined(HAVE_PRE_INCREMENT) || defined(HAVE_POST_INCREMENT)
5498 || exact_log2 (INTVAL (const_arg1)) >= 0
5499 #endif
5500 #if defined(HAVE_PRE_DECREMENT) || defined(HAVE_POST_DECREMENT)
5501 || exact_log2 (- INTVAL (const_arg1)) >= 0
5502 #endif
5504 break;
5506 /* Compute the code used to compose the constants. For example,
5507 A/C1/C2 is A/(C1 * C2), so if CODE == DIV, we want MULT. */
5509 associate_code
5510 = (code == MULT || code == DIV || code == UDIV ? MULT
5511 : is_shift || code == PLUS || code == MINUS ? PLUS : code);
5513 new_const = simplify_binary_operation (associate_code, mode,
5514 const_arg1, inner_const);
5516 if (new_const == 0)
5517 break;
5519 /* If we are associating shift operations, don't let this
5520 produce a shift of the size of the object or larger.
5521 This could occur when we follow a sign-extend by a right
5522 shift on a machine that does a sign-extend as a pair
5523 of shifts. */
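	  /* E.g. two ASHIFTRTs by 24 in a 32-bit mode would compose to a
	     shift by 48; we cap that at 31, which produces the same
	     all-sign-bits result.  */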
5525 if (is_shift && GET_CODE (new_const) == CONST_INT
5526 && INTVAL (new_const) >= GET_MODE_BITSIZE (mode))
5528 /* As an exception, we can turn an ASHIFTRT of this
5529 form into a shift of the number of bits - 1. */
5530 if (code == ASHIFTRT)
5531 new_const = GEN_INT (GET_MODE_BITSIZE (mode) - 1);
5532 else
5533 break;
5536 y = copy_rtx (XEXP (y, 0));
5538 /* If Y contains our first operand (the most common way this
5539 can happen is if Y is a MEM), we would go into an infinite
5540 loop if we tried to fold it. So don't in that case. */
5542 if (! reg_mentioned_p (folded_arg0, y))
5543 y = fold_rtx (y, insn);
5545 return cse_gen_binary (code, mode, y, new_const);
5549 new = simplify_binary_operation (code, mode,
5550 const_arg0 ? const_arg0 : folded_arg0,
5551 const_arg1 ? const_arg1 : folded_arg1);
5552 break;
5554 case 'o':
5555 /* (lo_sum (high X) X) is simply X. */
5556 if (code == LO_SUM && const_arg0 != 0
5557 && GET_CODE (const_arg0) == HIGH
5558 && rtx_equal_p (XEXP (const_arg0, 0), const_arg1))
5559 return const_arg1;
5560 break;
5562 case '3':
5563 case 'b':
5564 new = simplify_ternary_operation (code, mode, mode_arg0,
5565 const_arg0 ? const_arg0 : folded_arg0,
5566 const_arg1 ? const_arg1 : folded_arg1,
5567 const_arg2 ? const_arg2 : XEXP (x, 2));
5568 break;
5571 return new ? new : x;
5574 /* Return a constant value currently equivalent to X.
5575 Return 0 if we don't know one. */
5577 static rtx
5578 equiv_constant (x)
5579 rtx x;
5581 if (GET_CODE (x) == REG
5582 && REGNO_QTY_VALID_P (REGNO (x))
5583 && qty_const[reg_qty[REGNO (x)]])
5584 x = gen_lowpart_if_possible (GET_MODE (x), qty_const[reg_qty[REGNO (x)]]);
5586 if (x != 0 && CONSTANT_P (x))
5587 return x;
5589 /* If X is a MEM, try to fold it outside the context of any insn to see if
5590 it might be equivalent to a constant. That handles the case where it
5591 is a constant-pool reference. Then try to look it up in the hash table
5592 in case it is something whose value we have seen before. */
5594 if (GET_CODE (x) == MEM)
5596 struct table_elt *elt;
5598 x = fold_rtx (x, NULL_RTX);
5599 if (CONSTANT_P (x))
5600 return x;
5602 elt = lookup (x, safe_hash (x, GET_MODE (x)) % NBUCKETS, GET_MODE (x));
5603 if (elt == 0)
5604 return 0;
5606 for (elt = elt->first_same_value; elt; elt = elt->next_same_value)
5607 if (elt->is_const && CONSTANT_P (elt->exp))
5608 return elt->exp;
5611 return 0;
5614 /* Assuming that X is an rtx (e.g., MEM, REG or SUBREG) for a fixed-point
5615 number, return an rtx (MEM, SUBREG, or CONST_INT) that refers to the
5616 least-significant part of X.
5617 MODE specifies how big a part of X to return.
5619 If the requested operation cannot be done, 0 is returned.
5621 This is similar to gen_lowpart in emit-rtl.c. */
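/* E.g. the QImode low part of (mem:SI addr) is (mem:QI addr) when
   ! BYTES_BIG_ENDIAN; for big-endian targets the offset is adjusted
   below so that the address-after-the-data is unchanged.  */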
5624 gen_lowpart_if_possible (mode, x)
5625 enum machine_mode mode;
5626 register rtx x;
5628 rtx result = gen_lowpart_common (mode, x);
5630 if (result)
5631 return result;
5632 else if (GET_CODE (x) == MEM)
5634 /* This is the only other case we handle. */
5635 register int offset = 0;
5636 rtx new;
5638 if (WORDS_BIG_ENDIAN)
5639 offset = (MAX (GET_MODE_SIZE (GET_MODE (x)), UNITS_PER_WORD)
5640 - MAX (GET_MODE_SIZE (mode), UNITS_PER_WORD));
5641 if (BYTES_BIG_ENDIAN)
5642 /* Adjust the address so that the address-after-the-data is
5643 unchanged. */
5644 offset -= (MIN (UNITS_PER_WORD, GET_MODE_SIZE (mode))
5645 - MIN (UNITS_PER_WORD, GET_MODE_SIZE (GET_MODE (x))));
5646 new = gen_rtx (MEM, mode, plus_constant (XEXP (x, 0), offset));
5647 if (! memory_address_p (mode, XEXP (new, 0)))
5648 return 0;
5649 MEM_VOLATILE_P (new) = MEM_VOLATILE_P (x);
5650 RTX_UNCHANGING_P (new) = RTX_UNCHANGING_P (x);
5651 MEM_IN_STRUCT_P (new) = MEM_IN_STRUCT_P (x);
5652 return new;
5654 else
5655 return 0;
5658 /* Given INSN, a jump insn, TAKEN indicates if we are following the "taken"
5659 branch. It will be zero if not.
5661 In certain cases, this can cause us to add an equivalence. For example,
5662 if we are following the taken case of
5663 if (i == 2)
5664 we can add the fact that `i' and `2' are now equivalent.
5666 In any case, we can record that this comparison was passed. If the same
5667 comparison is seen later, we will know its value. */
5669 static void
5670 record_jump_equiv (insn, taken)
5671 rtx insn;
5672 int taken;
5674 int cond_known_true;
5675 rtx op0, op1;
5676 enum machine_mode mode, mode0, mode1;
5677 int reversed_nonequality = 0;
5678 enum rtx_code code;
5680 /* Ensure this is the right kind of insn. */
5681 if (! condjump_p (insn) || simplejump_p (insn))
5682 return;
5684 /* See if this jump condition is known true or false. */
5685 if (taken)
5686 cond_known_true = (XEXP (SET_SRC (PATTERN (insn)), 2) == pc_rtx);
5687 else
5688 cond_known_true = (XEXP (SET_SRC (PATTERN (insn)), 1) == pc_rtx);
5690 /* Get the type of comparison being done and the operands being compared.
5691 If we had to reverse a non-equality condition, record that fact so we
5692 know that it isn't valid for floating-point. */
5693 code = GET_CODE (XEXP (SET_SRC (PATTERN (insn)), 0));
5694 op0 = fold_rtx (XEXP (XEXP (SET_SRC (PATTERN (insn)), 0), 0), insn);
5695 op1 = fold_rtx (XEXP (XEXP (SET_SRC (PATTERN (insn)), 0), 1), insn);
5697 code = find_comparison_args (code, &op0, &op1, &mode0, &mode1);
5698 if (! cond_known_true)
5700 reversed_nonequality = (code != EQ && code != NE);
5701 code = reverse_condition (code);
5704 /* The mode is the mode of the non-constant. */
5705 mode = mode0;
5706 if (mode1 != VOIDmode)
5707 mode = mode1;
5709 record_jump_cond (code, mode, op0, op1, reversed_nonequality);
5712 /* We know that comparison CODE applied to OP0 and OP1 in MODE is true.
5713 REVERSED_NONEQUALITY is nonzero if CODE had to be reversed.
5714 Make any useful entries we can with that information. Called from
5715 above function and called recursively. */
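/* For instance (a sketch): given EQ on (subreg:DI (reg:SI 70) 0) and
   (reg:DI 71), the SUBREG is paradoxical, so the values are also equal
   in SImode; we recurse with (reg:SI 70) and the SImode lowpart of
   (reg:DI 71).  */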
5717 static void
5718 record_jump_cond (code, mode, op0, op1, reversed_nonequality)
5719 enum rtx_code code;
5720 enum machine_mode mode;
5721 rtx op0, op1;
5722 int reversed_nonequality;
5724 unsigned op0_hash, op1_hash;
5725 int op0_in_memory, op0_in_struct, op1_in_memory, op1_in_struct;
5726 struct table_elt *op0_elt, *op1_elt;
5728 /* If OP0 and OP1 are known equal, and either is a paradoxical SUBREG,
5729 we know that they are also equal in the smaller mode (this is also
5730 true for all smaller modes whether or not there is a SUBREG, but
5731 is not worth testing for when there is no SUBREG). */
5733 /* Note that GET_MODE (op0) may not equal MODE. */
5734 if (code == EQ && GET_CODE (op0) == SUBREG
5735 && (GET_MODE_SIZE (GET_MODE (op0))
5736 > GET_MODE_SIZE (GET_MODE (SUBREG_REG (op0)))))
5738 enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op0));
5739 rtx tem = gen_lowpart_if_possible (inner_mode, op1);
5741 record_jump_cond (code, mode, SUBREG_REG (op0),
5742 tem ? tem : gen_rtx (SUBREG, inner_mode, op1, 0),
5743 reversed_nonequality);
5746 if (code == EQ && GET_CODE (op1) == SUBREG
5747 && (GET_MODE_SIZE (GET_MODE (op1))
5748 > GET_MODE_SIZE (GET_MODE (SUBREG_REG (op1)))))
5750 enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op1));
5751 rtx tem = gen_lowpart_if_possible (inner_mode, op0);
5753 record_jump_cond (code, mode, SUBREG_REG (op1),
5754 tem ? tem : gen_rtx (SUBREG, inner_mode, op0, 0),
5755 reversed_nonequality);
5758 /* Similarly, if this is an NE comparison, and either is a SUBREG
5759 making a smaller mode, we know the whole thing is also NE. */
5761 /* Note that GET_MODE (op0) may not equal MODE;
5762 if we test MODE instead, we can get an infinite recursion
5763 alternating between two modes each wider than MODE. */
5765 if (code == NE && GET_CODE (op0) == SUBREG
5766 && subreg_lowpart_p (op0)
5767 && (GET_MODE_SIZE (GET_MODE (op0))
5768 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op0)))))
5770 enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op0));
5771 rtx tem = gen_lowpart_if_possible (inner_mode, op1);
5773 record_jump_cond (code, mode, SUBREG_REG (op0),
5774 tem ? tem : gen_rtx (SUBREG, inner_mode, op1, 0),
5775 reversed_nonequality);
5778 if (code == NE && GET_CODE (op1) == SUBREG
5779 && subreg_lowpart_p (op1)
5780 && (GET_MODE_SIZE (GET_MODE (op1))
5781 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op1)))))
5783 enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op1));
5784 rtx tem = gen_lowpart_if_possible (inner_mode, op0);
5786 record_jump_cond (code, mode, SUBREG_REG (op1),
5787 tem ? tem : gen_rtx (SUBREG, inner_mode, op0, 0),
5788 reversed_nonequality);
5791 /* Hash both operands. */
5793 do_not_record = 0;
5794 hash_arg_in_memory = 0;
5795 hash_arg_in_struct = 0;
5796 op0_hash = HASH (op0, mode);
5797 op0_in_memory = hash_arg_in_memory;
5798 op0_in_struct = hash_arg_in_struct;
5800 if (do_not_record)
5801 return;
5803 do_not_record = 0;
5804 hash_arg_in_memory = 0;
5805 hash_arg_in_struct = 0;
5806 op1_hash = HASH (op1, mode);
5807 op1_in_memory = hash_arg_in_memory;
5808 op1_in_struct = hash_arg_in_struct;
5810 if (do_not_record)
5811 return;
5813 /* Look up both operands. */
5814 op0_elt = lookup (op0, op0_hash, mode);
5815 op1_elt = lookup (op1, op1_hash, mode);
5817 /* If both operands are already equivalent or if they are not in the
5818 table but are identical, do nothing. */
5819 if ((op0_elt != 0 && op1_elt != 0
5820 && op0_elt->first_same_value == op1_elt->first_same_value)
5821 || op0 == op1 || rtx_equal_p (op0, op1))
5822 return;
5824 /* If we aren't setting two things equal, all we can do is save this
5825 comparison. Similarly if this is floating-point. In the latter
5826 case, OP1 might be zero and both -0.0 and 0.0 are equal to it.
5827 If we record the equality, we might inadvertently delete code
5828 whose intent was to change -0 to +0. */
5830 if (code != EQ || FLOAT_MODE_P (GET_MODE (op0)))
5832 /* If we reversed a floating-point comparison, if OP0 is not a
5833 register, or if OP1 is neither a register nor a constant, we can't
5834 do anything. */
5836 if (GET_CODE (op1) != REG)
5837 op1 = equiv_constant (op1);
5839 if ((reversed_nonequality && FLOAT_MODE_P (mode))
5840 || GET_CODE (op0) != REG || op1 == 0)
5841 return;
5843 /* Put OP0 in the hash table if it isn't already. This gives it a
5844 new quantity number. */
5845 if (op0_elt == 0)
5847 if (insert_regs (op0, NULL_PTR, 0))
5849 rehash_using_reg (op0);
5850 op0_hash = HASH (op0, mode);
5852 /* If OP0 is contained in OP1, this changes its hash code
5853 as well. Faster to rehash than to check, except
5854 for the simple case of a constant. */
5855 if (! CONSTANT_P (op1))
5856 op1_hash = HASH (op1, mode);
5859 op0_elt = insert (op0, NULL_PTR, op0_hash, mode);
5860 op0_elt->in_memory = op0_in_memory;
5861 op0_elt->in_struct = op0_in_struct;
5864 qty_comparison_code[reg_qty[REGNO (op0)]] = code;
5865 if (GET_CODE (op1) == REG)
5867 /* Look it up again--in case op0 and op1 are the same. */
5868 op1_elt = lookup (op1, op1_hash, mode);
5870 /* Put OP1 in the hash table so it gets a new quantity number. */
5871 if (op1_elt == 0)
5873 if (insert_regs (op1, NULL_PTR, 0))
5875 rehash_using_reg (op1);
5876 op1_hash = HASH (op1, mode);
5879 op1_elt = insert (op1, NULL_PTR, op1_hash, mode);
5880 op1_elt->in_memory = op1_in_memory;
5881 op1_elt->in_struct = op1_in_struct;
5884 qty_comparison_qty[reg_qty[REGNO (op0)]] = reg_qty[REGNO (op1)];
5885 qty_comparison_const[reg_qty[REGNO (op0)]] = 0;
5887 else
5889 qty_comparison_qty[reg_qty[REGNO (op0)]] = -1;
5890 qty_comparison_const[reg_qty[REGNO (op0)]] = op1;
5893 return;
5896 /* If either side is still missing an equivalence, make it now,
5897 then merge the equivalences. */
5899 if (op0_elt == 0)
5901 if (insert_regs (op0, NULL_PTR, 0))
5903 rehash_using_reg (op0);
5904 op0_hash = HASH (op0, mode);
5907 op0_elt = insert (op0, NULL_PTR, op0_hash, mode);
5908 op0_elt->in_memory = op0_in_memory;
5909 op0_elt->in_struct = op0_in_struct;
5912 if (op1_elt == 0)
5914 if (insert_regs (op1, NULL_PTR, 0))
5916 rehash_using_reg (op1);
5917 op1_hash = HASH (op1, mode);
5920 op1_elt = insert (op1, NULL_PTR, op1_hash, mode);
5921 op1_elt->in_memory = op1_in_memory;
5922 op1_elt->in_struct = op1_in_struct;
5925 merge_equiv_classes (op0_elt, op1_elt);
5926 last_jump_equiv_class = op0_elt;
5929 /* CSE processing for one instruction.
5930 First simplify sources and addresses of all assignments
5931 in the instruction, using previously-computed equivalent values.
5932 Then install the new sources and destinations in the table
5933 of available values.
5935 If IN_LIBCALL_BLOCK is nonzero, don't record any equivalence made in
5936 the insn. */
5938 /* Data on one SET contained in the instruction. */
5940 struct set
5942 /* The SET rtx itself. */
5943 rtx rtl;
5944 /* The SET_SRC of the rtx (the original value, if it is changing). */
5945 rtx src;
5946 /* The hash-table element for the SET_SRC of the SET. */
5947 struct table_elt *src_elt;
5948 /* Hash value for the SET_SRC. */
5949 unsigned src_hash;
5950 /* Hash value for the SET_DEST. */
5951 unsigned dest_hash;
5952 /* The SET_DEST, with SUBREG, etc., stripped. */
5953 rtx inner_dest;
5954 /* Place where the pointer to the INNER_DEST was found. */
5955 rtx *inner_dest_loc;
5956 /* Nonzero if the SET_SRC is in memory. */
5957 char src_in_memory;
5958 /* Nonzero if the SET_SRC is in a structure. */
5959 char src_in_struct;
5960 /* Nonzero if the SET_SRC contains something
5961 whose value cannot be predicted and understood. */
5962 char src_volatile;
5963 /* Original machine mode, in case it becomes a CONST_INT. */
5964 enum machine_mode mode;
5965 /* A constant equivalent for SET_SRC, if any. */
5966 rtx src_const;
5967 /* Hash value of constant equivalent for SET_SRC. */
5968 unsigned src_const_hash;
5969 /* Table entry for constant equivalent for SET_SRC, if any. */
5970 struct table_elt *src_const_elt;
5973 static void
5974 cse_insn (insn, in_libcall_block)
5975 rtx insn;
5976 int in_libcall_block;
5978 register rtx x = PATTERN (insn);
5979 register int i;
5980 rtx tem;
5981 register int n_sets = 0;
5983 /* Records what this insn does to set CC0. */
5984 rtx this_insn_cc0 = 0;
5985 enum machine_mode this_insn_cc0_mode;
5986 struct write_data writes_memory;
5987 static struct write_data init = {0, 0, 0, 0};
5989 rtx src_eqv = 0;
5990 struct table_elt *src_eqv_elt = 0;
5991 int src_eqv_volatile;
5992 int src_eqv_in_memory;
5993 int src_eqv_in_struct;
5994 unsigned src_eqv_hash;
5996 struct set *sets;
5998 this_insn = insn;
5999 writes_memory = init;
6001 /* Find all the SETs and CLOBBERs in this instruction.
6002 Record all the SETs in the array `set' and count them.
6003 Also determine whether there is a CLOBBER that invalidates
6004 all memory references, or all references at varying addresses. */
6006 if (GET_CODE (insn) == CALL_INSN)
6008 for (tem = CALL_INSN_FUNCTION_USAGE (insn); tem; tem = XEXP (tem, 1))
6009 if (GET_CODE (XEXP (tem, 0)) == CLOBBER)
6010 invalidate (SET_DEST (XEXP (tem, 0)), VOIDmode);
6013 if (GET_CODE (x) == SET)
6015 sets = (struct set *) alloca (sizeof (struct set));
6016 sets[0].rtl = x;
6018 /* Ignore SETs that are unconditional jumps.
6019 They never need cse processing, so this does not hurt.
6020 The reason is not efficiency but rather
6021 so that we can test at the end for instructions
6022 that have been simplified to unconditional jumps
6023 and not be misled by unchanged instructions
6024 that were unconditional jumps to begin with. */
6025 if (SET_DEST (x) == pc_rtx
6026 && GET_CODE (SET_SRC (x)) == LABEL_REF)
6029 /* Don't count call-insns, (set (reg 0) (call ...)), as a set.
6030 The hard function value register is used only once, to copy to
6031 someplace else, so it isn't worth cse'ing (and on 80386 is unsafe)!
6032 Ensure we invalidate the destination register. On the 80386 no
6033 other code would invalidate it since it is a fixed_reg.
6034 We need not check the return of apply_change_group; see canon_reg. */
6036 else if (GET_CODE (SET_SRC (x)) == CALL)
6038 canon_reg (SET_SRC (x), insn);
6039 apply_change_group ();
6040 fold_rtx (SET_SRC (x), insn);
6041 invalidate (SET_DEST (x), VOIDmode);
6043 else
6044 n_sets = 1;
6046 else if (GET_CODE (x) == PARALLEL)
6048 register int lim = XVECLEN (x, 0);
6050 sets = (struct set *) alloca (lim * sizeof (struct set));
6052 /* Find all regs explicitly clobbered in this insn,
6053 and ensure they are not replaced with any other regs
6054 elsewhere in this insn.
6055 When a reg that is clobbered is also used for input,
6056 we should presume that that is for a reason,
6057 and we should not substitute some other register
6058 which is not supposed to be clobbered.
6059 Therefore, this loop cannot be merged into the one below
6060 because a CALL may precede a CLOBBER and refer to the
6061 value clobbered. We must not let a canonicalization do
6062 anything in that case. */
6063 for (i = 0; i < lim; i++)
6065 register rtx y = XVECEXP (x, 0, i);
6066 if (GET_CODE (y) == CLOBBER)
6068 rtx clobbered = XEXP (y, 0);
6070 if (GET_CODE (clobbered) == REG
6071 || GET_CODE (clobbered) == SUBREG)
6072 invalidate (clobbered, VOIDmode);
6073 else if (GET_CODE (clobbered) == STRICT_LOW_PART
6074 || GET_CODE (clobbered) == ZERO_EXTRACT)
6075 invalidate (XEXP (clobbered, 0), GET_MODE (clobbered));
6079 for (i = 0; i < lim; i++)
6081 register rtx y = XVECEXP (x, 0, i);
6082 if (GET_CODE (y) == SET)
6084 /* As above, we ignore unconditional jumps and call-insns and
6085 ignore the result of apply_change_group. */
6086 if (GET_CODE (SET_SRC (y)) == CALL)
6088 canon_reg (SET_SRC (y), insn);
6089 apply_change_group ();
6090 fold_rtx (SET_SRC (y), insn);
6091 invalidate (SET_DEST (y), VOIDmode);
6093 else if (SET_DEST (y) == pc_rtx
6094 && GET_CODE (SET_SRC (y)) == LABEL_REF)
6096 else
6097 sets[n_sets++].rtl = y;
6099 else if (GET_CODE (y) == CLOBBER)
6101 /* If we clobber memory, take note of that,
6102 and canon the address.
6103 This does nothing when a register is clobbered
6104 because we have already invalidated the reg. */
6105 if (GET_CODE (XEXP (y, 0)) == MEM)
6107 canon_reg (XEXP (y, 0), NULL_RTX);
6108 note_mem_written (XEXP (y, 0), &writes_memory);
6111 else if (GET_CODE (y) == USE
6112 && ! (GET_CODE (XEXP (y, 0)) == REG
6113 && REGNO (XEXP (y, 0)) < FIRST_PSEUDO_REGISTER))
6114 canon_reg (y, NULL_RTX);
6115 else if (GET_CODE (y) == CALL)
6117 /* The result of apply_change_group can be ignored; see
6118 canon_reg. */
6119 canon_reg (y, insn);
6120 apply_change_group ();
6121 fold_rtx (y, insn);
6125 else if (GET_CODE (x) == CLOBBER)
6127 if (GET_CODE (XEXP (x, 0)) == MEM)
6129 canon_reg (XEXP (x, 0), NULL_RTX);
6130 note_mem_written (XEXP (x, 0), &writes_memory);
6134 /* Canonicalize a USE of a pseudo register or memory location. */
6135 else if (GET_CODE (x) == USE
6136 && ! (GET_CODE (XEXP (x, 0)) == REG
6137 && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER))
6138 canon_reg (XEXP (x, 0), NULL_RTX);
6139 else if (GET_CODE (x) == CALL)
6141 /* The result of apply_change_group can be ignored; see canon_reg. */
6142 canon_reg (x, insn);
6143 apply_change_group ();
6144 fold_rtx (x, insn);
6147 /* Store the equivalent value in SRC_EQV, if different, or if the DEST
6148 is a STRICT_LOW_PART. The latter condition is necessary because SRC_EQV
6149 is handled specially for this case, and if it isn't set, then there will
6150 be no equivalence for the destination. */
6151 if (n_sets == 1 && REG_NOTES (insn) != 0
6152 && (tem = find_reg_note (insn, REG_EQUAL, NULL_RTX)) != 0
6153 && (! rtx_equal_p (XEXP (tem, 0), SET_SRC (sets[0].rtl))
6154 || GET_CODE (SET_DEST (sets[0].rtl)) == STRICT_LOW_PART))
6155 src_eqv = canon_reg (XEXP (tem, 0), NULL_RTX);
6157 /* Canonicalize sources and addresses of destinations.
6158 We do this in a separate pass to avoid problems when a MATCH_DUP is
6159 present in the insn pattern. In that case, we want to ensure that
6160 we don't break the duplicate nature of the pattern. So we will replace
6161 both operands at the same time. Otherwise, we would fail to find an
6162 equivalent substitution in the loop calling validate_change below.
6164 We used to suppress canonicalization of DEST if it appears in SRC,
6165 but we don't do this any more. */
6167 for (i = 0; i < n_sets; i++)
6169 rtx dest = SET_DEST (sets[i].rtl);
6170 rtx src = SET_SRC (sets[i].rtl);
6171 rtx new = canon_reg (src, insn);
6173 if ((GET_CODE (new) == REG && GET_CODE (src) == REG
6174 && ((REGNO (new) < FIRST_PSEUDO_REGISTER)
6175 != (REGNO (src) < FIRST_PSEUDO_REGISTER)))
6176 || insn_n_dups[recog_memoized (insn)] > 0)
6177 validate_change (insn, &SET_SRC (sets[i].rtl), new, 1);
6178 else
6179 SET_SRC (sets[i].rtl) = new;
6181 if (GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SIGN_EXTRACT)
6183 validate_change (insn, &XEXP (dest, 1),
6184 canon_reg (XEXP (dest, 1), insn), 1);
6185 validate_change (insn, &XEXP (dest, 2),
6186 canon_reg (XEXP (dest, 2), insn), 1);
6189 while (GET_CODE (dest) == SUBREG || GET_CODE (dest) == STRICT_LOW_PART
6190 || GET_CODE (dest) == ZERO_EXTRACT
6191 || GET_CODE (dest) == SIGN_EXTRACT)
6192 dest = XEXP (dest, 0);
6194 if (GET_CODE (dest) == MEM)
6195 canon_reg (dest, insn);
6198 /* Now that we have done all the replacements, we can apply the change
6199 group and see if they all work. Note that this will cause some
6200 canonicalizations that would have worked individually not to be applied
6201 because some other canonicalization didn't work, but this should not
6202 occur often.
6204 The result of apply_change_group can be ignored; see canon_reg. */
6206 apply_change_group ();
6208 /* Set sets[i].src_elt to the class each source belongs to.
6209 Detect assignments from or to volatile things
6210 and set sets[i].rtl to zero so they will be ignored
6211 in the rest of this function.
6213 Nothing in this loop changes the hash table or the register chains. */
6215 for (i = 0; i < n_sets; i++)
6217 register rtx src, dest;
6218 register rtx src_folded;
6219 register struct table_elt *elt = 0, *p;
6220 enum machine_mode mode;
6221 rtx src_eqv_here;
6222 rtx src_const = 0;
6223 rtx src_related = 0;
6224 struct table_elt *src_const_elt = 0;
6225 int src_cost = 10000, src_eqv_cost = 10000, src_folded_cost = 10000;
6226 int src_related_cost = 10000, src_elt_cost = 10000;
6227 /* Set non-zero if we need to call force_const_mem on the
6228 contents of src_folded before using it. */
6229 int src_folded_force_flag = 0;
6231 dest = SET_DEST (sets[i].rtl);
6232 src = SET_SRC (sets[i].rtl);
6234 /* If SRC is a constant that has no machine mode,
6235 hash it with the destination's machine mode.
6236 This way we can keep different modes separate. */
6238 mode = GET_MODE (src) == VOIDmode ? GET_MODE (dest) : GET_MODE (src);
6239 sets[i].mode = mode;
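/* E.g. (illustrative): in (set (reg:HI 80) (const_int 7)) the CONST_INT
   has VOIDmode, so it is hashed here under HImode; the same constant
   set into an SImode register hashes separately under SImode.  */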
6241 if (src_eqv)
6243 enum machine_mode eqvmode = mode;
6244 if (GET_CODE (dest) == STRICT_LOW_PART)
6245 eqvmode = GET_MODE (SUBREG_REG (XEXP (dest, 0)));
6246 do_not_record = 0;
6247 hash_arg_in_memory = 0;
6248 hash_arg_in_struct = 0;
6249 src_eqv = fold_rtx (src_eqv, insn);
6250 src_eqv_hash = HASH (src_eqv, eqvmode);
6252 /* Find the equivalence class for the equivalent expression. */
6254 if (!do_not_record)
6255 src_eqv_elt = lookup (src_eqv, src_eqv_hash, eqvmode);
6257 src_eqv_volatile = do_not_record;
6258 src_eqv_in_memory = hash_arg_in_memory;
6259 src_eqv_in_struct = hash_arg_in_struct;
6262 /* If this is a STRICT_LOW_PART assignment, src_eqv corresponds to the
6263 value of the INNER register, not the destination. So it is not
6264 a legal substitution for the source. But save it for later. */
6265 if (GET_CODE (dest) == STRICT_LOW_PART)
6266 src_eqv_here = 0;
6267 else
6268 src_eqv_here = src_eqv;
6270 /* Simplify any foldable subexpressions in SRC. Then get the fully-
6271 simplified result, which may not necessarily be valid. */
6272 src_folded = fold_rtx (src, insn);
6274 /* If storing a constant in a bitfield, pre-truncate the constant
6275 so we will be able to record it later. */
6276 if (GET_CODE (SET_DEST (sets[i].rtl)) == ZERO_EXTRACT
6277 || GET_CODE (SET_DEST (sets[i].rtl)) == SIGN_EXTRACT)
6279 rtx width = XEXP (SET_DEST (sets[i].rtl), 1);
6281 if (GET_CODE (src) == CONST_INT
6282 && GET_CODE (width) == CONST_INT
6283 && INTVAL (width) < HOST_BITS_PER_WIDE_INT
6284 && (INTVAL (src) & ((HOST_WIDE_INT) (-1) << INTVAL (width))))
6285 src_folded
6286 = GEN_INT (INTVAL (src) & (((HOST_WIDE_INT) 1
6287 << INTVAL (width)) - 1));
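/* Numeric sketch (not from this file): storing (const_int 0x1ff) into
   an 8-bit-wide ZERO_EXTRACT pre-truncates src_folded to
   0x1ff & ((1 << 8) - 1) == 0xff, the value the field will really
   hold.  */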
6290 /* Compute SRC's hash code, and also notice if it
6291 should not be recorded at all. In that case,
6292 prevent any further processing of this assignment. */
6293 do_not_record = 0;
6294 hash_arg_in_memory = 0;
6295 hash_arg_in_struct = 0;
6297 sets[i].src = src;
6298 sets[i].src_hash = HASH (src, mode);
6299 sets[i].src_volatile = do_not_record;
6300 sets[i].src_in_memory = hash_arg_in_memory;
6301 sets[i].src_in_struct = hash_arg_in_struct;
6303 #if 0
6304 /* It is no longer clear why we used to do this, but it doesn't
6305 appear to still be needed. So let's try without it since this
6306 code hurts cse'ing widened ops. */
6307 /* If source is a perverse subreg (such as QI treated as an SI),
6308 treat it as volatile. It may do the work of an SI in one context
6309 where the extra bits are not being used, but cannot replace an SI
6310 in general. */
6311 if (GET_CODE (src) == SUBREG
6312 && (GET_MODE_SIZE (GET_MODE (src))
6313 > GET_MODE_SIZE (GET_MODE (SUBREG_REG (src)))))
6314 sets[i].src_volatile = 1;
6315 #endif
6317 /* Locate all possible equivalent forms for SRC. Try to replace
6318 SRC in the insn with each cheaper equivalent.
6320 We have the following types of equivalents: SRC itself, a folded
6321 version, a value given in a REG_EQUAL note, or a value related
6322 to a constant.
6324 Each of these equivalents may be part of an additional class
6325 of equivalents (if more than one is in the table, they must be in
6326 the same class; we check for this).
6328 If the source is volatile, we don't do any table lookups.
6330 We note any constant equivalent for possible later use in a
6331 REG_NOTE. */
6333 if (!sets[i].src_volatile)
6334 elt = lookup (src, sets[i].src_hash, mode);
6336 sets[i].src_elt = elt;
6338 if (elt && src_eqv_here && src_eqv_elt)
6340 if (elt->first_same_value != src_eqv_elt->first_same_value)
6342 /* The REG_EQUAL is indicating that two formerly distinct
6343 classes are now equivalent. So merge them. */
6344 merge_equiv_classes (elt, src_eqv_elt);
6345 src_eqv_hash = HASH (src_eqv, elt->mode);
6346 src_eqv_elt = lookup (src_eqv, src_eqv_hash, elt->mode);
6349 src_eqv_here = 0;
6352 else if (src_eqv_elt)
6353 elt = src_eqv_elt;
6355 /* Try to find a constant somewhere and record it in `src_const'.
6356 Record its table element, if any, in `src_const_elt'. Look in
6357 any known equivalences first. (If the constant is not in the
6358 table, also set `sets[i].src_const_hash'). */
6359 if (elt)
6360 for (p = elt->first_same_value; p; p = p->next_same_value)
6361 if (p->is_const)
6363 src_const = p->exp;
6364 src_const_elt = elt;
6365 break;
6368 if (src_const == 0
6369 && (CONSTANT_P (src_folded)
6370 /* Consider (minus (label_ref L1) (label_ref L2)) as
6371 "constant" here so we will record it. This allows us
6372 to fold switch statements when an ADDR_DIFF_VEC is used. */
6373 || (GET_CODE (src_folded) == MINUS
6374 && GET_CODE (XEXP (src_folded, 0)) == LABEL_REF
6375 && GET_CODE (XEXP (src_folded, 1)) == LABEL_REF)))
6376 src_const = src_folded, src_const_elt = elt;
6377 else if (src_const == 0 && src_eqv_here && CONSTANT_P (src_eqv_here))
6378 src_const = src_eqv_here, src_const_elt = src_eqv_elt;
6380 /* If we don't know if the constant is in the table, get its
6381 hash code and look it up. */
6382 if (src_const && src_const_elt == 0)
6384 sets[i].src_const_hash = HASH (src_const, mode);
6385 src_const_elt = lookup (src_const, sets[i].src_const_hash, mode);
6388 sets[i].src_const = src_const;
6389 sets[i].src_const_elt = src_const_elt;
6391 /* If the constant and our source are both in the table, mark them as
6392 equivalent. Otherwise, if a constant is in the table but the source
6393 isn't, set ELT to it. */
6394 if (src_const_elt && elt
6395 && src_const_elt->first_same_value != elt->first_same_value)
6396 merge_equiv_classes (elt, src_const_elt);
6397 else if (src_const_elt && elt == 0)
6398 elt = src_const_elt;
6400 /* See if there is a register linearly related to a constant
6401 equivalent of SRC. */
6402 if (src_const
6403 && (GET_CODE (src_const) == CONST
6404 || (src_const_elt && src_const_elt->related_value != 0)))
6406 src_related = use_related_value (src_const, src_const_elt);
6407 if (src_related)
6409 struct table_elt *src_related_elt
6410 = lookup (src_related, HASH (src_related, mode), mode);
6411 if (src_related_elt && elt)
6413 if (elt->first_same_value
6414 != src_related_elt->first_same_value)
6415 /* This can occur when we previously saw a CONST
6416 involving a SYMBOL_REF and then see the SYMBOL_REF
6417 twice. Merge the involved classes. */
6418 merge_equiv_classes (elt, src_related_elt);
6420 src_related = 0;
6421 src_related_elt = 0;
6423 else if (src_related_elt && elt == 0)
6424 elt = src_related_elt;
6428 /* See if we have a CONST_INT that is already in a register in a
6429 wider mode. */
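/* Sketch (hypothetical registers): if (reg:SI 75) is already known to
   hold (const_int 5) and this insn wants (const_int 5) in QImode,
   src_related becomes the QImode lowpart of reg 75, typically cheaper
   than materializing the constant again.  */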
6431 if (src_const && src_related == 0 && GET_CODE (src_const) == CONST_INT
6432 && GET_MODE_CLASS (mode) == MODE_INT
6433 && GET_MODE_BITSIZE (mode) < BITS_PER_WORD)
6435 enum machine_mode wider_mode;
6437 for (wider_mode = GET_MODE_WIDER_MODE (mode);
6438 GET_MODE_BITSIZE (wider_mode) <= BITS_PER_WORD
6439 && src_related == 0;
6440 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
6442 struct table_elt *const_elt
6443 = lookup (src_const, HASH (src_const, wider_mode), wider_mode);
6445 if (const_elt == 0)
6446 continue;
6448 for (const_elt = const_elt->first_same_value;
6449 const_elt; const_elt = const_elt->next_same_value)
6450 if (GET_CODE (const_elt->exp) == REG)
6452 src_related = gen_lowpart_if_possible (mode,
6453 const_elt->exp);
6454 break;
6459 /* Another possibility is that we have an AND with a constant in
6460 a mode narrower than a word. If so, it might have been generated
6461 as part of an "if" which would narrow the AND. If we already
6462 have done the AND in a wider mode, we can use a SUBREG of that
6463 value. */
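/* Sketch of the idea (hypothetical registers): if (and:SI (reg:SI 82)
   (const_int 15)) was computed earlier and we now need
   (and:QI X (const_int 15)) where X is the QImode lowpart of reg 82,
   the QImode lowpart of the old SImode result can be reused instead of
   doing a new AND.  */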
6465 if (flag_expensive_optimizations && ! src_related
6466 && GET_CODE (src) == AND && GET_CODE (XEXP (src, 1)) == CONST_INT
6467 && GET_MODE_SIZE (mode) < UNITS_PER_WORD)
6469 enum machine_mode tmode;
6470 rtx new_and = gen_rtx (AND, VOIDmode, NULL_RTX, XEXP (src, 1));
6472 for (tmode = GET_MODE_WIDER_MODE (mode);
6473 GET_MODE_SIZE (tmode) <= UNITS_PER_WORD;
6474 tmode = GET_MODE_WIDER_MODE (tmode))
6476 rtx inner = gen_lowpart_if_possible (tmode, XEXP (src, 0));
6477 struct table_elt *larger_elt;
6479 if (inner)
6481 PUT_MODE (new_and, tmode);
6482 XEXP (new_and, 0) = inner;
6483 larger_elt = lookup (new_and, HASH (new_and, tmode), tmode);
6484 if (larger_elt == 0)
6485 continue;
6487 for (larger_elt = larger_elt->first_same_value;
6488 larger_elt; larger_elt = larger_elt->next_same_value)
6489 if (GET_CODE (larger_elt->exp) == REG)
6491 src_related
6492 = gen_lowpart_if_possible (mode, larger_elt->exp);
6493 break;
6496 if (src_related)
6497 break;
6502 #ifdef LOAD_EXTEND_OP
6503 /* See if a MEM has already been loaded with a widening operation;
6504 if it has, we can use a subreg of that. Many CISC machines
6505 also have such operations, but this is only likely to be
6506 beneficial on these machines. */
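/* E.g. (a sketch): if (reg:SI 90) was loaded by
       (set (reg:SI 90) (zero_extend:SI (mem:HI addr)))
   and this insn reads (mem:HI addr), the HImode value is available as
   the lowpart of reg 90, so src_related becomes a subreg of it.  */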
6508 if (flag_expensive_optimizations && src_related == 0
6509 && (GET_MODE_SIZE (mode) < UNITS_PER_WORD)
6510 && GET_MODE_CLASS (mode) == MODE_INT
6511 && GET_CODE (src) == MEM && ! do_not_record
6512 && LOAD_EXTEND_OP (mode) != NIL)
6514 enum machine_mode tmode;
6516 /* Set what we are trying to extend and the operation it might
6517 have been extended with. */
6518 PUT_CODE (memory_extend_rtx, LOAD_EXTEND_OP (mode));
6519 XEXP (memory_extend_rtx, 0) = src;
6521 for (tmode = GET_MODE_WIDER_MODE (mode);
6522 GET_MODE_SIZE (tmode) <= UNITS_PER_WORD;
6523 tmode = GET_MODE_WIDER_MODE (tmode))
6525 struct table_elt *larger_elt;
6527 PUT_MODE (memory_extend_rtx, tmode);
6528 larger_elt = lookup (memory_extend_rtx,
6529 HASH (memory_extend_rtx, tmode), tmode);
6530 if (larger_elt == 0)
6531 continue;
6533 for (larger_elt = larger_elt->first_same_value;
6534 larger_elt; larger_elt = larger_elt->next_same_value)
6535 if (GET_CODE (larger_elt->exp) == REG)
6537 src_related = gen_lowpart_if_possible (mode,
6538 larger_elt->exp);
6539 break;
6542 if (src_related)
6543 break;
6546 #endif /* LOAD_EXTEND_OP */
6548 if (src == src_folded)
6549 src_folded = 0;
6551 /* At this point, ELT, if non-zero, points to a class of expressions
6552 equivalent to the source of this SET and SRC, SRC_EQV, SRC_FOLDED,
6553 and SRC_RELATED, if non-zero, each contain additional equivalent
6554 expressions. Prune these latter expressions by deleting expressions
6555 already in the equivalence class.
6557 Check for an equivalent identical to the destination. If found,
6558 this is the preferred equivalent since it will likely lead to
6559 elimination of the insn. Indicate this by placing it in
6560 `src_related'. */
6562 if (elt) elt = elt->first_same_value;
6563 for (p = elt; p; p = p->next_same_value)
6565 enum rtx_code code = GET_CODE (p->exp);
6567 /* If the expression is not valid, ignore it. Then we do not
6568 have to check for validity below. In most cases, we can use
6569 `rtx_equal_p', since canonicalization has already been done. */
6570 if (code != REG && ! exp_equiv_p (p->exp, p->exp, 1, 0))
6571 continue;
6573 if (src && GET_CODE (src) == code && rtx_equal_p (src, p->exp))
6574 src = 0;
6575 else if (src_folded && GET_CODE (src_folded) == code
6576 && rtx_equal_p (src_folded, p->exp))
6577 src_folded = 0;
6578 else if (src_eqv_here && GET_CODE (src_eqv_here) == code
6579 && rtx_equal_p (src_eqv_here, p->exp))
6580 src_eqv_here = 0;
6581 else if (src_related && GET_CODE (src_related) == code
6582 && rtx_equal_p (src_related, p->exp))
6583 src_related = 0;
6585 /* If this is the same as the destination of the insn, we want
6586 to prefer it. Copy it to src_related. The code below will
6587 then give it a negative cost. */
6588 if (GET_CODE (dest) == code && rtx_equal_p (p->exp, dest))
6589 src_related = dest;
6593 /* Find the cheapest valid equivalent, trying all the available
6594 possibilities. Prefer items not in the hash table to ones
6595 that are when they are of equal cost. Note that we can never
6596 worsen an insn as the current contents will also succeed.
6597 If we find an equivalent identical to the destination, use it as best,
6598 since this insn will probably be eliminated in that case. */
6599 if (src)
6601 if (rtx_equal_p (src, dest))
6602 src_cost = -1;
6603 else
6604 src_cost = COST (src);
6607 if (src_eqv_here)
6609 if (rtx_equal_p (src_eqv_here, dest))
6610 src_eqv_cost = -1;
6611 else
6612 src_eqv_cost = COST (src_eqv_here);
6615 if (src_folded)
6617 if (rtx_equal_p (src_folded, dest))
6618 src_folded_cost = -1;
6619 else
6620 src_folded_cost = COST (src_folded);
6623 if (src_related)
6625 if (rtx_equal_p (src_related, dest))
6626 src_related_cost = -1;
6627 else
6628 src_related_cost = COST (src_related);
6631 /* If this was an indirect jump insn, a known label will really be
6632 cheaper even though it looks more expensive. */
6633 if (dest == pc_rtx && src_const && GET_CODE (src_const) == LABEL_REF)
6634 src_folded = src_const, src_folded_cost = -1;
6636 /* Terminate loop when replacement made. This must terminate since
6637 the current contents will be tested and will always be valid. */
6638 while (1)
6640 rtx trial;
6642 /* Skip invalid entries. */
6643 while (elt && GET_CODE (elt->exp) != REG
6644 && ! exp_equiv_p (elt->exp, elt->exp, 1, 0))
6645 elt = elt->next_same_value;
6647 if (elt) src_elt_cost = elt->cost;
6649 /* Find cheapest and skip it for the next time. For items
6650 of equal cost, use this order:
6651 src_folded, src, src_eqv, src_related and hash table entry. */
6652 if (src_folded_cost <= src_cost
6653 && src_folded_cost <= src_eqv_cost
6654 && src_folded_cost <= src_related_cost
6655 && src_folded_cost <= src_elt_cost)
6657 trial = src_folded, src_folded_cost = 10000;
6658 if (src_folded_force_flag)
6659 trial = force_const_mem (mode, trial);
6661 else if (src_cost <= src_eqv_cost
6662 && src_cost <= src_related_cost
6663 && src_cost <= src_elt_cost)
6664 trial = src, src_cost = 10000;
6665 else if (src_eqv_cost <= src_related_cost
6666 && src_eqv_cost <= src_elt_cost)
6667 trial = copy_rtx (src_eqv_here), src_eqv_cost = 10000;
6668 else if (src_related_cost <= src_elt_cost)
6669 trial = copy_rtx (src_related), src_related_cost = 10000;
6670 else
6672 trial = copy_rtx (elt->exp);
6673 elt = elt->next_same_value;
6674 src_elt_cost = 10000;
6677 /* We don't normally have an insn matching (set (pc) (pc)), so
6678 check for this separately here. We will delete such an
6679 insn below.
6681 Tablejump insns contain a USE of the table, so simply replacing
6682 the operand with the constant won't match. This is simply an
6683 unconditional branch, however, and is therefore valid. Just
6684 insert the substitution here and we will delete and re-emit
6685 the insn later. */
6687 if (n_sets == 1 && dest == pc_rtx
6688 && (trial == pc_rtx
6689 || (GET_CODE (trial) == LABEL_REF
6690 && ! condjump_p (insn))))
6692 /* If TRIAL is a label in front of a jump table, we are
6693 really falling through the switch (this is how casesi
6694 insns work), so we must branch around the table. */
6695 if (GET_CODE (trial) == CODE_LABEL
6696 && NEXT_INSN (trial) != 0
6697 && GET_CODE (NEXT_INSN (trial)) == JUMP_INSN
6698 && (GET_CODE (PATTERN (NEXT_INSN (trial))) == ADDR_DIFF_VEC
6699 || GET_CODE (PATTERN (NEXT_INSN (trial))) == ADDR_VEC))
6701 trial = gen_rtx (LABEL_REF, Pmode, get_label_after (trial));
6703 SET_SRC (sets[i].rtl) = trial;
6704 cse_jumps_altered = 1;
6705 break;
6708 /* Look for a substitution that makes a valid insn. */
6709 else if (validate_change (insn, &SET_SRC (sets[i].rtl), trial, 0))
6711 /* The result of apply_change_group can be ignored; see
6712 canon_reg. */
6714 validate_change (insn, &SET_SRC (sets[i].rtl),
6715 canon_reg (SET_SRC (sets[i].rtl), insn),
6716 1);
6717 apply_change_group ();
6718 break;
6721 /* If we previously found constant pool entries for
6722 constants and this is a constant, try making a
6723 pool entry. Put it in src_folded unless we already have done
6724 this since that is where it likely came from. */
6726 else if (constant_pool_entries_cost
6727 && CONSTANT_P (trial)
6728 && ! (GET_CODE (trial) == CONST
6729 && GET_CODE (XEXP (trial, 0)) == TRUNCATE)
6730 && (src_folded == 0
6731 || (GET_CODE (src_folded) != MEM
6732 && ! src_folded_force_flag))
6733 && GET_MODE_CLASS (mode) != MODE_CC)
6735 src_folded_force_flag = 1;
6736 src_folded = trial;
6737 src_folded_cost = constant_pool_entries_cost;
6741 src = SET_SRC (sets[i].rtl);
6743 /* In general, it is good to have a SET with SET_SRC == SET_DEST.
6744 However, there is an important exception: If both are registers
6745 that are not the head of their equivalence class, replace SET_SRC
6746 with the head of the class. If we do not do this, we will have
6747 both registers live over a portion of the basic block. This way,
6748 their lifetimes will likely abut instead of overlapping. */
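/* Concrete case (hypothetical registers): if regs 70 and 71 are
   equivalent with reg 70 the head of the class, (set (reg 71) (reg 71))
   has its source rewritten to reg 70, giving (set (reg 71) (reg 70));
   reg 71's lifetime can then end here instead of overlapping reg 70's.  */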
6749 if (GET_CODE (dest) == REG
6750 && REGNO_QTY_VALID_P (REGNO (dest))
6751 && qty_mode[reg_qty[REGNO (dest)]] == GET_MODE (dest)
6752 && qty_first_reg[reg_qty[REGNO (dest)]] != REGNO (dest)
6753 && GET_CODE (src) == REG && REGNO (src) == REGNO (dest)
6754 /* Don't do this if the original insn had a hard reg as
6755 SET_SRC. */
6756 && (GET_CODE (sets[i].src) != REG
6757 || REGNO (sets[i].src) >= FIRST_PSEUDO_REGISTER))
6758 /* We can't call canon_reg here because it won't do anything if
6759 SRC is a hard register. */
6761 int first = qty_first_reg[reg_qty[REGNO (src)]];
6763 src = SET_SRC (sets[i].rtl)
6764 = first >= FIRST_PSEUDO_REGISTER ? regno_reg_rtx[first]
6765 : gen_rtx (REG, GET_MODE (src), first);
6767 /* If we had a constant that is cheaper than what we are now
6768 setting SRC to, use that constant. We ignored it when we
6769 thought we could make this into a no-op. */
6770 if (src_const && COST (src_const) < COST (src)
6771 && validate_change (insn, &SET_SRC (sets[i].rtl), src_const, 0))
6772 src = src_const;
6775 /* If we made a change, recompute SRC values. */
6776 if (src != sets[i].src)
6778 do_not_record = 0;
6779 hash_arg_in_memory = 0;
6780 hash_arg_in_struct = 0;
6781 sets[i].src = src;
6782 sets[i].src_hash = HASH (src, mode);
6783 sets[i].src_volatile = do_not_record;
6784 sets[i].src_in_memory = hash_arg_in_memory;
6785 sets[i].src_in_struct = hash_arg_in_struct;
6786 sets[i].src_elt = lookup (src, sets[i].src_hash, mode);
6789 /* If this is a single SET, we are setting a register, and we have an
6790 equivalent constant, we want to add a REG_NOTE. We don't want
6791 to write a REG_EQUAL note for a constant pseudo since verifying that
6792 that pseudo hasn't been eliminated is a pain. Such a note also
6793 won't help anything. */
6794 if (n_sets == 1 && src_const && GET_CODE (dest) == REG
6795 && GET_CODE (src_const) != REG)
6797 tem = find_reg_note (insn, REG_EQUAL, NULL_RTX);
6799 /* Record the actual constant value in a REG_EQUAL note, making
6800 a new one if one does not already exist. */
6801 if (tem)
6802 XEXP (tem, 0) = src_const;
6803 else
6804 REG_NOTES (insn) = gen_rtx (EXPR_LIST, REG_EQUAL,
6805 src_const, REG_NOTES (insn));
6807 /* If storing a constant value in a register that
6808 previously held the constant value 0,
6809 record this fact with a REG_WAS_0 note on this insn.
6811 Note that the *register* is required to have previously held 0,
6812 not just any register in the quantity, and we must point to the
6813 insn that set that register to zero.
6815 Rather than track each register individually, we just see if
6816 the last set for this quantity was for this register. */
6818 if (REGNO_QTY_VALID_P (REGNO (dest))
6819 && qty_const[reg_qty[REGNO (dest)]] == const0_rtx)
6821 /* See if we previously had a REG_WAS_0 note. */
6822 rtx note = find_reg_note (insn, REG_WAS_0, NULL_RTX);
6823 rtx const_insn = qty_const_insn[reg_qty[REGNO (dest)]];
6825 if ((tem = single_set (const_insn)) != 0
6826 && rtx_equal_p (SET_DEST (tem), dest))
6828 if (note)
6829 XEXP (note, 0) = const_insn;
6830 else
6831 REG_NOTES (insn) = gen_rtx (INSN_LIST, REG_WAS_0,
6832 const_insn, REG_NOTES (insn));
6837 /* Now deal with the destination. */
6838 do_not_record = 0;
6839 sets[i].inner_dest_loc = &SET_DEST (sets[i].rtl);
6841 /* Look within any SIGN_EXTRACT or ZERO_EXTRACT
6842 to the MEM or REG within it. */
6843 while (GET_CODE (dest) == SIGN_EXTRACT
6844 || GET_CODE (dest) == ZERO_EXTRACT
6845 || GET_CODE (dest) == SUBREG
6846 || GET_CODE (dest) == STRICT_LOW_PART)
6848 sets[i].inner_dest_loc = &XEXP (dest, 0);
6849 dest = XEXP (dest, 0);
6852 sets[i].inner_dest = dest;
6854 if (GET_CODE (dest) == MEM)
6856 dest = fold_rtx (dest, insn);
6858 /* Decide whether we invalidate everything in memory,
6859 or just things at non-fixed places.
6860 Writing a large aggregate must invalidate everything
6861 because we don't know how long it is. */
6862 note_mem_written (dest, &writes_memory);
6865 /* Compute the hash code of the destination now,
6866 before the effects of this instruction are recorded,
6867 since the register values used in the address computation
6868 are those before this instruction. */
6869 sets[i].dest_hash = HASH (dest, mode);
6871 /* Don't enter a bit-field in the hash table
6872 because the value in it after the store
6873 may not equal what was stored, due to truncation. */
6875 if (GET_CODE (SET_DEST (sets[i].rtl)) == ZERO_EXTRACT
6876 || GET_CODE (SET_DEST (sets[i].rtl)) == SIGN_EXTRACT)
6878 rtx width = XEXP (SET_DEST (sets[i].rtl), 1);
6880 if (src_const != 0 && GET_CODE (src_const) == CONST_INT
6881 && GET_CODE (width) == CONST_INT
6882 && INTVAL (width) < HOST_BITS_PER_WIDE_INT
6883 && ! (INTVAL (src_const)
6884 & ((HOST_WIDE_INT) (-1) << INTVAL (width))))
6885 /* Exception: if the value is constant,
6886 and it won't be truncated, record it. */
6888 else
6890 /* This is chosen so that the destination will be invalidated
6891 but no new value will be recorded.
6892 We must invalidate because sometimes constant
6893 values can be recorded for bitfields. */
6894 sets[i].src_elt = 0;
6895 sets[i].src_volatile = 1;
6896 src_eqv = 0;
6897 src_eqv_elt = 0;
6901 /* If only one set in a JUMP_INSN and it is now a no-op, we can delete
6902 the insn. */
6903 else if (n_sets == 1 && dest == pc_rtx && src == pc_rtx)
6905 PUT_CODE (insn, NOTE);
6906 NOTE_LINE_NUMBER (insn) = NOTE_INSN_DELETED;
6907 NOTE_SOURCE_FILE (insn) = 0;
6908 cse_jumps_altered = 1;
6909 /* One less use of the label this insn used to jump to. */
6910 --LABEL_NUSES (JUMP_LABEL (insn));
6911 /* No more processing for this set. */
6912 sets[i].rtl = 0;
6915 /* If this SET is now setting PC to a label, we know it used to
6916 be a conditional or computed branch. So we see if we can follow
6917 it. If it was a computed branch, delete it and re-emit. */
6918 else if (dest == pc_rtx && GET_CODE (src) == LABEL_REF)
6920 rtx p;
6922 /* If this is not in the format for a simple branch and
6923 we are the only SET in it, re-emit it. */
6924 if (! simplejump_p (insn) && n_sets == 1)
6926 rtx new = emit_jump_insn_before (gen_jump (XEXP (src, 0)), insn);
6927 JUMP_LABEL (new) = XEXP (src, 0);
6928 LABEL_NUSES (XEXP (src, 0))++;
6929 delete_insn (insn);
6930 insn = new;
6932 else
6933 /* Otherwise, force rerecognition, since it probably had
6934 a different pattern before.
6935 This shouldn't really be necessary, since whatever
6936 changed the source value above should have done this.
6937 Until the right place is found, might as well do this here. */
6938 INSN_CODE (insn) = -1;
6940 /* Now that we've converted this jump to an unconditional jump,
6941 there is dead code after it. Delete the dead code until we
6942 reach a BARRIER, the end of the function, or a label. Do
6943 not delete NOTEs except for NOTE_INSN_DELETED since later
6944 phases assume these notes are retained. */
6946 p = insn;
6948 while (NEXT_INSN (p) != 0
6949 && GET_CODE (NEXT_INSN (p)) != BARRIER
6950 && GET_CODE (NEXT_INSN (p)) != CODE_LABEL)
6952 if (GET_CODE (NEXT_INSN (p)) != NOTE
6953 || NOTE_LINE_NUMBER (NEXT_INSN (p)) == NOTE_INSN_DELETED)
6954 delete_insn (NEXT_INSN (p));
6955 else
6956 p = NEXT_INSN (p);
6959 /* If we don't have a BARRIER immediately after INSN, put one there.
6960 Much code assumes that there are no NOTEs between a JUMP_INSN and
6961 a BARRIER. */
6963 if (NEXT_INSN (insn) == 0
6964 || GET_CODE (NEXT_INSN (insn)) != BARRIER)
6965 emit_barrier_after (insn);
6967 /* We might have two BARRIERs separated by notes. Delete the second
6968 one if so. */
6970 if (p != insn && NEXT_INSN (p) != 0
6971 && GET_CODE (NEXT_INSN (p)) == BARRIER)
6972 delete_insn (NEXT_INSN (p));
6974 cse_jumps_altered = 1;
6975 sets[i].rtl = 0;
6978 /* If destination is volatile, invalidate it and then do no further
6979 processing for this assignment. */
6981 else if (do_not_record)
6983 if (GET_CODE (dest) == REG || GET_CODE (dest) == SUBREG
6984 || GET_CODE (dest) == MEM)
6985 invalidate (dest, VOIDmode);
6986 else if (GET_CODE (dest) == STRICT_LOW_PART
6987 || GET_CODE (dest) == ZERO_EXTRACT)
6988 invalidate (XEXP (dest, 0), GET_MODE (dest));
6989 sets[i].rtl = 0;
6992 if (sets[i].rtl != 0 && dest != SET_DEST (sets[i].rtl))
6993 sets[i].dest_hash = HASH (SET_DEST (sets[i].rtl), mode);
6995 #ifdef HAVE_cc0
6996 /* If setting CC0, record what it was set to, or a constant, if it
6997 is equivalent to a constant. If it is being set to a floating-point
6998 value, make a COMPARE with the appropriate constant of 0. If we
6999 don't do this, later code can interpret this as a test against
7000 const0_rtx, which can cause problems if we try to put it into an
7001 insn as a floating-point operand. */
7002 if (dest == cc0_rtx)
7004 this_insn_cc0 = src_const && mode != VOIDmode ? src_const : src;
7005 this_insn_cc0_mode = mode;
7006 if (FLOAT_MODE_P (mode))
7007 this_insn_cc0 = gen_rtx (COMPARE, VOIDmode, this_insn_cc0,
7008 CONST0_RTX (mode));
7010 #endif
7013 /* Now enter all non-volatile source expressions in the hash table
7014 if they are not already present.
7015 Record their equivalence classes in src_elt.
7016 This way we can insert the corresponding destinations into
7017 the same classes even if the actual sources are no longer in them
7018 (having been invalidated). */
7020 if (src_eqv && src_eqv_elt == 0 && sets[0].rtl != 0 && ! src_eqv_volatile
7021 && ! rtx_equal_p (src_eqv, SET_DEST (sets[0].rtl)))
7023 register struct table_elt *elt;
7024 register struct table_elt *classp = sets[0].src_elt;
7025 rtx dest = SET_DEST (sets[0].rtl);
7026 enum machine_mode eqvmode = GET_MODE (dest);
7028 if (GET_CODE (dest) == STRICT_LOW_PART)
7030 eqvmode = GET_MODE (SUBREG_REG (XEXP (dest, 0)));
7031 classp = 0;
7033 if (insert_regs (src_eqv, classp, 0))
7035 rehash_using_reg (src_eqv);
7036 src_eqv_hash = HASH (src_eqv, eqvmode);
7038 elt = insert (src_eqv, classp, src_eqv_hash, eqvmode);
7039 elt->in_memory = src_eqv_in_memory;
7040 elt->in_struct = src_eqv_in_struct;
7041 src_eqv_elt = elt;
7043 /* Check to see if src_eqv_elt is the same as a set source which
7044 does not yet have an elt, and if so set the elt of the set source
7045 to src_eqv_elt. */
7046 for (i = 0; i < n_sets; i++)
7047 if (sets[i].rtl && sets[i].src_elt == 0
7048 && rtx_equal_p (SET_SRC (sets[i].rtl), src_eqv))
7049 sets[i].src_elt = src_eqv_elt;
7052 for (i = 0; i < n_sets; i++)
7053 if (sets[i].rtl && ! sets[i].src_volatile
7054 && ! rtx_equal_p (SET_SRC (sets[i].rtl), SET_DEST (sets[i].rtl)))
7056 if (GET_CODE (SET_DEST (sets[i].rtl)) == STRICT_LOW_PART)
7058 /* REG_EQUAL in setting a STRICT_LOW_PART
7059 gives an equivalent for the entire destination register,
7060 not just for the subreg being stored in now.
7061 This is a more interesting equivalence, so we arrange later
7062 to treat the entire reg as the destination. */
7063 sets[i].src_elt = src_eqv_elt;
7064 sets[i].src_hash = src_eqv_hash;
7066 else
7068 /* Insert source and constant equivalent into hash table, if not
7069 already present. */
7070 register struct table_elt *classp = src_eqv_elt;
7071 register rtx src = sets[i].src;
7072 register rtx dest = SET_DEST (sets[i].rtl);
7073 enum machine_mode mode
7074 = GET_MODE (src) == VOIDmode ? GET_MODE (dest) : GET_MODE (src);
7076 if (sets[i].src_elt == 0)
7078 register struct table_elt *elt;
7080 /* Note that these insert_regs calls cannot remove
7081 any of the src_elt's, because they would have failed to
7082 match if not still valid. */
7083 if (insert_regs (src, classp, 0))
7085 rehash_using_reg (src);
7086 sets[i].src_hash = HASH (src, mode);
7088 elt = insert (src, classp, sets[i].src_hash, mode);
7089 elt->in_memory = sets[i].src_in_memory;
7090 elt->in_struct = sets[i].src_in_struct;
7091 sets[i].src_elt = classp = elt;
7094 if (sets[i].src_const && sets[i].src_const_elt == 0
7095 && src != sets[i].src_const
7096 && ! rtx_equal_p (sets[i].src_const, src))
7097 sets[i].src_elt = insert (sets[i].src_const, classp,
7098 sets[i].src_const_hash, mode);
7101 else if (sets[i].src_elt == 0)
7102 /* If we did not insert the source into the hash table (e.g., it was
7103 volatile), note the equivalence class for the REG_EQUAL value, if any,
7104 so that the destination goes into that class. */
7105 sets[i].src_elt = src_eqv_elt;
7107 invalidate_from_clobbers (&writes_memory, x);
7109 /* Some registers are invalidated by subroutine calls. Memory is
7110 invalidated by non-constant calls. */
7112 if (GET_CODE (insn) == CALL_INSN)
7114 static struct write_data everything = {0, 1, 1, 1};
7116 if (! CONST_CALL_P (insn))
7117 invalidate_memory (&everything);
7118 invalidate_for_call ();
7121 /* Now invalidate everything set by this instruction.
7122 If a SUBREG or other funny destination is being set,
7123 sets[i].rtl is still nonzero, so here we invalidate the reg
7124 a part of which is being set. */
7126 for (i = 0; i < n_sets; i++)
7127 if (sets[i].rtl)
7129 /* We can't use the inner dest, because the mode associated with
7130 a ZERO_EXTRACT is significant. */
7131 register rtx dest = SET_DEST (sets[i].rtl);
7133 /* Needed for registers to remove the register from its
7134 previous quantity's chain.
7135 Needed for memory if this is a nonvarying address, unless
7136 we have just done an invalidate_memory that covers even those. */
7137 if (GET_CODE (dest) == REG || GET_CODE (dest) == SUBREG
7138 || (GET_CODE (dest) == MEM && ! writes_memory.all
7139 && ! cse_rtx_addr_varies_p (dest)))
7140 invalidate (dest, VOIDmode);
7141 else if (GET_CODE (dest) == STRICT_LOW_PART
7142 || GET_CODE (dest) == ZERO_EXTRACT)
7143 invalidate (XEXP (dest, 0), GET_MODE (dest));
7146 /* Make sure registers mentioned in destinations
7147 are safe for use in an expression to be inserted.
7148 This removes from the hash table
7149 any invalid entry that refers to one of these registers.
7151 We don't care about the return value from mention_regs because
7152 we are going to hash the SET_DEST values unconditionally. */
7154 for (i = 0; i < n_sets; i++)
7155 if (sets[i].rtl && GET_CODE (SET_DEST (sets[i].rtl)) != REG)
7156 mention_regs (SET_DEST (sets[i].rtl));
7158 /* We may have just removed some of the src_elt's from the hash table.
7159 So replace each one with the current head of the same class. */
7161 for (i = 0; i < n_sets; i++)
7162 if (sets[i].rtl)
7164 if (sets[i].src_elt && sets[i].src_elt->first_same_value == 0)
7165 /* If elt was removed, find current head of same class,
7166 or 0 if nothing remains of that class. */
7168 register struct table_elt *elt = sets[i].src_elt;
7170 while (elt && elt->prev_same_value)
7171 elt = elt->prev_same_value;
7173 while (elt && elt->first_same_value == 0)
7174 elt = elt->next_same_value;
7175 sets[i].src_elt = elt ? elt->first_same_value : 0;
7179 /* Now insert the destinations into their equivalence classes. */
7181 for (i = 0; i < n_sets; i++)
7182 if (sets[i].rtl)
7184 register rtx dest = SET_DEST (sets[i].rtl);
7185 register struct table_elt *elt;
7187 /* Don't record value if we are not supposed to risk allocating
7188 floating-point values in registers that might be wider than
7189 memory. */
7190 if ((flag_float_store
7191 && GET_CODE (dest) == MEM
7192 && FLOAT_MODE_P (GET_MODE (dest)))
7193 /* Don't record values of destinations set inside a libcall block
7194 since we might delete the libcall. Things should have been set
7195 up so we won't want to reuse such a value, but we play it safe
7196 here. */
7197 || in_libcall_block
7198 /* If we didn't put a REG_EQUAL value or a source into the hash
7199 table, there is no point in recording DEST. */
7200 || sets[i].src_elt == 0
7201 /* If DEST is a paradoxical SUBREG and SRC is a ZERO_EXTEND
7202 or SIGN_EXTEND, don't record DEST since it can cause
7203 some tracking to be wrong.
7205 ??? Think about this more later. */
7206 || (GET_CODE (dest) == SUBREG
7207 && (GET_MODE_SIZE (GET_MODE (dest))
7208 > GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest))))
7209 && (GET_CODE (sets[i].src) == SIGN_EXTEND
7210 || GET_CODE (sets[i].src) == ZERO_EXTEND)))
7211 continue;
7213 /* STRICT_LOW_PART isn't part of the value BEING set,
7214 and neither is the SUBREG inside it.
7215 Note that in this case SETS[I].SRC_ELT is really SRC_EQV_ELT. */
7216 if (GET_CODE (dest) == STRICT_LOW_PART)
7217 dest = SUBREG_REG (XEXP (dest, 0));
7219 if (GET_CODE (dest) == REG || GET_CODE (dest) == SUBREG)
7220 /* Registers must also be inserted into chains for quantities. */
7221 if (insert_regs (dest, sets[i].src_elt, 1))
7223 /* If `insert_regs' changes something, the hash code must be
7224 recalculated. */
7225 rehash_using_reg (dest);
7226 sets[i].dest_hash = HASH (dest, GET_MODE (dest));
7229 elt = insert (dest, sets[i].src_elt,
7230 sets[i].dest_hash, GET_MODE (dest));
7231 elt->in_memory = GET_CODE (sets[i].inner_dest) == MEM;
7232 if (elt->in_memory)
7234 /* This implicitly assumes a whole struct
7235 need not have MEM_IN_STRUCT_P.
7236 But a whole struct is *supposed* to have MEM_IN_STRUCT_P. */
7237 elt->in_struct = (MEM_IN_STRUCT_P (sets[i].inner_dest)
7238 || sets[i].inner_dest != SET_DEST (sets[i].rtl));
7241 /* If we have (set (subreg:m1 (reg:m2 foo) 0) (bar:m1)), M1 is no
7242 narrower than M2, and both M1 and M2 are the same number of words,
7243 we are also doing (set (reg:m2 foo) (subreg:m2 (bar:m1) 0)) so
7244 make that equivalence as well.
7246 However, BAR may have equivalences for which gen_lowpart_if_possible
7247 will produce a simpler value than gen_lowpart_if_possible applied to
7248 BAR (e.g., if BAR was ZERO_EXTENDed from M2), so we will scan all
7249 BAR's equivalences. If we don't get a simplified form, make
7250 the SUBREG. It will not be used in an equivalence, but will
7251 cause two similar assignments to be detected.
7253 Note the loop below will find SUBREG_REG (DEST) since we have
7254 already entered SRC and DEST of the SET in the table. */
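/* Example (a sketch): (set (subreg:SI (reg:HI 70) 0) (reg:SI 71)), with
   SImode and HImode occupying the same word, also implies
   (set (reg:HI 70) (subreg:HI (reg:SI 71) 0)), so reg 70's class gains
   the HImode lowparts of reg 71's equivalents.  */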
7256 if (GET_CODE (dest) == SUBREG
7257 && (((GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest))) - 1)
7258 / UNITS_PER_WORD)
7259 == (GET_MODE_SIZE (GET_MODE (dest)) - 1) / UNITS_PER_WORD)
7260 && (GET_MODE_SIZE (GET_MODE (dest))
7261 >= GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest))))
7262 && sets[i].src_elt != 0)
7264 enum machine_mode new_mode = GET_MODE (SUBREG_REG (dest));
7265 struct table_elt *elt, *classp = 0;
7267 for (elt = sets[i].src_elt->first_same_value; elt;
7268 elt = elt->next_same_value)
7270 rtx new_src = 0;
7271 unsigned src_hash;
7272 struct table_elt *src_elt;
7274 /* Ignore invalid entries. */
7275 if (GET_CODE (elt->exp) != REG
7276 && ! exp_equiv_p (elt->exp, elt->exp, 1, 0))
7277 continue;
7279 new_src = gen_lowpart_if_possible (new_mode, elt->exp);
7280 if (new_src == 0)
7281 new_src = gen_rtx (SUBREG, new_mode, elt->exp, 0);
7283 src_hash = HASH (new_src, new_mode);
7284 src_elt = lookup (new_src, src_hash, new_mode);
7286 /* Put the new source in the hash table if it isn't
7287 already there. */
7288 if (src_elt == 0)
7290 if (insert_regs (new_src, classp, 0))
7292 rehash_using_reg (new_src);
7293 src_hash = HASH (new_src, new_mode);
7295 src_elt = insert (new_src, classp, src_hash, new_mode);
7296 src_elt->in_memory = elt->in_memory;
7297 src_elt->in_struct = elt->in_struct;
7299 else if (classp && classp != src_elt->first_same_value)
7300 /* Show that two things that we've seen before are
7301 actually the same. */
7302 merge_equiv_classes (src_elt, classp);
7304 classp = src_elt->first_same_value;
7309 /* Special handling for (set REG0 REG1)
7310 where REG0 is the "cheapest", cheaper than REG1.
7311 After cse, REG1 will probably not be used in the sequel,
7312 so (if easily done) change this insn to (set REG1 REG0) and
7313 replace REG1 with REG0 in the previous insn that computed their value.
7314 Then REG1 will become a dead store and won't cloud the situation
7315 for later optimizations.
7317 Do not make this change if REG1 is a hard register, because it will
7318 then be used in the sequel and we may be changing a two-operand insn
7319 into a three-operand insn.
7321 Also do not do this if we are operating on a copy of INSN. */
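/* Transformation sketch (hypothetical registers; reg 68 is the cheap
   class head):
       (set (reg 69) (plus ...))      (set (reg 68) (plus ...))
       (set (reg 68) (reg 69))   =>   (set (reg 69) (reg 68))
   so the store into reg 69 becomes dead if reg 69 is not used later.  */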
7323 if (n_sets == 1 && sets[0].rtl && GET_CODE (SET_DEST (sets[0].rtl)) == REG
7324 && NEXT_INSN (PREV_INSN (insn)) == insn
7325 && GET_CODE (SET_SRC (sets[0].rtl)) == REG
7326 && REGNO (SET_SRC (sets[0].rtl)) >= FIRST_PSEUDO_REGISTER
7327 && REGNO_QTY_VALID_P (REGNO (SET_SRC (sets[0].rtl)))
7328 && (qty_first_reg[reg_qty[REGNO (SET_SRC (sets[0].rtl))]]
7329 == REGNO (SET_DEST (sets[0].rtl))))
7331 rtx prev = PREV_INSN (insn);
7332 while (prev && GET_CODE (prev) == NOTE)
7333 prev = PREV_INSN (prev);
7335 if (prev && GET_CODE (prev) == INSN && GET_CODE (PATTERN (prev)) == SET
7336 && SET_DEST (PATTERN (prev)) == SET_SRC (sets[0].rtl))
7338 rtx dest = SET_DEST (sets[0].rtl);
7339 rtx note = find_reg_note (prev, REG_EQUIV, NULL_RTX);
7341 validate_change (prev, & SET_DEST (PATTERN (prev)), dest, 1);
7342 validate_change (insn, & SET_DEST (sets[0].rtl),
7343 SET_SRC (sets[0].rtl), 1);
7344 validate_change (insn, & SET_SRC (sets[0].rtl), dest, 1);
7345 apply_change_group ();
7347 /* If REG1 was equivalent to a constant, REG0 is not. */
7348 if (note)
7349 PUT_REG_NOTE_KIND (note, REG_EQUAL);
7351 /* If there was a REG_WAS_0 note on PREV, remove it. Move
7352 any REG_WAS_0 note on INSN to PREV. */
7353 note = find_reg_note (prev, REG_WAS_0, NULL_RTX);
7354 if (note)
7355 remove_note (prev, note);
7357 note = find_reg_note (insn, REG_WAS_0, NULL_RTX);
7358 if (note)
7360 remove_note (insn, note);
7361 XEXP (note, 1) = REG_NOTES (prev);
7362 REG_NOTES (prev) = note;
7367 /* If this is a conditional jump insn, record any known equivalences due to
7368 the condition being tested. */
7370 last_jump_equiv_class = 0;
7371 if (GET_CODE (insn) == JUMP_INSN
7372 && n_sets == 1 && GET_CODE (x) == SET
7373 && GET_CODE (SET_SRC (x)) == IF_THEN_ELSE)
7374 record_jump_equiv (insn, 0);
7376 #ifdef HAVE_cc0
7377 /* If the previous insn set CC0 and this insn no longer references CC0,
7378 delete the previous insn. Here we use the fact that nothing expects CC0
7379 to be valid over an insn, which is true until the final pass. */
7380 if (prev_insn && GET_CODE (prev_insn) == INSN
7381 && (tem = single_set (prev_insn)) != 0
7382 && SET_DEST (tem) == cc0_rtx
7383 && ! reg_mentioned_p (cc0_rtx, x))
7385 PUT_CODE (prev_insn, NOTE);
7386 NOTE_LINE_NUMBER (prev_insn) = NOTE_INSN_DELETED;
7387 NOTE_SOURCE_FILE (prev_insn) = 0;
7390 prev_insn_cc0 = this_insn_cc0;
7391 prev_insn_cc0_mode = this_insn_cc0_mode;
7392 #endif
7394 prev_insn = insn;
7397 /* Store 1 in *WRITES_PTR for those categories of memory ref
7398 that must be invalidated when the expression WRITTEN is stored in.
7399 If WRITTEN is null, say everything must be invalidated. */
7401 static void
7402 note_mem_written (written, writes_ptr)
7403 rtx written;
7404 struct write_data *writes_ptr;
7406 static struct write_data everything = {0, 1, 1, 1};
7408 if (written == 0)
7409 *writes_ptr = everything;
7410 else if (GET_CODE (written) == MEM)
7412 /* Pushing or popping the stack invalidates just the stack pointer. */
7413 rtx addr = XEXP (written, 0);
7414 if ((GET_CODE (addr) == PRE_DEC || GET_CODE (addr) == PRE_INC
7415 || GET_CODE (addr) == POST_DEC || GET_CODE (addr) == POST_INC)
7416 && GET_CODE (XEXP (addr, 0)) == REG
7417 && REGNO (XEXP (addr, 0)) == STACK_POINTER_REGNUM)
7419 writes_ptr->sp = 1;
7420 return;
7422 else if (GET_MODE (written) == BLKmode)
7423 *writes_ptr = everything;
7424 /* (mem (scratch)) means clobber everything. */
7425 else if (GET_CODE (addr) == SCRATCH)
7426 *writes_ptr = everything;
7427 else if (cse_rtx_addr_varies_p (written))
7429 /* A varying address that is a sum indicates an array element,
7430 and that's just as good as a structure element
7431 in implying that we need not invalidate scalar variables.
7432 However, we must allow QImode aliasing of scalars, because the
7433 ANSI C standard allows character pointers to alias anything. */
7434 if (! ((MEM_IN_STRUCT_P (written)
7435 || GET_CODE (XEXP (written, 0)) == PLUS)
7436 && GET_MODE (written) != QImode))
7437 writes_ptr->all = 1;
7438 writes_ptr->nonscalar = 1;
7440 writes_ptr->var = 1;
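/* A rough map from sample stores to the flags set above (the
   addresses are hypothetical):

       (mem (pre_dec (reg sp)))         -> sp only
       (mem:BLK (reg 99))               -> everything
       (mem (plus (reg 99) (reg 100)))  -> nonscalar and var
       (mem (symbol_ref "x"))           -> var only (fixed scalar)     */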
7444 /* Perform invalidation on the basis of everything about an insn
7445 except for invalidating the actual places that are SET in it.
7446 This includes the places CLOBBERed, and anything that might
7447 alias with something that is SET or CLOBBERed.
7449 W points to the writes_memory for this insn, a struct write_data
7450 saying which kinds of memory references must be invalidated.
7451 X is the pattern of the insn. */
7453 static void
7454 invalidate_from_clobbers (w, x)
7455 struct write_data *w;
7456 rtx x;
7458 /* If W->var is not set, W specifies no action.
7459 If W->all is set, this step gets all memory refs
7460 so they can be ignored in the rest of this function. */
7461 if (w->var)
7462 invalidate_memory (w);
7464 if (w->sp)
7466 if (reg_tick[STACK_POINTER_REGNUM] >= 0)
7467 reg_tick[STACK_POINTER_REGNUM]++;
7469 /* This should be *very* rare. */
7470 if (TEST_HARD_REG_BIT (hard_regs_in_table, STACK_POINTER_REGNUM))
7471 invalidate (stack_pointer_rtx, VOIDmode);
7474 if (GET_CODE (x) == CLOBBER)
7476 rtx ref = XEXP (x, 0);
7477 if (ref)
7479 if (GET_CODE (ref) == REG || GET_CODE (ref) == SUBREG
7480 || (GET_CODE (ref) == MEM && ! w->all))
7481 invalidate (ref, VOIDmode);
7482 else if (GET_CODE (ref) == STRICT_LOW_PART
7483 || GET_CODE (ref) == ZERO_EXTRACT)
7484 invalidate (XEXP (ref, 0), GET_MODE (ref));
7487 else if (GET_CODE (x) == PARALLEL)
7489 register int i;
7490 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
7492 register rtx y = XVECEXP (x, 0, i);
7493 if (GET_CODE (y) == CLOBBER)
7495 rtx ref = XEXP (y, 0);
7496 if (ref)
7498 if (GET_CODE (ref) == REG || GET_CODE (ref) == SUBREG
7499 || (GET_CODE (ref) == MEM && !w->all))
7500 invalidate (ref, VOIDmode);
7501 else if (GET_CODE (ref) == STRICT_LOW_PART
7502 || GET_CODE (ref) == ZERO_EXTRACT)
7503 invalidate (XEXP (ref, 0), GET_MODE (ref));
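/* For example (hypothetical pattern), an insn such as

       (parallel [(set (reg 100) ...)
                  (clobber (reg 101))])

   reaches here and only the CLOBBER is acted on: (reg 101) is
   invalidated, while the SET destination is handled by cse_insn.  */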
7510 /* Process X, part of the REG_NOTES of an insn. Look at any REG_EQUAL notes
7511 and replace any registers in them with either an equivalent constant
7512 or the canonical form of the register. If we are inside an address,
7513 only do this if the address remains valid.
7515 OBJECT is 0 except when within a MEM in which case it is the MEM.
7517 Return the replacement for X. */
7519 static rtx
7520 cse_process_notes (x, object)
7521 rtx x;
7522 rtx object;
7524 enum rtx_code code = GET_CODE (x);
7525 char *fmt = GET_RTX_FORMAT (code);
7526 int i;
7528 switch (code)
7530 case CONST_INT:
7531 case CONST:
7532 case SYMBOL_REF:
7533 case LABEL_REF:
7534 case CONST_DOUBLE:
7535 case PC:
7536 case CC0:
7537 case LO_SUM:
7538 return x;
7540 case MEM:
7541 XEXP (x, 0) = cse_process_notes (XEXP (x, 0), x);
7542 return x;
7544 case EXPR_LIST:
7545 case INSN_LIST:
7546 if (REG_NOTE_KIND (x) == REG_EQUAL)
7547 XEXP (x, 0) = cse_process_notes (XEXP (x, 0), NULL_RTX);
7548 if (XEXP (x, 1))
7549 XEXP (x, 1) = cse_process_notes (XEXP (x, 1), NULL_RTX);
7550 return x;
7552 case SIGN_EXTEND:
7553 case ZERO_EXTEND:
7555 rtx new = cse_process_notes (XEXP (x, 0), object);
7556 /* We don't substitute VOIDmode constants into these rtx,
7557 since they would impede folding. */
7558 if (GET_MODE (new) != VOIDmode)
7559 validate_change (object, &XEXP (x, 0), new, 0);
7560 return x;
7563 case REG:
7564 i = reg_qty[REGNO (x)];
7566 /* Return a constant or a constant register. */
7567 if (REGNO_QTY_VALID_P (REGNO (x))
7568 && qty_const[i] != 0
7569 && (CONSTANT_P (qty_const[i])
7570 || GET_CODE (qty_const[i]) == REG))
7572 rtx new = gen_lowpart_if_possible (GET_MODE (x), qty_const[i]);
7573 if (new)
7574 return new;
7577 /* Otherwise, canonicalize this register. */
7578 return canon_reg (x, NULL_RTX);
7581 for (i = 0; i < GET_RTX_LENGTH (code); i++)
7582 if (fmt[i] == 'e')
7583 validate_change (object, &XEXP (x, i),
7584 cse_process_notes (XEXP (x, i), object), 0);
7586 return x;
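/* For example (hypothetical), if (reg 99) is currently known to be
   equivalent to (const_int 8), a note

       (expr_list:REG_EQUAL (reg 99) ...)

   is rewritten here as

       (expr_list:REG_EQUAL (const_int 8) ...).  */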
7589 /* Find common subexpressions between the end test of a loop and the beginning
7590 of the loop. LOOP_START is the CODE_LABEL at the start of a loop.
7592 Often we have a loop where an expression in the exit test is used
7593 in the body of the loop. For example "while (*p) *q++ = *p++;".
7594 Because of the way we duplicate the loop exit test in front of the loop,
7595 however, we don't detect that common subexpression. This will be caught
7596 when global cse is implemented, but this is quite a common case.
7598 This function handles the most common cases of these common expressions.
7599 It is called after we have processed the basic block ending with the
7600 NOTE_INSN_LOOP_END note that ends a loop and the previous JUMP_INSN
7601 jumps to a label used only once. */
7603 static void
7604 cse_around_loop (loop_start)
7605 rtx loop_start;
7607 rtx insn;
7608 int i;
7609 struct table_elt *p;
7611 /* If the jump at the end of the loop doesn't go to the start, we don't
7612 do anything. */
7613 for (insn = PREV_INSN (loop_start);
7614 insn && (GET_CODE (insn) == NOTE && NOTE_LINE_NUMBER (insn) >= 0);
7615 insn = PREV_INSN (insn))
7618 if (insn == 0
7619 || GET_CODE (insn) != NOTE
7620 || NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG)
7621 return;
7623 /* If the last insn of the loop (the end test) was an NE comparison,
7624 we will interpret it as an EQ comparison, since we fell through
7625 the loop. Any equivalences resulting from that comparison are
7626 therefore not valid and must be invalidated. */
7627 if (last_jump_equiv_class)
7628 for (p = last_jump_equiv_class->first_same_value; p;
7629 p = p->next_same_value)
7630 if (GET_CODE (p->exp) == MEM || GET_CODE (p->exp) == REG
7631 || (GET_CODE (p->exp) == SUBREG
7632 && GET_CODE (SUBREG_REG (p->exp)) == REG))
7633 invalidate (p->exp, VOIDmode);
7634 else if (GET_CODE (p->exp) == STRICT_LOW_PART
7635 || GET_CODE (p->exp) == ZERO_EXTRACT)
7636 invalidate (XEXP (p->exp, 0), GET_MODE (p->exp));
7638 /* Process insns starting after LOOP_START until we hit a CALL_INSN or
7639 a CODE_LABEL (we could handle a CALL_INSN, but it isn't worth it).
7641 The only thing we do with SET_DEST is invalidate entries, so we
7642 can safely process each SET in order. It is slightly less efficient
7643 to do so, but we only want to handle the most common cases. */
7645 for (insn = NEXT_INSN (loop_start);
7646 GET_CODE (insn) != CALL_INSN && GET_CODE (insn) != CODE_LABEL
7647 && ! (GET_CODE (insn) == NOTE
7648 && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END);
7649 insn = NEXT_INSN (insn))
7651 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
7652 && (GET_CODE (PATTERN (insn)) == SET
7653 || GET_CODE (PATTERN (insn)) == CLOBBER))
7654 cse_set_around_loop (PATTERN (insn), insn, loop_start);
7655 else if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
7656 && GET_CODE (PATTERN (insn)) == PARALLEL)
7657 for (i = XVECLEN (PATTERN (insn), 0) - 1; i >= 0; i--)
7658 if (GET_CODE (XVECEXP (PATTERN (insn), 0, i)) == SET
7659 || GET_CODE (XVECEXP (PATTERN (insn), 0, i)) == CLOBBER)
7660 cse_set_around_loop (XVECEXP (PATTERN (insn), 0, i), insn,
7661 loop_start);
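/* Schematic of the insn layout this function expects (no real code;
   the loop notes and the backward jump come from loop.c and jump.c):

       NOTE_INSN_LOOP_BEG
       CODE_LABEL                <- LOOP_START
       ... loop body ...
       JUMP_INSN                 <- conditional jump back to LOOP_START
       NOTE_INSN_LOOP_END                                              */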
7665 /* Variable used for communications between the next two routines. */
7667 static struct write_data skipped_writes_memory;
7669 /* Process one SET of an insn that was skipped. We ignore CLOBBERs
7670 since they are done elsewhere. This function is called via note_stores. */
7672 static void
7673 invalidate_skipped_set (dest, set)
7674 rtx dest;
7675 rtx set;
7677 if (GET_CODE (set) == CLOBBER
7678 #ifdef HAVE_cc0
7679 || dest == cc0_rtx
7680 #endif
7681 || dest == pc_rtx)
7682 return;
7684 if (GET_CODE (dest) == MEM)
7685 note_mem_written (dest, &skipped_writes_memory);
7687 /* There are times when an address can appear varying and be a PLUS
7688 during this scan when it would be a fixed address were we to know
7689 the proper equivalences. So promote "nonscalar" to be "all". */
7690 if (skipped_writes_memory.nonscalar)
7691 skipped_writes_memory.all = 1;
7693 if (GET_CODE (dest) == REG || GET_CODE (dest) == SUBREG
7694 || (! skipped_writes_memory.all && ! cse_rtx_addr_varies_p (dest)))
7695 invalidate (dest, VOIDmode);
7696 else if (GET_CODE (dest) == STRICT_LOW_PART
7697 || GET_CODE (dest) == ZERO_EXTRACT)
7698 invalidate (XEXP (dest, 0), GET_MODE (dest));
7701 /* Invalidate all insns from START up to the end of the function or the
7702 next label. This is called when we wish to CSE around a block that is
7703 conditionally executed. */
7705 static void
7706 invalidate_skipped_block (start)
7707 rtx start;
7709 rtx insn;
7710 static struct write_data init = {0, 0, 0, 0};
7711 static struct write_data everything = {0, 1, 1, 1};
7713 for (insn = start; insn && GET_CODE (insn) != CODE_LABEL;
7714 insn = NEXT_INSN (insn))
7716 if (GET_RTX_CLASS (GET_CODE (insn)) != 'i')
7717 continue;
7719 skipped_writes_memory = init;
7721 if (GET_CODE (insn) == CALL_INSN)
7723 invalidate_for_call ();
7724 skipped_writes_memory = everything;
7727 note_stores (PATTERN (insn), invalidate_skipped_set);
7728 invalidate_from_clobbers (&skipped_writes_memory, PATTERN (insn));
7732 /* Used for communication between the following two routines; contains a
7733 value to be checked for modification. */
7735 static rtx cse_check_loop_start_value;
7737 /* If modifying X will modify the value in CSE_CHECK_LOOP_START_VALUE,
7738 indicate that fact by setting CSE_CHECK_LOOP_START_VALUE to 0. */
7740 static void
7741 cse_check_loop_start (x, set)
7742 rtx x;
7743 rtx set;
7745 if (cse_check_loop_start_value == 0
7746 || GET_CODE (x) == CC0 || GET_CODE (x) == PC)
7747 return;
7749 if ((GET_CODE (x) == MEM && GET_CODE (cse_check_loop_start_value) == MEM)
7750 || reg_overlap_mentioned_p (x, cse_check_loop_start_value))
7751 cse_check_loop_start_value = 0;
7754 /* X is a SET or CLOBBER contained in INSN that was found near the start of
7755 a loop that starts with the label at LOOP_START.
7757 If X is a SET, we see if its SET_SRC is currently in our hash table.
7758 If so, we see if it has a value equal to some register used only in the
7759 loop exit code (as marked by jump.c).
7761 If those two conditions are true, we search backwards from the start of
7762 the loop to see if that same value was loaded into a register that still
7763 retains its value at the start of the loop.
7765 If so, we insert an insn after the load to copy the destination of that
7766 load into the equivalent register and (try to) replace our SET_SRC with that
7767 register.
7769 In any event, we invalidate whatever this SET or CLOBBER modifies. */
7771 static void
7772 cse_set_around_loop (x, insn, loop_start)
7773 rtx x;
7774 rtx insn;
7775 rtx loop_start;
7777 struct table_elt *src_elt;
7778 static struct write_data init = {0, 0, 0, 0};
7779 struct write_data writes_memory;
7781 writes_memory = init;
7783 /* If this is a SET, see if we can replace SET_SRC, but ignore SETs that
7784 are setting PC or CC0 or whose SET_SRC is already a register. */
7785 if (GET_CODE (x) == SET
7786 && GET_CODE (SET_DEST (x)) != PC && GET_CODE (SET_DEST (x)) != CC0
7787 && GET_CODE (SET_SRC (x)) != REG)
7789 src_elt = lookup (SET_SRC (x),
7790 HASH (SET_SRC (x), GET_MODE (SET_DEST (x))),
7791 GET_MODE (SET_DEST (x)));
7793 if (src_elt)
7794 for (src_elt = src_elt->first_same_value; src_elt;
7795 src_elt = src_elt->next_same_value)
7796 if (GET_CODE (src_elt->exp) == REG && REG_LOOP_TEST_P (src_elt->exp)
7797 && COST (src_elt->exp) < COST (SET_SRC (x)))
7799 rtx p, set;
7801 /* Look for an insn in front of LOOP_START that sets
7802 something in the desired mode to SET_SRC (x) before we hit
7803 a label or CALL_INSN. */
7805 for (p = prev_nonnote_insn (loop_start);
7806 p && GET_CODE (p) != CALL_INSN
7807 && GET_CODE (p) != CODE_LABEL;
7808 p = prev_nonnote_insn (p))
7809 if ((set = single_set (p)) != 0
7810 && GET_CODE (SET_DEST (set)) == REG
7811 && GET_MODE (SET_DEST (set)) == src_elt->mode
7812 && rtx_equal_p (SET_SRC (set), SET_SRC (x)))
7814 /* We now have to ensure that nothing between P
7815 and LOOP_START modified anything referenced in
7816 SET_SRC (x). We know that nothing within the loop
7817 can modify it, or we would have invalidated it in
7818 the hash table. */
7819 rtx q;
7821 cse_check_loop_start_value = SET_SRC (x);
7822 for (q = p; q != loop_start; q = NEXT_INSN (q))
7823 if (GET_RTX_CLASS (GET_CODE (q)) == 'i')
7824 note_stores (PATTERN (q), cse_check_loop_start);
7826 /* If nothing was changed and we can replace our
7827 SET_SRC, add an insn after P to copy its destination
7828 to what we will be replacing SET_SRC with. */
7829 if (cse_check_loop_start_value
7830 && validate_change (insn, &SET_SRC (x),
7831 src_elt->exp, 0))
7832 emit_insn_after (gen_move_insn (src_elt->exp,
7833 SET_DEST (set)),
7834 p);
7835 break;
7840 /* Now invalidate anything modified by X. */
7841 note_mem_written (SET_DEST (x), &writes_memory);
7843 if (writes_memory.var)
7844 invalidate_memory (&writes_memory);
7846 /* See comment on similar code in cse_insn for explanation of these tests. */
7847 if (GET_CODE (SET_DEST (x)) == REG || GET_CODE (SET_DEST (x)) == SUBREG
7848 || (GET_CODE (SET_DEST (x)) == MEM && ! writes_memory.all
7849 && ! cse_rtx_addr_varies_p (SET_DEST (x))))
7850 invalidate (SET_DEST (x), VOIDmode);
7851 else if (GET_CODE (SET_DEST (x)) == STRICT_LOW_PART
7852 || GET_CODE (SET_DEST (x)) == ZERO_EXTRACT)
7853 invalidate (XEXP (SET_DEST (x), 0), GET_MODE (SET_DEST (x)));
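/* Sketch of the replacement performed above, with made-up registers:

       p (before the loop):   (set (reg 107) (mem (reg 101)))
       insn (in the loop):    (set (reg 105) (mem (reg 101)))

   If (reg 110) is a loop-exit-test register equivalent to that MEM
   and cheaper than it, we emit after p

       (set (reg 110) (reg 107))

   and change the loop insn to (set (reg 105) (reg 110)).  */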
7856 /* Find the end of INSN's basic block and return its range,
7857 the total number of SETs in all the insns of the block, the last insn of the
7858 block, and the branch path.
7860 The branch path indicates which branches should be followed. If a non-zero
7861 path size is specified, the block should be rescanned and a different set
7862 of branches will be taken. The branch path is only used if
7863 FLAG_CSE_FOLLOW_JUMPS or FLAG_CSE_SKIP_BLOCKS is non-zero.
7865 DATA is a pointer to a struct cse_basic_block_data, defined below, that is
7866 used to describe the block. It is filled in with the information about
7867 the current block. The incoming structure's branch path, if any, is used
7868 to construct the output branch path. */
7870 void
7871 cse_end_of_basic_block (insn, data, follow_jumps, after_loop, skip_blocks)
7872 rtx insn;
7873 struct cse_basic_block_data *data;
7874 int follow_jumps;
7875 int after_loop;
7876 int skip_blocks;
7878 rtx p = insn, q;
7879 int nsets = 0;
7880 int low_cuid = INSN_CUID (insn), high_cuid = INSN_CUID (insn);
7881 rtx next = GET_RTX_CLASS (GET_CODE (insn)) == 'i' ? insn : next_real_insn (insn);
7882 int path_size = data->path_size;
7883 int path_entry = 0;
7884 int i;
7886 /* Update the previous branch path, if any. If the last branch was
7887 previously TAKEN, mark it NOT_TAKEN. If it was previously NOT_TAKEN,
7888 shorten the path by one and look at the previous branch. We know that
7889 at least one branch must have been taken if PATH_SIZE is non-zero. */
7890 while (path_size > 0)
7892 if (data->path[path_size - 1].status != NOT_TAKEN)
7894 data->path[path_size - 1].status = NOT_TAKEN;
7895 break;
7897 else
7898 path_size--;
7901 /* Scan to end of this basic block. */
7902 while (p && GET_CODE (p) != CODE_LABEL)
7904 /* Don't cse out the end of a loop. This makes a difference
7905 only for the unusual loops that always execute at least once;
7906 all other loops have labels there so we will stop in any case.
7907 Cse'ing out the end of the loop is dangerous because it
7908 might cause an invariant expression inside the loop
7909 to be reused after the end of the loop. This would make it
7910 hard to move the expression out of the loop in loop.c,
7911 especially if it is one of several equivalent expressions
7912 and loop.c would like to eliminate it.
7914 If we are running after loop.c has finished, we can ignore
7915 the NOTE_INSN_LOOP_END. */
7917 if (! after_loop && GET_CODE (p) == NOTE
7918 && NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_END)
7919 break;
7921 /* Don't cse over a call to setjmp; on some machines (e.g. the VAX)
7922 the regs restored by the longjmp come from
7923 a later time than the setjmp. */
7924 if (GET_CODE (p) == NOTE
7925 && NOTE_LINE_NUMBER (p) == NOTE_INSN_SETJMP)
7926 break;
7928 /* A PARALLEL can have lots of SETs in it,
7929 especially if it is really an ASM_OPERANDS. */
7930 if (GET_RTX_CLASS (GET_CODE (p)) == 'i'
7931 && GET_CODE (PATTERN (p)) == PARALLEL)
7932 nsets += XVECLEN (PATTERN (p), 0);
7933 else if (GET_CODE (p) != NOTE)
7934 nsets += 1;
7936 /* Ignore insns made by CSE; they cannot affect the boundaries of
7937 the basic block. */
7939 if (INSN_UID (p) <= max_uid && INSN_CUID (p) > high_cuid)
7940 high_cuid = INSN_CUID (p);
7941 if (INSN_UID (p) <= max_uid && INSN_CUID (p) < low_cuid)
7942 low_cuid = INSN_CUID (p);
7944 /* See if this insn is in our branch path. If it is and we are to
7945 take it, do so. */
7946 if (path_entry < path_size && data->path[path_entry].branch == p)
7948 if (data->path[path_entry].status != NOT_TAKEN)
7949 p = JUMP_LABEL (p);
7951 /* Point to next entry in path, if any. */
7952 path_entry++;
7955 /* If this is a conditional jump, we can follow it if -fcse-follow-jumps
7956 was specified, we haven't reached our maximum path length, there are
7957 insns following the target of the jump, this is the only use of the
7958 jump label, and the target label is preceded by a BARRIER.
7960 Alternatively, we can follow the jump if it branches around a
7961 block of code and there are no other branches into the block.
7962 In this case invalidate_skipped_block will be called to invalidate any
7963 registers set in the block when following the jump. */
7965 else if ((follow_jumps || skip_blocks) && path_size < PATHLENGTH - 1
7966 && GET_CODE (p) == JUMP_INSN
7967 && GET_CODE (PATTERN (p)) == SET
7968 && GET_CODE (SET_SRC (PATTERN (p))) == IF_THEN_ELSE
7969 && LABEL_NUSES (JUMP_LABEL (p)) == 1
7970 && NEXT_INSN (JUMP_LABEL (p)) != 0)
7972 for (q = PREV_INSN (JUMP_LABEL (p)); q; q = PREV_INSN (q))
7973 if ((GET_CODE (q) != NOTE
7974 || NOTE_LINE_NUMBER (q) == NOTE_INSN_LOOP_END
7975 || NOTE_LINE_NUMBER (q) == NOTE_INSN_SETJMP)
7976 && (GET_CODE (q) != CODE_LABEL || LABEL_NUSES (q) != 0))
7977 break;
7979 /* If we ran into a BARRIER, this code is an extension of the
7980 basic block when the branch is taken. */
7981 if (follow_jumps && q != 0 && GET_CODE (q) == BARRIER)
7983 /* Don't allow ourselves to keep walking around an
7984 always-executed loop. */
7985 if (next_real_insn (q) == next)
7987 p = NEXT_INSN (p);
7988 continue;
7991 /* Similarly, don't put a branch in our path more than once. */
7992 for (i = 0; i < path_entry; i++)
7993 if (data->path[i].branch == p)
7994 break;
7996 if (i != path_entry)
7997 break;
7999 data->path[path_entry].branch = p;
8000 data->path[path_entry++].status = TAKEN;
8002 /* This branch now ends our path. It was possible that we
8003 didn't see this branch the last time around (when the
8004 insn in front of the target was a JUMP_INSN that was
8005 turned into a no-op). */
8006 path_size = path_entry;
8008 p = JUMP_LABEL (p);
8009 /* Mark block so we won't scan it again later. */
8010 PUT_MODE (NEXT_INSN (p), QImode);
8012 /* Detect a branch around a block of code. */
8013 else if (skip_blocks && q != 0 && GET_CODE (q) != CODE_LABEL)
8015 register rtx tmp;
8017 if (next_real_insn (q) == next)
8019 p = NEXT_INSN (p);
8020 continue;
8023 for (i = 0; i < path_entry; i++)
8024 if (data->path[i].branch == p)
8025 break;
8027 if (i != path_entry)
8028 break;
8030 /* This is no_labels_between_p (p, q) with an added check for
8031 reaching the end of a function (in case Q precedes P). */
8032 for (tmp = NEXT_INSN (p); tmp && tmp != q; tmp = NEXT_INSN (tmp))
8033 if (GET_CODE (tmp) == CODE_LABEL)
8034 break;
8036 if (tmp == q)
8038 data->path[path_entry].branch = p;
8039 data->path[path_entry++].status = AROUND;
8041 path_size = path_entry;
8043 p = JUMP_LABEL (p);
8044 /* Mark block so we won't scan it again later. */
8045 PUT_MODE (NEXT_INSN (p), QImode);
8049 p = NEXT_INSN (p);
8052 data->low_cuid = low_cuid;
8053 data->high_cuid = high_cuid;
8054 data->nsets = nsets;
8055 data->last = p;
8057 /* If no jump in the path was taken, set our path length to zero
8058 so a rescan won't be done.
8059 for (i = path_size - 1; i >= 0; i--)
8060 if (data->path[i].status != NOT_TAKEN)
8061 break;
8063 if (i == -1)
8064 data->path_size = 0;
8065 else
8066 data->path_size = path_size;
8068 /* End the current branch path. */
8069 data->path[path_size].branch = 0;
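/* Example of a branch path this function might construct (schematic):

       path[0] = { branch = <jump to L1>, status = TAKEN }
       path[1] = { branch = <jump around block>, status = AROUND }
       path[2] = { branch = 0 }   (end marker)

   On a rescan, the last entry not already NOT_TAKEN is flipped to
   NOT_TAKEN, so a different set of branches is explored (see the
   loop at the top of the function).  */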
8072 /* Perform cse on the instructions of a function.
8073 F is the first instruction.
8074 NREGS is one plus the highest pseudo-reg number used in the function.
8076 AFTER_LOOP is 1 if this is the cse call done after loop optimization
8077 (only if -frerun-cse-after-loop).
8079 Returns 1 if jump_optimize should be redone due to simplifications
8080 in conditional jump instructions. */
8083 cse_main (f, nregs, after_loop, file)
8084 rtx f;
8085 int nregs;
8086 int after_loop;
8087 FILE *file;
8089 struct cse_basic_block_data val;
8090 register rtx insn = f;
8091 register int i;
8093 cse_jumps_altered = 0;
8094 constant_pool_entries_cost = 0;
8095 val.path_size = 0;
8097 init_recog ();
8099 max_reg = nregs;
8101 all_minus_one = (int *) alloca (nregs * sizeof (int));
8102 consec_ints = (int *) alloca (nregs * sizeof (int));
8104 for (i = 0; i < nregs; i++)
8106 all_minus_one[i] = -1;
8107 consec_ints[i] = i;
8110 reg_next_eqv = (int *) alloca (nregs * sizeof (int));
8111 reg_prev_eqv = (int *) alloca (nregs * sizeof (int));
8112 reg_qty = (int *) alloca (nregs * sizeof (int));
8113 reg_in_table = (int *) alloca (nregs * sizeof (int));
8114 reg_tick = (int *) alloca (nregs * sizeof (int));
8116 #ifdef LOAD_EXTEND_OP
8118 /* Allocate scratch rtl here. cse_insn will fill in the memory reference
8119 and change the code and mode as appropriate. */
8120 memory_extend_rtx = gen_rtx (ZERO_EXTEND, VOIDmode, 0);
8121 #endif
8123 /* Discard all the free elements of the previous function
8124 since they are allocated in the temporary obstack. */
8125 bzero ((char *) table, sizeof table);
8126 free_element_chain = 0;
8127 n_elements_made = 0;
8129 /* Find the largest uid. */
8131 max_uid = get_max_uid ();
8132 uid_cuid = (int *) alloca ((max_uid + 1) * sizeof (int));
8133 bzero ((char *) uid_cuid, (max_uid + 1) * sizeof (int));
8135 /* Compute the mapping from uids to cuids.
8136 CUIDs are numbers assigned to insns, like uids,
8137 except that cuids increase monotonically through the code.
8138 Don't assign cuids to line-number NOTEs, so that the distance in cuids
8139 between two insns is not affected by -g. */
8141 for (insn = f, i = 0; insn; insn = NEXT_INSN (insn))
8143 if (GET_CODE (insn) != NOTE
8144 || NOTE_LINE_NUMBER (insn) < 0)
8145 INSN_CUID (insn) = ++i;
8146 else
8147 /* Give a line number note the same cuid as the preceding insn. */
8148 INSN_CUID (insn) = i;
8151 /* Initialize which registers are clobbered by calls. */
8153 CLEAR_HARD_REG_SET (regs_invalidated_by_call);
8155 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
8156 if ((call_used_regs[i]
8157 /* Used to check !fixed_regs[i] here, but that isn't safe;
8158 fixed regs are still call-clobbered, and sched can get
8159 confused if they can "live across calls".
8161 The frame pointer is always preserved across calls. The arg
8162 pointer is preserved if it is fixed. The stack pointer usually is, unless
8163 RETURN_POPS_ARGS, in which case an explicit CLOBBER
8164 will be present. If we are generating PIC code, the PIC offset
8165 table register is preserved across calls. */
8167 && i != STACK_POINTER_REGNUM
8168 && i != FRAME_POINTER_REGNUM
8169 #if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
8170 && i != HARD_FRAME_POINTER_REGNUM
8171 #endif
8172 #if ARG_POINTER_REGNUM != FRAME_POINTER_REGNUM
8173 && ! (i == ARG_POINTER_REGNUM && fixed_regs[i])
8174 #endif
8175 #if defined (PIC_OFFSET_TABLE_REGNUM) && !defined (PIC_OFFSET_TABLE_REG_CALL_CLOBBERED)
8176 && ! (i == PIC_OFFSET_TABLE_REGNUM && flag_pic)
8177 #endif
8179 || global_regs[i])
8180 SET_HARD_REG_BIT (regs_invalidated_by_call, i);
8182 /* Loop over basic blocks.
8183 Compute the maximum number of qty's needed for each basic block
8184 (which is 2 for each SET). */
8185 insn = f;
8186 while (insn)
8188 cse_end_of_basic_block (insn, &val, flag_cse_follow_jumps, after_loop,
8189 flag_cse_skip_blocks);
8191 /* If this basic block was already processed or has no sets, skip it. */
8192 if (val.nsets == 0 || GET_MODE (insn) == QImode)
8194 PUT_MODE (insn, VOIDmode);
8195 insn = (val.last ? NEXT_INSN (val.last) : 0);
8196 val.path_size = 0;
8197 continue;
8200 cse_basic_block_start = val.low_cuid;
8201 cse_basic_block_end = val.high_cuid;
8202 max_qty = val.nsets * 2;
8204 if (file)
8205 fprintf (file, ";; Processing block from %d to %d, %d sets.\n",
8206 INSN_UID (insn), val.last ? INSN_UID (val.last) : 0,
8207 val.nsets);
8209 /* Make MAX_QTY bigger to give us room to optimize
8210 past the end of this basic block, if that should prove useful. */
8211 if (max_qty < 500)
8212 max_qty = 500;
8214 max_qty += max_reg;
8216 /* If this basic block is being extended by following certain jumps,
8217 (see `cse_end_of_basic_block'), we reprocess the code from the start.
8218 Otherwise, we start after this basic block. */
8219 if (val.path_size > 0)
8220 cse_basic_block (insn, val.last, val.path, 0);
8221 else
8223 int old_cse_jumps_altered = cse_jumps_altered;
8224 rtx temp;
8226 /* When cse changes a conditional jump to an unconditional
8227 jump, we want to reprocess the block, since it will give
8228 us a new branch path to investigate. */
8229 cse_jumps_altered = 0;
8230 temp = cse_basic_block (insn, val.last, val.path, ! after_loop);
8231 if (cse_jumps_altered == 0
8232 || (flag_cse_follow_jumps == 0 && flag_cse_skip_blocks == 0))
8233 insn = temp;
8235 cse_jumps_altered |= old_cse_jumps_altered;
8238 #ifdef USE_C_ALLOCA
8239 alloca (0);
8240 #endif
8243 /* Tell refers_to_mem_p that qty_const info is not available. */
8244 qty_const = 0;
8246 if (max_elements_made < n_elements_made)
8247 max_elements_made = n_elements_made;
8249 return cse_jumps_altered;
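#if 0
/* Minimal usage sketch (hypothetical driver code; the real caller in
   toplev.c differs in detail). Runs the pass over a whole function,
   reruns jump optimization if cse simplified any conditional jumps,
   and then removes the stores made dead by cse.  */
static void
run_cse_pass_sketch (dump)
     FILE *dump;
{
  rtx insns = get_insns ();

  if (cse_main (insns, max_reg_num (), 0, dump))
    /* Assumed signature: jump_optimize (f, cross_jump, noop_moves,
       after_regscan).  */
    jump_optimize (insns, 0, 0, 0);

  delete_dead_from_cse (insns, max_reg_num ());
}
#endif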
8252 /* Process a single basic block. FROM and TO are the limits of the basic
8253 block. NEXT_BRANCH points to the branch path when following jumps or
8254 a null path when not following jumps.
8256 AROUND_LOOP is non-zero if we are to try to cse around to the start of a
8257 loop. This is true when we are being called for the last time on a
8258 block and this CSE pass is before loop.c. */
8260 static rtx
8261 cse_basic_block (from, to, next_branch, around_loop)
8262 register rtx from, to;
8263 struct branch_path *next_branch;
8264 int around_loop;
8266 register rtx insn;
8267 int to_usage = 0;
8268 int in_libcall_block = 0;
8270 /* Each of these arrays is undefined before max_reg, so only allocate
8271 the space actually needed and adjust the start below. */
8273 qty_first_reg = (int *) alloca ((max_qty - max_reg) * sizeof (int));
8274 qty_last_reg = (int *) alloca ((max_qty - max_reg) * sizeof (int));
8275 qty_mode = (enum machine_mode *) alloca ((max_qty - max_reg) * sizeof (enum machine_mode));
8276 qty_const = (rtx *) alloca ((max_qty - max_reg) * sizeof (rtx));
8277 qty_const_insn = (rtx *) alloca ((max_qty - max_reg) * sizeof (rtx));
8278 qty_comparison_code
8279 = (enum rtx_code *) alloca ((max_qty - max_reg) * sizeof (enum rtx_code));
8280 qty_comparison_qty = (int *) alloca ((max_qty - max_reg) * sizeof (int));
8281 qty_comparison_const = (rtx *) alloca ((max_qty - max_reg) * sizeof (rtx));
8283 qty_first_reg -= max_reg;
8284 qty_last_reg -= max_reg;
8285 qty_mode -= max_reg;
8286 qty_const -= max_reg;
8287 qty_const_insn -= max_reg;
8288 qty_comparison_code -= max_reg;
8289 qty_comparison_qty -= max_reg;
8290 qty_comparison_const -= max_reg;
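/* Descriptive note: after the adjustments above, e.g.
   qty_first_reg[max_reg] refers to element 0 of the block just
   allocated, so indices in [max_reg, max_qty) hit valid storage
   while no space is wasted on the unused indices below max_reg.  */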
8292 new_basic_block ();
8294 /* TO might be a label. If so, protect it from being deleted. */
8295 if (to != 0 && GET_CODE (to) == CODE_LABEL)
8296 ++LABEL_NUSES (to);
8298 for (insn = from; insn != to; insn = NEXT_INSN (insn))
8300 register enum rtx_code code;
8302 /* See if this is a branch that is part of the path. If so, and it is
8303 to be taken, do so. */
8304 if (next_branch->branch == insn)
8306 enum taken status = next_branch++->status;
8307 if (status != NOT_TAKEN)
8309 if (status == TAKEN)
8310 record_jump_equiv (insn, 1);
8311 else
8312 invalidate_skipped_block (NEXT_INSN (insn));
8314 /* Set the last insn as the jump insn; it doesn't affect cc0.
8315 Then follow this branch. */
8316 #ifdef HAVE_cc0
8317 prev_insn_cc0 = 0;
8318 #endif
8319 prev_insn = insn;
8320 insn = JUMP_LABEL (insn);
8321 continue;
8325 code = GET_CODE (insn);
8326 if (GET_MODE (insn) == QImode)
8327 PUT_MODE (insn, VOIDmode);
8329 if (GET_RTX_CLASS (code) == 'i')
8331 /* Process notes first so we have all notes in canonical forms when
8332 looking for duplicate operations. */
8334 if (REG_NOTES (insn))
8335 REG_NOTES (insn) = cse_process_notes (REG_NOTES (insn), NULL_RTX);
8337 /* Track when we are inside a LIBCALL block. Inside such a block,
8338 we do not want to record destinations. The last insn of a
8339 LIBCALL block is not considered to be part of the block, since
8340 its destination is the result of the block and hence should be
8341 recorded. */
8343 if (find_reg_note (insn, REG_LIBCALL, NULL_RTX))
8344 in_libcall_block = 1;
8345 else if (find_reg_note (insn, REG_RETVAL, NULL_RTX))
8346 in_libcall_block = 0;
8348 cse_insn (insn, in_libcall_block);
8351 /* If INSN is now an unconditional jump, skip to the end of our
8352 basic block by pretending that we just did the last insn in the
8353 basic block. If we are jumping to the end of our block, show
8354 that we can have one usage of TO. */
8356 if (simplejump_p (insn))
8358 if (to == 0)
8359 return 0;
8361 if (JUMP_LABEL (insn) == to)
8362 to_usage = 1;
8364 /* Maybe TO was deleted because the jump is unconditional.
8365 If so, there is nothing left in this basic block. */
8366 /* ??? Perhaps it would be smarter to set TO
8367 to whatever follows this insn,
8368 and pretend the basic block had always ended here. */
8369 if (INSN_DELETED_P (to))
8370 break;
8372 insn = PREV_INSN (to);
8375 /* See if it is ok to keep on going past the label
8376 which used to end our basic block. Remember that we incremented
8377 the count of that label, so we decrement it here. If we made
8378 a jump unconditional, TO_USAGE will be one; in that case, we don't
8379 want to count the use in that jump. */
8381 if (to != 0 && NEXT_INSN (insn) == to
8382 && GET_CODE (to) == CODE_LABEL && --LABEL_NUSES (to) == to_usage)
8384 struct cse_basic_block_data val;
8386 insn = NEXT_INSN (to);
8388 if (LABEL_NUSES (to) == 0)
8389 delete_insn (to);
8391 /* Find the end of the following block. Note that we won't be
8392 following branches in this case. If TO was the last insn
8393 in the function, we are done. Similarly, if we deleted the
8394 insn after TO, it must have been because it was preceded by
8395 a BARRIER. In that case, we are done with this block because it
8396 has no continuation. */
8398 if (insn == 0 || INSN_DELETED_P (insn))
8399 return 0;
8401 to_usage = 0;
8402 val.path_size = 0;
8403 cse_end_of_basic_block (insn, &val, 0, 0, 0);
8405 /* If the tables we allocated have enough space left
8406 to handle all the SETs in the next basic block,
8407 continue through it. Otherwise, return,
8408 and that block will be scanned individually. */
8409 if (val.nsets * 2 + next_qty > max_qty)
8410 break;
8412 cse_basic_block_start = val.low_cuid;
8413 cse_basic_block_end = val.high_cuid;
8414 to = val.last;
8416 /* Prevent TO from being deleted if it is a label. */
8417 if (to != 0 && GET_CODE (to) == CODE_LABEL)
8418 ++LABEL_NUSES (to);
8420 /* Back up so we process the first insn in the extension. */
8421 insn = PREV_INSN (insn);
8425 if (next_qty > max_qty)
8426 abort ();
8428 /* If we are running before loop.c, we stopped on a NOTE_INSN_LOOP_END, and
8429 the previous insn is the only insn that branches to the head of a loop,
8430 we can cse into the loop. Don't do this if we changed the jump
8431 structure of a loop unless we aren't going to be following jumps. */
8433 if ((cse_jumps_altered == 0
8434 || (flag_cse_follow_jumps == 0 && flag_cse_skip_blocks == 0))
8435 && around_loop && to != 0
8436 && GET_CODE (to) == NOTE && NOTE_LINE_NUMBER (to) == NOTE_INSN_LOOP_END
8437 && GET_CODE (PREV_INSN (to)) == JUMP_INSN
8438 && JUMP_LABEL (PREV_INSN (to)) != 0
8439 && LABEL_NUSES (JUMP_LABEL (PREV_INSN (to))) == 1)
8440 cse_around_loop (JUMP_LABEL (PREV_INSN (to)));
8442 return to ? NEXT_INSN (to) : 0;
8445 /* Count the number of times registers are used (not set) in X.
8446 COUNTS is an array in which we accumulate the count, INCR is how much
8447 we count each register usage.
8449 Don't count a usage of DEST, which is the SET_DEST of a SET which
8450 contains X in its SET_SRC. This is because such a SET does not
8451 modify the liveness of DEST. */
8453 static void
8454 count_reg_usage (x, counts, dest, incr)
8455 rtx x;
8456 int *counts;
8457 rtx dest;
8458 int incr;
8460 enum rtx_code code;
8461 char *fmt;
8462 int i, j;
8464 if (x == 0)
8465 return;
8467 switch (code = GET_CODE (x))
8469 case REG:
8470 if (x != dest)
8471 counts[REGNO (x)] += incr;
8472 return;
8474 case PC:
8475 case CC0:
8476 case CONST:
8477 case CONST_INT:
8478 case CONST_DOUBLE:
8479 case SYMBOL_REF:
8480 case LABEL_REF:
8481 case CLOBBER:
8482 return;
8484 case SET:
8485 /* Unless we are setting a REG, count everything in SET_DEST. */
8486 if (GET_CODE (SET_DEST (x)) != REG)
8487 count_reg_usage (SET_DEST (x), counts, NULL_RTX, incr);
8489 /* If SRC has side-effects, then we can't delete this insn, so the
8490 usage of SET_DEST inside SRC counts.
8492 ??? Strictly-speaking, we might be preserving this insn
8493 because some other SET has side-effects, but that's hard
8494 to do and can't happen now. */
8495 count_reg_usage (SET_SRC (x), counts,
8496 side_effects_p (SET_SRC (x)) ? NULL_RTX : SET_DEST (x),
8497 incr);
8498 return;
8500 case CALL_INSN:
8501 count_reg_usage (CALL_INSN_FUNCTION_USAGE (x), counts, NULL_RTX, incr);
8503 /* ... falls through ... */
8504 case INSN:
8505 case JUMP_INSN:
8506 count_reg_usage (PATTERN (x), counts, NULL_RTX, incr);
8508 /* Things used in a REG_EQUAL note aren't dead since loop.c may try to
8509 use them. */
8511 count_reg_usage (REG_NOTES (x), counts, NULL_RTX, incr);
8512 return;
8514 case EXPR_LIST:
8515 case INSN_LIST:
8516 if (REG_NOTE_KIND (x) == REG_EQUAL
8517 || GET_CODE (XEXP (x,0)) == USE)
8518 count_reg_usage (XEXP (x, 0), counts, NULL_RTX, incr);
8519 count_reg_usage (XEXP (x, 1), counts, NULL_RTX, incr);
8520 return;
8523 fmt = GET_RTX_FORMAT (code);
8524 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
8526 if (fmt[i] == 'e')
8527 count_reg_usage (XEXP (x, i), counts, dest, incr);
8528 else if (fmt[i] == 'E')
8529 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
8530 count_reg_usage (XVECEXP (x, i, j), counts, dest, incr);
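/* Usage note: delete_dead_from_cse below first calls this with
   INCR == 1 over every insn to build the counts, and later with
   INCR == -1 as insns are deleted, so the counts stay exact while
   scanning backwards.  */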
8534 /* Scan all the insns and delete any that are dead; i.e., they store a register
8535 that is never used or they copy a register to itself.
8537 This is used to remove insns made obviously dead by cse. It improves the
8538 heuristics in loop since it won't try to move dead invariants out of loops
8539 or make givs for dead quantities. The remaining passes of the compilation
8540 are also sped up. */
8542 void
8543 delete_dead_from_cse (insns, nreg)
8544 rtx insns;
8545 int nreg;
8547 int *counts = (int *) alloca (nreg * sizeof (int));
8548 rtx insn, prev;
8549 rtx tem;
8550 int i;
8551 int in_libcall = 0;
8553 /* First count the number of times each register is used. */
8554 bzero ((char *) counts, sizeof (int) * nreg);
8555 for (insn = next_real_insn (insns); insn; insn = next_real_insn (insn))
8556 count_reg_usage (insn, counts, NULL_RTX, 1);
8558 /* Go from the last insn to the first and delete insns that only set unused
8559 registers or copy a register to itself. As we delete an insn, remove
8560 usage counts for registers it uses. */
8561 for (insn = prev_real_insn (get_last_insn ()); insn; insn = prev)
8563 int live_insn = 0;
8565 prev = prev_real_insn (insn);
8567 /* Don't delete any insns that are part of a libcall block.
8568 Flow or loop might get confused if we did that. Remember
8569 that we are scanning backwards. */
8570 if (find_reg_note (insn, REG_RETVAL, NULL_RTX))
8571 in_libcall = 1;
8573 if (in_libcall)
8574 live_insn = 1;
8575 else if (GET_CODE (PATTERN (insn)) == SET)
8577 if (GET_CODE (SET_DEST (PATTERN (insn))) == REG
8578 && SET_DEST (PATTERN (insn)) == SET_SRC (PATTERN (insn)))
8581 #ifdef HAVE_cc0
8582 else if (GET_CODE (SET_DEST (PATTERN (insn))) == CC0
8583 && ! side_effects_p (SET_SRC (PATTERN (insn)))
8584 && ((tem = next_nonnote_insn (insn)) == 0
8585 || GET_RTX_CLASS (GET_CODE (tem)) != 'i'
8586 || ! reg_referenced_p (cc0_rtx, PATTERN (tem))))
8588 #endif
8589 else if (GET_CODE (SET_DEST (PATTERN (insn))) != REG
8590 || REGNO (SET_DEST (PATTERN (insn))) < FIRST_PSEUDO_REGISTER
8591 || counts[REGNO (SET_DEST (PATTERN (insn)))] != 0
8592 || side_effects_p (SET_SRC (PATTERN (insn))))
8593 live_insn = 1;
8595 else if (GET_CODE (PATTERN (insn)) == PARALLEL)
8596 for (i = XVECLEN (PATTERN (insn), 0) - 1; i >= 0; i--)
8598 rtx elt = XVECEXP (PATTERN (insn), 0, i);
8600 if (GET_CODE (elt) == SET)
8602 if (GET_CODE (SET_DEST (elt)) == REG
8603 && SET_DEST (elt) == SET_SRC (elt))
8606 #ifdef HAVE_cc0
8607 else if (GET_CODE (SET_DEST (elt)) == CC0
8608 && ! side_effects_p (SET_SRC (elt))
8609 && ((tem = next_nonnote_insn (insn)) == 0
8610 || GET_RTX_CLASS (GET_CODE (tem)) != 'i'
8611 || ! reg_referenced_p (cc0_rtx, PATTERN (tem))))
8613 #endif
8614 else if (GET_CODE (SET_DEST (elt)) != REG
8615 || REGNO (SET_DEST (elt)) < FIRST_PSEUDO_REGISTER
8616 || counts[REGNO (SET_DEST (elt))] != 0
8617 || side_effects_p (SET_SRC (elt)))
8618 live_insn = 1;
8620 else if (GET_CODE (elt) != CLOBBER && GET_CODE (elt) != USE)
8621 live_insn = 1;
8623 else
8624 live_insn = 1;
8626 /* If this is a dead insn, delete it and show registers in it aren't
8627 being used. */
8629 if (! live_insn)
8631 count_reg_usage (insn, counts, NULL_RTX, -1);
8632 delete_insn (insn);
8635 if (find_reg_note (insn, REG_LIBCALL, NULL_RTX))
8636 in_libcall = 0;
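/* Example (hypothetical registers) of an insn this function deletes:
   after cse has replaced all uses of (reg 101), the insn

       (set (reg 101) (plus (reg 99) (const_int 4)))

   has counts[101] == 0 and a side-effect-free source, so LIVE_INSN
   stays zero and the insn is removed, with the count of (reg 99)
   decremented in turn.  */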