/* Common subexpression elimination for GNU compiler.
   Copyright (C) 1987, 88, 89, 92-6, 1997 Free Software Foundation, Inc.

This file is part of GNU CC.

GNU CC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GNU CC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU CC; see the file COPYING.  If not, write to
the Free Software Foundation, 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.  */

#include "config.h"
/* Must precede rtl.h for FFS.  */
#include <stdio.h>

#include "rtl.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "flags.h"
#include "real.h"
#include "insn-config.h"
#include "recog.h"
#include "expr.h"

#include <setjmp.h>

/* The basic idea of common subexpression elimination is to go
   through the code, keeping a record of expressions that would
   have the same value at the current scan point, and replacing
   expressions encountered with the cheapest equivalent expression.

   It is too complicated to keep track of the different possibilities
   when control paths merge; so, at each label, we forget all that is
   known and start fresh.  This can be described as processing each
   basic block separately.  Note, however, that these are not quite
   the same as the basic blocks found by a later pass and used for
   data flow analysis and register packing.  We do not need to start fresh
   after a conditional jump instruction if there is no label there.

   We use two data structures to record the equivalent expressions:
   a hash table for most expressions, and several vectors together
   with "quantity numbers" to record equivalent (pseudo) registers.

   The use of the special data structure for registers is desirable
   because it is faster.  It is possible because register references
   contain a fairly small number, the register number, taken from
   a contiguously allocated series, and two register references are
   identical if they have the same number.  General expressions
   do not have any such thing, so the only way to retrieve the
   information recorded on an expression other than a register
   is to keep it in a hash table.

Registers and "quantity numbers":

   At the start of each basic block, all of the (hardware and pseudo)
   registers used in the function are given distinct quantity
   numbers to indicate their contents.  During scan, when the code
   copies one register into another, we copy the quantity number.
   When a register is loaded in any other way, we allocate a new
   quantity number to describe the value generated by this operation.
   `reg_qty' records what quantity a register is currently thought
   of as containing.

   All real quantity numbers are greater than or equal to `max_reg'.
   If register N has not been assigned a quantity, reg_qty[N] will equal N.

   Quantity numbers below `max_reg' do not exist and none of the `qty_...'
   variables should be referenced with an index below `max_reg'.

   We also maintain a bidirectional chain of registers for each
   quantity number.  `qty_first_reg', `qty_last_reg',
   `reg_next_eqv' and `reg_prev_eqv' hold these chains.

   The first register in a chain is the one whose lifespan is least local.
   Among equals, it is the one that was seen first.
   We replace any equivalent register with that one.

   If two registers have the same quantity number, REG expressions with
   that quantity's `qty_mode' must be in the hash table for both
   registers, and they must be in the same class.

   The converse is not true.  Since hard registers may be referenced in
   any mode, two REG expressions might be equivalent in the hash table
   but not have the same quantity number if the mode of the quantity
   of one of the registers differs from the mode of those expressions.

Constants and quantity numbers:

   When a quantity has a known constant value, that value is stored
   in the appropriate element of qty_const.  This is in addition to
   putting the constant in the hash table as is usual for non-regs.

   Whether a reg or a constant is preferred is determined by the configuration
   macro CONST_COSTS and will often depend on the constant value.  In any
   event, expressions containing constants can be simplified, by fold_rtx.

   When a quantity has a known nearly constant value (such as an address
   of a stack slot), that value is stored in the appropriate element
   of qty_const.

   Integer constants don't have a machine mode.  However, cse
   determines the intended machine mode from the destination
   of the instruction that moves the constant.  The machine mode
   is recorded in the hash table along with the actual RTL
   constant expression so that different modes are kept separate.

Other expressions:

   To record known equivalences among expressions in general
   we use a hash table called `table'.  It has a fixed number of buckets
   that contain chains of `struct table_elt' elements for expressions.
   These chains connect the elements whose expressions have the same
   hash codes.

   Other chains through the same elements connect the elements which
   currently have equivalent values.

   Register references in an expression are canonicalized before hashing
   the expression.  This is done using `reg_qty' and `qty_first_reg'.
   The hash code of a register reference is computed using the quantity
   number, not the register number.

   When the value of an expression changes, it is necessary to remove from the
   hash table not just that expression but all expressions whose values
   could be different as a result.

     1. If the value changing is in memory, except in special cases
     ANYTHING referring to memory could be changed.  That is because
     nobody knows where a pointer does not point.
     The function `invalidate_memory' removes what is necessary.

     The special cases are when the address is constant or is
     a constant plus a fixed register such as the frame pointer
     or a static chain pointer.  When such addresses are stored in,
     we can tell exactly which other such addresses must be invalidated
     due to overlap.  `invalidate' does this.
     All expressions that refer to non-constant
     memory addresses are also invalidated.  `invalidate_memory' does this.

     2. If the value changing is a register, all expressions
     containing references to that register, and only those,
     must be removed.

   Because searching the entire hash table for expressions that contain
   a register is very slow, we try to figure out when it isn't necessary.
   Precisely, this is necessary only when expressions have been
   entered in the hash table using this register, and then the value has
   changed, and then another expression wants to be added to refer to
   the register's new value.  This sequence of circumstances is rare
   within any one basic block.

   The vectors `reg_tick' and `reg_in_table' are used to detect this case.
   reg_tick[i] is incremented whenever a value is stored in register i.
   reg_in_table[i] holds -1 if no references to register i have been
   entered in the table; otherwise, it contains the value reg_tick[i] had
   when the references were entered.  If we want to enter a reference
   and reg_in_table[i] != reg_tick[i], we must scan and remove old references.
   Until we want to enter a new entry, the mere fact that the two vectors
   don't match causes the entries to be ignored if anyone tries to match them.

   Registers themselves are entered in the hash table as well as in
   the equivalent-register chains.  However, the vectors `reg_tick'
   and `reg_in_table' do not apply to expressions which are simple
   register references.  These expressions are removed from the table
   immediately when they become invalid, and this can be done even if
   we do not immediately search for all the expressions that refer to
   the register.

   A CLOBBER rtx in an instruction invalidates its operand for further
   reuse.  A CLOBBER or SET rtx whose operand is a MEM:BLK
   invalidates everything that resides in memory.

Related expressions:

   Constant expressions that differ only by an additive integer
   are called related.  When a constant expression is put in
   the table, the related expression with no constant term
   is also entered.  These are made to point at each other
   so that it is possible to find out if there exists any
   register equivalent to an expression related to a given expression.  */

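/* To make the quantity-number scheme above concrete, here is a minimal
   standalone sketch (not part of this pass; MAX_REGS, toy_copy and
   toy_set are invented for illustration).  Copying a register propagates
   its quantity number, while any other store allocates a fresh one,
   exactly as described above for `reg_qty'.  */
#if 0
#include <stdio.h>

#define MAX_REGS 8

static int toy_reg_qty[MAX_REGS];
static int toy_next_qty = MAX_REGS;	/* Real qty numbers start at max_reg.  */

/* Record "rDST = rSRC": DST now holds the same quantity as SRC.  */
static void toy_copy (dst, src) int dst, src;
{ toy_reg_qty[dst] = toy_reg_qty[src]; }

/* Record any other store into DST: give it a brand-new quantity.  */
static void toy_set (dst) int dst;
{ toy_reg_qty[dst] = toy_next_qty++; }

int
main ()
{
  int i;
  for (i = 0; i < MAX_REGS; i++)
    toy_reg_qty[i] = i;			/* Each reg starts as its own qty.  */

  toy_set (1);				/* r1 = <something>       */
  toy_copy (2, 1);			/* r2 = r1                */
  toy_set (3);				/* r3 = <something else>  */

  /* r1 and r2 now share a quantity; r3 does not.  */
  printf ("q(r1)=%d q(r2)=%d q(r3)=%d\n",
	  toy_reg_qty[1], toy_reg_qty[2], toy_reg_qty[3]);
  return 0;
}
#endif
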
/* One plus largest register number used in this function.  */

static int max_reg;

/* Length of vectors indexed by quantity number.
   We know in advance we will not need a quantity number this big.  */

static int max_qty;

/* Next quantity number to be allocated.
   This is 1 + the largest number needed so far.  */

static int next_qty;

/* Indexed by quantity number, gives the first (or last) register
   in the chain of registers that currently contain this quantity.  */

static int *qty_first_reg;
static int *qty_last_reg;

/* Indexed by quantity number, gives the mode of the quantity.  */

static enum machine_mode *qty_mode;

/* Indexed by quantity number, gives the rtx of the constant value of the
   quantity, or zero if it does not have a known value.
   A sum of the frame pointer (or arg pointer) plus a constant
   can also be entered here.  */

static rtx *qty_const;

/* Indexed by qty number, gives the insn that stored the constant value
   recorded in `qty_const'.  */

static rtx *qty_const_insn;

/* The next three variables are used to track when a comparison between a
   quantity and some constant or register has been passed.  In that case, we
   know the results of the comparison in case we see it again.  These variables
   record a comparison that is known to be true.  */

/* Indexed by qty number, gives the rtx code of a comparison with a known
   result involving this quantity.  If none, it is UNKNOWN.  */
static enum rtx_code *qty_comparison_code;

/* Indexed by qty number, gives the constant being compared against in a
   comparison of known result.  If no such comparison, it is undefined.
   If the comparison is not with a constant, it is zero.  */

static rtx *qty_comparison_const;

/* Indexed by qty number, gives the quantity being compared against in a
   comparison of known result.  If no such comparison, it is undefined.
   If the comparison is not with a register, it is -1.  */

static int *qty_comparison_qty;

#ifdef HAVE_cc0
/* For machines that have a CC0, we do not record its value in the hash
   table since its use is guaranteed to be the insn immediately following
   its definition and any other insn is presumed to invalidate it.

   Instead, we store below the value last assigned to CC0.  If it should
   happen to be a constant, it is stored in preference to the actual
   assigned value.  In case it is a constant, we store the mode in which
   the constant should be interpreted.  */

static rtx prev_insn_cc0;
static enum machine_mode prev_insn_cc0_mode;
#endif

/* Previous actual insn.  0 if at first insn of basic block.  */

static rtx prev_insn;

/* Insn being scanned.  */

static rtx this_insn;

/* Indexed by register number, gives the quantity number
   of the register's current contents.  */

static int *reg_qty;

/* Indexed by register number, gives the number of the next (or
   previous) register in the chain of registers sharing the same
   value.

   Or -1 if this register is at the end of the chain.

   If reg_qty[N] == N, reg_next_eqv[N] is undefined.  */

static int *reg_next_eqv;
static int *reg_prev_eqv;

/* Indexed by register number, gives the number of times
   that register has been altered in the current basic block.  */

static int *reg_tick;

/* Indexed by register number, gives the reg_tick value at which
   rtx's containing this register are valid in the hash table.
   If this does not equal the current reg_tick value, such expressions
   existing in the hash table are invalid.
   If this is -1, no expressions containing this register have been
   entered in the table.  */

static int *reg_in_table;

/* A HARD_REG_SET containing all the hard registers for which there is
   currently a REG expression in the hash table.  Note the difference
   from the above variables, which indicate if the REG is mentioned in some
   expression in the table.  */

static HARD_REG_SET hard_regs_in_table;

/* A HARD_REG_SET containing all the hard registers that are invalidated
   by a CALL_INSN.  */

static HARD_REG_SET regs_invalidated_by_call;

/* Two vectors of ints:
   one containing max_reg elements, each set to -1; the other containing
   max_reg + 500 (an approximation of max_qty) elements, where element i
   contains i.  These are used to initialize various other vectors fast.  */

static int *all_minus_one;
static int *consec_ints;

/* CUID of insn that starts the basic block currently being cse-processed.  */

static int cse_basic_block_start;

/* CUID of insn that ends the basic block currently being cse-processed.  */

static int cse_basic_block_end;

/* Vector mapping INSN_UIDs to cuids.
   The cuids are like uids but increase monotonically always.
   We use them to see whether a reg is used outside a given basic block.  */

static int *uid_cuid;

/* Highest UID in UID_CUID.  */
static int max_uid;

/* Get the cuid of an insn.  */

#define INSN_CUID(INSN) (uid_cuid[INSN_UID (INSN)])

/* Nonzero if cse has altered conditional jump insns
   in such a way that jump optimization should be redone.  */

static int cse_jumps_altered;

/* Nonzero if we put a LABEL_REF into the hash table.  Since we may have put
   it into an INSN without a REG_LABEL, we have to rerun jump after CSE
   to put in the note.  */
static int recorded_label_ref;

/* canon_hash stores 1 in do_not_record
   if it notices a reference to CC0, PC, or some other volatile
   subexpression.  */

static int do_not_record;

#ifdef LOAD_EXTEND_OP

/* Scratch rtl used when looking for load-extended copy of a MEM.  */
static rtx memory_extend_rtx;
#endif

/* canon_hash stores 1 in hash_arg_in_memory
   if it notices a reference to memory within the expression being hashed.  */

static int hash_arg_in_memory;

/* canon_hash stores 1 in hash_arg_in_struct
   if it notices a reference to memory that's part of a structure.  */

static int hash_arg_in_struct;

/* The hash table contains buckets which are chains of `struct table_elt's,
   each recording one expression's information.
   That expression is in the `exp' field.

   Those elements with the same hash code are chained in both directions
   through the `next_same_hash' and `prev_same_hash' fields.

   Each set of expressions with equivalent values
   is on a two-way chain through the `next_same_value'
   and `prev_same_value' fields, and all point with
   the `first_same_value' field at the first element in
   that chain.  The chain is in order of increasing cost.
   Each element's cost value is in its `cost' field.

   The `in_memory' field is nonzero for elements that
   involve any reference to memory.  These elements are removed
   whenever a write is done to an unidentified location in memory.
   To be safe, we assume that a memory address is unidentified unless
   the address is either a symbol constant or a constant plus
   the frame pointer or argument pointer.

   The `in_struct' field is nonzero for elements that
   involve any reference to memory inside a structure or array.

   The `related_value' field is used to connect related expressions
   (that differ by adding an integer).
   The related expressions are chained in a circular fashion.
   `related_value' is zero for expressions for which this
   chain is not useful.

   The `cost' field stores the cost of this element's expression.

   The `is_const' flag is set if the element is a constant (including
   a fixed address).

   The `flag' field is used as a temporary during some search routines.

   The `mode' field is usually the same as GET_MODE (`exp'), but
   if `exp' is a CONST_INT and has no machine mode then the `mode'
   field is the mode it was being used as.  Each constant is
   recorded separately for each mode it is used with.  */

struct table_elt
{
  rtx exp;
  struct table_elt *next_same_hash;
  struct table_elt *prev_same_hash;
  struct table_elt *next_same_value;
  struct table_elt *prev_same_value;
  struct table_elt *first_same_value;
  struct table_elt *related_value;
  int cost;
  enum machine_mode mode;
  char in_memory;
  char in_struct;
  char is_const;
  char flag;
};

/* We don't want a lot of buckets, because we rarely have very many
   things stored in the hash table, and a lot of buckets slows
   down a lot of loops that happen frequently.  */
#define NBUCKETS 31

/* Compute hash code of X in mode M.  Special-case the case where X is a
   pseudo register (hard registers may require `do_not_record' to be set).  */

#define HASH(X, M)	\
 (GET_CODE (X) == REG && REGNO (X) >= FIRST_PSEUDO_REGISTER	\
  ? (((unsigned) REG << 7) + (unsigned) reg_qty[REGNO (X)]) % NBUCKETS	\
  : canon_hash (X, M) % NBUCKETS)

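/* For example (a sketch, not part of the pass; R100 and R101 are
   hypothetical pseudo-register rtx values): after a copy "r101 = r100"
   has given both pseudos the same quantity number, HASH sends both REG
   expressions to the same bucket even though their register numbers
   differ, because the hash is computed from reg_qty, not from REGNO.  */
#if 0
  unsigned h0 = HASH (r100, SImode);
  unsigned h1 = HASH (r101, SImode);
  /* h0 == h1 whenever reg_qty[100] == reg_qty[101].  */
#endif
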
/* Determine whether register number N is considered a fixed register for CSE.
   It is desirable to replace other regs with fixed regs, to reduce need for
   non-fixed hard regs.
   A reg wins if it is either the frame pointer or designated as fixed,
   but not if it is an overlapping register.  */
#ifdef OVERLAPPING_REGNO_P
#define FIXED_REGNO_P(N)  \
  (((N) == FRAME_POINTER_REGNUM || (N) == HARD_FRAME_POINTER_REGNUM	\
    || fixed_regs[N] || global_regs[N])	\
   && ! OVERLAPPING_REGNO_P ((N)))
#else
#define FIXED_REGNO_P(N)  \
  ((N) == FRAME_POINTER_REGNUM || (N) == HARD_FRAME_POINTER_REGNUM	\
   || fixed_regs[N] || global_regs[N])
#endif

/* Compute cost of X, as stored in the `cost' field of a table_elt.  Fixed
   hard registers and pointers into the frame are the cheapest with a cost
   of 0.  Next come pseudos with a cost of one and other hard registers with
   a cost of 2.  Aside from these special cases, call `rtx_cost'.  */

#define CHEAP_REGNO(N) \
  ((N) == FRAME_POINTER_REGNUM || (N) == HARD_FRAME_POINTER_REGNUM	\
   || (N) == STACK_POINTER_REGNUM || (N) == ARG_POINTER_REGNUM		\
   || ((N) >= FIRST_VIRTUAL_REGISTER && (N) <= LAST_VIRTUAL_REGISTER)	\
   || ((N) < FIRST_PSEUDO_REGISTER					\
       && FIXED_REGNO_P (N) && REGNO_REG_CLASS (N) != NO_REGS))

/* A register is cheap if it is a user variable assigned to the register
   or if its register number always corresponds to a cheap register.  */

#define CHEAP_REG(N) \
  ((REG_USERVAR_P (N) && REGNO (N) < FIRST_PSEUDO_REGISTER)	\
   || CHEAP_REGNO (REGNO (N)))

#define COST(X)						\
  (GET_CODE (X) == REG					\
   ? (CHEAP_REG (X) ? 0					\
      : REGNO (X) >= FIRST_PSEUDO_REGISTER ? 1		\
      : 2)						\
   : notreg_cost(X))

/* Determine if the quantity number for register X represents a valid index
   into the `qty_...' variables.  */

#define REGNO_QTY_VALID_P(N) (reg_qty[N] != (N))

static struct table_elt *table[NBUCKETS];

/* Chain of `struct table_elt's made so far for this function
   but currently removed from the table.  */

static struct table_elt *free_element_chain;

/* Number of `struct table_elt' structures made so far for this function.  */

static int n_elements_made;

/* Maximum value `n_elements_made' has had so far in this compilation
   for functions previously processed.  */

static int max_elements_made;

/* Surviving equivalence class when two equivalence classes are merged
   by recording the effects of a jump in the last insn.  Zero if the
   last insn was not a conditional jump.  */

static struct table_elt *last_jump_equiv_class;

/* Set to the cost of a constant pool reference if one was found for a
   symbolic constant.  If this was found, it means we should try to
   convert constants into constant pool entries if they don't fit in
   the insn.  */

static int constant_pool_entries_cost;

/* Bits describing what kind of values in memory must be invalidated
   for a particular instruction.  If all of these bits are zero,
   no memory refs need to be invalidated.  Each bit is more powerful
   than the preceding ones, and if a bit is set then the preceding
   bits are also set.

   Here is how the bits are set:
   Pushing onto the stack invalidates only the stack pointer,
   writing at a fixed address invalidates only variable addresses,
   writing in a structure element at variable address
     invalidates all but scalar variables,
   and writing in anything else at variable address invalidates everything.  */

struct write_data
{
  int sp : 1;			/* Invalidate stack pointer.  */
  int var : 1;			/* Invalidate variable addresses.  */
  int nonscalar : 1;		/* Invalidate all but scalar variables.  */
  int all : 1;			/* Invalidate all memory refs.  */
};

/* Define maximum length of a branch path.  */

#define PATHLENGTH	10

/* This data describes a block that will be processed by cse_basic_block.  */

struct cse_basic_block_data {
  /* Lowest CUID value of insns in block.  */
  int low_cuid;
  /* Highest CUID value of insns in block.  */
  int high_cuid;
  /* Total number of SETs in block.  */
  int nsets;
  /* Last insn in the block.  */
  rtx last;
  /* Size of current branch path, if any.  */
  int path_size;
  /* Current branch path, indicating which branches will be taken.  */
  struct branch_path {
    /* The branch insn.  */
    rtx branch;
    /* Whether it should be taken or not.  AROUND is the same as taken
       except that it is used when the destination label is not preceded
       by a BARRIER.  */
    enum taken {TAKEN, NOT_TAKEN, AROUND} status;
  } path[PATHLENGTH];
};

/* Nonzero if X has the form (PLUS frame-pointer integer).  We check for
   virtual regs here because the simplify_*_operation routines are called
   by integrate.c, which is called before virtual register instantiation.  */

#define FIXED_BASE_PLUS_P(X)					\
  ((X) == frame_pointer_rtx || (X) == hard_frame_pointer_rtx	\
   || (X) == arg_pointer_rtx					\
   || (X) == virtual_stack_vars_rtx				\
   || (X) == virtual_incoming_args_rtx				\
   || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
       && (XEXP (X, 0) == frame_pointer_rtx			\
	   || XEXP (X, 0) == hard_frame_pointer_rtx		\
	   || XEXP (X, 0) == arg_pointer_rtx			\
	   || XEXP (X, 0) == virtual_stack_vars_rtx		\
	   || XEXP (X, 0) == virtual_incoming_args_rtx))	\
   || GET_CODE (X) == ADDRESSOF)

/* Similar, but also allows reference to the stack pointer.

   This used to include FIXED_BASE_PLUS_P, however, we can't assume that
   arg_pointer_rtx by itself is nonzero, because on at least one machine,
   the i960, the arg pointer is zero when it is unused.  */

#define NONZERO_BASE_PLUS_P(X)					\
  ((X) == frame_pointer_rtx || (X) == hard_frame_pointer_rtx	\
   || (X) == virtual_stack_vars_rtx				\
   || (X) == virtual_incoming_args_rtx				\
   || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
       && (XEXP (X, 0) == frame_pointer_rtx			\
	   || XEXP (X, 0) == hard_frame_pointer_rtx		\
	   || XEXP (X, 0) == arg_pointer_rtx			\
	   || XEXP (X, 0) == virtual_stack_vars_rtx		\
	   || XEXP (X, 0) == virtual_incoming_args_rtx))	\
   || (X) == stack_pointer_rtx					\
   || (X) == virtual_stack_dynamic_rtx				\
   || (X) == virtual_outgoing_args_rtx				\
   || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
       && (XEXP (X, 0) == stack_pointer_rtx			\
	   || XEXP (X, 0) == virtual_stack_dynamic_rtx		\
	   || XEXP (X, 0) == virtual_outgoing_args_rtx))	\
   || GET_CODE (X) == ADDRESSOF)

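/* A concrete illustration of the two predicates (a sketch; ADDR is
   hypothetical, and plus_constant builds a (plus base const_int) rtx):  */
#if 0
  rtx addr = plus_constant (frame_pointer_rtx, 8);
  /* FIXED_BASE_PLUS_P (addr) and NONZERO_BASE_PLUS_P (addr) are both
     true for this (plus fp 8).  An address built from the stack pointer,
     plus_constant (stack_pointer_rtx, 8), satisfies NONZERO_BASE_PLUS_P
     only, since the stack pointer is not a fixed base.  */
#endif
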
static int notreg_cost PROTO((rtx));
static void new_basic_block PROTO((void));
static void make_new_qty PROTO((int));
static void make_regs_eqv PROTO((int, int));
static void delete_reg_equiv PROTO((int));
static int mention_regs PROTO((rtx));
static int insert_regs PROTO((rtx, struct table_elt *, int));
static void free_element PROTO((struct table_elt *));
static void remove_from_table PROTO((struct table_elt *, unsigned));
static struct table_elt *get_element PROTO((void));
static struct table_elt *lookup PROTO((rtx, unsigned, enum machine_mode)),
       *lookup_for_remove PROTO((rtx, unsigned, enum machine_mode));
static rtx lookup_as_function PROTO((rtx, enum rtx_code));
static struct table_elt *insert PROTO((rtx, struct table_elt *, unsigned,
				       enum machine_mode));
static void merge_equiv_classes PROTO((struct table_elt *,
				       struct table_elt *));
static void invalidate PROTO((rtx, enum machine_mode));
static void remove_invalid_refs PROTO((int));
static void rehash_using_reg PROTO((rtx));
static void invalidate_memory PROTO((struct write_data *));
static void invalidate_for_call PROTO((void));
static rtx use_related_value PROTO((rtx, struct table_elt *));
static unsigned canon_hash PROTO((rtx, enum machine_mode));
static unsigned safe_hash PROTO((rtx, enum machine_mode));
static int exp_equiv_p PROTO((rtx, rtx, int, int));
static void set_nonvarying_address_components PROTO((rtx, int, rtx *,
						     HOST_WIDE_INT *,
						     HOST_WIDE_INT *));
static int refers_to_p PROTO((rtx, rtx));
static int refers_to_mem_p PROTO((rtx, rtx, HOST_WIDE_INT,
				  HOST_WIDE_INT));
static int cse_rtx_addr_varies_p PROTO((rtx));
static rtx canon_reg PROTO((rtx, rtx));
static void find_best_addr PROTO((rtx, rtx *));
static enum rtx_code find_comparison_args PROTO((enum rtx_code, rtx *, rtx *,
						 enum machine_mode *,
						 enum machine_mode *));
static rtx cse_gen_binary PROTO((enum rtx_code, enum machine_mode,
				 rtx, rtx));
static rtx simplify_plus_minus PROTO((enum rtx_code, enum machine_mode,
				      rtx, rtx));
static rtx fold_rtx PROTO((rtx, rtx));
static rtx equiv_constant PROTO((rtx));
static void record_jump_equiv PROTO((rtx, int));
static void record_jump_cond PROTO((enum rtx_code, enum machine_mode,
				    rtx, rtx, int));
static void cse_insn PROTO((rtx, int));
static void note_mem_written PROTO((rtx, struct write_data *));
static void invalidate_from_clobbers PROTO((struct write_data *, rtx));
static rtx cse_process_notes PROTO((rtx, rtx));
static void cse_around_loop PROTO((rtx));
static void invalidate_skipped_set PROTO((rtx, rtx));
static void invalidate_skipped_block PROTO((rtx));
static void cse_check_loop_start PROTO((rtx, rtx));
static void cse_set_around_loop PROTO((rtx, rtx, rtx));
static rtx cse_basic_block PROTO((rtx, rtx, struct branch_path *, int));
static void count_reg_usage PROTO((rtx, int *, rtx, int));

extern int rtx_equal_function_value_matters;

/* Return an estimate of the cost of computing rtx X.
   One use is in cse, to decide which expression to keep in the hash table.
   Another is in rtl generation, to pick the cheapest way to multiply.
   Other uses like the latter are expected in the future.  */

/* Internal function, to compute cost when X is not a register; called
   from COST macro to keep it simple.  */

static int
notreg_cost (x)
     rtx x;
{
  return ((GET_CODE (x) == SUBREG
	   && GET_CODE (SUBREG_REG (x)) == REG
	   && GET_MODE_CLASS (GET_MODE (x)) == MODE_INT
	   && GET_MODE_CLASS (GET_MODE (SUBREG_REG (x))) == MODE_INT
	   && (GET_MODE_SIZE (GET_MODE (x))
	       < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x))))
	   && subreg_lowpart_p (x)
	   && TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (GET_MODE (x)),
				     GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (x)))))
	  ? (CHEAP_REG (SUBREG_REG (x)) ? 0
	     : (REGNO (SUBREG_REG (x)) >= FIRST_PSEUDO_REGISTER ? 1
		: 2))
	  : rtx_cost (x, SET) * 2);
}

/* Return the right cost to give to an operation
   to make the cost of the corresponding register-to-register instruction
   N times that of a fast register-to-register instruction.  */

#define COSTS_N_INSNS(N) ((N) * 4 - 2)

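/* A quick check of that arithmetic (a sketch relying on the macro above
   and <assert.h>): a fast register-to-register instruction (N == 1)
   costs 1 * 4 - 2 == 2, matching the default `total = 2' in rtx_cost
   below, while the multiply and divide defaults below come out to
   COSTS_N_INSNS (5) == 18 and COSTS_N_INSNS (7) == 26.  */
#if 0
  assert (COSTS_N_INSNS (1) == 2);
  assert (COSTS_N_INSNS (5) == 18);
  assert (COSTS_N_INSNS (7) == 26);
#endif
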
int
rtx_cost (x, outer_code)
     rtx x;
     enum rtx_code outer_code;
{
  register int i, j;
  register enum rtx_code code;
  register char *fmt;
  register int total;

  if (x == 0)
    return 0;

  /* Compute the default costs of certain things.
     Note that RTX_COSTS can override the defaults.  */

  code = GET_CODE (x);
  switch (code)
    {
    case MULT:
      /* Count multiplication by 2**n as a shift,
	 because if we are considering it, we would output it as a shift.  */
      if (GET_CODE (XEXP (x, 1)) == CONST_INT
	  && exact_log2 (INTVAL (XEXP (x, 1))) >= 0)
	total = 2;
      else
	total = COSTS_N_INSNS (5);
      break;
    case DIV:
    case UDIV:
    case MOD:
    case UMOD:
      total = COSTS_N_INSNS (7);
      break;
    case USE:
      /* Used in loop.c and combine.c as a marker.  */
      total = 0;
      break;
    case ASM_OPERANDS:
      /* We don't want these to be used in substitutions because
	 we have no way of validating the resulting insn.  So assign
	 anything containing an ASM_OPERANDS a very high cost.  */
      total = 1000;
      break;
    default:
      total = 2;
    }

  switch (code)
    {
    case REG:
      return ! CHEAP_REG (x);

    case SUBREG:
      /* If we can't tie these modes, make this expensive.  The larger
	 the mode, the more expensive it is.  */
      if (! MODES_TIEABLE_P (GET_MODE (x), GET_MODE (SUBREG_REG (x))))
	return COSTS_N_INSNS (2
			      + GET_MODE_SIZE (GET_MODE (x)) / UNITS_PER_WORD);
      return 2;
#ifdef RTX_COSTS
      RTX_COSTS (x, code, outer_code);
#endif
      CONST_COSTS (x, code, outer_code);
    }

  /* Sum the costs of the sub-rtx's, plus cost of this operation,
     which is already in total.  */

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      total += rtx_cost (XEXP (x, i), code);
    else if (fmt[i] == 'E')
      for (j = 0; j < XVECLEN (x, i); j++)
	total += rtx_cost (XVECEXP (x, i, j), code);

  return total;
}

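/* How the cost machinery is typically consumed (a sketch; OLD and NEW
   are hypothetical rtx values known to be equivalent): keep whichever
   expression is cheaper.  This is the same comparison `insert' makes
   below when ordering a value class cheapest-first.  */
#if 0
static rtx
cheaper_rtx (old, new)
     rtx old, new;
{
  return COST (new) < COST (old) ? new : old;
}
#endif
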
/* Clear the hash table and initialize each register with its own quantity,
   for a new basic block.  */

static void
new_basic_block ()
{
  register int i;

  next_qty = max_reg;

  bzero ((char *) reg_tick, max_reg * sizeof (int));

  bcopy ((char *) all_minus_one, (char *) reg_in_table,
	 max_reg * sizeof (int));
  bcopy ((char *) consec_ints, (char *) reg_qty, max_reg * sizeof (int));
  CLEAR_HARD_REG_SET (hard_regs_in_table);

  /* The per-quantity values used to be initialized here, but it is
     much faster to initialize each as it is made in `make_new_qty'.  */

  for (i = 0; i < NBUCKETS; i++)
    {
      register struct table_elt *this, *next;
      for (this = table[i]; this; this = next)
	{
	  next = this->next_same_hash;
	  free_element (this);
	}
    }

  bzero ((char *) table, sizeof table);

  prev_insn = 0;

#ifdef HAVE_cc0
  prev_insn_cc0 = 0;
#endif
}

/* Say that register REG contains a quantity not in any register before
   and initialize that quantity.  */

static void
make_new_qty (reg)
     register int reg;
{
  register int q;

  if (next_qty >= max_qty)
    abort ();

  q = reg_qty[reg] = next_qty++;
  qty_first_reg[q] = reg;
  qty_last_reg[q] = reg;
  qty_const[q] = qty_const_insn[q] = 0;
  qty_comparison_code[q] = UNKNOWN;

  reg_next_eqv[reg] = reg_prev_eqv[reg] = -1;
}

/* Make reg NEW equivalent to reg OLD.
   OLD is not changing; NEW is.  */

static void
make_regs_eqv (new, old)
     register int new, old;
{
  register int lastr, firstr;
  register int q = reg_qty[old];

  /* Nothing should become eqv until it has a "non-invalid" qty number.  */
  if (! REGNO_QTY_VALID_P (old))
    abort ();

  reg_qty[new] = q;
  firstr = qty_first_reg[q];
  lastr = qty_last_reg[q];

  /* Prefer fixed hard registers to anything.  Prefer pseudo regs to other
     hard regs.  Among pseudos, if NEW will live longer than any other reg
     of the same qty, and that is beyond the current basic block,
     make it the new canonical replacement for this qty.  */
  if (! (firstr < FIRST_PSEUDO_REGISTER && FIXED_REGNO_P (firstr))
      /* Certain fixed registers might be of the class NO_REGS.  This means
	 that not only can they not be allocated by the compiler, but
	 they cannot be used in substitutions or canonicalizations
	 either.  */
      && (new >= FIRST_PSEUDO_REGISTER || REGNO_REG_CLASS (new) != NO_REGS)
      && ((new < FIRST_PSEUDO_REGISTER && FIXED_REGNO_P (new))
	  || (new >= FIRST_PSEUDO_REGISTER
	      && (firstr < FIRST_PSEUDO_REGISTER
		  || ((uid_cuid[REGNO_LAST_UID (new)] > cse_basic_block_end
		       || (uid_cuid[REGNO_FIRST_UID (new)]
			   < cse_basic_block_start))
		      && (uid_cuid[REGNO_LAST_UID (new)]
			  > uid_cuid[REGNO_LAST_UID (firstr)]))))))
    {
      reg_prev_eqv[firstr] = new;
      reg_next_eqv[new] = firstr;
      reg_prev_eqv[new] = -1;
      qty_first_reg[q] = new;
    }
  else
    {
      /* If NEW is a hard reg (known to be non-fixed), insert at end.
	 Otherwise, insert before any non-fixed hard regs that are at the
	 end.  Registers of class NO_REGS cannot be used as an
	 equivalent for anything.  */
      while (lastr < FIRST_PSEUDO_REGISTER && reg_prev_eqv[lastr] >= 0
	     && (REGNO_REG_CLASS (lastr) == NO_REGS || ! FIXED_REGNO_P (lastr))
	     && new >= FIRST_PSEUDO_REGISTER)
	lastr = reg_prev_eqv[lastr];
      reg_next_eqv[new] = reg_next_eqv[lastr];
      if (reg_next_eqv[lastr] >= 0)
	reg_prev_eqv[reg_next_eqv[lastr]] = new;
      else
	qty_last_reg[q] = new;
      reg_next_eqv[lastr] = new;
      reg_prev_eqv[new] = lastr;
    }
}

/* Remove REG from its equivalence class.  */

static void
delete_reg_equiv (reg)
     register int reg;
{
  register int q = reg_qty[reg];
  register int p, n;

  /* If invalid, do nothing.  */
  if (q == reg)
    return;

  p = reg_prev_eqv[reg];
  n = reg_next_eqv[reg];

  if (n != -1)
    reg_prev_eqv[n] = p;
  else
    qty_last_reg[q] = p;
  if (p != -1)
    reg_next_eqv[p] = n;
  else
    qty_first_reg[q] = n;

  reg_qty[reg] = reg;
}

/* Remove any invalid expressions from the hash table
   that refer to any of the registers contained in expression X.

   Make sure that newly inserted references to those registers
   as subexpressions will be considered valid.

   mention_regs is not called when a register itself
   is being stored in the table.

   Return 1 if we have done something that may have changed the hash code
   of X.  */

static int
mention_regs (x)
     rtx x;
{
  register enum rtx_code code;
  register int i, j;
  register char *fmt;
  register int changed = 0;

  if (x == 0)
    return 0;

  code = GET_CODE (x);
  if (code == REG)
    {
      register int regno = REGNO (x);
      register int endregno
	= regno + (regno >= FIRST_PSEUDO_REGISTER ? 1
		   : HARD_REGNO_NREGS (regno, GET_MODE (x)));
      int i;

      for (i = regno; i < endregno; i++)
	{
	  if (reg_in_table[i] >= 0 && reg_in_table[i] != reg_tick[i])
	    remove_invalid_refs (i);

	  reg_in_table[i] = reg_tick[i];
	}

      return 0;
    }

  /* If X is a comparison or a COMPARE and either operand is a register
     that does not have a quantity, give it one.  This is so that a later
     call to record_jump_equiv won't cause X to be assigned a different
     hash code and not found in the table after that call.

     It is not necessary to do this here, since rehash_using_reg can
     fix up the table later, but doing this here eliminates the need to
     call that expensive function in the most common case where the only
     use of the register is in the comparison.  */

  if (code == COMPARE || GET_RTX_CLASS (code) == '<')
    {
      if (GET_CODE (XEXP (x, 0)) == REG
	  && ! REGNO_QTY_VALID_P (REGNO (XEXP (x, 0))))
	if (insert_regs (XEXP (x, 0), NULL_PTR, 0))
	  {
	    rehash_using_reg (XEXP (x, 0));
	    changed = 1;
	  }

      if (GET_CODE (XEXP (x, 1)) == REG
	  && ! REGNO_QTY_VALID_P (REGNO (XEXP (x, 1))))
	if (insert_regs (XEXP (x, 1), NULL_PTR, 0))
	  {
	    rehash_using_reg (XEXP (x, 1));
	    changed = 1;
	  }
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      changed |= mention_regs (XEXP (x, i));
    else if (fmt[i] == 'E')
      for (j = 0; j < XVECLEN (x, i); j++)
	changed |= mention_regs (XVECEXP (x, i, j));

  return changed;
}

/* Update the register quantities for inserting X into the hash table
   with a value equivalent to CLASSP.
   (If the class does not contain a REG, it is irrelevant.)
   If MODIFIED is nonzero, X is a destination; it is being modified.
   Note that delete_reg_equiv should be called on a register
   before insert_regs is done on that register with MODIFIED != 0.

   Nonzero value means that elements of reg_qty have changed
   so X's hash code may be different.  */

static int
insert_regs (x, classp, modified)
     rtx x;
     struct table_elt *classp;
     int modified;
{
  if (GET_CODE (x) == REG)
    {
      register int regno = REGNO (x);

      /* If REGNO is in the equivalence table already but is of the
	 wrong mode for that equivalence, don't do anything here.  */

      if (REGNO_QTY_VALID_P (regno)
	  && qty_mode[reg_qty[regno]] != GET_MODE (x))
	return 0;

      if (modified || ! REGNO_QTY_VALID_P (regno))
	{
	  if (classp)
	    for (classp = classp->first_same_value;
		 classp != 0;
		 classp = classp->next_same_value)
	      if (GET_CODE (classp->exp) == REG
		  && GET_MODE (classp->exp) == GET_MODE (x))
		{
		  make_regs_eqv (regno, REGNO (classp->exp));
		  return 1;
		}

	  make_new_qty (regno);
	  qty_mode[reg_qty[regno]] = GET_MODE (x);
	  return 1;
	}

      return 0;
    }

  /* If X is a SUBREG, we will likely be inserting the inner register in the
     table.  If that register doesn't have an assigned quantity number at
     this point but does later, the insertion that we will be doing now will
     not be accessible because its hash code will have changed.  So assign
     a quantity number now.  */

  else if (GET_CODE (x) == SUBREG && GET_CODE (SUBREG_REG (x)) == REG
	   && ! REGNO_QTY_VALID_P (REGNO (SUBREG_REG (x))))
    {
      insert_regs (SUBREG_REG (x), NULL_PTR, 0);
      mention_regs (SUBREG_REG (x));
      return 1;
    }
  else
    return mention_regs (x);
}

/* Look in or update the hash table.  */

/* Put the element ELT on the list of free elements.  */

static void
free_element (elt)
     struct table_elt *elt;
{
  elt->next_same_hash = free_element_chain;
  free_element_chain = elt;
}

/* Return an element that is free for use.  */

static struct table_elt *
get_element ()
{
  struct table_elt *elt = free_element_chain;
  if (elt)
    {
      free_element_chain = elt->next_same_hash;
      return elt;
    }
  n_elements_made++;
  return (struct table_elt *) oballoc (sizeof (struct table_elt));
}

/* Remove table element ELT from use in the table.
   HASH is its hash code, made using the HASH macro.
   It's an argument because often that is known in advance
   and we save much time not recomputing it.  */

static void
remove_from_table (elt, hash)
     register struct table_elt *elt;
     unsigned hash;
{
  if (elt == 0)
    return;

  /* Mark this element as removed.  See cse_insn.  */
  elt->first_same_value = 0;

  /* Remove the table element from its equivalence class.  */

  {
    register struct table_elt *prev = elt->prev_same_value;
    register struct table_elt *next = elt->next_same_value;

    if (next) next->prev_same_value = prev;

    if (prev)
      prev->next_same_value = next;
    else
      {
	register struct table_elt *newfirst = next;
	while (next)
	  {
	    next->first_same_value = newfirst;
	    next = next->next_same_value;
	  }
      }
  }

  /* Remove the table element from its hash bucket.  */

  {
    register struct table_elt *prev = elt->prev_same_hash;
    register struct table_elt *next = elt->next_same_hash;

    if (next) next->prev_same_hash = prev;

    if (prev)
      prev->next_same_hash = next;
    else if (table[hash] == elt)
      table[hash] = next;
    else
      {
	/* This entry is not in the proper hash bucket.  This can happen
	   when two classes were merged by `merge_equiv_classes'.  Search
	   for the hash bucket that it heads.  This happens only very
	   rarely, so the cost is acceptable.  */
	for (hash = 0; hash < NBUCKETS; hash++)
	  if (table[hash] == elt)
	    table[hash] = next;
      }
  }

  /* Remove the table element from its related-value circular chain.  */

  if (elt->related_value != 0 && elt->related_value != elt)
    {
      register struct table_elt *p = elt->related_value;
      while (p->related_value != elt)
	p = p->related_value;
      p->related_value = elt->related_value;
      if (p->related_value == p)
	p->related_value = 0;
    }

  free_element (elt);
}

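/* The related-value chain handled above is circular and singly linked,
   so deletion must walk all the way around the circle to find the
   predecessor.  The same pattern in isolation (a sketch with a
   hypothetical node type, mirroring the loop above):  */
#if 0
struct toy_node { struct toy_node *next; };

static void
toy_circular_unlink (elt)
     struct toy_node *elt;
{
  struct toy_node *p = elt->next;
  while (p->next != elt)	/* Find ELT's predecessor.  */
    p = p->next;
  p->next = elt->next;		/* Splice ELT out of the circle.  */
  if (p->next == p)		/* A chain of one is dissolved.  */
    p->next = 0;
}
#endif
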
/* Look up X in the hash table and return its table element,
   or 0 if X is not in the table.

   MODE is the machine-mode of X, or if X is an integer constant
   with VOIDmode then MODE is the mode with which X will be used.

   Here we are satisfied to find an expression whose tree structure
   looks like X.  */

static struct table_elt *
lookup (x, hash, mode)
     rtx x;
     unsigned hash;
     enum machine_mode mode;
{
  register struct table_elt *p;

  for (p = table[hash]; p; p = p->next_same_hash)
    if (mode == p->mode && ((x == p->exp && GET_CODE (x) == REG)
			    || exp_equiv_p (x, p->exp, GET_CODE (x) != REG, 0)))
      return p;

  return 0;
}

/* Like `lookup' but don't care whether the table element uses invalid regs.
   Also ignore discrepancies in the machine mode of a register.  */

static struct table_elt *
lookup_for_remove (x, hash, mode)
     rtx x;
     unsigned hash;
     enum machine_mode mode;
{
  register struct table_elt *p;

  if (GET_CODE (x) == REG)
    {
      int regno = REGNO (x);
      /* Don't check the machine mode when comparing registers;
	 invalidating (REG:SI 0) also invalidates (REG:DF 0).  */
      for (p = table[hash]; p; p = p->next_same_hash)
	if (GET_CODE (p->exp) == REG
	    && REGNO (p->exp) == regno)
	  return p;
    }
  else
    {
      for (p = table[hash]; p; p = p->next_same_hash)
	if (mode == p->mode && (x == p->exp || exp_equiv_p (x, p->exp, 0, 0)))
	  return p;
    }

  return 0;
}

/* Look for an expression equivalent to X and with code CODE.
   If one is found, return that expression.  */

static rtx
lookup_as_function (x, code)
     rtx x;
     enum rtx_code code;
{
  register struct table_elt *p = lookup (x, safe_hash (x, VOIDmode) % NBUCKETS,
					 GET_MODE (x));
  if (p == 0)
    return 0;

  for (p = p->first_same_value; p; p = p->next_same_value)
    {
      if (GET_CODE (p->exp) == code
	  /* Make sure this is a valid entry in the table.  */
	  && exp_equiv_p (p->exp, p->exp, 1, 0))
	return p->exp;
    }

  return 0;
}

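/* A typical use of lookup_as_function (a sketch; ADDR is a hypothetical
   rtx): recover a PLUS known to be equivalent to an address so that its
   base and offset operands can be inspected directly.  */
#if 0
  rtx sum = lookup_as_function (addr, PLUS);
  if (sum != 0)
    {
      rtx base = XEXP (sum, 0);
      rtx offset = XEXP (sum, 1);
      /* ... examine BASE and OFFSET ...  */
    }
#endif
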
/* Insert X in the hash table, assuming HASH is its hash code
   and CLASSP is an element of the class it should go in
   (or 0 if a new class should be made).
   It is inserted at the proper position to keep the class in
   the order cheapest first.

   MODE is the machine-mode of X, or if X is an integer constant
   with VOIDmode then MODE is the mode with which X will be used.

   For elements of equal cheapness, the most recent one
   goes in front, except that the first element in the list
   remains first unless a cheaper element is added.  The order of
   pseudo-registers does not matter, as canon_reg will be called to
   find the cheapest when a register is retrieved from the table.

   The in_memory field in the hash table element is set to 0.
   The caller must set it nonzero if appropriate.

   You should call insert_regs (X, CLASSP, MODIFY) before calling here,
   and if insert_regs returns a nonzero value
   you must then recompute its hash code before calling here.

   If necessary, update table showing constant values of quantities.  */

#define CHEAPER(X,Y) ((X)->cost < (Y)->cost)

static struct table_elt *
insert (x, classp, hash, mode)
     register rtx x;
     register struct table_elt *classp;
     unsigned hash;
     enum machine_mode mode;
{
  register struct table_elt *elt;

  /* If X is a register and we haven't made a quantity for it,
     something is wrong.  */
  if (GET_CODE (x) == REG && ! REGNO_QTY_VALID_P (REGNO (x)))
    abort ();

  /* If X is a hard register, show it is being put in the table.  */
  if (GET_CODE (x) == REG && REGNO (x) < FIRST_PSEUDO_REGISTER)
    {
      int regno = REGNO (x);
      int endregno = regno + HARD_REGNO_NREGS (regno, GET_MODE (x));
      int i;

      for (i = regno; i < endregno; i++)
	SET_HARD_REG_BIT (hard_regs_in_table, i);
    }

  /* If X is a label, show we recorded it.  */
  if (GET_CODE (x) == LABEL_REF
      || (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == PLUS
	  && GET_CODE (XEXP (XEXP (x, 0), 0)) == LABEL_REF))
    recorded_label_ref = 1;

  /* Put an element for X into the right hash bucket.  */

  elt = get_element ();
  elt->exp = x;
  elt->cost = COST (x);
  elt->next_same_value = 0;
  elt->prev_same_value = 0;
  elt->next_same_hash = table[hash];
  elt->prev_same_hash = 0;
  elt->related_value = 0;
  elt->in_memory = 0;
  elt->mode = mode;
  elt->is_const = (CONSTANT_P (x)
		   /* GNU C++ takes advantage of this for `this'
		      (and other const values).  */
		   || (RTX_UNCHANGING_P (x)
		       && GET_CODE (x) == REG
		       && REGNO (x) >= FIRST_PSEUDO_REGISTER)
		   || FIXED_BASE_PLUS_P (x));

  if (table[hash])
    table[hash]->prev_same_hash = elt;
  table[hash] = elt;

  /* Put it into the proper value-class.  */
  if (classp)
    {
      classp = classp->first_same_value;
      if (CHEAPER (elt, classp))
	/* Insert at the head of the class.  */
	{
	  register struct table_elt *p;
	  elt->next_same_value = classp;
	  classp->prev_same_value = elt;
	  elt->first_same_value = elt;

	  for (p = classp; p; p = p->next_same_value)
	    p->first_same_value = elt;
	}
      else
	{
	  /* Insert not at head of the class.  */
	  /* Put it after the last element cheaper than X.  */
	  register struct table_elt *p, *next;
	  for (p = classp; (next = p->next_same_value) && CHEAPER (next, elt);
	       p = next);
	  /* Put it after P and before NEXT.  */
	  elt->next_same_value = next;
	  if (next)
	    next->prev_same_value = elt;
	  elt->prev_same_value = p;
	  p->next_same_value = elt;
	  elt->first_same_value = classp;
	}
    }
  else
    elt->first_same_value = elt;

  /* If this is a constant being set equivalent to a register or a register
     being set equivalent to a constant, note the constant equivalence.

     If this is a constant, it cannot be equivalent to a different constant,
     and a constant is the only thing that can be cheaper than a register.  So
     we know the register is the head of the class (before the constant was
     inserted).

     If this is a register that is not already known equivalent to a
     constant, we must check the entire class.

     If this is a register that is already known equivalent to an insn,
     update `qty_const_insn' to show that `this_insn' is the latest
     insn making that quantity equivalent to the constant.  */

  if (elt->is_const && classp && GET_CODE (classp->exp) == REG
      && GET_CODE (x) != REG)
    {
      qty_const[reg_qty[REGNO (classp->exp)]]
	= gen_lowpart_if_possible (qty_mode[reg_qty[REGNO (classp->exp)]], x);
      qty_const_insn[reg_qty[REGNO (classp->exp)]] = this_insn;
    }

  else if (GET_CODE (x) == REG && classp && ! qty_const[reg_qty[REGNO (x)]]
	   && ! elt->is_const)
    {
      register struct table_elt *p;

      for (p = classp; p != 0; p = p->next_same_value)
	{
	  if (p->is_const && GET_CODE (p->exp) != REG)
	    {
	      qty_const[reg_qty[REGNO (x)]]
		= gen_lowpart_if_possible (GET_MODE (x), p->exp);
	      qty_const_insn[reg_qty[REGNO (x)]] = this_insn;
	      break;
	    }
	}
    }

  else if (GET_CODE (x) == REG && qty_const[reg_qty[REGNO (x)]]
	   && GET_MODE (x) == qty_mode[reg_qty[REGNO (x)]])
    qty_const_insn[reg_qty[REGNO (x)]] = this_insn;

  /* If this is a constant with symbolic value,
     and it has a term with an explicit integer value,
     link it up with related expressions.  */
  if (GET_CODE (x) == CONST)
    {
      rtx subexp = get_related_value (x);
      unsigned subhash;
      struct table_elt *subelt, *subelt_prev;

      if (subexp != 0)
	{
	  /* Get the integer-free subexpression in the hash table.  */
	  subhash = safe_hash (subexp, mode) % NBUCKETS;
	  subelt = lookup (subexp, subhash, mode);
	  if (subelt == 0)
	    subelt = insert (subexp, NULL_PTR, subhash, mode);
	  /* Initialize SUBELT's circular chain if it has none.  */
	  if (subelt->related_value == 0)
	    subelt->related_value = subelt;
	  /* Find the element in the circular chain that precedes SUBELT.  */
	  subelt_prev = subelt;
	  while (subelt_prev->related_value != subelt)
	    subelt_prev = subelt_prev->related_value;
	  /* Put new ELT into SUBELT's circular chain just before SUBELT.
	     This way the element that follows SUBELT is the oldest one.  */
	  elt->related_value = subelt_prev->related_value;
	  subelt_prev->related_value = elt;
	}
    }

  return elt;
}

/* Given two equivalence classes, CLASS1 and CLASS2, put all the entries from
   CLASS2 into CLASS1.  This is done when we have reached an insn which makes
   the two classes equivalent.

   CLASS1 will be the surviving class; CLASS2 should not be used after this
   call.

   Any invalid entries in CLASS2 will not be copied.  */

static void
merge_equiv_classes (class1, class2)
     struct table_elt *class1, *class2;
{
  struct table_elt *elt, *next, *new;

  /* Ensure we start with the head of the classes.  */
  class1 = class1->first_same_value;
  class2 = class2->first_same_value;

  /* If they were already equal, forget it.  */
  if (class1 == class2)
    return;

  for (elt = class2; elt; elt = next)
    {
      unsigned hash;
      rtx exp = elt->exp;
      enum machine_mode mode = elt->mode;

      next = elt->next_same_value;

      /* Remove old entry, make a new one in CLASS1's class.
	 Don't do this for invalid entries as we cannot find their
	 hash code (it also isn't necessary).  */
      if (GET_CODE (exp) == REG || exp_equiv_p (exp, exp, 1, 0))
	{
	  hash_arg_in_memory = 0;
	  hash_arg_in_struct = 0;
	  hash = HASH (exp, mode);

	  if (GET_CODE (exp) == REG)
	    delete_reg_equiv (REGNO (exp));

	  remove_from_table (elt, hash);

	  if (insert_regs (exp, class1, 0))
	    {
	      rehash_using_reg (exp);
	      hash = HASH (exp, mode);
	    }
	  new = insert (exp, class1, hash, mode);
	  new->in_memory = hash_arg_in_memory;
	  new->in_struct = hash_arg_in_struct;
	}
    }
}

/* Remove from the hash table, or mark as invalid,
   all expressions whose values could be altered by storing in X.
   X is a register, a subreg, or a memory reference with nonvarying address
   (because, when a memory reference with a varying address is stored in,
   all memory references are removed by invalidate_memory
   so specific invalidation is superfluous).
   FULL_MODE, if not VOIDmode, indicates that this much should be invalidated
   instead of just the amount indicated by the mode of X.  This is only used
   for bitfield stores into memory.

   A nonvarying address may be just a register or just
   a symbol reference, or it may be either of those plus
   a numeric offset.  */

static void
invalidate (x, full_mode)
     rtx x;
     enum machine_mode full_mode;
{
  register int i;
  register struct table_elt *p;
  rtx base;
  HOST_WIDE_INT start, end;

  /* If X is a register, dependencies on its contents
     are recorded through the qty number mechanism.
     Just change the qty number of the register,
     mark it as invalid for expressions that refer to it,
     and remove it itself.  */

  if (GET_CODE (x) == REG)
    {
      register int regno = REGNO (x);
      register unsigned hash = HASH (x, GET_MODE (x));

      /* Remove REGNO from any quantity list it might be on and indicate
	 that its value might have changed.  If it is a pseudo, remove its
	 entry from the hash table.

	 For a hard register, we do the first two actions above for any
	 additional hard registers corresponding to X.  Then, if any of these
	 registers are in the table, we must remove any REG entries that
	 overlap these registers.  */

      delete_reg_equiv (regno);
      reg_tick[regno]++;

      if (regno >= FIRST_PSEUDO_REGISTER)
	{
	  /* Because a register can be referenced in more than one mode,
	     we might have to remove more than one table entry.  */

	  struct table_elt *elt;

	  while (elt = lookup_for_remove (x, hash, GET_MODE (x)))
	    remove_from_table (elt, hash);
	}
      else
	{
	  HOST_WIDE_INT in_table
	    = TEST_HARD_REG_BIT (hard_regs_in_table, regno);
	  int endregno = regno + HARD_REGNO_NREGS (regno, GET_MODE (x));
	  int tregno, tendregno;
	  register struct table_elt *p, *next;

	  CLEAR_HARD_REG_BIT (hard_regs_in_table, regno);

	  for (i = regno + 1; i < endregno; i++)
	    {
	      in_table |= TEST_HARD_REG_BIT (hard_regs_in_table, i);
	      CLEAR_HARD_REG_BIT (hard_regs_in_table, i);
	      delete_reg_equiv (i);
	      reg_tick[i]++;
	    }

	  if (in_table)
	    for (hash = 0; hash < NBUCKETS; hash++)
	      for (p = table[hash]; p; p = next)
		{
		  next = p->next_same_hash;

		  if (GET_CODE (p->exp) != REG
		      || REGNO (p->exp) >= FIRST_PSEUDO_REGISTER)
		    continue;

		  tregno = REGNO (p->exp);
		  tendregno
		    = tregno + HARD_REGNO_NREGS (tregno, GET_MODE (p->exp));
		  if (tendregno > regno && tregno < endregno)
		    remove_from_table (p, hash);
		}
	}

      return;
    }

  if (GET_CODE (x) == SUBREG)
    {
      if (GET_CODE (SUBREG_REG (x)) != REG)
	abort ();
      invalidate (SUBREG_REG (x), VOIDmode);
      return;
    }

  /* X is not a register; it must be a memory reference with
     a nonvarying address.  Remove all hash table elements
     that refer to overlapping pieces of memory.  */

  if (GET_CODE (x) != MEM)
    abort ();

  if (full_mode == VOIDmode)
    full_mode = GET_MODE (x);

  set_nonvarying_address_components (XEXP (x, 0), GET_MODE_SIZE (full_mode),
				     &base, &start, &end);

  for (i = 0; i < NBUCKETS; i++)
    {
      register struct table_elt *next;
      for (p = table[i]; p; p = next)
	{
	  next = p->next_same_hash;
	  if (refers_to_mem_p (p->exp, base, start, end))
	    remove_from_table (p, i);
	}
    }
}

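/* How invalidation is typically driven (a sketch; SET is a hypothetical
   SET rtx being processed): the destination is invalidated before any
   new equivalence for it is recorded.  */
#if 0
  rtx dest = SET_DEST (set);

  if (GET_CODE (dest) == REG || GET_CODE (dest) == SUBREG
      || GET_CODE (dest) == MEM)
    invalidate (dest, VOIDmode);
#endif
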
/* Remove all expressions that refer to register REGNO,
   since they are already invalid, and we are about to
   mark that register valid again and don't want the old
   expressions to reappear as valid.  */

static void
remove_invalid_refs (regno)
     int regno;
{
  register int i;
  register struct table_elt *p, *next;

  for (i = 0; i < NBUCKETS; i++)
    for (p = table[i]; p; p = next)
      {
	next = p->next_same_hash;
	if (GET_CODE (p->exp) != REG
	    && refers_to_regno_p (regno, regno + 1, p->exp, NULL_PTR))
	  remove_from_table (p, i);
      }
}

1670 /* Recompute the hash codes of any valid entries in the hash table that
1671 reference X, if X is a register, or SUBREG_REG (X) if X is a SUBREG.
1673 This is called when we make a jump equivalence. */
1675 static void
1676 rehash_using_reg (x)
1677 rtx x;
1679 int i;
1680 struct table_elt *p, *next;
1681 unsigned hash;
1683 if (GET_CODE (x) == SUBREG)
1684 x = SUBREG_REG (x);
1686 /* If X is not a register or if the register is known not to be in any
1687 valid entries in the table, we have no work to do. */
1689 if (GET_CODE (x) != REG
1690 || reg_in_table[REGNO (x)] < 0
1691 || reg_in_table[REGNO (x)] != reg_tick[REGNO (x)])
1692 return;
1694 /* Scan all hash chains looking for valid entries that mention X.
1695 If we find one and it is in the wrong hash chain, move it. We can skip
1696 objects that are registers, since they are handled specially. */
1698 for (i = 0; i < NBUCKETS; i++)
1699 for (p = table[i]; p; p = next)
1701 next = p->next_same_hash;
1702 if (GET_CODE (p->exp) != REG && reg_mentioned_p (x, p->exp)
1703 && exp_equiv_p (p->exp, p->exp, 1, 0)
1704 && i != (hash = safe_hash (p->exp, p->mode) % NBUCKETS))
1706 if (p->next_same_hash)
1707 p->next_same_hash->prev_same_hash = p->prev_same_hash;
1709 if (p->prev_same_hash)
1710 p->prev_same_hash->next_same_hash = p->next_same_hash;
1711 else
1712 table[i] = p->next_same_hash;
1714 p->next_same_hash = table[hash];
1715 p->prev_same_hash = 0;
1716 if (table[hash])
1717 table[hash]->prev_same_hash = p;
1718 table[hash] = p;
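/* A minimal standalone sketch of the chain surgery above, with
   hypothetical names (not code from this file): moving a node P from
   one doubly linked bucket chain to the front of another.

	struct node { struct node *next, *prev; };

	static void
	move_node (struct node **buckets, struct node *p, int from, int to)
	{
	  if (p->next)
	    p->next->prev = p->prev;
	  if (p->prev)
	    p->prev->next = p->next;
	  else
	    buckets[from] = p->next;
	  p->next = buckets[to];
	  p->prev = 0;
	  if (buckets[to])
	    buckets[to]->prev = p;
	  buckets[to] = p;
	}

   Only the head of a chain has a null PREV pointer, which is why the
   ELSE branch is the one that repairs the bucket pointer itself.  */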
1723 /* Remove from the hash table all expressions that reference memory,
1724 or some of them as specified by *WRITES. */
1726 static void
1727 invalidate_memory (writes)
1728 struct write_data *writes;
1730 register int i;
1731 register struct table_elt *p, *next;
1732 int all = writes->all;
1733 int nonscalar = writes->nonscalar;
1735 for (i = 0; i < NBUCKETS; i++)
1736 for (p = table[i]; p; p = next)
1738 next = p->next_same_hash;
1739 if (p->in_memory
1740 && (all
1741 || (nonscalar && p->in_struct)
1742 || cse_rtx_addr_varies_p (p->exp)))
1743 remove_from_table (p, i);
1747 /* Remove from the hash table any expression that is a call-clobbered
1748 register. Also update their TICK values. */
1750 static void
1751 invalidate_for_call ()
1753 int regno, endregno;
1754 int i;
1755 unsigned hash;
1756 struct table_elt *p, *next;
1757 int in_table = 0;
1759 /* Go through all the hard registers. For each that is clobbered in
1760 a CALL_INSN, remove the register from quantity chains and update
1761 reg_tick if defined. Also see if any of these registers is currently
1762 in the table. */
1764 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
1765 if (TEST_HARD_REG_BIT (regs_invalidated_by_call, regno))
1767 delete_reg_equiv (regno);
1768 if (reg_tick[regno] >= 0)
1769 reg_tick[regno]++;
1771 in_table |= (TEST_HARD_REG_BIT (hard_regs_in_table, regno) != 0);
1774 /* In the case where we have no call-clobbered hard registers in the
1775 table, we are done. Otherwise, scan the table and remove any
1776 entry that overlaps a call-clobbered register. */
1778 if (in_table)
1779 for (hash = 0; hash < NBUCKETS; hash++)
1780 for (p = table[hash]; p; p = next)
1782 next = p->next_same_hash;
1784 if (GET_CODE (p->exp) != REG
1785 || REGNO (p->exp) >= FIRST_PSEUDO_REGISTER)
1786 continue;
1788 regno = REGNO (p->exp);
1789 endregno = regno + HARD_REGNO_NREGS (regno, GET_MODE (p->exp));
1791 for (i = regno; i < endregno; i++)
1792 if (TEST_HARD_REG_BIT (regs_invalidated_by_call, i))
1794 remove_from_table (p, hash);
1795 break;
1800 /* Given an expression X of type CONST,
1801 and ELT which is its table entry (or 0 if it
1802 is not in the hash table),
1803 return an alternate expression for X as a register plus integer.
1804 If none can be found, return 0. */
1806 static rtx
1807 use_related_value (x, elt)
1808 rtx x;
1809 struct table_elt *elt;
1811 register struct table_elt *relt = 0;
1812 register struct table_elt *p, *q;
1813 HOST_WIDE_INT offset;
1815 /* First, is there anything related known?
1816 If we have a table element, we can tell from that.
1817 Otherwise, must look it up. */
1819 if (elt != 0 && elt->related_value != 0)
1820 relt = elt;
1821 else if (elt == 0 && GET_CODE (x) == CONST)
1823 rtx subexp = get_related_value (x);
1824 if (subexp != 0)
1825 relt = lookup (subexp,
1826 safe_hash (subexp, GET_MODE (subexp)) % NBUCKETS,
1827 GET_MODE (subexp));
1830 if (relt == 0)
1831 return 0;
1833 /* Search all related table entries for one that has an
1834 equivalent register. */
1836 p = relt;
1837 while (1)
1839 /* This loop is strange in that it is executed in two different cases.
1840 The first is when X is already in the table. Then it is searching
1841 the RELATED_VALUE list of X's class (RELT). The second case is when
1842 X is not in the table. Then RELT points to a class for the related
1843 value.
1845 Ensure that, whatever case we are in, we ignore classes that have
1846 the same value as X. */
1848 if (rtx_equal_p (x, p->exp))
1849 q = 0;
1850 else
1851 for (q = p->first_same_value; q; q = q->next_same_value)
1852 if (GET_CODE (q->exp) == REG)
1853 break;
1855 if (q)
1856 break;
1858 p = p->related_value;
1860 /* We went all the way around, so there is nothing to be found.
1861 Alternatively, perhaps RELT was in the table for some other reason
1862 and it has no related values recorded. */
1863 if (p == relt || p == 0)
1864 break;
1867 if (q == 0)
1868 return 0;
1870 offset = (get_integer_term (x) - get_integer_term (p->exp));
1871 /* Note: OFFSET may be 0 if P->exp and X are related by commutativity. */
1872 return plus_constant (q->exp, offset);
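/* A worked example with hypothetical register numbers: suppose X is
   (const (plus (symbol_ref "S") (const_int 12))) and RELT's chain holds
   P with P->exp = (const (plus (symbol_ref "S") (const_int 4))), whose
   class contains Q with Q->exp = (reg 57).  Then OFFSET = 12 - 4 = 8
   and the result is (plus (reg 57) (const_int 8)), a register-plus-
   integer form of X as promised above.  */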
1875 /* Hash an rtx. We are careful to make sure the value is never negative.
1876 Equivalent registers hash identically.
1877 MODE is used in hashing for CONST_INTs only;
1878 otherwise the mode of X is used.
1880 Store 1 in do_not_record if any subexpression is volatile.
1882 Store 1 in hash_arg_in_memory if X contains a MEM rtx
1883 which does not have the RTX_UNCHANGING_P bit set.
1884 In this case, also store 1 in hash_arg_in_struct
1885 if there is a MEM rtx which has the MEM_IN_STRUCT_P bit set.
1887 Note that cse_insn knows that the hash code of a MEM expression
1888 is just (int) MEM plus the hash code of the address. */
1890 static unsigned
1891 canon_hash (x, mode)
1892 rtx x;
1893 enum machine_mode mode;
1895 register int i, j;
1896 register unsigned hash = 0;
1897 register enum rtx_code code;
1898 register char *fmt;
1900 /* repeat is used to turn tail-recursion into iteration. */
1901 repeat:
1902 if (x == 0)
1903 return hash;
1905 code = GET_CODE (x);
1906 switch (code)
1908 case REG:
1910 register int regno = REGNO (x);
1912 /* On some machines, we can't record any non-fixed hard register,
1913 because extending its life will cause reload problems. We
1914 consider ap, fp, and sp to be fixed for this purpose.
1915 On all machines, we can't record any global registers. */
1917 if (regno < FIRST_PSEUDO_REGISTER
1918 && (global_regs[regno]
1919 || (SMALL_REGISTER_CLASSES
1920 && ! fixed_regs[regno]
1921 && regno != FRAME_POINTER_REGNUM
1922 && regno != HARD_FRAME_POINTER_REGNUM
1923 && regno != ARG_POINTER_REGNUM
1924 && regno != STACK_POINTER_REGNUM)))
1926 do_not_record = 1;
1927 return 0;
1929 hash += ((unsigned) REG << 7) + (unsigned) reg_qty[regno];
1930 return hash;
1933 case CONST_INT:
1935 unsigned HOST_WIDE_INT tem = INTVAL (x);
1936 hash += ((unsigned) CONST_INT << 7) + (unsigned) mode + tem;
1937 return hash;
1940 case CONST_DOUBLE:
1941 /* This is like the general case, except that it only counts
1942 the integers representing the constant. */
1943 hash += (unsigned) code + (unsigned) GET_MODE (x);
1944 if (GET_MODE (x) != VOIDmode)
1945 for (i = 2; i < GET_RTX_LENGTH (CONST_DOUBLE); i++)
1947 unsigned tem = XINT (x, i);
1948 hash += tem;
1950 else
1951 hash += ((unsigned) CONST_DOUBLE_LOW (x)
1952 + (unsigned) CONST_DOUBLE_HIGH (x));
1953 return hash;
1955 /* Assume there is only one rtx object for any given label. */
1956 case LABEL_REF:
1957 hash
1958 += ((unsigned) LABEL_REF << 7) + (unsigned HOST_WIDE_INT) XEXP (x, 0);
1959 return hash;
1961 case SYMBOL_REF:
1962 hash
1963 += ((unsigned) SYMBOL_REF << 7) + (unsigned HOST_WIDE_INT) XSTR (x, 0);
1964 return hash;
1966 case MEM:
1967 if (MEM_VOLATILE_P (x))
1969 do_not_record = 1;
1970 return 0;
1972 if (! RTX_UNCHANGING_P (x) || FIXED_BASE_PLUS_P (XEXP (x, 0)))
1974 hash_arg_in_memory = 1;
1975 if (MEM_IN_STRUCT_P (x)) hash_arg_in_struct = 1;
1977 /* Now that we have already found this special case,
1978 might as well speed it up as much as possible. */
1979 hash += (unsigned) MEM;
1980 x = XEXP (x, 0);
1981 goto repeat;
1983 case PRE_DEC:
1984 case PRE_INC:
1985 case POST_DEC:
1986 case POST_INC:
1987 case PC:
1988 case CC0:
1989 case CALL:
1990 case UNSPEC_VOLATILE:
1991 do_not_record = 1;
1992 return 0;
1994 case ASM_OPERANDS:
1995 if (MEM_VOLATILE_P (x))
1997 do_not_record = 1;
1998 return 0;
2000 break;
2002 default:
2003 break;
2006 i = GET_RTX_LENGTH (code) - 1;
2007 hash += (unsigned) code + (unsigned) GET_MODE (x);
2008 fmt = GET_RTX_FORMAT (code);
2009 for (; i >= 0; i--)
2011 if (fmt[i] == 'e')
2013 rtx tem = XEXP (x, i);
2015 /* If we are about to do the last recursive call
2016 needed at this level, change it into iteration.
2017 This function is called enough to be worth it. */
2018 if (i == 0)
2020 x = tem;
2021 goto repeat;
2023 hash += canon_hash (tem, 0);
2025 else if (fmt[i] == 'E')
2026 for (j = 0; j < XVECLEN (x, i); j++)
2027 hash += canon_hash (XVECEXP (x, i, j), 0);
2028 else if (fmt[i] == 's')
2030 register unsigned char *p = (unsigned char *) XSTR (x, i);
2031 if (p)
2032 while (*p)
2033 hash += *p++;
2035 else if (fmt[i] == 'i')
2037 register unsigned tem = XINT (x, i);
2038 hash += tem;
2040 else if (fmt[i] == '0')
2041 /* unused */;
2042 else
2043 abort ();
2045 return hash;
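/* The mixing scheme above boils down to "shift the code left 7 bits,
   add the operand data, and iterate on operand 0 instead of recursing".
   A minimal standalone sketch over a toy expression type (hypothetical
   names, at most two operands assumed):

	struct expr { int code; int value; int nops; struct expr *op[2]; };

	static unsigned
	toy_hash (struct expr *x)
	{
	  unsigned hash = 0;
	  int i;
	repeat:
	  if (x == 0)
	    return hash;
	  hash += ((unsigned) x->code << 7) + (unsigned) x->value;
	  for (i = x->nops - 1; i > 0; i--)
	    hash += toy_hash (x->op[i]);
	  if (x->nops > 0)
	    {
	      x = x->op[0];
	      goto repeat;
	    }
	  return hash;
	}

   As in canon_hash, the first operand is reached by GOTO rather than a
   recursive call, which keeps stack depth constant on long PLUS
   chains.  */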
2048 /* Like canon_hash but with no side effects. */
2050 static unsigned
2051 safe_hash (x, mode)
2052 rtx x;
2053 enum machine_mode mode;
2055 int save_do_not_record = do_not_record;
2056 int save_hash_arg_in_memory = hash_arg_in_memory;
2057 int save_hash_arg_in_struct = hash_arg_in_struct;
2058 unsigned hash = canon_hash (x, mode);
2059 hash_arg_in_memory = save_hash_arg_in_memory;
2060 hash_arg_in_struct = save_hash_arg_in_struct;
2061 do_not_record = save_do_not_record;
2062 return hash;
2065 /* Return 1 iff X and Y would canonicalize into the same thing,
2066 without actually constructing the canonicalization of either one.
2067 If VALIDATE is nonzero,
2068 we assume X is an expression being processed from the rtl
2069 and Y was found in the hash table. We check register refs
2070 in Y for being marked as valid.
2072 If EQUAL_VALUES is nonzero, we allow a register to match a constant value
2073 that is known to be in the register. Ordinarily, we don't allow them
2074 to match, because letting them match would cause unpredictable results
2075 in all the places that search a hash table chain for an equivalent
2076 for a given value. A possible equivalent that has different structure
2077 has its hash code computed from different data. Whether the hash code
2078 is the same as that of the given value is pure luck. */
2080 static int
2081 exp_equiv_p (x, y, validate, equal_values)
2082 rtx x, y;
2083 int validate;
2084 int equal_values;
2086 register int i, j;
2087 register enum rtx_code code;
2088 register char *fmt;
2090 /* Note: it is incorrect to assume an expression is equivalent to itself
2091 if VALIDATE is nonzero. */
2092 if (x == y && !validate)
2093 return 1;
2094 if (x == 0 || y == 0)
2095 return x == y;
2097 code = GET_CODE (x);
2098 if (code != GET_CODE (y))
2100 if (!equal_values)
2101 return 0;
2103 /* If X is a constant and Y is a register or vice versa, they may be
2104 equivalent. We only have to validate if Y is a register. */
2105 if (CONSTANT_P (x) && GET_CODE (y) == REG
2106 && REGNO_QTY_VALID_P (REGNO (y))
2107 && GET_MODE (y) == qty_mode[reg_qty[REGNO (y)]]
2108 && rtx_equal_p (x, qty_const[reg_qty[REGNO (y)]])
2109 && (! validate || reg_in_table[REGNO (y)] == reg_tick[REGNO (y)]))
2110 return 1;
2112 if (CONSTANT_P (y) && code == REG
2113 && REGNO_QTY_VALID_P (REGNO (x))
2114 && GET_MODE (x) == qty_mode[reg_qty[REGNO (x)]]
2115 && rtx_equal_p (y, qty_const[reg_qty[REGNO (x)]]))
2116 return 1;
2118 return 0;
2121 /* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent. */
2122 if (GET_MODE (x) != GET_MODE (y))
2123 return 0;
2125 switch (code)
2127 case PC:
2128 case CC0:
2129 return x == y;
2131 case CONST_INT:
2132 return INTVAL (x) == INTVAL (y);
2134 case LABEL_REF:
2135 return XEXP (x, 0) == XEXP (y, 0);
2137 case SYMBOL_REF:
2138 return XSTR (x, 0) == XSTR (y, 0);
2140 case REG:
2142 int regno = REGNO (y);
2143 int endregno
2144 = regno + (regno >= FIRST_PSEUDO_REGISTER ? 1
2145 : HARD_REGNO_NREGS (regno, GET_MODE (y)));
2146 int i;
2148 /* If the quantities are not the same, the expressions are not
2149 equivalent. If they are the same and we are not to validate, they
2150 are equivalent. Otherwise, ensure all regs are up-to-date. */
2152 if (reg_qty[REGNO (x)] != reg_qty[regno])
2153 return 0;
2155 if (! validate)
2156 return 1;
2158 for (i = regno; i < endregno; i++)
2159 if (reg_in_table[i] != reg_tick[i])
2160 return 0;
2162 return 1;
2165 /* For commutative operations, check both orders. */
2166 case PLUS:
2167 case MULT:
2168 case AND:
2169 case IOR:
2170 case XOR:
2171 case NE:
2172 case EQ:
2173 return ((exp_equiv_p (XEXP (x, 0), XEXP (y, 0), validate, equal_values)
2174 && exp_equiv_p (XEXP (x, 1), XEXP (y, 1),
2175 validate, equal_values))
2176 || (exp_equiv_p (XEXP (x, 0), XEXP (y, 1),
2177 validate, equal_values)
2178 && exp_equiv_p (XEXP (x, 1), XEXP (y, 0),
2179 validate, equal_values)));
2181 default:
2182 break;
2185 /* Compare the elements. If any pair of corresponding elements
2186 fail to match, return 0 for the whole thing. */
2188 fmt = GET_RTX_FORMAT (code);
2189 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2191 switch (fmt[i])
2193 case 'e':
2194 if (! exp_equiv_p (XEXP (x, i), XEXP (y, i), validate, equal_values))
2195 return 0;
2196 break;
2198 case 'E':
2199 if (XVECLEN (x, i) != XVECLEN (y, i))
2200 return 0;
2201 for (j = 0; j < XVECLEN (x, i); j++)
2202 if (! exp_equiv_p (XVECEXP (x, i, j), XVECEXP (y, i, j),
2203 validate, equal_values))
2204 return 0;
2205 break;
2207 case 's':
2208 if (strcmp (XSTR (x, i), XSTR (y, i)))
2209 return 0;
2210 break;
2212 case 'i':
2213 if (XINT (x, i) != XINT (y, i))
2214 return 0;
2215 break;
2217 case 'w':
2218 if (XWINT (x, i) != XWINT (y, i))
2219 return 0;
2220 break;
2222 case '0':
2223 break;
2225 default:
2226 abort ();
2230 return 1;
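/* The commutative case above is the interesting one: (plus A B) must
   match (plus B A) without building a canonical form first.  Reduced
   to a toy binary tree (hypothetical types, not this file's rtx), the
   shape of the test is:

	struct tnode { int code; int leaf; struct tnode *l, *r; };

	static int
	toy_equiv (struct tnode *x, struct tnode *y)
	{
	  if (x == y)
	    return 1;
	  if (x == 0 || y == 0 || x->code != y->code)
	    return 0;
	  if (x->l == 0 && x->r == 0)
	    return y->l == 0 && y->r == 0 && x->leaf == y->leaf;
	  return (toy_equiv (x->l, y->l) && toy_equiv (x->r, y->r))
		 || (toy_equiv (x->l, y->r) && toy_equiv (x->r, y->l));
	}

   exp_equiv_p applies the second disjunct only for codes known to be
   commutative; this sketch applies it everywhere for brevity.  */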
2233 /* Return 1 iff any subexpression of X matches Y.
2234 Here we do not require that X or Y be valid (for registers referred to)
2235 for being in the hash table. */
2237 static int
2238 refers_to_p (x, y)
2239 rtx x, y;
2241 register int i;
2242 register enum rtx_code code;
2243 register char *fmt;
2245 repeat:
2246 if (x == y)
2247 return 1;
2248 if (x == 0 || y == 0)
2249 return 0;
2251 code = GET_CODE (x);
2252 /* If X as a whole has the same code as Y, they may match.
2253 If so, return 1. */
2254 if (code == GET_CODE (y))
2256 if (exp_equiv_p (x, y, 0, 1))
2257 return 1;
2260 /* X does not match, so try its subexpressions. */
2262 fmt = GET_RTX_FORMAT (code);
2263 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2264 if (fmt[i] == 'e')
2266 if (i == 0)
2268 x = XEXP (x, 0);
2269 goto repeat;
2271 else
2272 if (refers_to_p (XEXP (x, i), y))
2273 return 1;
2275 else if (fmt[i] == 'E')
2277 int j;
2278 for (j = 0; j < XVECLEN (x, i); j++)
2279 if (refers_to_p (XVECEXP (x, i, j), y))
2280 return 1;
2283 return 0;
2286 /* Given ADDR and SIZE (a memory address, and the size of the memory reference),
2287 set PBASE, PSTART, and PEND which correspond to the base of the address,
2288 the starting offset, and ending offset respectively.
2290 ADDR is known to be a nonvarying address. */
2292 /* ??? Despite what the comments say, this function is in fact frequently
2293 passed varying addresses. This does not appear to cause any problems. */
2295 static void
2296 set_nonvarying_address_components (addr, size, pbase, pstart, pend)
2297 rtx addr;
2298 int size;
2299 rtx *pbase;
2300 HOST_WIDE_INT *pstart, *pend;
2302 rtx base;
2303 HOST_WIDE_INT start, end;
2305 base = addr;
2306 start = 0;
2307 end = 0;
2309 /* Registers with nonvarying addresses usually have constant equivalents;
2310 but the frame pointer register is also possible. */
2311 if (GET_CODE (base) == REG
2312 && qty_const != 0
2313 && REGNO_QTY_VALID_P (REGNO (base))
2314 && qty_mode[reg_qty[REGNO (base)]] == GET_MODE (base)
2315 && qty_const[reg_qty[REGNO (base)]] != 0)
2316 base = qty_const[reg_qty[REGNO (base)]];
2317 else if (GET_CODE (base) == PLUS
2318 && GET_CODE (XEXP (base, 1)) == CONST_INT
2319 && GET_CODE (XEXP (base, 0)) == REG
2320 && qty_const != 0
2321 && REGNO_QTY_VALID_P (REGNO (XEXP (base, 0)))
2322 && (qty_mode[reg_qty[REGNO (XEXP (base, 0))]]
2323 == GET_MODE (XEXP (base, 0)))
2324 && qty_const[reg_qty[REGNO (XEXP (base, 0))]])
2326 start = INTVAL (XEXP (base, 1));
2327 base = qty_const[reg_qty[REGNO (XEXP (base, 0))]];
2329 /* This can happen as the result of virtual register instantiation,
2330 if the initial offset is too large to be a valid address. */
2331 else if (GET_CODE (base) == PLUS
2332 && GET_CODE (XEXP (base, 0)) == REG
2333 && GET_CODE (XEXP (base, 1)) == REG
2334 && qty_const != 0
2335 && REGNO_QTY_VALID_P (REGNO (XEXP (base, 0)))
2336 && (qty_mode[reg_qty[REGNO (XEXP (base, 0))]]
2337 == GET_MODE (XEXP (base, 0)))
2338 && qty_const[reg_qty[REGNO (XEXP (base, 0))]]
2339 && REGNO_QTY_VALID_P (REGNO (XEXP (base, 1)))
2340 && (qty_mode[reg_qty[REGNO (XEXP (base, 1))]]
2341 == GET_MODE (XEXP (base, 1)))
2342 && qty_const[reg_qty[REGNO (XEXP (base, 1))]])
2344 rtx tem = qty_const[reg_qty[REGNO (XEXP (base, 1))]];
2345 base = qty_const[reg_qty[REGNO (XEXP (base, 0))]];
2347 /* One of the two values must be a constant. */
2348 if (GET_CODE (base) != CONST_INT)
2350 if (GET_CODE (tem) != CONST_INT)
2351 abort ();
2352 start = INTVAL (tem);
2354 else
2356 start = INTVAL (base);
2357 base = tem;
2361 /* Handle everything that we can find inside an address that has been
2362 viewed as constant. */
2364 while (1)
2366 /* If no part of this switch does a "continue", the code outside
2367 will exit this loop. */
2369 switch (GET_CODE (base))
2371 case LO_SUM:
2372 /* By definition, operand1 of a LO_SUM is the associated constant
2373 address. Use the associated constant address as the base
2374 instead. */
2375 base = XEXP (base, 1);
2376 continue;
2378 case CONST:
2379 /* Strip off CONST. */
2380 base = XEXP (base, 0);
2381 continue;
2383 case PLUS:
2384 if (GET_CODE (XEXP (base, 1)) == CONST_INT)
2386 start += INTVAL (XEXP (base, 1));
2387 base = XEXP (base, 0);
2388 continue;
2390 break;
2392 case AND:
2393 /* Handle the case of an AND which is the negative of a power of
2394 two. This is used to represent unaligned memory operations. */
2395 if (GET_CODE (XEXP (base, 1)) == CONST_INT
2396 && exact_log2 (- INTVAL (XEXP (base, 1))) > 0)
2398 set_nonvarying_address_components (XEXP (base, 0), size,
2399 pbase, pstart, pend);
2401 /* Assume the worst misalignment. START is affected, but not
2402 END, so compensate by adjusting SIZE. Don't lose any
2403 constant we already had. */
2405 size = *pend - *pstart - INTVAL (XEXP (base, 1)) - 1;
2406 start += *pstart + INTVAL (XEXP (base, 1)) + 1;
2407 end += *pend;
2408 base = *pbase;
2410 break;
2412 default:
2413 break;
2416 break;
2419 if (GET_CODE (base) == CONST_INT)
2421 start += INTVAL (base);
2422 base = const0_rtx;
2425 end = start + size;
2427 /* Set the return values. */
2428 *pbase = base;
2429 *pstart = start;
2430 *pend = end;
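/* A worked example: for ADDR = (plus (reg fp) (const_int 8)) with
   SIZE = 4, where the register has no known constant equivalent, the
   PLUS arm of the loop peels off the constant, giving *PBASE = (reg fp),
   *PSTART = 8, *PEND = 12.  The AND arm handles forms such as
   (and ... (const_int -8)), an address rounded down to an 8-byte
   boundary; since the rounding could reach back several bytes, the
   recorded range is widened to cover every byte the unaligned access
   might touch.  */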
2433 /* Return 1 iff any subexpression of X refers to memory
2434 at an address of BASE plus some offset
2435 such that any of the bytes' offsets fall between START (inclusive)
2436 and END (exclusive).
2438 The value is undefined if X is a varying address (as determined by
2439 cse_rtx_addr_varies_p). This function is not used in such cases.
2441 When used in the cse pass, `qty_const' is nonzero, and it is used
2442 to treat an address that is a register with a known constant value
2443 as if it were that constant value.
2444 In the loop pass, `qty_const' is zero, so this is not done. */
2446 static int
2447 refers_to_mem_p (x, base, start, end)
2448 rtx x, base;
2449 HOST_WIDE_INT start, end;
2451 register HOST_WIDE_INT i;
2452 register enum rtx_code code;
2453 register char *fmt;
2455 repeat:
2456 if (x == 0)
2457 return 0;
2459 code = GET_CODE (x);
2460 if (code == MEM)
2462 register rtx addr = XEXP (x, 0); /* Get the address. */
2463 rtx mybase;
2464 HOST_WIDE_INT mystart, myend;
2466 set_nonvarying_address_components (addr, GET_MODE_SIZE (GET_MODE (x)),
2467 &mybase, &mystart, &myend);
2470 /* refers_to_mem_p is never called with varying addresses.
2471 If the base addresses are not equal, there is no chance
2472 of the memory addresses conflicting. */
2473 if (! rtx_equal_p (mybase, base))
2474 return 0;
2476 return myend > start && mystart < end;
2479 /* X does not match, so try its subexpressions. */
2481 fmt = GET_RTX_FORMAT (code);
2482 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2483 if (fmt[i] == 'e')
2485 if (i == 0)
2487 x = XEXP (x, 0);
2488 goto repeat;
2490 else
2491 if (refers_to_mem_p (XEXP (x, i), base, start, end))
2492 return 1;
2494 else if (fmt[i] == 'E')
2496 int j;
2497 for (j = 0; j < XVECLEN (x, i); j++)
2498 if (refers_to_mem_p (XVECEXP (x, i, j), base, start, end))
2499 return 1;
2502 return 0;
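/* "myend > start && mystart < end" is the standard overlap test for
   half-open byte ranges [START, END).  As a standalone sketch
   (hypothetical name):

	static int
	ranges_overlap (long start1, long end1, long start2, long end2)
	{
	  return end1 > start2 && start1 < end2;
	}

   So bytes [0,4) and [4,8) do not conflict, while [0,4) and [3,8) do.
   The test is only meaningful after both references have been reduced
   to the same BASE by set_nonvarying_address_components.  */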
2505 /* Nonzero if X refers to memory at a varying address;
2506 except that a register which has at the moment a known constant value
2507 isn't considered variable. */
2509 static int
2510 cse_rtx_addr_varies_p (x)
2511 rtx x;
2513 /* We need not check for X and the equivalence class being of the same
2514 mode because if X is equivalent to a constant in some mode, it
2515 doesn't vary in any mode. */
2517 if (GET_CODE (x) == MEM
2518 && GET_CODE (XEXP (x, 0)) == REG
2519 && REGNO_QTY_VALID_P (REGNO (XEXP (x, 0)))
2520 && GET_MODE (XEXP (x, 0)) == qty_mode[reg_qty[REGNO (XEXP (x, 0))]]
2521 && qty_const[reg_qty[REGNO (XEXP (x, 0))]] != 0)
2522 return 0;
2524 if (GET_CODE (x) == MEM
2525 && GET_CODE (XEXP (x, 0)) == PLUS
2526 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
2527 && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
2528 && REGNO_QTY_VALID_P (REGNO (XEXP (XEXP (x, 0), 0)))
2529 && (GET_MODE (XEXP (XEXP (x, 0), 0))
2530 == qty_mode[reg_qty[REGNO (XEXP (XEXP (x, 0), 0))]])
2531 && qty_const[reg_qty[REGNO (XEXP (XEXP (x, 0), 0))]])
2532 return 0;
2534 /* This can happen as the result of virtual register instantiation, if
2535 the initial constant is too large to be a valid address. This gives
2536 us a three instruction sequence, load large offset into a register,
2537 load fp minus a constant into a register, then a MEM which is the
2538 sum of the two `constant' registers. */
2539 if (GET_CODE (x) == MEM
2540 && GET_CODE (XEXP (x, 0)) == PLUS
2541 && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
2542 && GET_CODE (XEXP (XEXP (x, 0), 1)) == REG
2543 && REGNO_QTY_VALID_P (REGNO (XEXP (XEXP (x, 0), 0)))
2544 && (GET_MODE (XEXP (XEXP (x, 0), 0))
2545 == qty_mode[reg_qty[REGNO (XEXP (XEXP (x, 0), 0))]])
2546 && qty_const[reg_qty[REGNO (XEXP (XEXP (x, 0), 0))]]
2547 && REGNO_QTY_VALID_P (REGNO (XEXP (XEXP (x, 0), 1)))
2548 && (GET_MODE (XEXP (XEXP (x, 0), 1))
2549 == qty_mode[reg_qty[REGNO (XEXP (XEXP (x, 0), 1))]])
2550 && qty_const[reg_qty[REGNO (XEXP (XEXP (x, 0), 1))]])
2551 return 0;
2553 return rtx_addr_varies_p (x);
2556 /* Canonicalize an expression:
2557 replace each register reference inside it
2558 with the "oldest" equivalent register.
2560 If INSN is non-zero and we are replacing a pseudo with a hard register
2561 or vice versa, validate_change is used to ensure that INSN remains valid
2562 after we make our substitution. The calls are made with IN_GROUP non-zero
2563 so apply_change_group must be called upon the outermost return from this
2564 function (unless INSN is zero). The result of apply_change_group can
2565 generally be discarded since the changes we are making are optional. */
2567 static rtx
2568 canon_reg (x, insn)
2569 rtx x;
2570 rtx insn;
2572 register int i;
2573 register enum rtx_code code;
2574 register char *fmt;
2576 if (x == 0)
2577 return x;
2579 code = GET_CODE (x);
2580 switch (code)
2582 case PC:
2583 case CC0:
2584 case CONST:
2585 case CONST_INT:
2586 case CONST_DOUBLE:
2587 case SYMBOL_REF:
2588 case LABEL_REF:
2589 case ADDR_VEC:
2590 case ADDR_DIFF_VEC:
2591 return x;
2593 case REG:
2595 register int first;
2597 /* Never replace a hard reg, because hard regs can appear
2598 in more than one machine mode, and we must preserve the mode
2599 of each occurrence. Also, some hard regs appear in
2600 MEMs that are shared and mustn't be altered. Don't try to
2601 replace any reg that maps to a reg of class NO_REGS. */
2602 if (REGNO (x) < FIRST_PSEUDO_REGISTER
2603 || ! REGNO_QTY_VALID_P (REGNO (x)))
2604 return x;
2606 first = qty_first_reg[reg_qty[REGNO (x)]];
2607 return (first >= FIRST_PSEUDO_REGISTER ? regno_reg_rtx[first]
2608 : REGNO_REG_CLASS (first) == NO_REGS ? x
2609 : gen_rtx (REG, qty_mode[reg_qty[REGNO (x)]], first));
2612 default:
2613 break;
2616 fmt = GET_RTX_FORMAT (code);
2617 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2619 register int j;
2621 if (fmt[i] == 'e')
2623 rtx new = canon_reg (XEXP (x, i), insn);
2624 int insn_code;
2626 /* If replacing pseudo with hard reg or vice versa, ensure the
2627 insn remains valid. Likewise if the insn has MATCH_DUPs. */
2628 if (insn != 0 && new != 0
2629 && GET_CODE (new) == REG && GET_CODE (XEXP (x, i)) == REG
2630 && (((REGNO (new) < FIRST_PSEUDO_REGISTER)
2631 != (REGNO (XEXP (x, i)) < FIRST_PSEUDO_REGISTER))
2632 || (insn_code = recog_memoized (insn)) < 0
2633 || insn_n_dups[insn_code] > 0))
2634 validate_change (insn, &XEXP (x, i), new, 1);
2635 else
2636 XEXP (x, i) = new;
2638 else if (fmt[i] == 'E')
2639 for (j = 0; j < XVECLEN (x, i); j++)
2640 XVECEXP (x, i, j) = canon_reg (XVECEXP (x, i, j), insn);
2643 return x;
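/* The REG case above is a two-step array lookup: map the register to
   its quantity, then take the first (oldest) register recorded for
   that quantity.  In miniature, with hypothetical arrays standing in
   for reg_qty and qty_first_reg:

	static int toy_reg_qty[100];
	static int toy_qty_first_reg[100];

	static int
	toy_canon_reg (int regno)
	{
	  return toy_qty_first_reg[toy_reg_qty[regno]];
	}

   If a copy from (reg 60) to (reg 75) put both registers in one
   quantity, toy_canon_reg (75) yields 60, so later uses of 75 can be
   rewritten in terms of the older register.  */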
2646 /* LOC is a location within INSN that is an operand address (the contents of
2647 a MEM). Find the best equivalent address to use that is valid for this
2648 insn.
2650 On most CISC machines, complicated address modes are costly, and rtx_cost
2651 is a good approximation for that cost. However, most RISC machines have
2652 only a few (usually only one) memory reference formats. If an address is
2653 valid at all, it is often just as cheap as any other address. Hence, for
2654 RISC machines, we use the configuration macro `ADDRESS_COST' to compare the
2655 costs of various addresses. For two addresses of equal cost, choose the one
2656 with the highest `rtx_cost' value as that has the potential of eliminating
2657 the most insns. For equal costs, we choose the first in the equivalence
2658 class. Note that we ignore the fact that pseudo registers are cheaper
2659 than hard registers here because we would also prefer the pseudo registers. */
2662 static void
2663 find_best_addr (insn, loc)
2664 rtx insn;
2665 rtx *loc;
2667 struct table_elt *elt, *p;
2668 rtx addr = *loc;
2669 int our_cost;
2670 int found_better = 1;
2671 int save_do_not_record = do_not_record;
2672 int save_hash_arg_in_memory = hash_arg_in_memory;
2673 int save_hash_arg_in_struct = hash_arg_in_struct;
2674 int addr_volatile;
2675 int regno;
2676 unsigned hash;
2678 /* Do not try to replace constant addresses or addresses of local and
2679 argument slots. These MEM expressions are made only once and inserted
2680 in many instructions, as well as being used to control symbol table
2681 output. It is not safe to clobber them.
2683 There are some uncommon cases where the address is already in a register
2684 for some reason, but we cannot take advantage of that because we have
2685 no easy way to unshare the MEM. In addition, looking up all stack
2686 addresses is costly. */
2687 if ((GET_CODE (addr) == PLUS
2688 && GET_CODE (XEXP (addr, 0)) == REG
2689 && GET_CODE (XEXP (addr, 1)) == CONST_INT
2690 && (regno = REGNO (XEXP (addr, 0)),
2691 regno == FRAME_POINTER_REGNUM || regno == HARD_FRAME_POINTER_REGNUM
2692 || regno == ARG_POINTER_REGNUM))
2693 || (GET_CODE (addr) == REG
2694 && (regno = REGNO (addr), regno == FRAME_POINTER_REGNUM
2695 || regno == HARD_FRAME_POINTER_REGNUM
2696 || regno == ARG_POINTER_REGNUM))
2697 || GET_CODE (addr) == ADDRESSOF
2698 || CONSTANT_ADDRESS_P (addr))
2699 return;
2701 /* If this address is not simply a register, try to fold it. This will
2702 sometimes simplify the expression. Many simplifications
2703 will not be valid, but some, usually applying the associative rule, will
2704 be valid and produce better code. */
2705 if (GET_CODE (addr) != REG)
2707 rtx folded = fold_rtx (copy_rtx (addr), NULL_RTX);
2709 if (1
2710 #ifdef ADDRESS_COST
2711 && (ADDRESS_COST (folded) < ADDRESS_COST (addr)
2712 || (ADDRESS_COST (folded) == ADDRESS_COST (addr)
2713 && rtx_cost (folded, MEM) > rtx_cost (addr, MEM)))
2714 #else
2715 && rtx_cost (folded, MEM) < rtx_cost (addr, MEM)
2716 #endif
2717 && validate_change (insn, loc, folded, 0))
2718 addr = folded;
2721 /* If this address is not in the hash table, we can't look for equivalences
2722 of the whole address. Also, ignore if volatile. */
2724 do_not_record = 0;
2725 hash = HASH (addr, Pmode);
2726 addr_volatile = do_not_record;
2727 do_not_record = save_do_not_record;
2728 hash_arg_in_memory = save_hash_arg_in_memory;
2729 hash_arg_in_struct = save_hash_arg_in_struct;
2731 if (addr_volatile)
2732 return;
2734 elt = lookup (addr, hash, Pmode);
2736 #ifndef ADDRESS_COST
2737 if (elt)
2739 our_cost = elt->cost;
2741 /* Find the lowest cost below ours that works. */
2742 for (elt = elt->first_same_value; elt; elt = elt->next_same_value)
2743 if (elt->cost < our_cost
2744 && (GET_CODE (elt->exp) == REG
2745 || exp_equiv_p (elt->exp, elt->exp, 1, 0))
2746 && validate_change (insn, loc,
2747 canon_reg (copy_rtx (elt->exp), NULL_RTX), 0))
2748 return;
2750 #else
2752 if (elt)
2754 /* We need to find the best (under the criteria documented above) entry
2755 in the class that is valid. We use the `flag' field to indicate
2756 choices that were invalid and iterate until we can't find a better
2757 one that hasn't already been tried. */
2759 for (p = elt->first_same_value; p; p = p->next_same_value)
2760 p->flag = 0;
2762 while (found_better)
2764 int best_addr_cost = ADDRESS_COST (*loc);
2765 int best_rtx_cost = (elt->cost + 1) >> 1;
2766 struct table_elt *best_elt = elt;
2768 found_better = 0;
2769 for (p = elt->first_same_value; p; p = p->next_same_value)
2770 if (! p->flag
2771 && (GET_CODE (p->exp) == REG
2772 || exp_equiv_p (p->exp, p->exp, 1, 0))
2773 && (ADDRESS_COST (p->exp) < best_addr_cost
2774 || (ADDRESS_COST (p->exp) == best_addr_cost
2775 && (p->cost + 1) >> 1 > best_rtx_cost)))
2777 found_better = 1;
2778 best_addr_cost = ADDRESS_COST (p->exp);
2779 best_rtx_cost = (p->cost + 1) >> 1;
2780 best_elt = p;
2783 if (found_better)
2785 if (validate_change (insn, loc,
2786 canon_reg (copy_rtx (best_elt->exp),
2787 NULL_RTX), 0))
2788 return;
2789 else
2790 best_elt->flag = 1;
2795 /* If the address is a binary operation with the first operand a register
2796 and the second a constant, do the same as above, but looking for
2797 equivalences of the register. Then try to simplify before checking for
2798 the best address to use. This catches a few cases: First is when we
2799 have REG+const and the register is another REG+const. We can often merge
2800 the constants and eliminate one insn and one register. It may also be
2801 that a machine has a cheap REG+REG+const. Finally, this improves the
2802 code on the Alpha for unaligned byte stores. */
2804 if (flag_expensive_optimizations
2805 && (GET_RTX_CLASS (GET_CODE (*loc)) == '2'
2806 || GET_RTX_CLASS (GET_CODE (*loc)) == 'c')
2807 && GET_CODE (XEXP (*loc, 0)) == REG
2808 && GET_CODE (XEXP (*loc, 1)) == CONST_INT)
2810 rtx c = XEXP (*loc, 1);
2812 do_not_record = 0;
2813 hash = HASH (XEXP (*loc, 0), Pmode);
2814 do_not_record = save_do_not_record;
2815 hash_arg_in_memory = save_hash_arg_in_memory;
2816 hash_arg_in_struct = save_hash_arg_in_struct;
2818 elt = lookup (XEXP (*loc, 0), hash, Pmode);
2819 if (elt == 0)
2820 return;
2822 /* We need to find the best (under the criteria documented above) entry
2823 in the class that is valid. We use the `flag' field to indicate
2824 choices that were invalid and iterate until we can't find a better
2825 one that hasn't already been tried. */
2827 for (p = elt->first_same_value; p; p = p->next_same_value)
2828 p->flag = 0;
2830 while (found_better)
2832 int best_addr_cost = ADDRESS_COST (*loc);
2833 int best_rtx_cost = (COST (*loc) + 1) >> 1;
2834 struct table_elt *best_elt = elt;
2835 rtx best_rtx = *loc;
2836 int count;
2838 /* This is in the worst case an O(n^2) algorithm, so limit our search
2839 to the first 32 elements on the list. This avoids trouble
2840 compiling code with very long basic blocks that can easily
2841 call cse_gen_binary so many times that we run out of memory. */
2843 found_better = 0;
2844 for (p = elt->first_same_value, count = 0;
2845 p && count < 32;
2846 p = p->next_same_value, count++)
2847 if (! p->flag
2848 && (GET_CODE (p->exp) == REG
2849 || exp_equiv_p (p->exp, p->exp, 1, 0)))
2851 rtx new = cse_gen_binary (GET_CODE (*loc), Pmode, p->exp, c);
2853 if ((ADDRESS_COST (new) < best_addr_cost
2854 || (ADDRESS_COST (new) == best_addr_cost
2855 && (COST (new) + 1) >> 1 > best_rtx_cost)))
2857 found_better = 1;
2858 best_addr_cost = ADDRESS_COST (new);
2859 best_rtx_cost = (COST (new) + 1) >> 1;
2860 best_elt = p;
2861 best_rtx = new;
2865 if (found_better)
2867 if (validate_change (insn, loc,
2868 canon_reg (copy_rtx (best_rtx),
2869 NULL_RTX), 0))
2870 return;
2871 else
2872 best_elt->flag = 1;
2876 #endif
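/* Both selection loops above implement the same rule: minimize the
   address cost, and among addresses of equal cost prefer the larger
   rtx_cost, since replacing a bigger expression eliminates more insns.
   The comparison, as a standalone sketch (hypothetical name):

	static int
	toy_better_addr (int addr_cost, int rtx_cost,
			 int best_addr_cost, int best_rtx_cost)
	{
	  return addr_cost < best_addr_cost
		 || (addr_cost == best_addr_cost
		     && rtx_cost > best_rtx_cost);
	}

   The `flag' field then marks entries rejected by validate_change so
   that each candidate is tried at most once.  */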
2879 /* Given an operation (CODE, *PARG1, *PARG2), where code is a comparison
2880 operation (EQ, NE, GT, etc.), follow it back through the hash table and
2881 find what values are being compared.
2883 *PARG1 and *PARG2 are updated to contain the rtx representing the values
2884 actually being compared. For example, if *PARG1 was (cc0) and *PARG2
2885 was (const_int 0), *PARG1 and *PARG2 will be set to the objects that were
2886 compared to produce cc0.
2888 The return value is the comparison operator and is either CODE itself
2889 or the code corresponding to the inverse of the comparison. */
2891 static enum rtx_code
2892 find_comparison_args (code, parg1, parg2, pmode1, pmode2)
2893 enum rtx_code code;
2894 rtx *parg1, *parg2;
2895 enum machine_mode *pmode1, *pmode2;
2897 rtx arg1, arg2;
2899 arg1 = *parg1, arg2 = *parg2;
2901 /* If ARG2 is const0_rtx, see what ARG1 is equivalent to. */
2903 while (arg2 == CONST0_RTX (GET_MODE (arg1)))
2905 /* Set non-zero when we find something of interest. */
2906 rtx x = 0;
2907 int reverse_code = 0;
2908 struct table_elt *p = 0;
2910 /* If arg1 is a COMPARE, extract the comparison arguments from it.
2911 On machines with CC0, this is the only case that can occur, since
2912 fold_rtx will return the COMPARE or item being compared with zero
2913 when given CC0. */
2915 if (GET_CODE (arg1) == COMPARE && arg2 == const0_rtx)
2916 x = arg1;
2918 /* If ARG1 is a comparison operator and CODE is testing for
2919 STORE_FLAG_VALUE, get the inner arguments. */
2921 else if (GET_RTX_CLASS (GET_CODE (arg1)) == '<')
2923 if (code == NE
2924 || (GET_MODE_CLASS (GET_MODE (arg1)) == MODE_INT
2925 && code == LT && STORE_FLAG_VALUE == -1)
2926 #ifdef FLOAT_STORE_FLAG_VALUE
2927 || (GET_MODE_CLASS (GET_MODE (arg1)) == MODE_FLOAT
2928 && FLOAT_STORE_FLAG_VALUE < 0)
2929 #endif
2931 x = arg1;
2932 else if (code == EQ
2933 || (GET_MODE_CLASS (GET_MODE (arg1)) == MODE_INT
2934 && code == GE && STORE_FLAG_VALUE == -1)
2935 #ifdef FLOAT_STORE_FLAG_VALUE
2936 || (GET_MODE_CLASS (GET_MODE (arg1)) == MODE_FLOAT
2937 && FLOAT_STORE_FLAG_VALUE < 0)
2938 #endif
2940 x = arg1, reverse_code = 1;
2943 /* ??? We could also check for
2945 (ne (and (eq (...) (const_int 1))) (const_int 0))
2947 and related forms, but let's wait until we see them occurring. */
2949 if (x == 0)
2950 /* Look up ARG1 in the hash table and see if it has an equivalence
2951 that lets us see what is being compared. */
2952 p = lookup (arg1, safe_hash (arg1, GET_MODE (arg1)) % NBUCKETS,
2953 GET_MODE (arg1));
2954 if (p) p = p->first_same_value;
2956 for (; p; p = p->next_same_value)
2958 enum machine_mode inner_mode = GET_MODE (p->exp);
2960 /* If the entry isn't valid, skip it. */
2961 if (! exp_equiv_p (p->exp, p->exp, 1, 0))
2962 continue;
2964 if (GET_CODE (p->exp) == COMPARE
2965 /* Another possibility is that this machine has a compare insn
2966 that includes the comparison code. In that case, ARG1 would
2967 be equivalent to a comparison operation that would set ARG1 to
2968 either STORE_FLAG_VALUE or zero. If this is an NE operation,
2969 ORIG_CODE is the actual comparison being done; if it is an EQ,
2970 we must reverse ORIG_CODE. On machines with a negative value
2971 for STORE_FLAG_VALUE, also look at LT and GE operations. */
2972 || ((code == NE
2973 || (code == LT
2974 && GET_MODE_CLASS (inner_mode) == MODE_INT
2975 && (GET_MODE_BITSIZE (inner_mode)
2976 <= HOST_BITS_PER_WIDE_INT)
2977 && (STORE_FLAG_VALUE
2978 & ((HOST_WIDE_INT) 1
2979 << (GET_MODE_BITSIZE (inner_mode) - 1))))
2980 #ifdef FLOAT_STORE_FLAG_VALUE
2981 || (code == LT
2982 && GET_MODE_CLASS (inner_mode) == MODE_FLOAT
2983 && FLOAT_STORE_FLAG_VALUE < 0)
2984 #endif
2986 && GET_RTX_CLASS (GET_CODE (p->exp)) == '<'))
2988 x = p->exp;
2989 break;
2991 else if ((code == EQ
2992 || (code == GE
2993 && GET_MODE_CLASS (inner_mode) == MODE_INT
2994 && (GET_MODE_BITSIZE (inner_mode)
2995 <= HOST_BITS_PER_WIDE_INT)
2996 && (STORE_FLAG_VALUE
2997 & ((HOST_WIDE_INT) 1
2998 << (GET_MODE_BITSIZE (inner_mode) - 1))))
2999 #ifdef FLOAT_STORE_FLAG_VALUE
3000 || (code == GE
3001 && GET_MODE_CLASS (inner_mode) == MODE_FLOAT
3002 && FLOAT_STORE_FLAG_VALUE < 0)
3003 #endif
3005 && GET_RTX_CLASS (GET_CODE (p->exp)) == '<')
3007 reverse_code = 1;
3008 x = p->exp;
3009 break;
3012 /* If this is fp + constant, the equivalent is a better operand since
3013 it may let us predict the value of the comparison. */
3014 else if (NONZERO_BASE_PLUS_P (p->exp))
3016 arg1 = p->exp;
3017 continue;
3021 /* If we didn't find a useful equivalence for ARG1, we are done.
3022 Otherwise, set up for the next iteration. */
3023 if (x == 0)
3024 break;
3026 arg1 = XEXP (x, 0), arg2 = XEXP (x, 1);
3027 if (GET_RTX_CLASS (GET_CODE (x)) == '<')
3028 code = GET_CODE (x);
3030 if (reverse_code)
3031 code = reverse_condition (code);
3034 /* Return our results. Return the modes from before fold_rtx
3035 because fold_rtx might produce const_int, and then it's too late. */
3036 *pmode1 = GET_MODE (arg1), *pmode2 = GET_MODE (arg2);
3037 *parg1 = fold_rtx (arg1, 0), *parg2 = fold_rtx (arg2, 0);
3039 return code;
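/* reverse_condition (defined elsewhere in the compiler) maps a
   comparison code to its logical negation.  A standalone sketch
   covering only the signed integer codes (hypothetical names):

	enum toy_cond { TOY_EQ, TOY_NE, TOY_LT, TOY_GE, TOY_GT, TOY_LE };

	static enum toy_cond
	toy_reverse (enum toy_cond c)
	{
	  switch (c)
	    {
	    case TOY_EQ: return TOY_NE;
	    case TOY_NE: return TOY_EQ;
	    case TOY_LT: return TOY_GE;
	    case TOY_GE: return TOY_LT;
	    case TOY_GT: return TOY_LE;
	    case TOY_LE: return TOY_GT;
	    }
	  return c;
	}

   So when the loop above discovers that ARG1 was set by (eq A B) but
   CODE tests it with EQ, REVERSE_CODE arranges for the overall test to
   become (ne A B).  */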
3042 /* Try to simplify a unary operation CODE whose output mode is to be
3043 MODE with input operand OP whose mode was originally OP_MODE.
3044 Return zero if no simplification can be made. */
3046 rtx
3047 simplify_unary_operation (code, mode, op, op_mode)
3048 enum rtx_code code;
3049 enum machine_mode mode;
3050 rtx op;
3051 enum machine_mode op_mode;
3053 register int width = GET_MODE_BITSIZE (mode);
3055 /* The order of these tests is critical so that, for example, we don't
3056 check the wrong mode (input vs. output) for a conversion operation,
3057 such as FIX. At some point, this should be simplified. */
3059 #if !defined(REAL_IS_NOT_DOUBLE) || defined(REAL_ARITHMETIC)
3061 if (code == FLOAT && GET_MODE (op) == VOIDmode
3062 && (GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT))
3064 HOST_WIDE_INT hv, lv;
3065 REAL_VALUE_TYPE d;
3067 if (GET_CODE (op) == CONST_INT)
3068 lv = INTVAL (op), hv = INTVAL (op) < 0 ? -1 : 0;
3069 else
3070 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
3072 #ifdef REAL_ARITHMETIC
3073 REAL_VALUE_FROM_INT (d, lv, hv, mode);
3074 #else
3075 if (hv < 0)
3077 d = (double) (~ hv);
3078 d *= ((double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2))
3079 * (double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2)));
3080 d += (double) (unsigned HOST_WIDE_INT) (~ lv);
3081 d = (- d - 1.0);
3083 else
3085 d = (double) hv;
3086 d *= ((double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2))
3087 * (double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2)));
3088 d += (double) (unsigned HOST_WIDE_INT) lv;
3090 #endif /* REAL_ARITHMETIC */
3091 d = real_value_truncate (mode, d);
3092 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
3094 else if (code == UNSIGNED_FLOAT && GET_MODE (op) == VOIDmode
3095 && (GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT))
3097 HOST_WIDE_INT hv, lv;
3098 REAL_VALUE_TYPE d;
3100 if (GET_CODE (op) == CONST_INT)
3101 lv = INTVAL (op), hv = INTVAL (op) < 0 ? -1 : 0;
3102 else
3103 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
3105 if (op_mode == VOIDmode)
3107 /* We don't know how to interpret negative-looking numbers in
3108 this case, so don't try to fold those. */
3109 if (hv < 0)
3110 return 0;
3112 else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
3113 ; /* The value already fills both words; no masking is needed. */
3114 else
3115 hv = 0, lv &= GET_MODE_MASK (op_mode);
3117 #ifdef REAL_ARITHMETIC
3118 REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
3119 #else
3121 d = (double) (unsigned HOST_WIDE_INT) hv;
3122 d *= ((double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2))
3123 * (double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2)));
3124 d += (double) (unsigned HOST_WIDE_INT) lv;
3125 #endif /* REAL_ARITHMETIC */
3126 d = real_value_truncate (mode, d);
3127 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
3129 #endif
3131 if (GET_CODE (op) == CONST_INT
3132 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
3134 register HOST_WIDE_INT arg0 = INTVAL (op);
3135 register HOST_WIDE_INT val;
3137 switch (code)
3139 case NOT:
3140 val = ~ arg0;
3141 break;
3143 case NEG:
3144 val = - arg0;
3145 break;
3147 case ABS:
3148 val = (arg0 >= 0 ? arg0 : - arg0);
3149 break;
3151 case FFS:
3152 /* Don't use ffs here. Instead, get low order bit and then its
3153 number. If arg0 is zero, this will return 0, as desired. */
3154 arg0 &= GET_MODE_MASK (mode);
3155 val = exact_log2 (arg0 & (- arg0)) + 1;
3156 break;
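/* Example: for arg0 == 12 (binary 1100), arg0 & -arg0 isolates the
   lowest set bit, giving 4; exact_log2 (4) is 2; so val is 3,
   matching ffs's 1-based bit numbering.  For arg0 == 0 the AND
   yields 0, exact_log2 returns -1, and val becomes 0.  */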
3158 case TRUNCATE:
3159 val = arg0;
3160 break;
3162 case ZERO_EXTEND:
3163 if (op_mode == VOIDmode)
3164 op_mode = mode;
3165 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
3167 /* If we were really extending the mode,
3168 we would have to distinguish between zero-extension
3169 and sign-extension. */
3170 if (width != GET_MODE_BITSIZE (op_mode))
3171 abort ();
3172 val = arg0;
3174 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
3175 val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
3176 else
3177 return 0;
3178 break;
3180 case SIGN_EXTEND:
3181 if (op_mode == VOIDmode)
3182 op_mode = mode;
3183 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
3185 /* If we were really extending the mode,
3186 we would have to distinguish between zero-extension
3187 and sign-extension. */
3188 if (width != GET_MODE_BITSIZE (op_mode))
3189 abort ();
3190 val = arg0;
3192 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
3194 val
3195 = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
3196 if (val
3197 & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
3198 val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
3200 else
3201 return 0;
3202 break;
3204 case SQRT:
3205 return 0;
3207 default:
3208 abort ();
3211 /* Clear the bits that don't belong in our mode,
3212 unless they and our sign bit are all one.
3213 So we get either a reasonable negative value or a reasonable
3214 unsigned value for this mode. */
3215 if (width < HOST_BITS_PER_WIDE_INT
3216 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
3217 != ((HOST_WIDE_INT) (-1) << (width - 1))))
3218 val &= ((HOST_WIDE_INT) 1 << width) - 1;
3220 return GEN_INT (val);
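/* Example for the SIGN_EXTEND arm above: with op_mode QImode (8 bits)
   and arg0 == 0xff, masking leaves val == 0xff; the sign bit (0x80)
   is set, so 0x100 is subtracted, yielding -1, the value that byte
   denotes as a signed quantity.  ZERO_EXTEND of the same byte keeps
   255.  */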
3223 /* We can do some operations on integer CONST_DOUBLEs. Also allow
3224 for a DImode operation on a CONST_INT. */
3225 else if (GET_MODE (op) == VOIDmode && width <= HOST_BITS_PER_INT * 2
3226 && (GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT))
3228 HOST_WIDE_INT l1, h1, lv, hv;
3230 if (GET_CODE (op) == CONST_DOUBLE)
3231 l1 = CONST_DOUBLE_LOW (op), h1 = CONST_DOUBLE_HIGH (op);
3232 else
3233 l1 = INTVAL (op), h1 = l1 < 0 ? -1 : 0;
3235 switch (code)
3237 case NOT:
3238 lv = ~ l1;
3239 hv = ~ h1;
3240 break;
3242 case NEG:
3243 neg_double (l1, h1, &lv, &hv);
3244 break;
3246 case ABS:
3247 if (h1 < 0)
3248 neg_double (l1, h1, &lv, &hv);
3249 else
3250 lv = l1, hv = h1;
3251 break;
3253 case FFS:
3254 hv = 0;
3255 if (l1 == 0)
3256 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & (-h1)) + 1;
3257 else
3258 lv = exact_log2 (l1 & (-l1)) + 1;
3259 break;
3261 case TRUNCATE:
3262 /* This is just a change-of-mode, so do nothing. */
3263 lv = l1, hv = h1;
3264 break;
3266 case ZERO_EXTEND:
3267 if (op_mode == VOIDmode
3268 || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
3269 return 0;
3271 hv = 0;
3272 lv = l1 & GET_MODE_MASK (op_mode);
3273 break;
3275 case SIGN_EXTEND:
3276 if (op_mode == VOIDmode
3277 || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
3278 return 0;
3279 else
3281 lv = l1 & GET_MODE_MASK (op_mode);
3282 if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
3283 && (lv & ((HOST_WIDE_INT) 1
3284 << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
3285 lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
3287 hv = (lv < 0) ? ~ (HOST_WIDE_INT) 0 : 0;
3289 break;
3291 case SQRT:
3292 return 0;
3294 default:
3295 return 0;
3298 return immed_double_const (lv, hv, mode);
3301 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
3302 else if (GET_CODE (op) == CONST_DOUBLE
3303 && GET_MODE_CLASS (mode) == MODE_FLOAT)
3305 REAL_VALUE_TYPE d;
3306 jmp_buf handler;
3307 rtx x;
3309 if (setjmp (handler))
3310 /* There used to be a warning here, but that is inadvisable.
3311 People may want to cause traps, and the natural way
3312 to do it should not get a warning. */
3313 return 0;
3315 set_float_handler (handler);
3317 REAL_VALUE_FROM_CONST_DOUBLE (d, op);
3319 switch (code)
3321 case NEG:
3322 d = REAL_VALUE_NEGATE (d);
3323 break;
3325 case ABS:
3326 if (REAL_VALUE_NEGATIVE (d))
3327 d = REAL_VALUE_NEGATE (d);
3328 break;
3330 case FLOAT_TRUNCATE:
3331 d = real_value_truncate (mode, d);
3332 break;
3334 case FLOAT_EXTEND:
3335 /* All this does is change the mode. */
3336 break;
3338 case FIX:
3339 d = REAL_VALUE_RNDZINT (d);
3340 break;
3342 case UNSIGNED_FIX:
3343 d = REAL_VALUE_UNSIGNED_RNDZINT (d);
3344 break;
3346 case SQRT:
3347 return 0;
3349 default:
3350 abort ();
3353 x = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
3354 set_float_handler (NULL_PTR);
3355 return x;
3358 else if (GET_CODE (op) == CONST_DOUBLE
3359 && GET_MODE_CLASS (GET_MODE (op)) == MODE_FLOAT
3360 && GET_MODE_CLASS (mode) == MODE_INT
3361 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
3363 REAL_VALUE_TYPE d;
3364 jmp_buf handler;
3365 HOST_WIDE_INT val;
3367 if (setjmp (handler))
3368 return 0;
3370 set_float_handler (handler);
3372 REAL_VALUE_FROM_CONST_DOUBLE (d, op);
3374 switch (code)
3376 case FIX:
3377 val = REAL_VALUE_FIX (d);
3378 break;
3380 case UNSIGNED_FIX:
3381 val = REAL_VALUE_UNSIGNED_FIX (d);
3382 break;
3384 default:
3385 abort ();
3388 set_float_handler (NULL_PTR);
3390 /* Clear the bits that don't belong in our mode,
3391 unless they and our sign bit are all one.
3392 So we get either a reasonable negative value or a reasonable
3393 unsigned value for this mode. */
3394 if (width < HOST_BITS_PER_WIDE_INT
3395 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
3396 != ((HOST_WIDE_INT) (-1) << (width - 1))))
3397 val &= ((HOST_WIDE_INT) 1 << width) - 1;
3399 /* If this would be an entire word for the target, but is not for
3400 the host, then sign-extend on the host so that the number will look
3401 the same way on the host that it would on the target.
3403 For example, when building a 64 bit alpha hosted 32 bit sparc
3404 targeted compiler, then we want the 32 bit unsigned value -1 to be
3405 represented as a 64 bit value -1, and not as 0x00000000ffffffff.
3406 The latter confuses the sparc backend. */
3408 if (BITS_PER_WORD < HOST_BITS_PER_WIDE_INT && BITS_PER_WORD == width
3409 && (val & ((HOST_WIDE_INT) 1 << (width - 1))))
3410 val |= ((HOST_WIDE_INT) (-1) << width);
3412 return GEN_INT (val);
3414 #endif
3415 /* This was formerly used only for non-IEEE float.
3416 eggert@twinsun.com says it is safe for IEEE also. */
3417 else
3419 /* There are some simplifications we can do even if the operands
3420 aren't constant. */
3421 switch (code)
3423 case NEG:
3424 case NOT:
3425 /* (not (not X)) == X, similarly for NEG. */
3426 if (GET_CODE (op) == code)
3427 return XEXP (op, 0);
3428 break;
3430 case SIGN_EXTEND:
3431 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
3432 becomes just the MINUS if its mode is MODE. This allows
3433 folding switch statements on machines using casesi (such as
3434 the Vax). */
3435 if (GET_CODE (op) == TRUNCATE
3436 && GET_MODE (XEXP (op, 0)) == mode
3437 && GET_CODE (XEXP (op, 0)) == MINUS
3438 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
3439 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
3440 return XEXP (op, 0);
3442 #ifdef POINTERS_EXTEND_UNSIGNED
3443 if (! POINTERS_EXTEND_UNSIGNED
3444 && mode == Pmode && GET_MODE (op) == ptr_mode
3445 && CONSTANT_P (op))
3446 return convert_memory_address (Pmode, op);
3447 #endif
3448 break;
3450 #ifdef POINTERS_EXTEND_UNSIGNED
3451 case ZERO_EXTEND:
3452 if (POINTERS_EXTEND_UNSIGNED
3453 && mode == Pmode && GET_MODE (op) == ptr_mode
3454 && CONSTANT_P (op))
3455 return convert_memory_address (Pmode, op);
3456 break;
3457 #endif
3459 default:
3460 break;
3463 return 0;
3467 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
3468 and OP1. Return 0 if no simplification is possible.
3470 Don't use this for relational operations such as EQ or LT.
3471 Use simplify_relational_operation instead. */
3473 rtx
3474 simplify_binary_operation (code, mode, op0, op1)
3475 enum rtx_code code;
3476 enum machine_mode mode;
3477 rtx op0, op1;
3479 register HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
3480 HOST_WIDE_INT val;
3481 int width = GET_MODE_BITSIZE (mode);
3482 rtx tem;
3484 /* Relational operations don't work here. We must know the mode
3485 of the operands in order to do the comparison correctly.
3486 Assuming a full word can give incorrect results.
3487 Consider comparing 128 with -128 in QImode. */
3489 if (GET_RTX_CLASS (code) == '<')
3490 abort ();
3492 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
3493 if (GET_MODE_CLASS (mode) == MODE_FLOAT
3494 && GET_CODE (op0) == CONST_DOUBLE && GET_CODE (op1) == CONST_DOUBLE
3495 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
3497 REAL_VALUE_TYPE f0, f1, value;
3498 jmp_buf handler;
3500 if (setjmp (handler))
3501 return 0;
3503 set_float_handler (handler);
3505 REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
3506 REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
3507 f0 = real_value_truncate (mode, f0);
3508 f1 = real_value_truncate (mode, f1);
3510 #ifdef REAL_ARITHMETIC
3511 #ifndef REAL_INFINITY
3512 if (code == DIV && REAL_VALUES_EQUAL (f1, dconst0))
3513 return 0;
3514 #endif
3515 REAL_ARITHMETIC (value, rtx_to_tree_code (code), f0, f1);
3516 #else
3517 switch (code)
3519 case PLUS:
3520 value = f0 + f1;
3521 break;
3522 case MINUS:
3523 value = f0 - f1;
3524 break;
3525 case MULT:
3526 value = f0 * f1;
3527 break;
3528 case DIV:
3529 #ifndef REAL_INFINITY
3530 if (f1 == 0)
3531 return 0;
3532 #endif
3533 value = f0 / f1;
3534 break;
3535 case SMIN:
3536 value = MIN (f0, f1);
3537 break;
3538 case SMAX:
3539 value = MAX (f0, f1);
3540 break;
3541 default:
3542 abort ();
3544 #endif
3546 value = real_value_truncate (mode, value);
3547 set_float_handler (NULL_PTR);
3548 return CONST_DOUBLE_FROM_REAL_VALUE (value, mode);
3550 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
3552 /* We can fold some multi-word operations. */
3553 if (GET_MODE_CLASS (mode) == MODE_INT
3554 && width == HOST_BITS_PER_WIDE_INT * 2
3555 && (GET_CODE (op0) == CONST_DOUBLE || GET_CODE (op0) == CONST_INT)
3556 && (GET_CODE (op1) == CONST_DOUBLE || GET_CODE (op1) == CONST_INT))
3558 HOST_WIDE_INT l1, l2, h1, h2, lv, hv;
3560 if (GET_CODE (op0) == CONST_DOUBLE)
3561 l1 = CONST_DOUBLE_LOW (op0), h1 = CONST_DOUBLE_HIGH (op0);
3562 else
3563 l1 = INTVAL (op0), h1 = l1 < 0 ? -1 : 0;
3565 if (GET_CODE (op1) == CONST_DOUBLE)
3566 l2 = CONST_DOUBLE_LOW (op1), h2 = CONST_DOUBLE_HIGH (op1);
3567 else
3568 l2 = INTVAL (op1), h2 = l2 < 0 ? -1 : 0;
3570 switch (code)
3572 case MINUS:
3573 /* A - B == A + (-B). */
3574 neg_double (l2, h2, &lv, &hv);
3575 l2 = lv, h2 = hv;
3577 /* .. fall through ... */
3579 case PLUS:
3580 add_double (l1, h1, l2, h2, &lv, &hv);
3581 break;
3583 case MULT:
3584 mul_double (l1, h1, l2, h2, &lv, &hv);
3585 break;
3587 case DIV: case MOD: case UDIV: case UMOD:
3588 /* We'd need to include tree.h to do this and it doesn't seem worth
3589 it. */
3590 return 0;
3592 case AND:
3593 lv = l1 & l2, hv = h1 & h2;
3594 break;
3596 case IOR:
3597 lv = l1 | l2, hv = h1 | h2;
3598 break;
3600 case XOR:
3601 lv = l1 ^ l2, hv = h1 ^ h2;
3602 break;
3604 case SMIN:
3605 if (h1 < h2
3606 || (h1 == h2
3607 && ((unsigned HOST_WIDE_INT) l1
3608 < (unsigned HOST_WIDE_INT) l2)))
3609 lv = l1, hv = h1;
3610 else
3611 lv = l2, hv = h2;
3612 break;
3614 case SMAX:
3615 if (h1 > h2
3616 || (h1 == h2
3617 && ((unsigned HOST_WIDE_INT) l1
3618 > (unsigned HOST_WIDE_INT) l2)))
3619 lv = l1, hv = h1;
3620 else
3621 lv = l2, hv = h2;
3622 break;
3624 case UMIN:
3625 if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
3626 || (h1 == h2
3627 && ((unsigned HOST_WIDE_INT) l1
3628 < (unsigned HOST_WIDE_INT) l2)))
3629 lv = l1, hv = h1;
3630 else
3631 lv = l2, hv = h2;
3632 break;
3634 case UMAX:
3635 if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
3636 || (h1 == h2
3637 && ((unsigned HOST_WIDE_INT) l1
3638 > (unsigned HOST_WIDE_INT) l2)))
3639 lv = l1, hv = h1;
3640 else
3641 lv = l2, hv = h2;
3642 break;
3644 case LSHIFTRT: case ASHIFTRT:
3645 case ASHIFT:
3646 case ROTATE: case ROTATERT:
3647 #ifdef SHIFT_COUNT_TRUNCATED
3648 if (SHIFT_COUNT_TRUNCATED)
3649 l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
3650 #endif
3652 if (h2 != 0 || l2 < 0 || l2 >= GET_MODE_BITSIZE (mode))
3653 return 0;
3655 if (code == LSHIFTRT || code == ASHIFTRT)
3656 rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
3657 code == ASHIFTRT);
3658 else if (code == ASHIFT)
3659 lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
3660 else if (code == ROTATE)
3661 lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
3662 else /* code == ROTATERT */
3663 rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
3664 break;
3666 default:
3667 return 0;
3670 return immed_double_const (lv, hv, mode);
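/* Example for the two-word comparisons above: with 32-bit words,
   (h1,l1) = (0, 0x80000000) encodes 2^31 and (h2,l2) = (1, 0) encodes
   2^32.  The high words decide (0 < 1), so SMIN picks the first pair
   even though its low word is larger when viewed unsigned; hence the
   signed test on the high halves and the unsigned tie-break on the
   low halves.  */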
3673 if (GET_CODE (op0) != CONST_INT || GET_CODE (op1) != CONST_INT
3674 || width > HOST_BITS_PER_WIDE_INT || width == 0)
3676 /* Even if we can't compute a constant result,
3677 there are some cases worth simplifying. */
3679 switch (code)
3681 case PLUS:
3682 /* In IEEE floating point, x+0 is not the same as x. Similarly
3683 for the other optimizations below. */
3684 if (TARGET_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
3685 && FLOAT_MODE_P (mode) && ! flag_fast_math)
3686 break;
3688 if (op1 == CONST0_RTX (mode))
3689 return op0;
3691 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)) */
3692 if (GET_CODE (op0) == NEG)
3693 return cse_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
3694 else if (GET_CODE (op1) == NEG)
3695 return cse_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
3697 /* Handle both-operands-constant cases. We can only add
3698 CONST_INTs to constants since the sum of relocatable symbols
3699 can't be handled by most assemblers. Don't add CONST_INT
3700 to CONST_INT since overflow won't be computed properly if wider
3701 than HOST_BITS_PER_WIDE_INT. */
3703 if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
3704 && GET_CODE (op1) == CONST_INT)
3705 return plus_constant (op0, INTVAL (op1));
3706 else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
3707 && GET_CODE (op0) == CONST_INT)
3708 return plus_constant (op1, INTVAL (op0));
3710 /* See if this is something like X * C - X or vice versa or
3711 if the multiplication is written as a shift. If so, we can
3712 distribute and make a new multiply, shift, or maybe just
3713 have X (if C is 2 in the example above). But don't make
3714 a real multiply if we didn't have one before. */
3716 if (! FLOAT_MODE_P (mode))
3718 HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
3719 rtx lhs = op0, rhs = op1;
3720 int had_mult = 0;
3722 if (GET_CODE (lhs) == NEG)
3723 coeff0 = -1, lhs = XEXP (lhs, 0);
3724 else if (GET_CODE (lhs) == MULT
3725 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
3727 coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
3728 had_mult = 1;
3730 else if (GET_CODE (lhs) == ASHIFT
3731 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
3732 && INTVAL (XEXP (lhs, 1)) >= 0
3733 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
3735 coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
3736 lhs = XEXP (lhs, 0);
3739 if (GET_CODE (rhs) == NEG)
3740 coeff1 = -1, rhs = XEXP (rhs, 0);
3741 else if (GET_CODE (rhs) == MULT
3742 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
3744 coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
3745 had_mult = 1;
3747 else if (GET_CODE (rhs) == ASHIFT
3748 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
3749 && INTVAL (XEXP (rhs, 1)) >= 0
3750 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
3752 coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
3753 rhs = XEXP (rhs, 0);
3756 if (rtx_equal_p (lhs, rhs))
3758 tem = cse_gen_binary (MULT, mode, lhs,
3759 GEN_INT (coeff0 + coeff1));
3760 return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
3764 /* If one of the operands is a PLUS or a MINUS, see if we can
3765 simplify this by the associative law.
3766 Don't use the associative law for floating point.
3767 The inaccuracy makes it nonassociative,
3768 and subtle programs can break if operations are associated. */
3770 if (INTEGRAL_MODE_P (mode)
3771 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
3772 || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS)
3773 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
3774 return tem;
3775 break;
3777 case COMPARE:
3778 #ifdef HAVE_cc0
3779 /* Convert (compare FOO (const_int 0)) to FOO. We only do this
3780 when using cc0; otherwise we want to leave it as a COMPARE
3781 so we can distinguish it from a register-register copy.
3783 In IEEE floating point, x-0 is not the same as x. */
3785 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
3786 || ! FLOAT_MODE_P (mode) || flag_fast_math)
3787 && op1 == CONST0_RTX (mode))
3788 return op0;
3789 #else
3790 /* Do nothing here. */
3791 #endif
3792 break;
3794 case MINUS:
3795 /* None of these optimizations can be done for IEEE
3796 floating point. */
3797 if (TARGET_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
3798 && FLOAT_MODE_P (mode) && ! flag_fast_math)
3799 break;
3801 /* We can't assume x-x is 0 even with non-IEEE floating point,
3802 but since it is zero except in very strange circumstances, we
3803 will treat it as zero with -ffast-math. */
3804 if (rtx_equal_p (op0, op1)
3805 && ! side_effects_p (op0)
3806 && (! FLOAT_MODE_P (mode) || flag_fast_math))
3807 return CONST0_RTX (mode);
3809 /* Change subtraction from zero into negation. */
3810 if (op0 == CONST0_RTX (mode))
3811 return gen_rtx (NEG, mode, op1);
3813 /* (-1 - a) is ~a. */
3814 if (op0 == constm1_rtx)
3815 return gen_rtx (NOT, mode, op1);
3817 /* Subtracting 0 has no effect. */
3818 if (op1 == CONST0_RTX (mode))
3819 return op0;
3821 /* See if this is something like X * C - X or vice versa or
3822 if the multiplication is written as a shift. If so, we can
3823 distribute and make a new multiply, shift, or maybe just
3824 have X (if C is 2 in the example above). But don't make a
3825 real multiply if we didn't have one before. */
3827 if (! FLOAT_MODE_P (mode))
3829 HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
3830 rtx lhs = op0, rhs = op1;
3831 int had_mult = 0;
3833 if (GET_CODE (lhs) == NEG)
3834 coeff0 = -1, lhs = XEXP (lhs, 0);
3835 else if (GET_CODE (lhs) == MULT
3836 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
3838 coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
3839 had_mult = 1;
3841 else if (GET_CODE (lhs) == ASHIFT
3842 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
3843 && INTVAL (XEXP (lhs, 1)) >= 0
3844 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
3846 coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
3847 lhs = XEXP (lhs, 0);
3850 if (GET_CODE (rhs) == NEG)
3851 coeff1 = -1, rhs = XEXP (rhs, 0);
3852 else if (GET_CODE (rhs) == MULT
3853 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
3855 coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
3856 had_mult = 1;
3858 else if (GET_CODE (rhs) == ASHIFT
3859 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
3860 && INTVAL (XEXP (rhs, 1)) >= 0
3861 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
3863 coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
3864 rhs = XEXP (rhs, 0);
3867 if (rtx_equal_p (lhs, rhs))
3869 tem = cse_gen_binary (MULT, mode, lhs,
3870 GEN_INT (coeff0 - coeff1));
3871 return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
3875 /* (a - (-b)) -> (a + b). */
3876 if (GET_CODE (op1) == NEG)
3877 return cse_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
3879 /* If one of the operands is a PLUS or a MINUS, see if we can
3880 simplify this by the associative law.
3881 Don't use the associative law for floating point.
3882 The inaccuracy makes it nonassociative,
3883 and subtle programs can break if operations are associated. */
3885 if (INTEGRAL_MODE_P (mode)
3886 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
3887 || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS)
3888 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
3889 return tem;
3891 /* Don't let a relocatable value get a negative coeff. */
3892 if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
3893 return plus_constant (op0, - INTVAL (op1));
3895 /* (x - (x & y)) -> (x & ~y) */
3896 if (GET_CODE (op1) == AND)
3898 if (rtx_equal_p (op0, XEXP (op1, 0)))
3899 return cse_gen_binary (AND, mode, op0, gen_rtx (NOT, mode, XEXP (op1, 1)));
3900 if (rtx_equal_p (op0, XEXP (op1, 1)))
3901 return cse_gen_binary (AND, mode, op0, gen_rtx (NOT, mode, XEXP (op1, 0)));
3903 break;
3905 case MULT:
3906 if (op1 == constm1_rtx)
3908 tem = simplify_unary_operation (NEG, mode, op0, mode);
3910 return tem ? tem : gen_rtx (NEG, mode, op0);
3913 /* In IEEE floating point, x*0 is not always 0. */
3914 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
3915 || ! FLOAT_MODE_P (mode) || flag_fast_math)
3916 && op1 == CONST0_RTX (mode)
3917 && ! side_effects_p (op0))
3918 return op1;
3920 /* In IEEE floating point, x*1 is not equivalent to x for NaNs.
3921 However, ANSI says we can drop signals,
3922 so we can do this anyway. */
3923 if (op1 == CONST1_RTX (mode))
3924 return op0;
3926 /* Convert multiply by constant power of two into shift unless
3927 we are still generating RTL. This test is a kludge. */
3928 if (GET_CODE (op1) == CONST_INT
3929 && (val = exact_log2 (INTVAL (op1))) >= 0
3930 /* If the mode is larger than the host word size, and the
3931 uppermost bit is set, then this isn't a power of two due
3932 to implicit sign extension. */
3933 && (width <= HOST_BITS_PER_WIDE_INT
3934 || val != HOST_BITS_PER_WIDE_INT - 1)
3935 && ! rtx_equal_function_value_matters)
3936 return gen_rtx (ASHIFT, mode, op0, GEN_INT (val));
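/* E.g. with op1 == (const_int 8), exact_log2 returns 3 and the
   multiplication becomes (ashift op0 (const_int 3)).  */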
3938 if (GET_CODE (op1) == CONST_DOUBLE
3939 && GET_MODE_CLASS (GET_MODE (op1)) == MODE_FLOAT)
3941 REAL_VALUE_TYPE d;
3942 jmp_buf handler;
3943 int op1is2, op1ism1;
3945 if (setjmp (handler))
3946 return 0;
3948 set_float_handler (handler);
3949 REAL_VALUE_FROM_CONST_DOUBLE (d, op1);
3950 op1is2 = REAL_VALUES_EQUAL (d, dconst2);
3951 op1ism1 = REAL_VALUES_EQUAL (d, dconstm1);
3952 set_float_handler (NULL_PTR);
3954 /* x*2 is x+x and x*(-1) is -x */
3955 if (op1is2 && GET_MODE (op0) == mode)
3956 return gen_rtx (PLUS, mode, op0, copy_rtx (op0));
3958 else if (op1ism1 && GET_MODE (op0) == mode)
3959 return gen_rtx (NEG, mode, op0);
3961 break;
3963 case IOR:
3964 if (op1 == const0_rtx)
3965 return op0;
3966 if (GET_CODE (op1) == CONST_INT
3967 && (INTVAL (op1) & GET_MODE_MASK (mode)) == GET_MODE_MASK (mode))
3968 return op1;
3969 if (rtx_equal_p (op0, op1) && ! side_effects_p (op0))
3970 return op0;
3971 /* A | (~A) -> -1 */
3972 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
3973 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
3974 && ! side_effects_p (op0)
3975 && GET_MODE_CLASS (mode) != MODE_CC)
3976 return constm1_rtx;
3977 break;
3979 case XOR:
3980 if (op1 == const0_rtx)
3981 return op0;
3982 if (GET_CODE (op1) == CONST_INT
3983 && (INTVAL (op1) & GET_MODE_MASK (mode)) == GET_MODE_MASK (mode))
3984 return gen_rtx (NOT, mode, op0);
3985 if (op0 == op1 && ! side_effects_p (op0)
3986 && GET_MODE_CLASS (mode) != MODE_CC)
3987 return const0_rtx;
3988 break;
3990 case AND:
3991 if (op1 == const0_rtx && ! side_effects_p (op0))
3992 return const0_rtx;
3993 if (GET_CODE (op1) == CONST_INT
3994 && (INTVAL (op1) & GET_MODE_MASK (mode)) == GET_MODE_MASK (mode))
3995 return op0;
3996 if (op0 == op1 && ! side_effects_p (op0)
3997 && GET_MODE_CLASS (mode) != MODE_CC)
3998 return op0;
3999 /* A & (~A) -> 0 */
4000 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
4001 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
4002 && ! side_effects_p (op0)
4003 && GET_MODE_CLASS (mode) != MODE_CC)
4004 return const0_rtx;
4005 break;
4007 case UDIV:
4008 /* Convert divide by power of two into shift (divide by 1 handled
4009 below). */
4010 if (GET_CODE (op1) == CONST_INT
4011 && (arg1 = exact_log2 (INTVAL (op1))) > 0)
4012 return gen_rtx (LSHIFTRT, mode, op0, GEN_INT (arg1));
4014 /* ... fall through ... */
4016 case DIV:
4017 if (op1 == CONST1_RTX (mode))
4018 return op0;
4020 /* In IEEE floating point, 0/x is not always 0. */
4021 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
4022 || ! FLOAT_MODE_P (mode) || flag_fast_math)
4023 && op0 == CONST0_RTX (mode)
4024 && ! side_effects_p (op1))
4025 return op0;
4027 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
4028 /* Change division by a constant into multiplication. Only do
4029 this with -ffast-math until an expert says it is safe in
4030 general. */
4031 else if (GET_CODE (op1) == CONST_DOUBLE
4032 && GET_MODE_CLASS (GET_MODE (op1)) == MODE_FLOAT
4033 && op1 != CONST0_RTX (mode)
4034 && flag_fast_math)
4036 REAL_VALUE_TYPE d;
4037 REAL_VALUE_FROM_CONST_DOUBLE (d, op1);
4039 if (! REAL_VALUES_EQUAL (d, dconst0))
4041 #if defined (REAL_ARITHMETIC)
4042 REAL_ARITHMETIC (d, rtx_to_tree_code (DIV), dconst1, d);
4043 return gen_rtx (MULT, mode, op0,
4044 CONST_DOUBLE_FROM_REAL_VALUE (d, mode));
4045 #else
4046 return gen_rtx (MULT, mode, op0,
4047 CONST_DOUBLE_FROM_REAL_VALUE (1./d, mode));
4048 #endif
4051 #endif
4052 break;
4054 case UMOD:
4055 /* Handle modulus by power of two (mod with 1 handled below). */
4056 if (GET_CODE (op1) == CONST_INT
4057 && exact_log2 (INTVAL (op1)) > 0)
4058 return gen_rtx (AND, mode, op0, GEN_INT (INTVAL (op1) - 1));
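/* E.g. an unsigned modulus by (const_int 8) becomes
   (and op0 (const_int 7)).  */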
4060 /* ... fall through ... */
4062 case MOD:
4063 if ((op0 == const0_rtx || op1 == const1_rtx)
4064 && ! side_effects_p (op0) && ! side_effects_p (op1))
4065 return const0_rtx;
4066 break;
4068 case ROTATERT:
4069 case ROTATE:
4070 /* Rotating ~0 always results in ~0. */
4071 if (GET_CODE (op0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
4072 && INTVAL (op0) == GET_MODE_MASK (mode)
4073 && ! side_effects_p (op1))
4074 return op0;
4076 /* ... fall through ... */
4078 case ASHIFT:
4079 case ASHIFTRT:
4080 case LSHIFTRT:
4081 if (op1 == const0_rtx)
4082 return op0;
4083 if (op0 == const0_rtx && ! side_effects_p (op1))
4084 return op0;
4085 break;
4087 case SMIN:
4088 if (width <= HOST_BITS_PER_WIDE_INT && GET_CODE (op1) == CONST_INT
4089 && INTVAL (op1) == (HOST_WIDE_INT) 1 << (width - 1)
4090 && ! side_effects_p (op0))
4091 return op1;
4092 else if (rtx_equal_p (op0, op1) && ! side_effects_p (op0))
4093 return op0;
4094 break;
4096 case SMAX:
4097 if (width <= HOST_BITS_PER_WIDE_INT && GET_CODE (op1) == CONST_INT
4098 && (INTVAL (op1)
4099 == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
4100 && ! side_effects_p (op0))
4101 return op1;
4102 else if (rtx_equal_p (op0, op1) && ! side_effects_p (op0))
4103 return op0;
4104 break;
4106 case UMIN:
4107 if (op1 == const0_rtx && ! side_effects_p (op0))
4108 return op1;
4109 else if (rtx_equal_p (op0, op1) && ! side_effects_p (op0))
4110 return op0;
4111 break;
4113 case UMAX:
4114 if (op1 == constm1_rtx && ! side_effects_p (op0))
4115 return op1;
4116 else if (rtx_equal_p (op0, op1) && ! side_effects_p (op0))
4117 return op0;
4118 break;
4120 default:
4121 abort ();
4124 return 0;
4127 /* Get the integer argument values in two forms:
4128 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
4130 arg0 = INTVAL (op0);
4131 arg1 = INTVAL (op1);
4133 if (width < HOST_BITS_PER_WIDE_INT)
4135 arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
4136 arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;
4138 arg0s = arg0;
4139 if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
4140 arg0s |= ((HOST_WIDE_INT) (-1) << width);
4142 arg1s = arg1;
4143 if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
4144 arg1s |= ((HOST_WIDE_INT) (-1) << width);
4146 else
4148 arg0s = arg0;
4149 arg1s = arg1;
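/* Worked example: in QImode, width == 8, so an operand whose low
   byte is 0xff gives arg0 == 0xff (zero-extended) while
   arg0s == -1 (sign-extended).  */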
4152 /* Compute the value of the arithmetic. */
4154 switch (code)
4156 case PLUS:
4157 val = arg0s + arg1s;
4158 break;
4160 case MINUS:
4161 val = arg0s - arg1s;
4162 break;
4164 case MULT:
4165 val = arg0s * arg1s;
4166 break;
4168 case DIV:
4169 if (arg1s == 0)
4170 return 0;
4171 val = arg0s / arg1s;
4172 break;
4174 case MOD:
4175 if (arg1s == 0)
4176 return 0;
4177 val = arg0s % arg1s;
4178 break;
4180 case UDIV:
4181 if (arg1 == 0)
4182 return 0;
4183 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
4184 break;
4186 case UMOD:
4187 if (arg1 == 0)
4188 return 0;
4189 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
4190 break;
4192 case AND:
4193 val = arg0 & arg1;
4194 break;
4196 case IOR:
4197 val = arg0 | arg1;
4198 break;
4200 case XOR:
4201 val = arg0 ^ arg1;
4202 break;
4204 case LSHIFTRT:
4205 /* If shift count is undefined, don't fold it; let the machine do
4206 what it wants. But truncate it if the machine will do that. */
4207 if (arg1 < 0)
4208 return 0;
4210 #ifdef SHIFT_COUNT_TRUNCATED
4211 if (SHIFT_COUNT_TRUNCATED)
4212 arg1 %= width;
4213 #endif
4215 val = ((unsigned HOST_WIDE_INT) arg0) >> arg1;
4216 break;
4218 case ASHIFT:
4219 if (arg1 < 0)
4220 return 0;
4222 #ifdef SHIFT_COUNT_TRUNCATED
4223 if (SHIFT_COUNT_TRUNCATED)
4224 arg1 %= width;
4225 #endif
4227 val = ((unsigned HOST_WIDE_INT) arg0) << arg1;
4228 break;
4230 case ASHIFTRT:
4231 if (arg1 < 0)
4232 return 0;
4234 #ifdef SHIFT_COUNT_TRUNCATED
4235 if (SHIFT_COUNT_TRUNCATED)
4236 arg1 %= width;
4237 #endif
4239 val = arg0s >> arg1;
4241 /* Bootstrap compiler may not have sign extended the right shift.
4242 Manually extend the sign to ensure bootstrap cc matches gcc. */
4243 if (arg0s < 0 && arg1 > 0)
4244 val |= ((HOST_WIDE_INT) -1) << (HOST_BITS_PER_WIDE_INT - arg1);
4246 break;
4248 case ROTATERT:
4249 if (arg1 < 0)
4250 return 0;
4252 arg1 %= width;
4253 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
4254 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
4255 break;
4257 case ROTATE:
4258 if (arg1 < 0)
4259 return 0;
4261 arg1 %= width;
4262 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
4263 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
4264 break;
4266 case COMPARE:
4267 /* Do nothing here. */
4268 return 0;
4270 case SMIN:
4271 val = arg0s <= arg1s ? arg0s : arg1s;
4272 break;
4274 case UMIN:
4275 val = ((unsigned HOST_WIDE_INT) arg0
4276 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
4277 break;
4279 case SMAX:
4280 val = arg0s > arg1s ? arg0s : arg1s;
4281 break;
4283 case UMAX:
4284 val = ((unsigned HOST_WIDE_INT) arg0
4285 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
4286 break;
4288 default:
4289 abort ();
4292 /* Clear the bits that don't belong in our mode, unless they and our sign
4293 bit are all one. So we get either a reasonable negative value or a
4294 reasonable unsigned value for this mode. */
4295 if (width < HOST_BITS_PER_WIDE_INT
4296 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
4297 != ((HOST_WIDE_INT) (-1) << (width - 1))))
4298 val &= ((HOST_WIDE_INT) 1 << width) - 1;
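/* E.g. in QImode (width == 8) an arithmetic result of 0x134 is
   masked down to 0x34, while -96, whose high bits all match its
   sign bit, is left alone.  */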
4300 /* If this would be an entire word for the target, but is not for
4301 the host, then sign-extend on the host so that the number will look
4302 the same way on the host that it would on the target.
4304 For example, when building a 64 bit alpha hosted 32 bit sparc
4305 targeted compiler, then we want the 32 bit unsigned value -1 to be
4306 represented as a 64 bit value -1, and not as 0x00000000ffffffff.
4307 The latter confuses the sparc backend. */
4309 if (BITS_PER_WORD < HOST_BITS_PER_WIDE_INT && BITS_PER_WORD == width
4310 && (val & ((HOST_WIDE_INT) 1 << (width - 1))))
4311 val |= ((HOST_WIDE_INT) (-1) << width);
4313 return GEN_INT (val);
4316 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
4317 PLUS or MINUS.
4319 Rather than test for specific cases, we do this by a brute-force method
4320 and do all possible simplifications until no more changes occur. Then
4321 we rebuild the operation. */
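/* For example, (minus A (minus B C)) is flattened into the operand
   list {+A, -B, +C}; pairs that fold are combined, the survivors
   are packed down, and the expression is rebuilt from the list.  */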
4323 static rtx
4324 simplify_plus_minus (code, mode, op0, op1)
4325 enum rtx_code code;
4326 enum machine_mode mode;
4327 rtx op0, op1;
4329 rtx ops[8];
4330 int negs[8];
4331 rtx result, tem;
4332 int n_ops = 2, input_ops = 2, input_consts = 0, n_consts = 0;
4333 int first = 1, negate = 0, changed;
4334 int i, j;
4336 bzero ((char *) ops, sizeof ops);
4338 /* Set up the two operands and then expand them until nothing has been
4339 changed. If we run out of room in our array, give up; this should
4340 almost never happen. */
4342 ops[0] = op0, ops[1] = op1, negs[0] = 0, negs[1] = (code == MINUS);
4344 changed = 1;
4345 while (changed)
4347 changed = 0;
4349 for (i = 0; i < n_ops; i++)
4350 switch (GET_CODE (ops[i]))
4352 case PLUS:
4353 case MINUS:
4354 if (n_ops == 7)
4355 return 0;
4357 ops[n_ops] = XEXP (ops[i], 1);
4358 negs[n_ops++] = GET_CODE (ops[i]) == MINUS ? !negs[i] : negs[i];
4359 ops[i] = XEXP (ops[i], 0);
4360 input_ops++;
4361 changed = 1;
4362 break;
4364 case NEG:
4365 ops[i] = XEXP (ops[i], 0);
4366 negs[i] = ! negs[i];
4367 changed = 1;
4368 break;
4370 case CONST:
4371 ops[i] = XEXP (ops[i], 0);
4372 input_consts++;
4373 changed = 1;
4374 break;
4376 case NOT:
4377 /* ~a -> (-a - 1) */
4378 if (n_ops != 7)
4380 ops[n_ops] = constm1_rtx;
4381 negs[n_ops++] = negs[i];
4382 ops[i] = XEXP (ops[i], 0);
4383 negs[i] = ! negs[i];
4384 changed = 1;
4386 break;
4388 case CONST_INT:
4389 if (negs[i])
4390 ops[i] = GEN_INT (- INTVAL (ops[i])), negs[i] = 0, changed = 1;
4391 break;
4393 default:
4394 break;
4398 /* If we only have two operands, we can't do anything. */
4399 if (n_ops <= 2)
4400 return 0;
4402 /* Now simplify each pair of operands until nothing changes. The first
4403 time through just simplify constants against each other. */
4405 changed = 1;
4406 while (changed)
4408 changed = first;
4410 for (i = 0; i < n_ops - 1; i++)
4411 for (j = i + 1; j < n_ops; j++)
4412 if (ops[i] != 0 && ops[j] != 0
4413 && (! first || (CONSTANT_P (ops[i]) && CONSTANT_P (ops[j]))))
4415 rtx lhs = ops[i], rhs = ops[j];
4416 enum rtx_code ncode = PLUS;
4418 if (negs[i] && ! negs[j])
4419 lhs = ops[j], rhs = ops[i], ncode = MINUS;
4420 else if (! negs[i] && negs[j])
4421 ncode = MINUS;
4423 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
4424 if (tem)
4426 ops[i] = tem, ops[j] = 0;
4427 negs[i] = negs[i] && negs[j];
4428 if (GET_CODE (tem) == NEG)
4429 ops[i] = XEXP (tem, 0), negs[i] = ! negs[i];
4431 if (GET_CODE (ops[i]) == CONST_INT && negs[i])
4432 ops[i] = GEN_INT (- INTVAL (ops[i])), negs[i] = 0;
4433 changed = 1;
4437 first = 0;
4440 /* Pack all the operands to the lower-numbered entries and give up if
4441 we didn't reduce the number of operands we had. Make sure we
4442 count a CONST as two operands. If we have the same number of
4443 operands, but have made more CONSTs than we had, this is also
4444 an improvement, so accept it. */
4446 for (i = 0, j = 0; j < n_ops; j++)
4447 if (ops[j] != 0)
4449 ops[i] = ops[j], negs[i++] = negs[j];
4450 if (GET_CODE (ops[j]) == CONST)
4451 n_consts++;
4454 if (i + n_consts > input_ops
4455 || (i + n_consts == input_ops && n_consts <= input_consts))
4456 return 0;
4458 n_ops = i;
4460 /* If we have a CONST_INT, put it last. */
4461 for (i = 0; i < n_ops - 1; i++)
4462 if (GET_CODE (ops[i]) == CONST_INT)
4464 tem = ops[n_ops - 1], ops[n_ops - 1] = ops[i], ops[i] = tem;
4465 j = negs[n_ops - 1], negs[n_ops - 1] = negs[i], negs[i] = j;
4468 /* Put a non-negated operand first. If there aren't any, make all
4469 operands positive and negate the whole thing later. */
4470 for (i = 0; i < n_ops && negs[i]; i++)
4473 if (i == n_ops)
4475 for (i = 0; i < n_ops; i++)
4476 negs[i] = 0;
4477 negate = 1;
4479 else if (i != 0)
4481 tem = ops[0], ops[0] = ops[i], ops[i] = tem;
4482 j = negs[0], negs[0] = negs[i], negs[i] = j;
4485 /* Now make the result by performing the requested operations. */
4486 result = ops[0];
4487 for (i = 1; i < n_ops; i++)
4488 result = cse_gen_binary (negs[i] ? MINUS : PLUS, mode, result, ops[i]);
4490 return negate ? gen_rtx (NEG, mode, result) : result;
4493 /* Make a binary operation by properly ordering the operands and
4494 seeing if the expression folds. */
4496 static rtx
4497 cse_gen_binary (code, mode, op0, op1)
4498 enum rtx_code code;
4499 enum machine_mode mode;
4500 rtx op0, op1;
4502 rtx tem;
4504 /* Put complex operands first and constants second if commutative. */
4505 if (GET_RTX_CLASS (code) == 'c'
4506 && ((CONSTANT_P (op0) && GET_CODE (op1) != CONST_INT)
4507 || (GET_RTX_CLASS (GET_CODE (op0)) == 'o'
4508 && GET_RTX_CLASS (GET_CODE (op1)) != 'o')
4509 || (GET_CODE (op0) == SUBREG
4510 && GET_RTX_CLASS (GET_CODE (SUBREG_REG (op0))) == 'o'
4511 && GET_RTX_CLASS (GET_CODE (op1)) != 'o')))
4512 tem = op0, op0 = op1, op1 = tem;
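/* E.g. cse_gen_binary (PLUS, mode, (const_int 2), (reg R)) is
   reordered here so the REG comes first and the constant second,
   the canonical form the rest of the pass expects.  */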
4514 /* If this simplifies, do it. */
4515 tem = simplify_binary_operation (code, mode, op0, op1);
4517 if (tem)
4518 return tem;
4520 /* Handle addition and subtraction of CONST_INT specially. Otherwise,
4521 just form the operation. */
4523 if (code == PLUS && GET_CODE (op1) == CONST_INT
4524 && GET_MODE (op0) != VOIDmode)
4525 return plus_constant (op0, INTVAL (op1));
4526 else if (code == MINUS && GET_CODE (op1) == CONST_INT
4527 && GET_MODE (op0) != VOIDmode)
4528 return plus_constant (op0, - INTVAL (op1));
4529 else
4530 return gen_rtx (code, mode, op0, op1);
4533 /* Like simplify_binary_operation except used for relational operators.
4534 MODE is the mode of the operands, not that of the result. If MODE
4535 is VOIDmode, both operands must also be VOIDmode and we compare the
4536 operands in "infinite precision".
4538 If no simplification is possible, this function returns zero. Otherwise,
4539 it returns either const_true_rtx or const0_rtx. */
4542 simplify_relational_operation (code, mode, op0, op1)
4543 enum rtx_code code;
4544 enum machine_mode mode;
4545 rtx op0, op1;
4547 int equal, op0lt, op0ltu, op1lt, op1ltu;
4548 rtx tem;
4550 /* If op0 is a compare, extract the comparison arguments from it. */
4551 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4552 op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);
4554 /* We can't simplify MODE_CC values since we don't know what the
4555 actual comparison is. */
4556 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC
4557 #ifdef HAVE_cc0
4558 || op0 == cc0_rtx
4559 #endif
4561 return 0;
4563 /* For integer comparisons of A and B maybe we can simplify A - B and can
4564 then simplify a comparison of that with zero. If A and B are both either
4565 a register or a CONST_INT, this can't help; testing for these cases will
4566 prevent infinite recursion here and speed things up.
4568 If CODE is an unsigned comparison, then we can never do this optimization,
4569 because it gives an incorrect result if the subtraction wraps around zero.
4570 ANSI C defines unsigned operations such that they never overflow, and
4571 thus such cases cannot be ignored. */
4573 if (INTEGRAL_MODE_P (mode) && op1 != const0_rtx
4574 && ! ((GET_CODE (op0) == REG || GET_CODE (op0) == CONST_INT)
4575 && (GET_CODE (op1) == REG || GET_CODE (op1) == CONST_INT))
4576 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
4577 && code != GTU && code != GEU && code != LTU && code != LEU)
4578 return simplify_relational_operation (signed_condition (code),
4579 mode, tem, const0_rtx);
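/* For instance, a signed comparison of (plus X (const_int 7)) with
   (plus X (const_int 3)) reduces via the MINUS to comparing
   (const_int 4) against zero, which then folds outright.  */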
4581 /* For non-IEEE floating-point, if the two operands are equal, we know the
4582 result. */
4583 if (rtx_equal_p (op0, op1)
4584 && (TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
4585 || ! FLOAT_MODE_P (GET_MODE (op0)) || flag_fast_math))
4586 equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;
4588 /* If the operands are floating-point constants, see if we can fold
4589 the result. */
4590 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
4591 else if (GET_CODE (op0) == CONST_DOUBLE && GET_CODE (op1) == CONST_DOUBLE
4592 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_FLOAT)
4594 REAL_VALUE_TYPE d0, d1;
4595 jmp_buf handler;
4597 if (setjmp (handler))
4598 return 0;
4600 set_float_handler (handler);
4601 REAL_VALUE_FROM_CONST_DOUBLE (d0, op0);
4602 REAL_VALUE_FROM_CONST_DOUBLE (d1, op1);
4603 equal = REAL_VALUES_EQUAL (d0, d1);
4604 op0lt = op0ltu = REAL_VALUES_LESS (d0, d1);
4605 op1lt = op1ltu = REAL_VALUES_LESS (d1, d0);
4606 set_float_handler (NULL_PTR);
4608 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
4610 /* Otherwise, see if the operands are both integers. */
4611 else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
4612 && (GET_CODE (op0) == CONST_DOUBLE || GET_CODE (op0) == CONST_INT)
4613 && (GET_CODE (op1) == CONST_DOUBLE || GET_CODE (op1) == CONST_INT))
4615 int width = GET_MODE_BITSIZE (mode);
4616 HOST_WIDE_INT l0s, h0s, l1s, h1s;
4617 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
4619 /* Get the two words comprising each integer constant. */
4620 if (GET_CODE (op0) == CONST_DOUBLE)
4622 l0u = l0s = CONST_DOUBLE_LOW (op0);
4623 h0u = h0s = CONST_DOUBLE_HIGH (op0);
4625 else
4627 l0u = l0s = INTVAL (op0);
4628 h0u = h0s = l0s < 0 ? -1 : 0;
4631 if (GET_CODE (op1) == CONST_DOUBLE)
4633 l1u = l1s = CONST_DOUBLE_LOW (op1);
4634 h1u = h1s = CONST_DOUBLE_HIGH (op1);
4636 else
4638 l1u = l1s = INTVAL (op1);
4639 h1u = h1s = l1s < 0 ? -1 : 0;
4642 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
4643 we have to sign or zero-extend the values. */
4644 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
4645 h0u = h1u = 0, h0s = l0s < 0 ? -1 : 0, h1s = l1s < 0 ? -1 : 0;
4647 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
4649 l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
4650 l1u &= ((HOST_WIDE_INT) 1 << width) - 1;
4652 if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
4653 l0s |= ((HOST_WIDE_INT) (-1) << width);
4655 if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
4656 l1s |= ((HOST_WIDE_INT) (-1) << width);
4659 equal = (h0u == h1u && l0u == l1u);
4660 op0lt = (h0s < h1s || (h0s == h1s && l0s < l1s));
4661 op1lt = (h1s < h0s || (h1s == h0s && l1s < l0s));
4662 op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
4663 op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
4666 /* Otherwise, there are some code-specific tests we can make. */
4667 else
4669 switch (code)
4671 case EQ:
4672 /* References to the frame plus a constant or labels cannot
4673 be zero, but a SYMBOL_REF can, due to #pragma weak. */
4674 if (((NONZERO_BASE_PLUS_P (op0) && op1 == const0_rtx)
4675 || GET_CODE (op0) == LABEL_REF)
4676 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
4677 /* On some machines, the ap reg can be 0 sometimes. */
4678 && op0 != arg_pointer_rtx
4679 #endif
4681 return const0_rtx;
4682 break;
4684 case NE:
4685 if (((NONZERO_BASE_PLUS_P (op0) && op1 == const0_rtx)
4686 || GET_CODE (op0) == LABEL_REF)
4687 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
4688 && op0 != arg_pointer_rtx
4689 #endif
4691 return const_true_rtx;
4692 break;
4694 case GEU:
4695 /* Unsigned values are never negative. */
4696 if (op1 == const0_rtx)
4697 return const_true_rtx;
4698 break;
4700 case LTU:
4701 if (op1 == const0_rtx)
4702 return const0_rtx;
4703 break;
4705 case LEU:
4706 /* Unsigned values are never greater than the largest
4707 unsigned value. */
4708 if (GET_CODE (op1) == CONST_INT
4709 && INTVAL (op1) == GET_MODE_MASK (mode)
4710 && INTEGRAL_MODE_P (mode))
4711 return const_true_rtx;
4712 break;
4714 case GTU:
4715 if (GET_CODE (op1) == CONST_INT
4716 && INTVAL (op1) == GET_MODE_MASK (mode)
4717 && INTEGRAL_MODE_P (mode))
4718 return const0_rtx;
4719 break;
4721 default:
4722 break;
4725 return 0;
4728 /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
4729 as appropriate. */
4730 switch (code)
4732 case EQ:
4733 return equal ? const_true_rtx : const0_rtx;
4734 case NE:
4735 return ! equal ? const_true_rtx : const0_rtx;
4736 case LT:
4737 return op0lt ? const_true_rtx : const0_rtx;
4738 case GT:
4739 return op1lt ? const_true_rtx : const0_rtx;
4740 case LTU:
4741 return op0ltu ? const_true_rtx : const0_rtx;
4742 case GTU:
4743 return op1ltu ? const_true_rtx : const0_rtx;
4744 case LE:
4745 return equal || op0lt ? const_true_rtx : const0_rtx;
4746 case GE:
4747 return equal || op1lt ? const_true_rtx : const0_rtx;
4748 case LEU:
4749 return equal || op0ltu ? const_true_rtx : const0_rtx;
4750 case GEU:
4751 return equal || op1ltu ? const_true_rtx : const0_rtx;
4752 default:
4753 abort ();
4757 /* Simplify CODE, an operation with result mode MODE and three operands,
4758 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
4759 a constant. Return 0 if no simplification is possible. */
4762 simplify_ternary_operation (code, mode, op0_mode, op0, op1, op2)
4763 enum rtx_code code;
4764 enum machine_mode mode, op0_mode;
4765 rtx op0, op1, op2;
4767 int width = GET_MODE_BITSIZE (mode);
4769 /* VOIDmode means "infinite" precision. */
4770 if (width == 0)
4771 width = HOST_BITS_PER_WIDE_INT;
4773 switch (code)
4775 case SIGN_EXTRACT:
4776 case ZERO_EXTRACT:
4777 if (GET_CODE (op0) == CONST_INT
4778 && GET_CODE (op1) == CONST_INT
4779 && GET_CODE (op2) == CONST_INT
4780 && INTVAL (op1) + INTVAL (op2) <= GET_MODE_BITSIZE (op0_mode)
4781 && width <= HOST_BITS_PER_WIDE_INT)
4783 /* Extracting a bit-field from a constant */
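/* E.g. (zero_extract (const_int 0xab) (const_int 4) (const_int 4))
   yields (const_int 10) when BITS_BIG_ENDIAN is 0; the SIGN_EXTRACT
   form would yield (const_int -6).  */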
4784 HOST_WIDE_INT val = INTVAL (op0);
4786 if (BITS_BIG_ENDIAN)
4787 val >>= (GET_MODE_BITSIZE (op0_mode)
4788 - INTVAL (op2) - INTVAL (op1));
4789 else
4790 val >>= INTVAL (op2);
4792 if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
4794 /* First zero-extend. */
4795 val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
4796 /* If desired, propagate sign bit. */
4797 if (code == SIGN_EXTRACT
4798 && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
4799 val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
4802 /* Clear the bits that don't belong in our mode,
4803 unless they and our sign bit are all one.
4804 So we get either a reasonable negative value or a reasonable
4805 unsigned value for this mode. */
4806 if (width < HOST_BITS_PER_WIDE_INT
4807 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
4808 != ((HOST_WIDE_INT) (-1) << (width - 1))))
4809 val &= ((HOST_WIDE_INT) 1 << width) - 1;
4811 return GEN_INT (val);
4813 break;
4815 case IF_THEN_ELSE:
4816 if (GET_CODE (op0) == CONST_INT)
4817 return op0 != const0_rtx ? op1 : op2;
4819 /* Convert a == b ? b : a to "a". */
4820 if (GET_CODE (op0) == NE && ! side_effects_p (op0)
4821 && rtx_equal_p (XEXP (op0, 0), op1)
4822 && rtx_equal_p (XEXP (op0, 1), op2))
4823 return op1;
4824 else if (GET_CODE (op0) == EQ && ! side_effects_p (op0)
4825 && rtx_equal_p (XEXP (op0, 1), op1)
4826 && rtx_equal_p (XEXP (op0, 0), op2))
4827 return op2;
4828 break;
4830 default:
4831 abort ();
4834 return 0;
4837 /* If X is a nontrivial arithmetic operation on an argument
4838 for which a constant value can be determined, return
4839 the result of operating on that value, as a constant.
4840 Otherwise, return X, possibly with one or more operands
4841 modified by recursive calls to this function.
4843 If X is a register whose contents are known, we do NOT
4844 return those contents here. equiv_constant is called to
4845 perform that task.
4847 INSN is the insn that we may be modifying. If it is 0, make a copy
4848 of X before modifying it. */
4850 static rtx
4851 fold_rtx (x, insn)
4852 rtx x;
4853 rtx insn;
4855 register enum rtx_code code;
4856 register enum machine_mode mode;
4857 register char *fmt;
4858 register int i;
4859 rtx new = 0;
4860 int copied = 0;
4861 int must_swap = 0;
4863 /* Folded equivalents of first two operands of X. */
4864 rtx folded_arg0;
4865 rtx folded_arg1;
4867 /* Constant equivalents of first three operands of X;
4868 0 when no such equivalent is known. */
4869 rtx const_arg0;
4870 rtx const_arg1;
4871 rtx const_arg2;
4873 /* The mode of the first operand of X. We need this for sign and zero
4874 extends. */
4875 enum machine_mode mode_arg0;
4877 if (x == 0)
4878 return x;
4880 mode = GET_MODE (x);
4881 code = GET_CODE (x);
4882 switch (code)
4884 case CONST:
4885 case CONST_INT:
4886 case CONST_DOUBLE:
4887 case SYMBOL_REF:
4888 case LABEL_REF:
4889 case REG:
4890 /* No use simplifying an EXPR_LIST,
4891 since EXPR_LISTs are used only for lists of args
4892 in a function call's REG_EQUAL note. */
4893 case EXPR_LIST:
4894 /* Changing anything inside an ADDRESSOF is incorrect; we don't
4895 want to (e.g.,) make (addressof (const_int 0)) just because
4896 the location is known to be zero. */
4897 case ADDRESSOF:
4898 return x;
4900 #ifdef HAVE_cc0
4901 case CC0:
4902 return prev_insn_cc0;
4903 #endif
4905 case PC:
4906 /* If the next insn is a CODE_LABEL followed by a jump table,
4907 PC's value is a LABEL_REF pointing to that label. That
4908 lets us fold switch statements on the VAX. */
4909 if (insn && GET_CODE (insn) == JUMP_INSN)
4911 rtx next = next_nonnote_insn (insn);
4913 if (next && GET_CODE (next) == CODE_LABEL
4914 && NEXT_INSN (next) != 0
4915 && GET_CODE (NEXT_INSN (next)) == JUMP_INSN
4916 && (GET_CODE (PATTERN (NEXT_INSN (next))) == ADDR_VEC
4917 || GET_CODE (PATTERN (NEXT_INSN (next))) == ADDR_DIFF_VEC))
4918 return gen_rtx (LABEL_REF, Pmode, next);
4920 break;
4922 case SUBREG:
4923 /* See if we previously assigned a constant value to this SUBREG. */
4924 if ((new = lookup_as_function (x, CONST_INT)) != 0
4925 || (new = lookup_as_function (x, CONST_DOUBLE)) != 0)
4926 return new;
4928 /* If this is a paradoxical SUBREG, we have no idea what value the
4929 extra bits would have. However, if the operand is equivalent
4930 to a SUBREG whose operand is the same as our mode, and all the
4931 modes are within a word, we can just use the inner operand
4932 because these SUBREGs just say how to treat the register.
4934 Similarly if we find an integer constant. */
4936 if (GET_MODE_SIZE (mode) > GET_MODE_SIZE (GET_MODE (SUBREG_REG (x))))
4938 enum machine_mode imode = GET_MODE (SUBREG_REG (x));
4939 struct table_elt *elt;
4941 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
4942 && GET_MODE_SIZE (imode) <= UNITS_PER_WORD
4943 && (elt = lookup (SUBREG_REG (x), HASH (SUBREG_REG (x), imode),
4944 imode)) != 0)
4945 for (elt = elt->first_same_value;
4946 elt; elt = elt->next_same_value)
4948 if (CONSTANT_P (elt->exp)
4949 && GET_MODE (elt->exp) == VOIDmode)
4950 return elt->exp;
4952 if (GET_CODE (elt->exp) == SUBREG
4953 && GET_MODE (SUBREG_REG (elt->exp)) == mode
4954 && exp_equiv_p (elt->exp, elt->exp, 1, 0))
4955 return copy_rtx (SUBREG_REG (elt->exp));
4958 return x;
4961 /* Fold SUBREG_REG. If it changed, see if we can simplify the SUBREG.
4962 We might be able to if the SUBREG is extracting a single word in an
4963 integral mode or extracting the low part. */
4965 folded_arg0 = fold_rtx (SUBREG_REG (x), insn);
4966 const_arg0 = equiv_constant (folded_arg0);
4967 if (const_arg0)
4968 folded_arg0 = const_arg0;
4970 if (folded_arg0 != SUBREG_REG (x))
4972 new = 0;
4974 if (GET_MODE_CLASS (mode) == MODE_INT
4975 && GET_MODE_SIZE (mode) == UNITS_PER_WORD
4976 && GET_MODE (SUBREG_REG (x)) != VOIDmode)
4977 new = operand_subword (folded_arg0, SUBREG_WORD (x), 0,
4978 GET_MODE (SUBREG_REG (x)));
4979 if (new == 0 && subreg_lowpart_p (x))
4980 new = gen_lowpart_if_possible (mode, folded_arg0);
4981 if (new)
4982 return new;
4985 /* If this is a narrowing SUBREG and our operand is a REG, see if
4986 we can find an equivalence for REG that is an arithmetic operation
4987 in a wider mode where both operands are paradoxical SUBREGs
4988 from objects of our result mode. In that case, we couldn't report
4989 an equivalent value for that operation, since we don't know what the
4990 extra bits will be. But we can find an equivalence for this SUBREG
4991 by folding that operation in the narrow mode. This allows us to
4992 fold arithmetic in narrow modes when the machine only supports
4993 word-sized arithmetic.
4995 Also look for a case where we have a SUBREG whose operand is the
4996 same as our result. If both modes are smaller than a word, we
4997 are simply interpreting a register in different modes and we
4998 can use the inner value. */
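/* Suppose, on a word-arithmetic machine, an SImode add was recorded
   as (plus:DI (subreg:DI (reg:SI a) 0) (subreg:DI (reg:SI b) 0))
   and equated to (reg:DI t); when we later see
   (subreg:SI (reg:DI t) 0) we can fold the PLUS directly in SImode
   on the lowparts a and b.  */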
5000 if (GET_CODE (folded_arg0) == REG
5001 && GET_MODE_SIZE (mode) < GET_MODE_SIZE (GET_MODE (folded_arg0))
5002 && subreg_lowpart_p (x))
5004 struct table_elt *elt;
5006 /* We can use HASH here since we know that canon_hash won't be
5007 called. */
5008 elt = lookup (folded_arg0,
5009 HASH (folded_arg0, GET_MODE (folded_arg0)),
5010 GET_MODE (folded_arg0));
5012 if (elt)
5013 elt = elt->first_same_value;
5015 for (; elt; elt = elt->next_same_value)
5017 enum rtx_code eltcode = GET_CODE (elt->exp);
5019 /* Just check for unary and binary operations. */
5020 if (GET_RTX_CLASS (GET_CODE (elt->exp)) == '1'
5021 && GET_CODE (elt->exp) != SIGN_EXTEND
5022 && GET_CODE (elt->exp) != ZERO_EXTEND
5023 && GET_CODE (XEXP (elt->exp, 0)) == SUBREG
5024 && GET_MODE (SUBREG_REG (XEXP (elt->exp, 0))) == mode)
5026 rtx op0 = SUBREG_REG (XEXP (elt->exp, 0));
5028 if (GET_CODE (op0) != REG && ! CONSTANT_P (op0))
5029 op0 = fold_rtx (op0, NULL_RTX);
5031 op0 = equiv_constant (op0);
5032 if (op0)
5033 new = simplify_unary_operation (GET_CODE (elt->exp), mode,
5034 op0, mode);
5036 else if ((GET_RTX_CLASS (GET_CODE (elt->exp)) == '2'
5037 || GET_RTX_CLASS (GET_CODE (elt->exp)) == 'c')
5038 && eltcode != DIV && eltcode != MOD
5039 && eltcode != UDIV && eltcode != UMOD
5040 && eltcode != ASHIFTRT && eltcode != LSHIFTRT
5041 && eltcode != ROTATE && eltcode != ROTATERT
5042 && ((GET_CODE (XEXP (elt->exp, 0)) == SUBREG
5043 && (GET_MODE (SUBREG_REG (XEXP (elt->exp, 0)))
5044 == mode))
5045 || CONSTANT_P (XEXP (elt->exp, 0)))
5046 && ((GET_CODE (XEXP (elt->exp, 1)) == SUBREG
5047 && (GET_MODE (SUBREG_REG (XEXP (elt->exp, 1)))
5048 == mode))
5049 || CONSTANT_P (XEXP (elt->exp, 1))))
5051 rtx op0 = gen_lowpart_common (mode, XEXP (elt->exp, 0));
5052 rtx op1 = gen_lowpart_common (mode, XEXP (elt->exp, 1));
5054 if (op0 && GET_CODE (op0) != REG && ! CONSTANT_P (op0))
5055 op0 = fold_rtx (op0, NULL_RTX);
5057 if (op0)
5058 op0 = equiv_constant (op0);
5060 if (op1 && GET_CODE (op1) != REG && ! CONSTANT_P (op1))
5061 op1 = fold_rtx (op1, NULL_RTX);
5063 if (op1)
5064 op1 = equiv_constant (op1);
5066 /* If we are looking for the low SImode part of
5067 (ashift:DI c (const_int 32)), it doesn't work
5068 to compute that in SImode, because a 32-bit shift
5069 in SImode is unpredictable. We know the value is 0. */
5070 if (op0 && op1
5071 && GET_CODE (elt->exp) == ASHIFT
5072 && GET_CODE (op1) == CONST_INT
5073 && INTVAL (op1) >= GET_MODE_BITSIZE (mode))
5075 if (INTVAL (op1) < GET_MODE_BITSIZE (GET_MODE (elt->exp)))
5077 /* If the count fits in the inner mode's width,
5078 but exceeds the outer mode's width,
5079 the value will get truncated to 0
5080 by the subreg. */
5081 new = const0_rtx;
5082 else
5083 /* If the count exceeds even the inner mode's width,
5084 don't fold this expression. */
5085 new = 0;
5087 else if (op0 && op1)
5088 new = simplify_binary_operation (GET_CODE (elt->exp), mode,
5089 op0, op1);
5092 else if (GET_CODE (elt->exp) == SUBREG
5093 && GET_MODE (SUBREG_REG (elt->exp)) == mode
5094 && (GET_MODE_SIZE (GET_MODE (folded_arg0))
5095 <= UNITS_PER_WORD)
5096 && exp_equiv_p (elt->exp, elt->exp, 1, 0))
5097 new = copy_rtx (SUBREG_REG (elt->exp));
5099 if (new)
5100 return new;
5104 return x;
5106 case NOT:
5107 case NEG:
5108 /* If we have (NOT Y), see if Y is known to be (NOT Z).
5109 If so, (NOT Y) simplifies to Z. Similarly for NEG. */
5110 new = lookup_as_function (XEXP (x, 0), code);
5111 if (new)
5112 return fold_rtx (copy_rtx (XEXP (new, 0)), insn);
5113 break;
5115 case MEM:
5116 /* If we are not actually processing an insn, don't try to find the
5117 best address. Not only don't we care, but we could modify the
5118 MEM in an invalid way since we have no insn to validate against. */
5119 if (insn != 0)
5120 find_best_addr (insn, &XEXP (x, 0));
5123 /* Even if we don't fold in the insn itself,
5124 we can safely do so here, in hopes of getting a constant. */
5125 rtx addr = fold_rtx (XEXP (x, 0), NULL_RTX);
5126 rtx base = 0;
5127 HOST_WIDE_INT offset = 0;
5129 if (GET_CODE (addr) == REG
5130 && REGNO_QTY_VALID_P (REGNO (addr))
5131 && GET_MODE (addr) == qty_mode[reg_qty[REGNO (addr)]]
5132 && qty_const[reg_qty[REGNO (addr)]] != 0)
5133 addr = qty_const[reg_qty[REGNO (addr)]];
5135 /* If address is constant, split it into a base and integer offset. */
5136 if (GET_CODE (addr) == SYMBOL_REF || GET_CODE (addr) == LABEL_REF)
5137 base = addr;
5138 else if (GET_CODE (addr) == CONST && GET_CODE (XEXP (addr, 0)) == PLUS
5139 && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST_INT)
5141 base = XEXP (XEXP (addr, 0), 0);
5142 offset = INTVAL (XEXP (XEXP (addr, 0), 1));
5144 else if (GET_CODE (addr) == LO_SUM
5145 && GET_CODE (XEXP (addr, 1)) == SYMBOL_REF)
5146 base = XEXP (addr, 1);
5147 else if (GET_CODE (addr) == ADDRESSOF)
5148 return change_address (x, VOIDmode, addr);
5150 /* If this is a constant pool reference, we can fold it into its
5151 constant to allow better value tracking. */
5152 if (base && GET_CODE (base) == SYMBOL_REF
5153 && CONSTANT_POOL_ADDRESS_P (base))
5155 rtx constant = get_pool_constant (base);
5156 enum machine_mode const_mode = get_pool_mode (base);
5157 rtx new;
5159 if (CONSTANT_P (constant) && GET_CODE (constant) != CONST_INT)
5160 constant_pool_entries_cost = COST (constant);
5162 /* If we are loading the full constant, we have an equivalence. */
5163 if (offset == 0 && mode == const_mode)
5164 return constant;
5166 /* If this actually isn't a constant (weird!), we can't do
5167 anything. Otherwise, handle the two most common cases:
5168 extracting a word from a multi-word constant, and extracting
5169 the low-order bits. Other cases don't seem common enough to
5170 worry about. */
5171 if (! CONSTANT_P (constant))
5172 return x;
5174 if (GET_MODE_CLASS (mode) == MODE_INT
5175 && GET_MODE_SIZE (mode) == UNITS_PER_WORD
5176 && offset % UNITS_PER_WORD == 0
5177 && (new = operand_subword (constant,
5178 offset / UNITS_PER_WORD,
5179 0, const_mode)) != 0)
5180 return new;
5182 if (((BYTES_BIG_ENDIAN
5183 && offset == GET_MODE_SIZE (GET_MODE (constant)) - 1)
5184 || (! BYTES_BIG_ENDIAN && offset == 0))
5185 && (new = gen_lowpart_if_possible (mode, constant)) != 0)
5186 return new;
5189 /* If this is a reference to a label at a known position in a jump
5190 table, we also know its value. */
5191 if (base && GET_CODE (base) == LABEL_REF)
5193 rtx label = XEXP (base, 0);
5194 rtx table_insn = NEXT_INSN (label);
5196 if (table_insn && GET_CODE (table_insn) == JUMP_INSN
5197 && GET_CODE (PATTERN (table_insn)) == ADDR_VEC)
5199 rtx table = PATTERN (table_insn);
5201 if (offset >= 0
5202 && (offset / GET_MODE_SIZE (GET_MODE (table))
5203 < XVECLEN (table, 0)))
5204 return XVECEXP (table, 0,
5205 offset / GET_MODE_SIZE (GET_MODE (table)));
5207 if (table_insn && GET_CODE (table_insn) == JUMP_INSN
5208 && GET_CODE (PATTERN (table_insn)) == ADDR_DIFF_VEC)
5210 rtx table = PATTERN (table_insn);
5212 if (offset >= 0
5213 && (offset / GET_MODE_SIZE (GET_MODE (table))
5214 < XVECLEN (table, 1)))
5216 offset /= GET_MODE_SIZE (GET_MODE (table));
5217 new = gen_rtx (MINUS, Pmode, XVECEXP (table, 1, offset),
5218 XEXP (table, 0));
5220 if (GET_MODE (table) != Pmode)
5221 new = gen_rtx (TRUNCATE, GET_MODE (table), new);
5223 /* Indicate this is a constant. This isn't a
5224 valid form of CONST, but it will only be used
5225 to fold the next insns and then discarded, so
5226 it should be safe. */
5227 return gen_rtx (CONST, GET_MODE (new), new);
5232 return x;
5235 case ASM_OPERANDS:
5236 for (i = XVECLEN (x, 3) - 1; i >= 0; i--)
5237 validate_change (insn, &XVECEXP (x, 3, i),
5238 fold_rtx (XVECEXP (x, 3, i), insn), 0);
5239 break;
5241 default:
5242 break;
5245 const_arg0 = 0;
5246 const_arg1 = 0;
5247 const_arg2 = 0;
5248 mode_arg0 = VOIDmode;
5250 /* Try folding our operands.
5251 Then see which ones have constant values known. */
5253 fmt = GET_RTX_FORMAT (code);
5254 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
5255 if (fmt[i] == 'e')
5257 rtx arg = XEXP (x, i);
5258 rtx folded_arg = arg, const_arg = 0;
5259 enum machine_mode mode_arg = GET_MODE (arg);
5260 rtx cheap_arg, expensive_arg;
5261 rtx replacements[2];
5262 int j;
5264 /* Most arguments are cheap, so handle them specially. */
5265 switch (GET_CODE (arg))
5267 case REG:
5268 /* This is the same as calling equiv_constant; it is duplicated
5269 here for speed. */
5270 if (REGNO_QTY_VALID_P (REGNO (arg))
5271 && qty_const[reg_qty[REGNO (arg)]] != 0
5272 && GET_CODE (qty_const[reg_qty[REGNO (arg)]]) != REG
5273 && GET_CODE (qty_const[reg_qty[REGNO (arg)]]) != PLUS)
5274 const_arg
5275 = gen_lowpart_if_possible (GET_MODE (arg),
5276 qty_const[reg_qty[REGNO (arg)]]);
5277 break;
5279 case CONST:
5280 case CONST_INT:
5281 case SYMBOL_REF:
5282 case LABEL_REF:
5283 case CONST_DOUBLE:
5284 const_arg = arg;
5285 break;
5287 #ifdef HAVE_cc0
5288 case CC0:
5289 folded_arg = prev_insn_cc0;
5290 mode_arg = prev_insn_cc0_mode;
5291 const_arg = equiv_constant (folded_arg);
5292 break;
5293 #endif
5295 default:
5296 folded_arg = fold_rtx (arg, insn);
5297 const_arg = equiv_constant (folded_arg);
5300 /* For the first three operands, see if the operand
5301 is constant or equivalent to a constant. */
5302 switch (i)
5304 case 0:
5305 folded_arg0 = folded_arg;
5306 const_arg0 = const_arg;
5307 mode_arg0 = mode_arg;
5308 break;
5309 case 1:
5310 folded_arg1 = folded_arg;
5311 const_arg1 = const_arg;
5312 break;
5313 case 2:
5314 const_arg2 = const_arg;
5315 break;
5318 /* Pick the least expensive of the folded argument and an
5319 equivalent constant argument. */
5320 if (const_arg == 0 || const_arg == folded_arg
5321 || COST (const_arg) > COST (folded_arg))
5322 cheap_arg = folded_arg, expensive_arg = const_arg;
5323 else
5324 cheap_arg = const_arg, expensive_arg = folded_arg;
5326 /* Try to replace the operand with the cheapest of the two
5327 possibilities. If it doesn't work and this is either of the first
5328 two operands of a commutative operation, try swapping them.
5329 If THAT fails, try the more expensive, provided it is cheaper
5330 than what is already there. */
5332 if (cheap_arg == XEXP (x, i))
5333 continue;
5335 if (insn == 0 && ! copied)
5337 x = copy_rtx (x);
5338 copied = 1;
5341 replacements[0] = cheap_arg, replacements[1] = expensive_arg;
5342 for (j = 0;
5343 j < 2 && replacements[j]
5344 && COST (replacements[j]) < COST (XEXP (x, i));
5345 j++)
5347 if (validate_change (insn, &XEXP (x, i), replacements[j], 0))
5348 break;
5350 if (code == NE || code == EQ || GET_RTX_CLASS (code) == 'c')
5352 validate_change (insn, &XEXP (x, i), XEXP (x, 1 - i), 1);
5353 validate_change (insn, &XEXP (x, 1 - i), replacements[j], 1);
5355 if (apply_change_group ())
5357 /* Swap them back to be invalid so that this loop can
5358 continue and flag them to be swapped back later. */
5359 rtx tem;
5361 tem = XEXP (x, 0); XEXP (x, 0) = XEXP (x, 1);
5362 XEXP (x, 1) = tem;
5363 must_swap = 1;
5364 break;
5370 else if (fmt[i] == 'E')
5371 /* Don't try to fold inside of a vector of expressions.
5372 Doing nothing is harmless. */
5375 /* If a commutative operation, place a constant integer as the second
5376 operand unless the first operand is also a constant integer. Otherwise,
5377 place any constant second unless the first operand is also a constant. */
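/* E.g. (plus (const_int 4) (reg R)) has its operands swapped in
   place here so the constant ends up second, keeping expressions
   in the canonical form the hash table relies on.  */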
5379 if (code == EQ || code == NE || GET_RTX_CLASS (code) == 'c')
5381 if (must_swap || (const_arg0
5382 && (const_arg1 == 0
5383 || (GET_CODE (const_arg0) == CONST_INT
5384 && GET_CODE (const_arg1) != CONST_INT))))
5386 register rtx tem = XEXP (x, 0);
5388 if (insn == 0 && ! copied)
5390 x = copy_rtx (x);
5391 copied = 1;
5394 validate_change (insn, &XEXP (x, 0), XEXP (x, 1), 1);
5395 validate_change (insn, &XEXP (x, 1), tem, 1);
5396 if (apply_change_group ())
5398 tem = const_arg0, const_arg0 = const_arg1, const_arg1 = tem;
5399 tem = folded_arg0, folded_arg0 = folded_arg1, folded_arg1 = tem;
5404 /* If X is an arithmetic operation, see if we can simplify it. */
5406 switch (GET_RTX_CLASS (code))
5408 case '1':
5410 int is_const = 0;
5412 /* We can't simplify extension ops unless we know the
5413 original mode. */
5414 if ((code == ZERO_EXTEND || code == SIGN_EXTEND)
5415 && mode_arg0 == VOIDmode)
5416 break;
5418 /* If we had a CONST, strip it off and put it back later if we
5419 fold. */
5420 if (const_arg0 != 0 && GET_CODE (const_arg0) == CONST)
5421 is_const = 1, const_arg0 = XEXP (const_arg0, 0);
5423 new = simplify_unary_operation (code, mode,
5424 const_arg0 ? const_arg0 : folded_arg0,
5425 mode_arg0);
5426 if (new != 0 && is_const)
5427 new = gen_rtx (CONST, mode, new);
5429 break;
5431 case '<':
5432 /* See what items are actually being compared and set FOLDED_ARG[01]
5433 to those values and CODE to the actual comparison code. If any are
5434 constant, set CONST_ARG0 and CONST_ARG1 appropriately. We needn't
5435 do anything if both operands are already known to be constant. */
5437 if (const_arg0 == 0 || const_arg1 == 0)
5439 struct table_elt *p0, *p1;
5440 rtx true = const_true_rtx, false = const0_rtx;
5441 enum machine_mode mode_arg1;
5443 #ifdef FLOAT_STORE_FLAG_VALUE
5444 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
5446 true = CONST_DOUBLE_FROM_REAL_VALUE (FLOAT_STORE_FLAG_VALUE,
5447 mode);
5448 false = CONST0_RTX (mode);
5450 #endif
5452 code = find_comparison_args (code, &folded_arg0, &folded_arg1,
5453 &mode_arg0, &mode_arg1);
5454 const_arg0 = equiv_constant (folded_arg0);
5455 const_arg1 = equiv_constant (folded_arg1);
5457 /* If the mode is VOIDmode or a MODE_CC mode, we don't know
5458 what kinds of things are being compared, so we can't do
5459 anything with this comparison. */
5461 if (mode_arg0 == VOIDmode || GET_MODE_CLASS (mode_arg0) == MODE_CC)
5462 break;
5464 /* If we do not now have two constants being compared, see
5465 if we can nevertheless deduce some things about the
5466 comparison. */
5467 if (const_arg0 == 0 || const_arg1 == 0)
5469 /* Is FOLDED_ARG0 frame-pointer plus a constant? Or
5470 a non-explicit constant? These aren't zero, but we
5471 don't know their sign. */
5472 if (const_arg1 == const0_rtx
5473 && (NONZERO_BASE_PLUS_P (folded_arg0)
5474 #if 0 /* Sad to say, on sysvr4, #pragma weak can make a symbol address
5475 come out as 0. */
5476 || GET_CODE (folded_arg0) == SYMBOL_REF
5477 #endif
5478 || GET_CODE (folded_arg0) == LABEL_REF
5479 || GET_CODE (folded_arg0) == CONST))
5481 if (code == EQ)
5482 return false;
5483 else if (code == NE)
5484 return true;
5487 /* See if the two operands are the same. We don't do this
5488 for IEEE floating-point, since we can't assume x == x;
5489 x might be a NaN. */
5491 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
5492 || ! FLOAT_MODE_P (mode_arg0) || flag_fast_math)
5493 && (folded_arg0 == folded_arg1
5494 || (GET_CODE (folded_arg0) == REG
5495 && GET_CODE (folded_arg1) == REG
5496 && (reg_qty[REGNO (folded_arg0)]
5497 == reg_qty[REGNO (folded_arg1)]))
5498 || ((p0 = lookup (folded_arg0,
5499 (safe_hash (folded_arg0, mode_arg0)
5500 % NBUCKETS), mode_arg0))
5501 && (p1 = lookup (folded_arg1,
5502 (safe_hash (folded_arg1, mode_arg0)
5503 % NBUCKETS), mode_arg0))
5504 && p0->first_same_value == p1->first_same_value)))
5505 return ((code == EQ || code == LE || code == GE
5506 || code == LEU || code == GEU)
5507 ? true : false);
5509 /* If FOLDED_ARG0 is a register, see if the comparison we are
5510 doing now is either the same as we did before or the reverse
5511 (we only check the reverse if not floating-point). */
5512 else if (GET_CODE (folded_arg0) == REG)
5514 int qty = reg_qty[REGNO (folded_arg0)];
5516 if (REGNO_QTY_VALID_P (REGNO (folded_arg0))
5517 && (comparison_dominates_p (qty_comparison_code[qty], code)
5518 || (comparison_dominates_p (qty_comparison_code[qty],
5519 reverse_condition (code))
5520 && ! FLOAT_MODE_P (mode_arg0)))
5521 && (rtx_equal_p (qty_comparison_const[qty], folded_arg1)
5522 || (const_arg1
5523 && rtx_equal_p (qty_comparison_const[qty],
5524 const_arg1))
5525 || (GET_CODE (folded_arg1) == REG
5526 && (reg_qty[REGNO (folded_arg1)]
5527 == qty_comparison_qty[qty]))))
5528 return (comparison_dominates_p (qty_comparison_code[qty],
5529 code)
5530 ? true : false);
5535 /* If we are comparing against zero, see if the first operand is
5536 equivalent to an IOR with a constant. If so, we may be able to
5537 determine the result of this comparison. */
5539 if (const_arg1 == const0_rtx)
5541 rtx y = lookup_as_function (folded_arg0, IOR);
5542 rtx inner_const;
5544 if (y != 0
5545 && (inner_const = equiv_constant (XEXP (y, 1))) != 0
5546 && GET_CODE (inner_const) == CONST_INT
5547 && INTVAL (inner_const) != 0)
5549 int sign_bitnum = GET_MODE_BITSIZE (mode_arg0) - 1;
5550 int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
5551 && (INTVAL (inner_const)
5552 & ((HOST_WIDE_INT) 1 << sign_bitnum)));
5553 rtx true = const_true_rtx, false = const0_rtx;
5555 #ifdef FLOAT_STORE_FLAG_VALUE
5556 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
5558 true = CONST_DOUBLE_FROM_REAL_VALUE (FLOAT_STORE_FLAG_VALUE,
5559 mode);
5560 false = CONST0_RTX (mode);
5562 #endif
5564 switch (code)
5566 case EQ:
5567 return false;
5568 case NE:
5569 return true;
5570 case LT: case LE:
5571 if (has_sign)
5572 return true;
5573 break;
5574 case GT: case GE:
5575 if (has_sign)
5576 return false;
5577 break;
5578 default:
5579 break;
5584 new = simplify_relational_operation (code, mode_arg0,
5585 const_arg0 ? const_arg0 : folded_arg0,
5586 const_arg1 ? const_arg1 : folded_arg1);
5587 #ifdef FLOAT_STORE_FLAG_VALUE
5588 if (new != 0 && GET_MODE_CLASS (mode) == MODE_FLOAT)
5589 new = ((new == const0_rtx) ? CONST0_RTX (mode)
5590 : CONST_DOUBLE_FROM_REAL_VALUE (FLOAT_STORE_FLAG_VALUE, mode));
5591 #endif
5592 break;
5594 case '2':
5595 case 'c':
5596 switch (code)
5598 case PLUS:
5599 /* If the second operand is a LABEL_REF, see if the first is a MINUS
5600 with that LABEL_REF as its second operand. If so, the result is
5601 the first operand of that MINUS. This handles switches with an
5602 ADDR_DIFF_VEC table. */
5603 if (const_arg1 && GET_CODE (const_arg1) == LABEL_REF)
5605 rtx y
5606 = GET_CODE (folded_arg0) == MINUS ? folded_arg0
5607 : lookup_as_function (folded_arg0, MINUS);
5609 if (y != 0 && GET_CODE (XEXP (y, 1)) == LABEL_REF
5610 && XEXP (XEXP (y, 1), 0) == XEXP (const_arg1, 0))
5611 return XEXP (y, 0);
5613 /* Now try for a CONST of a MINUS like the above. */
5614 if ((y = (GET_CODE (folded_arg0) == CONST ? folded_arg0
5615 : lookup_as_function (folded_arg0, CONST))) != 0
5616 && GET_CODE (XEXP (y, 0)) == MINUS
5617 && GET_CODE (XEXP (XEXP (y, 0), 1)) == LABEL_REF
5618 && XEXP (XEXP (XEXP (y, 0),1), 0) == XEXP (const_arg1, 0))
5619 return XEXP (XEXP (y, 0), 0);
5622 /* Likewise if the operands are in the other order. */
5623 if (const_arg0 && GET_CODE (const_arg0) == LABEL_REF)
5625 rtx y
5626 = GET_CODE (folded_arg1) == MINUS ? folded_arg1
5627 : lookup_as_function (folded_arg1, MINUS);
5629 if (y != 0 && GET_CODE (XEXP (y, 1)) == LABEL_REF
5630 && XEXP (XEXP (y, 1), 0) == XEXP (const_arg0, 0))
5631 return XEXP (y, 0);
5633 /* Now try for a CONST of a MINUS like the above. */
5634 if ((y = (GET_CODE (folded_arg1) == CONST ? folded_arg1
5635 : lookup_as_function (folded_arg1, CONST))) != 0
5636 && GET_CODE (XEXP (y, 0)) == MINUS
5637 && GET_CODE (XEXP (XEXP (y, 0), 1)) == LABEL_REF
5638 && XEXP (XEXP (XEXP (y, 0),1), 0) == XEXP (const_arg0, 0))
5639 return XEXP (XEXP (y, 0), 0);
5642 /* If second operand is a register equivalent to a negative
5643 CONST_INT, see if we can find a register equivalent to the
5644 positive constant. Make a MINUS if so. Don't do this for
5645 a non-negative constant since we might then alternate between
5646 choosing positive and negative constants. Having the positive
5647 constant previously used is the more common case. Be sure
5648 the resulting constant is non-negative; if const_arg1 were
5649 the smallest negative number this would overflow: depending
5650 on the mode, this would either just be the same value (and
5651 hence not save anything) or be incorrect. */
5652 if (const_arg1 != 0 && GET_CODE (const_arg1) == CONST_INT
5653 && INTVAL (const_arg1) < 0
5654 && - INTVAL (const_arg1) >= 0
5655 && GET_CODE (folded_arg1) == REG)
5657 rtx new_const = GEN_INT (- INTVAL (const_arg1));
5658 struct table_elt *p
5659 = lookup (new_const, safe_hash (new_const, mode) % NBUCKETS,
5660 mode);
5662 if (p)
5663 for (p = p->first_same_value; p; p = p->next_same_value)
5664 if (GET_CODE (p->exp) == REG)
5665 return cse_gen_binary (MINUS, mode, folded_arg0,
5666 canon_reg (p->exp, NULL_RTX));
5668 goto from_plus;
5670 case MINUS:
5671 /* If we have (MINUS Y C), see if Y is known to be (PLUS Z C2).
5672 If so, produce (PLUS Z C2-C). */
5673 if (const_arg1 != 0 && GET_CODE (const_arg1) == CONST_INT)
5675 rtx y = lookup_as_function (XEXP (x, 0), PLUS);
5676 if (y && GET_CODE (XEXP (y, 1)) == CONST_INT)
5677 return fold_rtx (plus_constant (copy_rtx (y),
5678 -INTVAL (const_arg1)),
5679 NULL_RTX);
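/* Illustrative sketch (hypothetical registers): if (reg 70) is known
   to be (plus (reg 69) (const_int 10)), then
   (minus (reg 70) (const_int 4)) refolds to
   (plus (reg 69) (const_int 6)).  */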
5682 /* ... fall through ... */
5684 from_plus:
5685 case SMIN: case SMAX: case UMIN: case UMAX:
5686 case IOR: case AND: case XOR:
5687 case MULT: case DIV: case UDIV:
5688 case ASHIFT: case LSHIFTRT: case ASHIFTRT:
5689 /* If we have (<op> <reg> <const_int>) for an associative OP and REG
5690 is known to be of similar form, we may be able to replace the
5691 operation with a combined operation. This may eliminate the
5692 intermediate operation if every use is simplified in this way.
5693 Note that the similar optimization done by combine.c only works
5694 if the intermediate operation's result has only one reference. */
5696 if (GET_CODE (folded_arg0) == REG
5697 && const_arg1 && GET_CODE (const_arg1) == CONST_INT)
5699 int is_shift
5700 = (code == ASHIFT || code == ASHIFTRT || code == LSHIFTRT);
5701 rtx y = lookup_as_function (folded_arg0, code);
5702 rtx inner_const;
5703 enum rtx_code associate_code;
5704 rtx new_const;
5706 if (y == 0
5707 || 0 == (inner_const
5708 = equiv_constant (fold_rtx (XEXP (y, 1), 0)))
5709 || GET_CODE (inner_const) != CONST_INT
5710 /* If we have compiled a statement like
5711 "if (x == (x & mask1))", and now are looking at
5712 "x & mask2", we will have a case where the first operand
5713 of Y is the same as our first operand. Unless we detect
5714 this case, an infinite loop will result. */
5715 || XEXP (y, 0) == folded_arg0)
5716 break;
5718 /* Don't associate these operations if they are a PLUS with the
5719 same constant and it is a power of two. These might be doable
5720 with a pre- or post-increment. Similarly for two subtracts of
5721 identical powers of two with post-decrement. */
5723 if (code == PLUS && INTVAL (const_arg1) == INTVAL (inner_const)
5724 && (0
5725 #if defined(HAVE_PRE_INCREMENT) || defined(HAVE_POST_INCREMENT)
5726 || exact_log2 (INTVAL (const_arg1)) >= 0
5727 #endif
5728 #if defined(HAVE_PRE_DECREMENT) || defined(HAVE_POST_DECREMENT)
5729 || exact_log2 (- INTVAL (const_arg1)) >= 0
5730 #endif
5731 ))
5732 break;
5734 /* Compute the code used to compose the constants. For example,
5735 A/C1/C2 is A/(C1 * C2), so if CODE == DIV, we want MULT. */
5737 associate_code
5738 = (code == MULT || code == DIV || code == UDIV ? MULT
5739 : is_shift || code == PLUS || code == MINUS ? PLUS : code);
5741 new_const = simplify_binary_operation (associate_code, mode,
5742 const_arg1, inner_const);
5744 if (new_const == 0)
5745 break;
5747 /* If we are associating shift operations, don't let this
5748 produce a shift of the size of the object or larger.
5749 This could occur when we follow a sign-extend by a right
5750 shift on a machine that does a sign-extend as a pair
5751 of shifts. */
5753 if (is_shift && GET_CODE (new_const) == CONST_INT
5754 && INTVAL (new_const) >= GET_MODE_BITSIZE (mode))
5756 /* As an exception, we can turn an ASHIFTRT of this
5757 form into a shift of the number of bits - 1. */
5758 if (code == ASHIFTRT)
5759 new_const = GEN_INT (GET_MODE_BITSIZE (mode) - 1);
5760 else
5761 break;
5764 y = copy_rtx (XEXP (y, 0));
5766 /* If Y contains our first operand (the most common way this
5767 can happen is if Y is a MEM), we would go into an infinite
5768 loop if we tried to fold it. So don't in that case. */
5770 if (! reg_mentioned_p (folded_arg0, y))
5771 y = fold_rtx (y, insn);
5773 return cse_gen_binary (code, mode, y, new_const);
5775 break;
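/* Illustrative sketch (hypothetical registers): if (reg 70) is known
   to be (ashift:SI (reg 69) (const_int 3)), then
   (ashift:SI (reg 70) (const_int 2)) combines to
   (ashift:SI (reg 69) (const_int 5)); shift counts compose by
   addition, which is why associate_code is PLUS for shifts.  Had a
   combined ASHIFTRT count reached 32 in SImode it would be clamped
   to 31; any other shift of full width is simply not combined.  */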
5777 default:
5778 break;
5781 new = simplify_binary_operation (code, mode,
5782 const_arg0 ? const_arg0 : folded_arg0,
5783 const_arg1 ? const_arg1 : folded_arg1);
5784 break;
5786 case 'o':
5787 /* (lo_sum (high X) X) is simply X. */
5788 if (code == LO_SUM && const_arg0 != 0
5789 && GET_CODE (const_arg0) == HIGH
5790 && rtx_equal_p (XEXP (const_arg0, 0), const_arg1))
5791 return const_arg1;
5792 break;
5794 case '3':
5795 case 'b':
5796 new = simplify_ternary_operation (code, mode, mode_arg0,
5797 const_arg0 ? const_arg0 : folded_arg0,
5798 const_arg1 ? const_arg1 : folded_arg1,
5799 const_arg2 ? const_arg2 : XEXP (x, 2));
5800 break;
5803 return new ? new : x;
5806 /* Return a constant value currently equivalent to X.
5807 Return 0 if we don't know one. */
5809 static rtx
5810 equiv_constant (x)
5811 rtx x;
5813 if (GET_CODE (x) == REG
5814 && REGNO_QTY_VALID_P (REGNO (x))
5815 && qty_const[reg_qty[REGNO (x)]])
5816 x = gen_lowpart_if_possible (GET_MODE (x), qty_const[reg_qty[REGNO (x)]]);
5818 if (x != 0 && CONSTANT_P (x))
5819 return x;
5821 /* If X is a MEM, try to fold it outside the context of any insn to see if
5822 it might be equivalent to a constant. That handles the case where it
5823 is a constant-pool reference. Then try to look it up in the hash table
5824 in case it is something whose value we have seen before. */
5826 if (GET_CODE (x) == MEM)
5828 struct table_elt *elt;
5830 x = fold_rtx (x, NULL_RTX);
5831 if (CONSTANT_P (x))
5832 return x;
5834 elt = lookup (x, safe_hash (x, GET_MODE (x)) % NBUCKETS, GET_MODE (x));
5835 if (elt == 0)
5836 return 0;
5838 for (elt = elt->first_same_value; elt; elt = elt->next_same_value)
5839 if (elt->is_const && CONSTANT_P (elt->exp))
5840 return elt->exp;
5843 return 0;
5846 /* Assuming that X is an rtx (e.g., MEM, REG or SUBREG) for a fixed-point
5847 number, return an rtx (MEM, SUBREG, or CONST_INT) that refers to the
5848 least-significant part of X.
5849 MODE specifies how big a part of X to return.
5851 If the requested operation cannot be done, 0 is returned.
5853 This is similar to gen_lowpart in emit-rtl.c. */
5855 rtx
5856 gen_lowpart_if_possible (mode, x)
5857 enum machine_mode mode;
5858 register rtx x;
5860 rtx result = gen_lowpart_common (mode, x);
5862 if (result)
5863 return result;
5864 else if (GET_CODE (x) == MEM)
5866 /* This is the only other case we handle. */
5867 register int offset = 0;
5868 rtx new;
5870 if (WORDS_BIG_ENDIAN)
5871 offset = (MAX (GET_MODE_SIZE (GET_MODE (x)), UNITS_PER_WORD)
5872 - MAX (GET_MODE_SIZE (mode), UNITS_PER_WORD));
5873 if (BYTES_BIG_ENDIAN)
5874 /* Adjust the address so that the address-after-the-data is
5875 unchanged. */
5876 offset -= (MIN (UNITS_PER_WORD, GET_MODE_SIZE (mode))
5877 - MIN (UNITS_PER_WORD, GET_MODE_SIZE (GET_MODE (x))));
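/* Worked example (values hypothetical): taking the QImode lowpart of
   an SImode MEM on a target where both WORDS_BIG_ENDIAN and
   BYTES_BIG_ENDIAN hold and UNITS_PER_WORD == 4: the first
   adjustment gives MAX (4, 4) - MAX (1, 4) == 0, and the second
   subtracts MIN (4, 1) - MIN (4, 4) == -3, leaving offset == 3.
   The byte is fetched from X+3, so the address-after-the-data,
   X+4, is indeed unchanged.  */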
5878 new = gen_rtx (MEM, mode, plus_constant (XEXP (x, 0), offset));
5879 if (! memory_address_p (mode, XEXP (new, 0)))
5880 return 0;
5881 MEM_VOLATILE_P (new) = MEM_VOLATILE_P (x);
5882 RTX_UNCHANGING_P (new) = RTX_UNCHANGING_P (x);
5883 MEM_IN_STRUCT_P (new) = MEM_IN_STRUCT_P (x);
5884 return new;
5886 else
5887 return 0;
5890 /* Given INSN, a jump insn, TAKEN indicates if we are following the "taken"
5891 branch. It will be zero if not.
5893 In certain cases, this can cause us to add an equivalence. For example,
5894 if we are following the taken case of
5895 if (i == 2)
5896 we can add the fact that `i' and `2' are now equivalent.
5898 In any case, we can record that this comparison was passed. If the same
5899 comparison is seen later, we will know its value. */
5901 static void
5902 record_jump_equiv (insn, taken)
5903 rtx insn;
5904 int taken;
5906 int cond_known_true;
5907 rtx op0, op1;
5908 enum machine_mode mode, mode0, mode1;
5909 int reversed_nonequality = 0;
5910 enum rtx_code code;
5912 /* Ensure this is the right kind of insn. */
5913 if (! condjump_p (insn) || simplejump_p (insn))
5914 return;
5916 /* See if this jump condition is known true or false. */
5917 if (taken)
5918 cond_known_true = (XEXP (SET_SRC (PATTERN (insn)), 2) == pc_rtx);
5919 else
5920 cond_known_true = (XEXP (SET_SRC (PATTERN (insn)), 1) == pc_rtx);
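/* Illustrative sketch (hypothetical operands): a canonical condjump
   looks like

       (set (pc) (if_then_else (eq (reg 70) (const_int 2))
                               (label_ref L1)
                               (pc)))

   Here XEXP (SET_SRC, 2) is pc_rtx, so following the taken arm means
   the EQ condition is known true; were the label in the "else" arm
   instead, taking the branch would mean it is known false.  */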
5922 /* Get the type of comparison being done and the operands being compared.
5923 If we had to reverse a non-equality condition, record that fact so we
5924 know that it isn't valid for floating-point. */
5925 code = GET_CODE (XEXP (SET_SRC (PATTERN (insn)), 0));
5926 op0 = fold_rtx (XEXP (XEXP (SET_SRC (PATTERN (insn)), 0), 0), insn);
5927 op1 = fold_rtx (XEXP (XEXP (SET_SRC (PATTERN (insn)), 0), 1), insn);
5929 code = find_comparison_args (code, &op0, &op1, &mode0, &mode1);
5930 if (! cond_known_true)
5932 reversed_nonequality = (code != EQ && code != NE);
5933 code = reverse_condition (code);
5936 /* The mode is the mode of the non-constant. */
5937 mode = mode0;
5938 if (mode1 != VOIDmode)
5939 mode = mode1;
5941 record_jump_cond (code, mode, op0, op1, reversed_nonequality);
5944 /* We know that comparison CODE applied to OP0 and OP1 in MODE is true.
5945 REVERSED_NONEQUALITY is nonzero if CODE had to be swapped.
5946 Make any useful entries we can with that information. Called from
5947 above function and called recursively. */
5949 static void
5950 record_jump_cond (code, mode, op0, op1, reversed_nonequality)
5951 enum rtx_code code;
5952 enum machine_mode mode;
5953 rtx op0, op1;
5954 int reversed_nonequality;
5956 unsigned op0_hash, op1_hash;
5957 int op0_in_memory, op0_in_struct, op1_in_memory, op1_in_struct;
5958 struct table_elt *op0_elt, *op1_elt;
5960 /* If OP0 and OP1 are known equal, and either is a paradoxical SUBREG,
5961 we know that they are also equal in the smaller mode (this is also
5962 true for all smaller modes whether or not there is a SUBREG, but
5963 is not worth testing for with no SUBREG). */
5965 /* Note that GET_MODE (op0) may not equal MODE. */
5966 if (code == EQ && GET_CODE (op0) == SUBREG
5967 && (GET_MODE_SIZE (GET_MODE (op0))
5968 > GET_MODE_SIZE (GET_MODE (SUBREG_REG (op0)))))
5970 enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op0));
5971 rtx tem = gen_lowpart_if_possible (inner_mode, op1);
5973 record_jump_cond (code, mode, SUBREG_REG (op0),
5974 tem ? tem : gen_rtx (SUBREG, inner_mode, op1, 0),
5975 reversed_nonequality);
5978 if (code == EQ && GET_CODE (op1) == SUBREG
5979 && (GET_MODE_SIZE (GET_MODE (op1))
5980 > GET_MODE_SIZE (GET_MODE (SUBREG_REG (op1)))))
5982 enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op1));
5983 rtx tem = gen_lowpart_if_possible (inner_mode, op0);
5985 record_jump_cond (code, mode, SUBREG_REG (op1),
5986 tem ? tem : gen_rtx (SUBREG, inner_mode, op0, 0),
5987 reversed_nonequality);
5990 /* Similarly, if this is an NE comparison, and either is a SUBREG
5991 making a smaller mode, we know the whole thing is also NE. */
5993 /* Note that GET_MODE (op0) may not equal MODE;
5994 if we test MODE instead, we can get an infinite recursion
5995 alternating between two modes each wider than MODE. */
5997 if (code == NE && GET_CODE (op0) == SUBREG
5998 && subreg_lowpart_p (op0)
5999 && (GET_MODE_SIZE (GET_MODE (op0))
6000 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op0)))))
6002 enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op0));
6003 rtx tem = gen_lowpart_if_possible (inner_mode, op1);
6005 record_jump_cond (code, mode, SUBREG_REG (op0),
6006 tem ? tem : gen_rtx (SUBREG, inner_mode, op1, 0),
6007 reversed_nonequality);
6010 if (code == NE && GET_CODE (op1) == SUBREG
6011 && subreg_lowpart_p (op1)
6012 && (GET_MODE_SIZE (GET_MODE (op1))
6013 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op1)))))
6015 enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op1));
6016 rtx tem = gen_lowpart_if_possible (inner_mode, op0);
6018 record_jump_cond (code, mode, SUBREG_REG (op1),
6019 tem ? tem : gen_rtx (SUBREG, inner_mode, op0, 0),
6020 reversed_nonequality);
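/* Illustrative sketch (hypothetical registers): from the equality
   (subreg:DI (reg:SI 70) 0) == (reg:DI 71), a paradoxical SUBREG,
   we also record that (reg:SI 70) equals the SImode lowpart of
   (reg:DI 71).  Conversely, for NE with a narrowing lowpart SUBREG:
   if the low parts differ, the containing wider values differ too.  */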
6023 /* Hash both operands. */
6025 do_not_record = 0;
6026 hash_arg_in_memory = 0;
6027 hash_arg_in_struct = 0;
6028 op0_hash = HASH (op0, mode);
6029 op0_in_memory = hash_arg_in_memory;
6030 op0_in_struct = hash_arg_in_struct;
6032 if (do_not_record)
6033 return;
6035 do_not_record = 0;
6036 hash_arg_in_memory = 0;
6037 hash_arg_in_struct = 0;
6038 op1_hash = HASH (op1, mode);
6039 op1_in_memory = hash_arg_in_memory;
6040 op1_in_struct = hash_arg_in_struct;
6042 if (do_not_record)
6043 return;
6045 /* Look up both operands. */
6046 op0_elt = lookup (op0, op0_hash, mode);
6047 op1_elt = lookup (op1, op1_hash, mode);
6049 /* If both operands are already equivalent or if they are not in the
6050 table but are identical, do nothing. */
6051 if ((op0_elt != 0 && op1_elt != 0
6052 && op0_elt->first_same_value == op1_elt->first_same_value)
6053 || op0 == op1 || rtx_equal_p (op0, op1))
6054 return;
6056 /* If we aren't setting two things equal all we can do is save this
6057 comparison. Similarly if this is floating-point. In the latter
6058 case, OP1 might be zero and both -0.0 and 0.0 are equal to it.
6059 If we record the equality, we might inadvertently delete code
6060 whose intent was to change -0 to +0. */
6062 if (code != EQ || FLOAT_MODE_P (GET_MODE (op0)))
6064 /* If we reversed a floating-point comparison, if OP0 is not a
6065 register, or if OP1 is neither a register nor a constant, we can't
6066 do anything. */
6068 if (GET_CODE (op1) != REG)
6069 op1 = equiv_constant (op1);
6071 if ((reversed_nonequality && FLOAT_MODE_P (mode))
6072 || GET_CODE (op0) != REG || op1 == 0)
6073 return;
6075 /* Put OP0 in the hash table if it isn't already. This gives it a
6076 new quantity number. */
6077 if (op0_elt == 0)
6079 if (insert_regs (op0, NULL_PTR, 0))
6081 rehash_using_reg (op0);
6082 op0_hash = HASH (op0, mode);
6084 /* If OP0 is contained in OP1, this changes its hash code
6085 as well. Faster to rehash than to check, except
6086 for the simple case of a constant. */
6087 if (! CONSTANT_P (op1))
6088 op1_hash = HASH (op1, mode);
6091 op0_elt = insert (op0, NULL_PTR, op0_hash, mode);
6092 op0_elt->in_memory = op0_in_memory;
6093 op0_elt->in_struct = op0_in_struct;
6096 qty_comparison_code[reg_qty[REGNO (op0)]] = code;
6097 if (GET_CODE (op1) == REG)
6099 /* Look it up again--in case op0 and op1 are the same. */
6100 op1_elt = lookup (op1, op1_hash, mode);
6102 /* Put OP1 in the hash table so it gets a new quantity number. */
6103 if (op1_elt == 0)
6105 if (insert_regs (op1, NULL_PTR, 0))
6107 rehash_using_reg (op1);
6108 op1_hash = HASH (op1, mode);
6111 op1_elt = insert (op1, NULL_PTR, op1_hash, mode);
6112 op1_elt->in_memory = op1_in_memory;
6113 op1_elt->in_struct = op1_in_struct;
6116 qty_comparison_qty[reg_qty[REGNO (op0)]] = reg_qty[REGNO (op1)];
6117 qty_comparison_const[reg_qty[REGNO (op0)]] = 0;
6119 else
6121 qty_comparison_qty[reg_qty[REGNO (op0)]] = -1;
6122 qty_comparison_const[reg_qty[REGNO (op0)]] = op1;
6125 return;
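/* Illustrative sketch (variable names hypothetical): after the
   not-taken arm of "if (x < y)" on integer operands, the code above
   records GE for X's quantity together with Y's quantity (or Y's
   constant value), so a later fold of the same comparison in this
   basic block can be resolved without recomputing it.  */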
6128 /* If either side is still missing an equivalence, make it now,
6129 then merge the equivalences. */
6131 if (op0_elt == 0)
6133 if (insert_regs (op0, NULL_PTR, 0))
6135 rehash_using_reg (op0);
6136 op0_hash = HASH (op0, mode);
6139 op0_elt = insert (op0, NULL_PTR, op0_hash, mode);
6140 op0_elt->in_memory = op0_in_memory;
6141 op0_elt->in_struct = op0_in_struct;
6144 if (op1_elt == 0)
6146 if (insert_regs (op1, NULL_PTR, 0))
6148 rehash_using_reg (op1);
6149 op1_hash = HASH (op1, mode);
6152 op1_elt = insert (op1, NULL_PTR, op1_hash, mode);
6153 op1_elt->in_memory = op1_in_memory;
6154 op1_elt->in_struct = op1_in_struct;
6157 merge_equiv_classes (op0_elt, op1_elt);
6158 last_jump_equiv_class = op0_elt;
6161 /* CSE processing for one instruction.
6162 First simplify sources and addresses of all assignments
6163 in the instruction, using previously-computed equivalent values.
6164 Then install the new sources and destinations in the table
6165 of available values.
6167 If IN_LIBCALL_BLOCK is nonzero, don't record any equivalence made in
6168 the insn. */
6170 /* Data on one SET contained in the instruction. */
6172 struct set
6174 /* The SET rtx itself. */
6175 rtx rtl;
6176 /* The SET_SRC of the rtx (the original value, if it is changing). */
6177 rtx src;
6178 /* The hash-table element for the SET_SRC of the SET. */
6179 struct table_elt *src_elt;
6180 /* Hash value for the SET_SRC. */
6181 unsigned src_hash;
6182 /* Hash value for the SET_DEST. */
6183 unsigned dest_hash;
6184 /* The SET_DEST, with SUBREG, etc., stripped. */
6185 rtx inner_dest;
6186 /* Place where the pointer to the INNER_DEST was found. */
6187 rtx *inner_dest_loc;
6188 /* Nonzero if the SET_SRC is in memory. */
6189 char src_in_memory;
6190 /* Nonzero if the SET_SRC is in a structure. */
6191 char src_in_struct;
6192 /* Nonzero if the SET_SRC contains something
6193 whose value cannot be predicted and understood. */
6194 char src_volatile;
6195 /* Original machine mode, in case it becomes a CONST_INT. */
6196 enum machine_mode mode;
6197 /* A constant equivalent for SET_SRC, if any. */
6198 rtx src_const;
6199 /* Hash value of constant equivalent for SET_SRC. */
6200 unsigned src_const_hash;
6201 /* Table entry for constant equivalent for SET_SRC, if any. */
6202 struct table_elt *src_const_elt;
6205 static void
6206 cse_insn (insn, in_libcall_block)
6207 rtx insn;
6208 int in_libcall_block;
6210 register rtx x = PATTERN (insn);
6211 register int i;
6212 rtx tem;
6213 register int n_sets = 0;
6215 /* Records what this insn does to set CC0. */
6216 rtx this_insn_cc0 = 0;
6217 enum machine_mode this_insn_cc0_mode = VOIDmode;
6218 struct write_data writes_memory;
6219 static struct write_data init = {0, 0, 0, 0};
6221 rtx src_eqv = 0;
6222 struct table_elt *src_eqv_elt = 0;
6223 int src_eqv_volatile;
6224 int src_eqv_in_memory;
6225 int src_eqv_in_struct;
6226 unsigned src_eqv_hash;
6228 struct set *sets;
6230 this_insn = insn;
6231 writes_memory = init;
6233 /* Find all the SETs and CLOBBERs in this instruction.
6234 Record all the SETs in the array `set' and count them.
6235 Also determine whether there is a CLOBBER that invalidates
6236 all memory references, or all references at varying addresses. */
6238 if (GET_CODE (insn) == CALL_INSN)
6240 for (tem = CALL_INSN_FUNCTION_USAGE (insn); tem; tem = XEXP (tem, 1))
6241 if (GET_CODE (XEXP (tem, 0)) == CLOBBER)
6242 invalidate (SET_DEST (XEXP (tem, 0)), VOIDmode);
6245 if (GET_CODE (x) == SET)
6247 sets = (struct set *) alloca (sizeof (struct set));
6248 sets[0].rtl = x;
6250 /* Ignore SETs that are unconditional jumps.
6251 They never need cse processing, so this does not hurt.
6252 The reason is not efficiency but rather
6253 so that we can test at the end for instructions
6254 that have been simplified to unconditional jumps
6255 and not be misled by unchanged instructions
6256 that were unconditional jumps to begin with. */
6257 if (SET_DEST (x) == pc_rtx
6258 && GET_CODE (SET_SRC (x)) == LABEL_REF)
6261 /* Don't count call-insns, (set (reg 0) (call ...)), as a set.
6262 The hard function value register is used only once, to copy to
6263 someplace else, so it isn't worth cse'ing (and on 80386 is unsafe)!
6264 Ensure we invalidate the destination register. On the 80386 no
6265 other code would invalidate it since it is a fixed_reg.
6266 We need not check the return of apply_change_group; see canon_reg. */
6268 else if (GET_CODE (SET_SRC (x)) == CALL)
6270 canon_reg (SET_SRC (x), insn);
6271 apply_change_group ();
6272 fold_rtx (SET_SRC (x), insn);
6273 invalidate (SET_DEST (x), VOIDmode);
6275 else
6276 n_sets = 1;
6278 else if (GET_CODE (x) == PARALLEL)
6280 register int lim = XVECLEN (x, 0);
6282 sets = (struct set *) alloca (lim * sizeof (struct set));
6284 /* Find all regs explicitly clobbered in this insn,
6285 and ensure they are not replaced with any other regs
6286 elsewhere in this insn.
6287 When a reg that is clobbered is also used for input,
6288 we should presume that that is for a reason,
6289 and we should not substitute some other register
6290 which is not supposed to be clobbered.
6291 Therefore, this loop cannot be merged into the one below
6292 because a CALL may precede a CLOBBER and refer to the
6293 value clobbered. We must not let a canonicalization do
6294 anything in that case. */
6295 for (i = 0; i < lim; i++)
6297 register rtx y = XVECEXP (x, 0, i);
6298 if (GET_CODE (y) == CLOBBER)
6300 rtx clobbered = XEXP (y, 0);
6302 if (GET_CODE (clobbered) == REG
6303 || GET_CODE (clobbered) == SUBREG)
6304 invalidate (clobbered, VOIDmode);
6305 else if (GET_CODE (clobbered) == STRICT_LOW_PART
6306 || GET_CODE (clobbered) == ZERO_EXTRACT)
6307 invalidate (XEXP (clobbered, 0), GET_MODE (clobbered));
6311 for (i = 0; i < lim; i++)
6313 register rtx y = XVECEXP (x, 0, i);
6314 if (GET_CODE (y) == SET)
6316 /* As above, we ignore unconditional jumps and call-insns and
6317 ignore the result of apply_change_group. */
6318 if (GET_CODE (SET_SRC (y)) == CALL)
6320 canon_reg (SET_SRC (y), insn);
6321 apply_change_group ();
6322 fold_rtx (SET_SRC (y), insn);
6323 invalidate (SET_DEST (y), VOIDmode);
6325 else if (SET_DEST (y) == pc_rtx
6326 && GET_CODE (SET_SRC (y)) == LABEL_REF)
6328 else
6329 sets[n_sets++].rtl = y;
6331 else if (GET_CODE (y) == CLOBBER)
6333 /* If we clobber memory, take note of that,
6334 and canon the address.
6335 This does nothing when a register is clobbered
6336 because we have already invalidated the reg. */
6337 if (GET_CODE (XEXP (y, 0)) == MEM)
6339 canon_reg (XEXP (y, 0), NULL_RTX);
6340 note_mem_written (XEXP (y, 0), &writes_memory);
6343 else if (GET_CODE (y) == USE
6344 && ! (GET_CODE (XEXP (y, 0)) == REG
6345 && REGNO (XEXP (y, 0)) < FIRST_PSEUDO_REGISTER))
6346 canon_reg (y, NULL_RTX);
6347 else if (GET_CODE (y) == CALL)
6349 /* The result of apply_change_group can be ignored; see
6350 canon_reg. */
6351 canon_reg (y, insn);
6352 apply_change_group ();
6353 fold_rtx (y, insn);
6357 else if (GET_CODE (x) == CLOBBER)
6359 if (GET_CODE (XEXP (x, 0)) == MEM)
6361 canon_reg (XEXP (x, 0), NULL_RTX);
6362 note_mem_written (XEXP (x, 0), &writes_memory);
6366 /* Canonicalize a USE of a pseudo register or memory location. */
6367 else if (GET_CODE (x) == USE
6368 && ! (GET_CODE (XEXP (x, 0)) == REG
6369 && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER))
6370 canon_reg (XEXP (x, 0), NULL_RTX);
6371 else if (GET_CODE (x) == CALL)
6373 /* The result of apply_change_group can be ignored; see canon_reg. */
6374 canon_reg (x, insn);
6375 apply_change_group ();
6376 fold_rtx (x, insn);
6379 /* Store the equivalent value in SRC_EQV, if different, or if the DEST
6380 is a STRICT_LOW_PART. The latter condition is necessary because SRC_EQV
6381 is handled specially for this case, and if it isn't set, then there will
6382 be no equivalence for the destination. */
6383 if (n_sets == 1 && REG_NOTES (insn) != 0
6384 && (tem = find_reg_note (insn, REG_EQUAL, NULL_RTX)) != 0
6385 && (! rtx_equal_p (XEXP (tem, 0), SET_SRC (sets[0].rtl))
6386 || GET_CODE (SET_DEST (sets[0].rtl)) == STRICT_LOW_PART))
6387 src_eqv = canon_reg (XEXP (tem, 0), NULL_RTX);
6389 /* Canonicalize sources and addresses of destinations.
6390 We do this in a separate pass to avoid problems when a MATCH_DUP is
6391 present in the insn pattern. In that case, we want to ensure that
6392 we don't break the duplicate nature of the pattern. So we will replace
6393 both operands at the same time. Otherwise, we would fail to find an
6394 equivalent substitution in the loop calling validate_change below.
6396 We used to suppress canonicalization of DEST if it appears in SRC,
6397 but we don't do this any more. */
6399 for (i = 0; i < n_sets; i++)
6401 rtx dest = SET_DEST (sets[i].rtl);
6402 rtx src = SET_SRC (sets[i].rtl);
6403 rtx new = canon_reg (src, insn);
6404 int insn_code;
6406 if ((GET_CODE (new) == REG && GET_CODE (src) == REG
6407 && ((REGNO (new) < FIRST_PSEUDO_REGISTER)
6408 != (REGNO (src) < FIRST_PSEUDO_REGISTER)))
6409 || (insn_code = recog_memoized (insn)) < 0
6410 || insn_n_dups[insn_code] > 0)
6411 validate_change (insn, &SET_SRC (sets[i].rtl), new, 1);
6412 else
6413 SET_SRC (sets[i].rtl) = new;
6415 if (GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SIGN_EXTRACT)
6417 validate_change (insn, &XEXP (dest, 1),
6418 canon_reg (XEXP (dest, 1), insn), 1);
6419 validate_change (insn, &XEXP (dest, 2),
6420 canon_reg (XEXP (dest, 2), insn), 1);
6423 while (GET_CODE (dest) == SUBREG || GET_CODE (dest) == STRICT_LOW_PART
6424 || GET_CODE (dest) == ZERO_EXTRACT
6425 || GET_CODE (dest) == SIGN_EXTRACT)
6426 dest = XEXP (dest, 0);
6428 if (GET_CODE (dest) == MEM)
6429 canon_reg (dest, insn);
6432 /* Now that we have done all the replacements, we can apply the change
6433 group and see if they all work. Note that this will cause some
6434 canonicalizations that would have worked individually not to be applied
6435 because some other canonicalization didn't work, but this should not
6436 occur often.
6438 The result of apply_change_group can be ignored; see canon_reg. */
6440 apply_change_group ();
6442 /* Set sets[i].src_elt to the class each source belongs to.
6443 Detect assignments from or to volatile things
6444 and set sets[i] to zero so they will be ignored
6445 in the rest of this function.
6447 Nothing in this loop changes the hash table or the register chains. */
6449 for (i = 0; i < n_sets; i++)
6451 register rtx src, dest;
6452 register rtx src_folded;
6453 register struct table_elt *elt = 0, *p;
6454 enum machine_mode mode;
6455 rtx src_eqv_here;
6456 rtx src_const = 0;
6457 rtx src_related = 0;
6458 struct table_elt *src_const_elt = 0;
6459 int src_cost = 10000, src_eqv_cost = 10000, src_folded_cost = 10000;
6460 int src_related_cost = 10000, src_elt_cost = 10000;
6461 /* Set non-zero if we need to call force_const_mem on the
6462 contents of src_folded before using it. */
6463 int src_folded_force_flag = 0;
6465 dest = SET_DEST (sets[i].rtl);
6466 src = SET_SRC (sets[i].rtl);
6468 /* If SRC is a constant that has no machine mode,
6469 hash it with the destination's machine mode.
6470 This way we can keep different modes separate. */
6472 mode = GET_MODE (src) == VOIDmode ? GET_MODE (dest) : GET_MODE (src);
6473 sets[i].mode = mode;
6475 if (src_eqv)
6477 enum machine_mode eqvmode = mode;
6478 if (GET_CODE (dest) == STRICT_LOW_PART)
6479 eqvmode = GET_MODE (SUBREG_REG (XEXP (dest, 0)));
6480 do_not_record = 0;
6481 hash_arg_in_memory = 0;
6482 hash_arg_in_struct = 0;
6483 src_eqv = fold_rtx (src_eqv, insn);
6484 src_eqv_hash = HASH (src_eqv, eqvmode);
6486 /* Find the equivalence class for the equivalent expression. */
6488 if (!do_not_record)
6489 src_eqv_elt = lookup (src_eqv, src_eqv_hash, eqvmode);
6491 src_eqv_volatile = do_not_record;
6492 src_eqv_in_memory = hash_arg_in_memory;
6493 src_eqv_in_struct = hash_arg_in_struct;
6496 /* If this is a STRICT_LOW_PART assignment, src_eqv corresponds to the
6497 value of the INNER register, not the destination. So it is not
6498 a valid substitution for the source. But save it for later. */
6499 if (GET_CODE (dest) == STRICT_LOW_PART)
6500 src_eqv_here = 0;
6501 else
6502 src_eqv_here = src_eqv;
6504 /* Simplify any foldable subexpressions in SRC. Then get the fully-
6505 simplified result, which may not necessarily be valid. */
6506 src_folded = fold_rtx (src, insn);
6508 #if 0
6509 /* ??? This caused bad code to be generated for the m68k port with -O2.
6510 Suppose src is (CONST_INT -1), and that after truncation src_folded
6511 is (CONST_INT 3). Suppose src_folded is then used for src_const.
6512 At the end we will add src and src_const to the same equivalence
6513 class. We now have 3 and -1 on the same equivalence class. This
6514 causes later instructions to be mis-optimized. */
6515 /* If storing a constant in a bitfield, pre-truncate the constant
6516 so we will be able to record it later. */
6517 if (GET_CODE (SET_DEST (sets[i].rtl)) == ZERO_EXTRACT
6518 || GET_CODE (SET_DEST (sets[i].rtl)) == SIGN_EXTRACT)
6520 rtx width = XEXP (SET_DEST (sets[i].rtl), 1);
6522 if (GET_CODE (src) == CONST_INT
6523 && GET_CODE (width) == CONST_INT
6524 && INTVAL (width) < HOST_BITS_PER_WIDE_INT
6525 && (INTVAL (src) & ((HOST_WIDE_INT) (-1) << INTVAL (width))))
6526 src_folded
6527 = GEN_INT (INTVAL (src) & (((HOST_WIDE_INT) 1
6528 << INTVAL (width)) - 1));
6530 #endif
6532 /* Compute SRC's hash code, and also notice if it
6533 should not be recorded at all. In that case,
6534 prevent any further processing of this assignment. */
6535 do_not_record = 0;
6536 hash_arg_in_memory = 0;
6537 hash_arg_in_struct = 0;
6539 sets[i].src = src;
6540 sets[i].src_hash = HASH (src, mode);
6541 sets[i].src_volatile = do_not_record;
6542 sets[i].src_in_memory = hash_arg_in_memory;
6543 sets[i].src_in_struct = hash_arg_in_struct;
6545 /* If SRC is a MEM, there is a REG_EQUIV note for SRC, and DEST is
6546 a pseudo that is set more than once, do not record SRC. Using
6547 SRC as a replacement for anything else will be incorrect in that
6548 situation. Note that this usually occurs only for stack slots,
6549 in which case all the RTL would be referring to SRC, so we don't
6550 lose any optimization opportunities by not having SRC in the
6551 hash table. */
6553 if (GET_CODE (src) == MEM
6554 && find_reg_note (insn, REG_EQUIV, src) != 0
6555 && GET_CODE (dest) == REG
6556 && REGNO (dest) >= FIRST_PSEUDO_REGISTER
6557 && REG_N_SETS (REGNO (dest)) != 1)
6558 sets[i].src_volatile = 1;
6560 #if 0
6561 /* It is no longer clear why we used to do this, but it doesn't
6562 appear to still be needed. So let's try without it since this
6563 code hurts cse'ing widened ops. */
6564 /* If source is a perverse subreg (such as QI treated as an SI),
6565 treat it as volatile. It may do the work of an SI in one context
6566 where the extra bits are not being used, but cannot replace an SI
6567 in general. */
6568 if (GET_CODE (src) == SUBREG
6569 && (GET_MODE_SIZE (GET_MODE (src))
6570 > GET_MODE_SIZE (GET_MODE (SUBREG_REG (src)))))
6571 sets[i].src_volatile = 1;
6572 #endif
6574 /* Locate all possible equivalent forms for SRC. Try to replace
6575 SRC in the insn with each cheaper equivalent.
6577 We have the following types of equivalents: SRC itself, a folded
6578 version, a value given in a REG_EQUAL note, or a value related
6579 to a constant.
6581 Each of these equivalents may be part of an additional class
6582 of equivalents (if more than one is in the table, they must be in
6583 the same class; we check for this).
6585 If the source is volatile, we don't do any table lookups.
6587 We note any constant equivalent for possible later use in a
6588 REG_NOTE. */
6590 if (!sets[i].src_volatile)
6591 elt = lookup (src, sets[i].src_hash, mode);
6593 sets[i].src_elt = elt;
6595 if (elt && src_eqv_here && src_eqv_elt)
6597 if (elt->first_same_value != src_eqv_elt->first_same_value)
6599 /* The REG_EQUAL is indicating that two formerly distinct
6600 classes are now equivalent. So merge them. */
6601 merge_equiv_classes (elt, src_eqv_elt);
6602 src_eqv_hash = HASH (src_eqv, elt->mode);
6603 src_eqv_elt = lookup (src_eqv, src_eqv_hash, elt->mode);
6606 src_eqv_here = 0;
6609 else if (src_eqv_elt)
6610 elt = src_eqv_elt;
6612 /* Try to find a constant somewhere and record it in `src_const'.
6613 Record its table element, if any, in `src_const_elt'. Look in
6614 any known equivalences first. (If the constant is not in the
6615 table, also set `sets[i].src_const_hash'). */
6616 if (elt)
6617 for (p = elt->first_same_value; p; p = p->next_same_value)
6618 if (p->is_const)
6620 src_const = p->exp;
6621 src_const_elt = elt;
6622 break;
6625 if (src_const == 0
6626 && (CONSTANT_P (src_folded)
6627 /* Consider (minus (label_ref L1) (label_ref L2)) as
6628 "constant" here so we will record it. This allows us
6629 to fold switch statements when an ADDR_DIFF_VEC is used. */
6630 || (GET_CODE (src_folded) == MINUS
6631 && GET_CODE (XEXP (src_folded, 0)) == LABEL_REF
6632 && GET_CODE (XEXP (src_folded, 1)) == LABEL_REF)))
6633 src_const = src_folded, src_const_elt = elt;
6634 else if (src_const == 0 && src_eqv_here && CONSTANT_P (src_eqv_here))
6635 src_const = src_eqv_here, src_const_elt = src_eqv_elt;
6637 /* If we don't know if the constant is in the table, get its
6638 hash code and look it up. */
6639 if (src_const && src_const_elt == 0)
6641 sets[i].src_const_hash = HASH (src_const, mode);
6642 src_const_elt = lookup (src_const, sets[i].src_const_hash, mode);
6645 sets[i].src_const = src_const;
6646 sets[i].src_const_elt = src_const_elt;
6648 /* If the constant and our source are both in the table, mark them as
6649 equivalent. Otherwise, if a constant is in the table but the source
6650 isn't, set ELT to it. */
6651 if (src_const_elt && elt
6652 && src_const_elt->first_same_value != elt->first_same_value)
6653 merge_equiv_classes (elt, src_const_elt);
6654 else if (src_const_elt && elt == 0)
6655 elt = src_const_elt;
6657 /* See if there is a register linearly related to a constant
6658 equivalent of SRC. */
6659 if (src_const
6660 && (GET_CODE (src_const) == CONST
6661 || (src_const_elt && src_const_elt->related_value != 0)))
6663 src_related = use_related_value (src_const, src_const_elt);
6664 if (src_related)
6666 struct table_elt *src_related_elt
6667 = lookup (src_related, HASH (src_related, mode), mode);
6668 if (src_related_elt && elt)
6670 if (elt->first_same_value
6671 != src_related_elt->first_same_value)
6672 /* This can occur when we previously saw a CONST
6673 involving a SYMBOL_REF and then see the SYMBOL_REF
6674 twice. Merge the involved classes. */
6675 merge_equiv_classes (elt, src_related_elt);
6677 src_related = 0;
6678 src_related_elt = 0;
6680 else if (src_related_elt && elt == 0)
6681 elt = src_related_elt;
6685 /* See if we have a CONST_INT that is already in a register in a
6686 wider mode. */
6688 if (src_const && src_related == 0 && GET_CODE (src_const) == CONST_INT
6689 && GET_MODE_CLASS (mode) == MODE_INT
6690 && GET_MODE_BITSIZE (mode) < BITS_PER_WORD)
6692 enum machine_mode wider_mode;
6694 for (wider_mode = GET_MODE_WIDER_MODE (mode);
6695 GET_MODE_BITSIZE (wider_mode) <= BITS_PER_WORD
6696 && src_related == 0;
6697 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
6699 struct table_elt *const_elt
6700 = lookup (src_const, HASH (src_const, wider_mode), wider_mode);
6702 if (const_elt == 0)
6703 continue;
6705 for (const_elt = const_elt->first_same_value;
6706 const_elt; const_elt = const_elt->next_same_value)
6707 if (GET_CODE (const_elt->exp) == REG)
6709 src_related = gen_lowpart_if_possible (mode,
6710 const_elt->exp);
6711 break;
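/* Illustrative sketch (hypothetical registers): when storing
   (const_int 3) into a QImode destination, if (reg:SI 70) is already
   known to hold (const_int 3), the QImode lowpart of (reg:SI 70)
   becomes src_related; a subreg copy is often cheaper than
   re-loading the constant.  */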
6716 /* Another possibility is that we have an AND with a constant in
6717 a mode narrower than a word. If so, it might have been generated
6718 as part of an "if" which would narrow the AND. If we already
6719 have done the AND in a wider mode, we can use a SUBREG of that
6720 value. */
6722 if (flag_expensive_optimizations && ! src_related
6723 && GET_CODE (src) == AND && GET_CODE (XEXP (src, 1)) == CONST_INT
6724 && GET_MODE_SIZE (mode) < UNITS_PER_WORD)
6726 enum machine_mode tmode;
6727 rtx new_and = gen_rtx (AND, VOIDmode, NULL_RTX, XEXP (src, 1));
6729 for (tmode = GET_MODE_WIDER_MODE (mode);
6730 GET_MODE_SIZE (tmode) <= UNITS_PER_WORD;
6731 tmode = GET_MODE_WIDER_MODE (tmode))
6733 rtx inner = gen_lowpart_if_possible (tmode, XEXP (src, 0));
6734 struct table_elt *larger_elt;
6736 if (inner)
6738 PUT_MODE (new_and, tmode);
6739 XEXP (new_and, 0) = inner;
6740 larger_elt = lookup (new_and, HASH (new_and, tmode), tmode);
6741 if (larger_elt == 0)
6742 continue;
6744 for (larger_elt = larger_elt->first_same_value;
6745 larger_elt; larger_elt = larger_elt->next_same_value)
6746 if (GET_CODE (larger_elt->exp) == REG)
6748 src_related
6749 = gen_lowpart_if_possible (mode, larger_elt->exp);
6750 break;
6753 if (src_related)
6754 break;
6759 #ifdef LOAD_EXTEND_OP
6760 /* See if a MEM has already been loaded with a widening operation;
6761 if it has, we can use a subreg of that. Many CISC machines
6762 also have such operations, but this is only likely to be
6763 beneficial on these machines. */
6765 if (flag_expensive_optimizations && src_related == 0
6766 && (GET_MODE_SIZE (mode) < UNITS_PER_WORD)
6767 && GET_MODE_CLASS (mode) == MODE_INT
6768 && GET_CODE (src) == MEM && ! do_not_record
6769 && LOAD_EXTEND_OP (mode) != NIL)
6771 enum machine_mode tmode;
6773 /* Set what we are trying to extend and the operation it might
6774 have been extended with. */
6775 PUT_CODE (memory_extend_rtx, LOAD_EXTEND_OP (mode));
6776 XEXP (memory_extend_rtx, 0) = src;
6778 for (tmode = GET_MODE_WIDER_MODE (mode);
6779 GET_MODE_SIZE (tmode) <= UNITS_PER_WORD;
6780 tmode = GET_MODE_WIDER_MODE (tmode))
6782 struct table_elt *larger_elt;
6784 PUT_MODE (memory_extend_rtx, tmode);
6785 larger_elt = lookup (memory_extend_rtx,
6786 HASH (memory_extend_rtx, tmode), tmode);
6787 if (larger_elt == 0)
6788 continue;
6790 for (larger_elt = larger_elt->first_same_value;
6791 larger_elt; larger_elt = larger_elt->next_same_value)
6792 if (GET_CODE (larger_elt->exp) == REG)
6794 src_related = gen_lowpart_if_possible (mode,
6795 larger_elt->exp);
6796 break;
6799 if (src_related)
6800 break;
6803 #endif /* LOAD_EXTEND_OP */
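/* Illustrative sketch (assumes a target whose LOAD_EXTEND_OP (QImode)
   is ZERO_EXTEND): if (reg:SI 70) is already known to hold
   (zero_extend:SI (mem:QI X)), a later QImode load from X can use
   the QImode lowpart of (reg:SI 70) instead of reading memory
   again.  */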
6805 if (src == src_folded)
6806 src_folded = 0;
6808 /* At this point, ELT, if non-zero, points to a class of expressions
6809 equivalent to the source of this SET and SRC, SRC_EQV, SRC_FOLDED,
6810 and SRC_RELATED, if non-zero, each contain additional equivalent
6811 expressions. Prune these latter expressions by deleting expressions
6812 already in the equivalence class.
6814 Check for an equivalent identical to the destination. If found,
6815 this is the preferred equivalent since it will likely lead to
6816 elimination of the insn. Indicate this by placing it in
6817 `src_related'. */
6819 if (elt) elt = elt->first_same_value;
6820 for (p = elt; p; p = p->next_same_value)
6822 enum rtx_code code = GET_CODE (p->exp);
6824 /* If the expression is not valid, ignore it. Then we do not
6825 have to check for validity below. In most cases, we can use
6826 `rtx_equal_p', since canonicalization has already been done. */
6827 if (code != REG && ! exp_equiv_p (p->exp, p->exp, 1, 0))
6828 continue;
6830 /* Also skip paradoxical subregs, unless that's what we're
6831 looking for. */
6832 if (code == SUBREG
6833 && (GET_MODE_SIZE (GET_MODE (p->exp))
6834 > GET_MODE_SIZE (GET_MODE (SUBREG_REG (p->exp))))
6835 && ! (src != 0
6836 && GET_CODE (src) == SUBREG
6837 && GET_MODE (src) == GET_MODE (p->exp)
6838 && (GET_MODE_SIZE (GET_MODE (SUBREG_REG (src)))
6839 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (p->exp))))))
6840 continue;
6842 if (src && GET_CODE (src) == code && rtx_equal_p (src, p->exp))
6843 src = 0;
6844 else if (src_folded && GET_CODE (src_folded) == code
6845 && rtx_equal_p (src_folded, p->exp))
6846 src_folded = 0;
6847 else if (src_eqv_here && GET_CODE (src_eqv_here) == code
6848 && rtx_equal_p (src_eqv_here, p->exp))
6849 src_eqv_here = 0;
6850 else if (src_related && GET_CODE (src_related) == code
6851 && rtx_equal_p (src_related, p->exp))
6852 src_related = 0;
6854 /* If this is the same as the destination of the insn, we want
6855 to prefer it. Copy it to src_related. The code below will
6856 then give it a negative cost. */
6857 if (GET_CODE (dest) == code && rtx_equal_p (p->exp, dest))
6858 src_related = dest;
6862 /* Find the cheapest valid equivalent, trying all the available
6863 possibilities. Prefer items not in the hash table to ones
6864 that are when they are equal cost. Note that we can never
6865 worsen an insn as the current contents will also succeed.
6866 If we find an equivalent identical to the destination, use it as best,
6867 since this insn will probably be eliminated in that case. */
6868 if (src)
6870 if (rtx_equal_p (src, dest))
6871 src_cost = -1;
6872 else
6873 src_cost = COST (src);
6876 if (src_eqv_here)
6878 if (rtx_equal_p (src_eqv_here, dest))
6879 src_eqv_cost = -1;
6880 else
6881 src_eqv_cost = COST (src_eqv_here);
6884 if (src_folded)
6886 if (rtx_equal_p (src_folded, dest))
6887 src_folded_cost = -1;
6888 else
6889 src_folded_cost = COST (src_folded);
6892 if (src_related)
6894 if (rtx_equal_p (src_related, dest))
6895 src_related_cost = -1;
6896 else
6897 src_related_cost = COST (src_related);
6900 /* If this was an indirect jump insn, a known label will really be
6901 cheaper even though it looks more expensive. */
6902 if (dest == pc_rtx && src_const && GET_CODE (src_const) == LABEL_REF)
6903 src_folded = src_const, src_folded_cost = -1;
6905 /* Terminate loop when replacement made. This must terminate since
6906 the current contents will be tested and will always be valid. */
6907 while (1)
6909 rtx trial;
6911 /* Skip invalid entries. */
6912 while (elt && GET_CODE (elt->exp) != REG
6913 && ! exp_equiv_p (elt->exp, elt->exp, 1, 0))
6914 elt = elt->next_same_value;
6916 /* A paradoxical subreg would be bad here: it'll be the right
6917 size, but later may be adjusted so that the upper bits aren't
6918 what we want. So reject it. */
6919 if (elt != 0
6920 && GET_CODE (elt->exp) == SUBREG
6921 && (GET_MODE_SIZE (GET_MODE (elt->exp))
6922 > GET_MODE_SIZE (GET_MODE (SUBREG_REG (elt->exp))))
6923 /* It is okay, though, if the rtx we're trying to match
6924 will ignore any of the bits we can't predict. */
6925 && ! (src != 0
6926 && GET_CODE (src) == SUBREG
6927 && GET_MODE (src) == GET_MODE (elt->exp)
6928 && (GET_MODE_SIZE (GET_MODE (SUBREG_REG (src)))
6929 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (elt->exp))))))
6931 elt = elt->next_same_value;
6932 continue;
6935 if (elt) src_elt_cost = elt->cost;
6937 /* Find cheapest and skip it for the next time. For items
6938 of equal cost, use this order:
6939 src_folded, src, src_eqv, src_related and hash table entry. */
6940 if (src_folded_cost <= src_cost
6941 && src_folded_cost <= src_eqv_cost
6942 && src_folded_cost <= src_related_cost
6943 && src_folded_cost <= src_elt_cost)
6945 trial = src_folded, src_folded_cost = 10000;
6946 if (src_folded_force_flag)
6947 trial = force_const_mem (mode, trial);
6949 else if (src_cost <= src_eqv_cost
6950 && src_cost <= src_related_cost
6951 && src_cost <= src_elt_cost)
6952 trial = src, src_cost = 10000;
6953 else if (src_eqv_cost <= src_related_cost
6954 && src_eqv_cost <= src_elt_cost)
6955 trial = copy_rtx (src_eqv_here), src_eqv_cost = 10000;
6956 else if (src_related_cost <= src_elt_cost)
6957 trial = copy_rtx (src_related), src_related_cost = 10000;
6958 else
6960 trial = copy_rtx (elt->exp);
6961 elt = elt->next_same_value;
6962 src_elt_cost = 10000;
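/* Illustrative note (costs hypothetical): if src_folded and the
   cheapest table entry both cost 1 while src costs 2, src_folded is
   tried first per the order above; its cost is then reset to 10000
   so that a failed substitution moves on to the remaining
   candidates.  SRC itself always validates, so the loop
   terminates.  */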
6965 /* We don't normally have an insn matching (set (pc) (pc)), so
6966 check for this separately here. We will delete such an
6967 insn below.
6969 Tablejump insns contain a USE of the table, so simply replacing
6970 the operand with the constant won't match. This is simply an
6971 unconditional branch, however, and is therefore valid. Just
6972 insert the substitution here and we will delete and re-emit
6973 the insn later. */
6975 if (n_sets == 1 && dest == pc_rtx
6976 && (trial == pc_rtx
6977 || (GET_CODE (trial) == LABEL_REF
6978 && ! condjump_p (insn))))
6980 /* If TRIAL is a label in front of a jump table, we are
6981 really falling through the switch (this is how casesi
6982 insns work), so we must branch around the table. */
6983 if (GET_CODE (trial) == CODE_LABEL
6984 && NEXT_INSN (trial) != 0
6985 && GET_CODE (NEXT_INSN (trial)) == JUMP_INSN
6986 && (GET_CODE (PATTERN (NEXT_INSN (trial))) == ADDR_DIFF_VEC
6987 || GET_CODE (PATTERN (NEXT_INSN (trial))) == ADDR_VEC))
6989 trial = gen_rtx (LABEL_REF, Pmode, get_label_after (trial));
6991 SET_SRC (sets[i].rtl) = trial;
6992 cse_jumps_altered = 1;
6993 break;
6996 /* Look for a substitution that makes a valid insn. */
6997 else if (validate_change (insn, &SET_SRC (sets[i].rtl), trial, 0))
6999 /* The result of apply_change_group can be ignored; see
7000 canon_reg. */
7002 validate_change (insn, &SET_SRC (sets[i].rtl),
7003 canon_reg (SET_SRC (sets[i].rtl), insn),
7004 1);
7005 apply_change_group ();
7006 break;
7009 /* If we previously found constant pool entries for
7010 constants and this is a constant, try making a
7011 pool entry. Put it in src_folded unless we already have done
7012 this since that is where it likely came from. */
7014 else if (constant_pool_entries_cost
7015 && CONSTANT_P (trial)
7016 && ! (GET_CODE (trial) == CONST
7017 && GET_CODE (XEXP (trial, 0)) == TRUNCATE)
7018 && (src_folded == 0
7019 || (GET_CODE (src_folded) != MEM
7020 && ! src_folded_force_flag))
7021 && GET_MODE_CLASS (mode) != MODE_CC)
7023 src_folded_force_flag = 1;
7024 src_folded = trial;
7025 src_folded_cost = constant_pool_entries_cost;
7029 src = SET_SRC (sets[i].rtl);
7031 /* In general, it is good to have a SET with SET_SRC == SET_DEST.
7032 However, there is an important exception: If both are registers
7033 that are not the head of their equivalence class, replace SET_SRC
7034 with the head of the class. If we do not do this, we will have
7035 both registers live over a portion of the basic block. This way,
7036 their lifetimes will likely abut instead of overlapping. */
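/* Illustrative sketch (hypothetical registers): if (reg 70) heads
   the equivalence class containing (reg 72), and substitution has
   produced (set (reg 72) (reg 72)), we emit (set (reg 72) (reg 70))
   instead, so the two registers' lifetimes abut rather than
   overlap.  */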
7037 if (GET_CODE (dest) == REG
7038 && REGNO_QTY_VALID_P (REGNO (dest))
7039 && qty_mode[reg_qty[REGNO (dest)]] == GET_MODE (dest)
7040 && qty_first_reg[reg_qty[REGNO (dest)]] != REGNO (dest)
7041 && GET_CODE (src) == REG && REGNO (src) == REGNO (dest)
7042 /* Don't do this if the original insn had a hard reg as
7043 SET_SRC. */
7044 && (GET_CODE (sets[i].src) != REG
7045 || REGNO (sets[i].src) >= FIRST_PSEUDO_REGISTER))
7046 /* We can't call canon_reg here because it won't do anything if
7047 SRC is a hard register. */
7049 int first = qty_first_reg[reg_qty[REGNO (src)]];
7051 src = SET_SRC (sets[i].rtl)
7052 = first >= FIRST_PSEUDO_REGISTER ? regno_reg_rtx[first]
7053 : gen_rtx (REG, GET_MODE (src), first);
7055 /* If we had a constant that is cheaper than what we are now
7056 setting SRC to, use that constant. We ignored it when we
7057 thought we could make this into a no-op. */
7058 if (src_const && COST (src_const) < COST (src)
7059 && validate_change (insn, &SET_SRC (sets[i].rtl), src_const, 0))
7060 src = src_const;
7063 /* If we made a change, recompute SRC values. */
7064 if (src != sets[i].src)
7066 do_not_record = 0;
7067 hash_arg_in_memory = 0;
7068 hash_arg_in_struct = 0;
7069 sets[i].src = src;
7070 sets[i].src_hash = HASH (src, mode);
7071 sets[i].src_volatile = do_not_record;
7072 sets[i].src_in_memory = hash_arg_in_memory;
7073 sets[i].src_in_struct = hash_arg_in_struct;
7074 sets[i].src_elt = lookup (src, sets[i].src_hash, mode);
7077 /* If this is a single SET, we are setting a register, and we have an
7078 equivalent constant, we want to add a REG_NOTE. We don't want
7079 to write a REG_EQUAL note for a constant pseudo since verifying that
7080 that pseudo hasn't been eliminated is a pain. Such a note also
7081 won't help anything. */
7082 if (n_sets == 1 && src_const && GET_CODE (dest) == REG
7083 && GET_CODE (src_const) != REG)
7085 tem = find_reg_note (insn, REG_EQUAL, NULL_RTX);
7087 /* Record the actual constant value in a REG_EQUAL note, making
7088 a new one if one does not already exist. */
7089 if (tem)
7090 XEXP (tem, 0) = src_const;
7091 else
7092 REG_NOTES (insn) = gen_rtx (EXPR_LIST, REG_EQUAL,
7093 src_const, REG_NOTES (insn));
7095 /* If storing a constant value in a register that
7096 previously held the constant value 0,
7097 record this fact with a REG_WAS_0 note on this insn.
7099 Note that the *register* is required to have previously held 0,
7100 not just any register in the quantity and we must point to the
7101 insn that set that register to zero.
7103 Rather than track each register individually, we just see if
7104 the last set for this quantity was for this register. */
7106 if (REGNO_QTY_VALID_P (REGNO (dest))
7107 && qty_const[reg_qty[REGNO (dest)]] == const0_rtx)
7109 /* See if we previously had a REG_WAS_0 note. */
7110 rtx note = find_reg_note (insn, REG_WAS_0, NULL_RTX);
7111 rtx const_insn = qty_const_insn[reg_qty[REGNO (dest)]];
7113 if ((tem = single_set (const_insn)) != 0
7114 && rtx_equal_p (SET_DEST (tem), dest))
7116 if (note)
7117 XEXP (note, 0) = const_insn;
7118 else
7119 REG_NOTES (insn) = gen_rtx (INSN_LIST, REG_WAS_0,
7120 const_insn, REG_NOTES (insn));
7125 /* Now deal with the destination. */
7126 do_not_record = 0;
7127 sets[i].inner_dest_loc = &SET_DEST (sets[i].rtl);
7129 /* Look within any SIGN_EXTRACT or ZERO_EXTRACT
7130 to the MEM or REG within it. */
7131 while (GET_CODE (dest) == SIGN_EXTRACT
7132 || GET_CODE (dest) == ZERO_EXTRACT
7133 || GET_CODE (dest) == SUBREG
7134 || GET_CODE (dest) == STRICT_LOW_PART)
7136 sets[i].inner_dest_loc = &XEXP (dest, 0);
7137 dest = XEXP (dest, 0);
7140 sets[i].inner_dest = dest;
7142 if (GET_CODE (dest) == MEM)
7144 dest = fold_rtx (dest, insn);
7146 /* Decide whether we invalidate everything in memory,
7147 or just things at non-fixed places.
7148 Writing a large aggregate must invalidate everything
7149 because we don't know how long it is. */
7150 note_mem_written (dest, &writes_memory);
7153 /* Compute the hash code of the destination now,
7154 before the effects of this instruction are recorded,
7155 since the register values used in the address computation
7156 are those before this instruction. */
7157 sets[i].dest_hash = HASH (dest, mode);
7159 /* Don't enter a bit-field in the hash table
7160 because the value in it after the store
7161 may not equal what was stored, due to truncation. */
7163 if (GET_CODE (SET_DEST (sets[i].rtl)) == ZERO_EXTRACT
7164 || GET_CODE (SET_DEST (sets[i].rtl)) == SIGN_EXTRACT)
7166 rtx width = XEXP (SET_DEST (sets[i].rtl), 1);
7168 if (src_const != 0 && GET_CODE (src_const) == CONST_INT
7169 && GET_CODE (width) == CONST_INT
7170 && INTVAL (width) < HOST_BITS_PER_WIDE_INT
7171 && ! (INTVAL (src_const)
7172 & ((HOST_WIDE_INT) (-1) << INTVAL (width))))
7173 /* Exception: if the value is constant,
7174 and it won't be truncated, record it. */
7176 else
7178 /* This is chosen so that the destination will be invalidated
7179 but no new value will be recorded.
7180 We must invalidate because sometimes constant
7181 values can be recorded for bitfields. */
7182 sets[i].src_elt = 0;
7183 sets[i].src_volatile = 1;
7184 src_eqv = 0;
7185 src_eqv_elt = 0;
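/* Illustrative example (widths hypothetical): storing (const_int 5)
   into a 3-bit ZERO_EXTRACT is recorded, since 5 fits in 3 bits and
   is stored unchanged; storing (const_int 9) is not, because the
   value read back would be 9 truncated to 3 bits, i.e. 1.  */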
7189 /* If only one set in a JUMP_INSN and it is now a no-op, we can delete
7190 the insn. */
7191 else if (n_sets == 1 && dest == pc_rtx && src == pc_rtx)
7193 PUT_CODE (insn, NOTE);
7194 NOTE_LINE_NUMBER (insn) = NOTE_INSN_DELETED;
7195 NOTE_SOURCE_FILE (insn) = 0;
7196 cse_jumps_altered = 1;
7197 /* One less use of the label this insn used to jump to. */
7198 --LABEL_NUSES (JUMP_LABEL (insn));
7199 /* No more processing for this set. */
7200 sets[i].rtl = 0;
7203 /* If this SET is now setting PC to a label, we know it used to
7204 be a conditional or computed branch. So we see if we can follow
7205 it. If it was a computed branch, delete it and re-emit. */
7206 else if (dest == pc_rtx && GET_CODE (src) == LABEL_REF)
7208 rtx p;
7210 /* If this is not in the format for a simple branch and
7211 we are the only SET in it, re-emit it. */
7212 if (! simplejump_p (insn) && n_sets == 1)
7214 rtx new = emit_jump_insn_before (gen_jump (XEXP (src, 0)), insn);
7215 JUMP_LABEL (new) = XEXP (src, 0);
7216 LABEL_NUSES (XEXP (src, 0))++;
7217 delete_insn (insn);
7218 insn = new;
7220 else
7221 /* Otherwise, force rerecognition, since it probably had
7222 a different pattern before.
7223 This shouldn't really be necessary, since whatever
7224 changed the source value above should have done this.
7225 Until the right place is found, might as well do this here. */
7226 INSN_CODE (insn) = -1;
7228 /* Now that we've converted this jump to an unconditional jump,
7229 there is dead code after it. Delete the dead code until we
7230 reach a BARRIER, the end of the function, or a label. Do
7231 not delete NOTEs except for NOTE_INSN_DELETED since later
7232 phases assume these notes are retained. */
7234 p = insn;
7236 while (NEXT_INSN (p) != 0
7237 && GET_CODE (NEXT_INSN (p)) != BARRIER
7238 && GET_CODE (NEXT_INSN (p)) != CODE_LABEL)
7240 if (GET_CODE (NEXT_INSN (p)) != NOTE
7241 || NOTE_LINE_NUMBER (NEXT_INSN (p)) == NOTE_INSN_DELETED)
7242 delete_insn (NEXT_INSN (p));
7243 else
7244 p = NEXT_INSN (p);
7247 /* If we don't have a BARRIER immediately after INSN, put one there.
7248 Much code assumes that there are no NOTEs between a JUMP_INSN and
7249 BARRIER. */
7251 if (NEXT_INSN (insn) == 0
7252 || GET_CODE (NEXT_INSN (insn)) != BARRIER)
7253 emit_barrier_before (NEXT_INSN (insn));
7255 /* We might have two BARRIERs separated by notes. Delete the second
7256 one if so. */
7258 if (p != insn && NEXT_INSN (p) != 0
7259 && GET_CODE (NEXT_INSN (p)) == BARRIER)
7260 delete_insn (NEXT_INSN (p));
7262 cse_jumps_altered = 1;
7263 sets[i].rtl = 0;
7266 /* If destination is volatile, invalidate it and then do no further
7267 processing for this assignment. */
7269 else if (do_not_record)
7271 if (GET_CODE (dest) == REG || GET_CODE (dest) == SUBREG
7272 || GET_CODE (dest) == MEM)
7273 invalidate (dest, VOIDmode);
7274 else if (GET_CODE (dest) == STRICT_LOW_PART
7275 || GET_CODE (dest) == ZERO_EXTRACT)
7276 invalidate (XEXP (dest, 0), GET_MODE (dest));
7277 sets[i].rtl = 0;
7280 if (sets[i].rtl != 0 && dest != SET_DEST (sets[i].rtl))
7281 sets[i].dest_hash = HASH (SET_DEST (sets[i].rtl), mode);
7283 #ifdef HAVE_cc0
7284 /* If setting CC0, record what it was set to, or a constant, if it
7285 is equivalent to a constant. If it is being set to a floating-point
7286 value, make a COMPARE with the appropriate constant of 0. If we
7287 don't do this, later code can interpret this as a test against
7288 const0_rtx, which can cause problems if we try to put it into an
7289 insn as a floating-point operand. */
7290 if (dest == cc0_rtx)
7292 this_insn_cc0 = src_const && mode != VOIDmode ? src_const : src;
7293 this_insn_cc0_mode = mode;
7294 if (FLOAT_MODE_P (mode))
7295 this_insn_cc0 = gen_rtx (COMPARE, VOIDmode, this_insn_cc0,
7296 CONST0_RTX (mode));
7298 #endif
7301 /* Now enter all non-volatile source expressions in the hash table
7302 if they are not already present.
7303 Record their equivalence classes in src_elt.
7304 This way we can insert the corresponding destinations into
7305 the same classes even if the actual sources are no longer in them
7306 (having been invalidated). */
7308 if (src_eqv && src_eqv_elt == 0 && sets[0].rtl != 0 && ! src_eqv_volatile
7309 && ! rtx_equal_p (src_eqv, SET_DEST (sets[0].rtl)))
7311 register struct table_elt *elt;
7312 register struct table_elt *classp = sets[0].src_elt;
7313 rtx dest = SET_DEST (sets[0].rtl);
7314 enum machine_mode eqvmode = GET_MODE (dest);
7316 if (GET_CODE (dest) == STRICT_LOW_PART)
7318 eqvmode = GET_MODE (SUBREG_REG (XEXP (dest, 0)));
7319 classp = 0;
7321 if (insert_regs (src_eqv, classp, 0))
7323 rehash_using_reg (src_eqv);
7324 src_eqv_hash = HASH (src_eqv, eqvmode);
7326 elt = insert (src_eqv, classp, src_eqv_hash, eqvmode);
7327 elt->in_memory = src_eqv_in_memory;
7328 elt->in_struct = src_eqv_in_struct;
7329 src_eqv_elt = elt;
7331 /* Check to see if src_eqv_elt is the same as a set source which
7332 does not yet have an elt, and if so set the elt of the set source
7333 to src_eqv_elt. */
7334 for (i = 0; i < n_sets; i++)
7335 if (sets[i].rtl && sets[i].src_elt == 0
7336 && rtx_equal_p (SET_SRC (sets[i].rtl), src_eqv))
7337 sets[i].src_elt = src_eqv_elt;
7340 for (i = 0; i < n_sets; i++)
7341 if (sets[i].rtl && ! sets[i].src_volatile
7342 && ! rtx_equal_p (SET_SRC (sets[i].rtl), SET_DEST (sets[i].rtl)))
7344 if (GET_CODE (SET_DEST (sets[i].rtl)) == STRICT_LOW_PART)
7346 /* REG_EQUAL in setting a STRICT_LOW_PART
7347 gives an equivalent for the entire destination register,
7348 not just for the subreg being stored in now.
7349 This is a more interesting equivalence, so we arrange later
7350 to treat the entire reg as the destination. */
7351 sets[i].src_elt = src_eqv_elt;
7352 sets[i].src_hash = src_eqv_hash;
7354 else
7356 /* Insert source and constant equivalent into hash table, if not
7357 already present. */
7358 register struct table_elt *classp = src_eqv_elt;
7359 register rtx src = sets[i].src;
7360 register rtx dest = SET_DEST (sets[i].rtl);
7361 enum machine_mode mode
7362 = GET_MODE (src) == VOIDmode ? GET_MODE (dest) : GET_MODE (src);
7364 if (sets[i].src_elt == 0)
7366 register struct table_elt *elt;
7368 /* Note that these insert_regs calls cannot remove
7369 any of the src_elt's, because they would have failed to
7370 match if not still valid. */
7371 if (insert_regs (src, classp, 0))
7373 rehash_using_reg (src);
7374 sets[i].src_hash = HASH (src, mode);
7376 elt = insert (src, classp, sets[i].src_hash, mode);
7377 elt->in_memory = sets[i].src_in_memory;
7378 elt->in_struct = sets[i].src_in_struct;
7379 sets[i].src_elt = classp = elt;
7382 if (sets[i].src_const && sets[i].src_const_elt == 0
7383 && src != sets[i].src_const
7384 && ! rtx_equal_p (sets[i].src_const, src))
7385 sets[i].src_elt = insert (sets[i].src_const, classp,
7386 sets[i].src_const_hash, mode);
7389 else if (sets[i].src_elt == 0)
7390 /* If we did not insert the source into the hash table (e.g., it was
7391 volatile), note the equivalence class for the REG_EQUAL value, if any,
7392 so that the destination goes into that class. */
7393 sets[i].src_elt = src_eqv_elt;
7395 invalidate_from_clobbers (&writes_memory, x);
7397 /* Some registers are invalidated by subroutine calls. Memory is
7398 invalidated by non-constant calls. */
7400 if (GET_CODE (insn) == CALL_INSN)
7402 static struct write_data everything = {0, 1, 1, 1};
7404 if (! CONST_CALL_P (insn))
7405 invalidate_memory (&everything);
7406 invalidate_for_call ();
7409 /* Now invalidate everything set by this instruction.
7410 If a SUBREG or other funny destination is being set,
7411 sets[i].rtl is still nonzero, so here we invalidate the reg
7412 a part of which is being set. */
7414 for (i = 0; i < n_sets; i++)
7415 if (sets[i].rtl)
7417 /* We can't use the inner dest, because the mode associated with
7418 a ZERO_EXTRACT is significant. */
7419 register rtx dest = SET_DEST (sets[i].rtl);
7421 /* Needed for registers to remove the register from its
7422 previous quantity's chain.
7423 Needed for memory if this is a nonvarying address, unless
7424 we have just done an invalidate_memory that covers even those. */
7425 if (GET_CODE (dest) == REG || GET_CODE (dest) == SUBREG
7426 || (GET_CODE (dest) == MEM && ! writes_memory.all
7427 && ! cse_rtx_addr_varies_p (dest)))
7428 invalidate (dest, VOIDmode);
7429 else if (GET_CODE (dest) == STRICT_LOW_PART
7430 || GET_CODE (dest) == ZERO_EXTRACT)
7431 invalidate (XEXP (dest, 0), GET_MODE (dest));
7434 /* Make sure registers mentioned in destinations
7435 are safe for use in an expression to be inserted.
7436 This removes from the hash table
7437 any invalid entry that refers to one of these registers.
7439 We don't care about the return value from mention_regs because
7440 we are going to hash the SET_DEST values unconditionally. */
7442 for (i = 0; i < n_sets; i++)
7443 if (sets[i].rtl && GET_CODE (SET_DEST (sets[i].rtl)) != REG)
7444 mention_regs (SET_DEST (sets[i].rtl));
7446 /* We may have just removed some of the src_elt's from the hash table.
7447 So replace each one with the current head of the same class. */
7449 for (i = 0; i < n_sets; i++)
7450 if (sets[i].rtl)
7452 if (sets[i].src_elt && sets[i].src_elt->first_same_value == 0)
7453 /* If elt was removed, find current head of same class,
7454 or 0 if nothing remains of that class. */
7456 register struct table_elt *elt = sets[i].src_elt;
7458 while (elt && elt->prev_same_value)
7459 elt = elt->prev_same_value;
7461 while (elt && elt->first_same_value == 0)
7462 elt = elt->next_same_value;
7463 sets[i].src_elt = elt ? elt->first_same_value : 0;
7467 /* Now insert the destinations into their equivalence classes. */
7469 for (i = 0; i < n_sets; i++)
7470 if (sets[i].rtl)
7472 register rtx dest = SET_DEST (sets[i].rtl);
7473 register struct table_elt *elt;
7475 /* Don't record value if we are not supposed to risk allocating
7476 floating-point values in registers that might be wider than
7477 memory. */
7478 if ((flag_float_store
7479 && GET_CODE (dest) == MEM
7480 && FLOAT_MODE_P (GET_MODE (dest)))
7481 /* Don't record values of destinations set inside a libcall block
7482 since we might delete the libcall. Things should have been set
7483 up so we won't want to reuse such a value, but we play it safe
7484 here. */
7485 || in_libcall_block
7486 /* If we didn't put a REG_EQUAL value or a source into the hash
7487 table, there is no point in recording DEST. */
7488 || sets[i].src_elt == 0
7489 /* If DEST is a paradoxical SUBREG and SRC is a ZERO_EXTEND
7490 or SIGN_EXTEND, don't record DEST since it can cause
7491 some tracking to be wrong.
7493 ??? Think about this more later. */
7494 || (GET_CODE (dest) == SUBREG
7495 && (GET_MODE_SIZE (GET_MODE (dest))
7496 > GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest))))
7497 && (GET_CODE (sets[i].src) == SIGN_EXTEND
7498 || GET_CODE (sets[i].src) == ZERO_EXTEND)))
7499 continue;
7501 /* STRICT_LOW_PART isn't part of the value BEING set,
7502 and neither is the SUBREG inside it.
7503 Note that in this case SETS[I].SRC_ELT is really SRC_EQV_ELT. */
7504 if (GET_CODE (dest) == STRICT_LOW_PART)
7505 dest = SUBREG_REG (XEXP (dest, 0));
7507 if (GET_CODE (dest) == REG || GET_CODE (dest) == SUBREG)
7508 /* Registers must also be inserted into chains for quantities. */
7509 if (insert_regs (dest, sets[i].src_elt, 1))
7511 /* If `insert_regs' changes something, the hash code must be
7512 recalculated. */
7513 rehash_using_reg (dest);
7514 sets[i].dest_hash = HASH (dest, GET_MODE (dest));
7517 elt = insert (dest, sets[i].src_elt,
7518 sets[i].dest_hash, GET_MODE (dest));
7519 elt->in_memory = (GET_CODE (sets[i].inner_dest) == MEM
7520 && (! RTX_UNCHANGING_P (sets[i].inner_dest)
7521 || FIXED_BASE_PLUS_P (XEXP (sets[i].inner_dest,
7522 0))));
7524 if (elt->in_memory)
7526 /* This implicitly assumes a whole struct
7527 need not have MEM_IN_STRUCT_P.
7528 But a whole struct is *supposed* to have MEM_IN_STRUCT_P. */
7529 elt->in_struct = (MEM_IN_STRUCT_P (sets[i].inner_dest)
7530 || sets[i].inner_dest != SET_DEST (sets[i].rtl));
7533 /* If we have (set (subreg:m1 (reg:m2 foo) 0) (bar:m1)), M1 is no
7534 narrower than M2, and both M1 and M2 are the same number of words,
7535 we are also doing (set (reg:m2 foo) (subreg:m2 (bar:m1) 0)) so
7536 make that equivalence as well.
7538 However, BAR may have equivalences for which gen_lowpart_if_possible
7539 will produce a simpler value than gen_lowpart_if_possible applied to
7540 BAR (e.g., if BAR was ZERO_EXTENDed from M2), so we will scan all
7541 BAR's equivalences. If we don't get a simplified form, make
7542 the SUBREG. It will not be used in an equivalence, but will
7543 cause two similar assignments to be detected.
7545 Note the loop below will find SUBREG_REG (DEST) since we have
7546 already entered SRC and DEST of the SET in the table. */
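/* A concrete (made-up) instance, with 4-byte words: from

     (set (subreg:SI (reg:HI 70) 0) (reg:SI 71))

   where SImode and HImode each occupy one word, the loop below also
   records the equivalence

     (reg:HI 70) == (subreg:HI (reg:SI 71) 0)

   by taking the HImode low part of each member of SRC's class. */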
7548 if (GET_CODE (dest) == SUBREG
7549 && (((GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest))) - 1)
7550 / UNITS_PER_WORD)
7551 == (GET_MODE_SIZE (GET_MODE (dest)) - 1) / UNITS_PER_WORD)
7552 && (GET_MODE_SIZE (GET_MODE (dest))
7553 >= GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest))))
7554 && sets[i].src_elt != 0)
7556 enum machine_mode new_mode = GET_MODE (SUBREG_REG (dest));
7557 struct table_elt *elt, *classp = 0;
7559 for (elt = sets[i].src_elt->first_same_value; elt;
7560 elt = elt->next_same_value)
7562 rtx new_src = 0;
7563 unsigned src_hash;
7564 struct table_elt *src_elt;
7566 /* Ignore invalid entries. */
7567 if (GET_CODE (elt->exp) != REG
7568 && ! exp_equiv_p (elt->exp, elt->exp, 1, 0))
7569 continue;
7571 new_src = gen_lowpart_if_possible (new_mode, elt->exp);
7572 if (new_src == 0)
7573 new_src = gen_rtx (SUBREG, new_mode, elt->exp, 0);
7575 src_hash = HASH (new_src, new_mode);
7576 src_elt = lookup (new_src, src_hash, new_mode);
7578 /* Put the new source in the hash table if it isn't
7579 there already. */
7580 if (src_elt == 0)
7582 if (insert_regs (new_src, classp, 0))
7584 rehash_using_reg (new_src);
7585 src_hash = HASH (new_src, new_mode);
7587 src_elt = insert (new_src, classp, src_hash, new_mode);
7588 src_elt->in_memory = elt->in_memory;
7589 src_elt->in_struct = elt->in_struct;
7591 else if (classp && classp != src_elt->first_same_value)
7592 /* Show that two things that we've seen before are
7593 actually the same. */
7594 merge_equiv_classes (src_elt, classp);
7596 classp = src_elt->first_same_value;
7601 /* Special handling for (set REG0 REG1)
7602 where REG0 is the "cheapest", cheaper than REG1.
7603 After cse, REG1 will probably not be used in the sequel,
7604 so (if easily done) change this insn to (set REG1 REG0) and
7605 replace REG1 with REG0 in the previous insn that computed their value.
7606 Then REG1 will become a dead store and won't cloud the situation
7607 for later optimizations.
7609 Do not make this change if REG1 is a hard register, because it will
7610 then be used in the sequel and we may be changing a two-operand insn
7611 into a three-operand insn.
7613 Also do not do this if we are operating on a copy of INSN. */
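/* For example (invented register numbers): given

     (set (reg:SI 65) (mult:SI (reg:SI 66) (reg:SI 67)))    <- PREV
     (set (reg:SI 60) (reg:SI 65))                          <- INSN

   where (reg:SI 60) is the cheaper equivalent, we rewrite this to

     (set (reg:SI 60) (mult:SI (reg:SI 66) (reg:SI 67)))
     (set (reg:SI 65) (reg:SI 60))

   so the second insn usually becomes a dead store. */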
7615 if (n_sets == 1 && sets[0].rtl && GET_CODE (SET_DEST (sets[0].rtl)) == REG
7616 && NEXT_INSN (PREV_INSN (insn)) == insn
7617 && GET_CODE (SET_SRC (sets[0].rtl)) == REG
7618 && REGNO (SET_SRC (sets[0].rtl)) >= FIRST_PSEUDO_REGISTER
7619 && REGNO_QTY_VALID_P (REGNO (SET_SRC (sets[0].rtl)))
7620 && (qty_first_reg[reg_qty[REGNO (SET_SRC (sets[0].rtl))]]
7621 == REGNO (SET_DEST (sets[0].rtl))))
7623 rtx prev = PREV_INSN (insn);
7624 while (prev && GET_CODE (prev) == NOTE)
7625 prev = PREV_INSN (prev);
7627 if (prev && GET_CODE (prev) == INSN && GET_CODE (PATTERN (prev)) == SET
7628 && SET_DEST (PATTERN (prev)) == SET_SRC (sets[0].rtl))
7630 rtx dest = SET_DEST (sets[0].rtl);
7631 rtx note = find_reg_note (prev, REG_EQUIV, NULL_RTX);
7633 validate_change (prev, & SET_DEST (PATTERN (prev)), dest, 1);
7634 validate_change (insn, & SET_DEST (sets[0].rtl),
7635 SET_SRC (sets[0].rtl), 1);
7636 validate_change (insn, & SET_SRC (sets[0].rtl), dest, 1);
7637 apply_change_group ();
7639 /* If REG1 was equivalent to a constant, REG0 is not. */
7640 if (note)
7641 PUT_REG_NOTE_KIND (note, REG_EQUAL);
7643 /* If there was a REG_WAS_0 note on PREV, remove it. Move
7644 any REG_WAS_0 note on INSN to PREV. */
7645 note = find_reg_note (prev, REG_WAS_0, NULL_RTX);
7646 if (note)
7647 remove_note (prev, note);
7649 note = find_reg_note (insn, REG_WAS_0, NULL_RTX);
7650 if (note)
7652 remove_note (insn, note);
7653 XEXP (note, 1) = REG_NOTES (prev);
7654 REG_NOTES (prev) = note;
7657 /* If INSN has a REG_EQUAL note, and this note mentions REG0,
7658 then we must delete it, because the value in REG0 has changed. */
7659 note = find_reg_note (insn, REG_EQUAL, NULL_RTX);
7660 if (note && reg_mentioned_p (dest, XEXP (note, 0)))
7661 remove_note (insn, note);
7665 /* If this is a conditional jump insn, record any known equivalences due to
7666 the condition being tested. */
7668 last_jump_equiv_class = 0;
7669 if (GET_CODE (insn) == JUMP_INSN
7670 && n_sets == 1 && GET_CODE (x) == SET
7671 && GET_CODE (SET_SRC (x)) == IF_THEN_ELSE)
7672 record_jump_equiv (insn, 0);
7674 #ifdef HAVE_cc0
7675 /* If the previous insn set CC0 and this insn no longer references CC0,
7676 delete the previous insn. Here we use the fact that nothing expects CC0
7677 to be valid over an insn, which is true until the final pass. */
7678 if (prev_insn && GET_CODE (prev_insn) == INSN
7679 && (tem = single_set (prev_insn)) != 0
7680 && SET_DEST (tem) == cc0_rtx
7681 && ! reg_mentioned_p (cc0_rtx, x))
7683 PUT_CODE (prev_insn, NOTE);
7684 NOTE_LINE_NUMBER (prev_insn) = NOTE_INSN_DELETED;
7685 NOTE_SOURCE_FILE (prev_insn) = 0;
7688 prev_insn_cc0 = this_insn_cc0;
7689 prev_insn_cc0_mode = this_insn_cc0_mode;
7690 #endif
7692 prev_insn = insn;
7695 /* Store 1 in *WRITES_PTR for those categories of memory ref
7696 that must be invalidated when a store is done to the expression WRITTEN.
7697 If WRITTEN is null, say everything must be invalidated. */
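/* For instance (illustrative only): a push such as

     (set (mem:SI (pre_dec:SI (reg:SI sp))) (reg:SI 70))

   sets only WRITES_PTR->sp; a BLKmode store, or a store through an
   address we cannot analyze, invalidates everything. */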
7699 static void
7700 note_mem_written (written, writes_ptr)
7701 rtx written;
7702 struct write_data *writes_ptr;
7704 static struct write_data everything = {0, 1, 1, 1};
7706 if (written == 0)
7707 *writes_ptr = everything;
7708 else if (GET_CODE (written) == MEM)
7710 /* Pushing or popping the stack invalidates just the stack pointer. */
7711 rtx addr = XEXP (written, 0);
7712 if ((GET_CODE (addr) == PRE_DEC || GET_CODE (addr) == PRE_INC
7713 || GET_CODE (addr) == POST_DEC || GET_CODE (addr) == POST_INC)
7714 && GET_CODE (XEXP (addr, 0)) == REG
7715 && REGNO (XEXP (addr, 0)) == STACK_POINTER_REGNUM)
7717 writes_ptr->sp = 1;
7718 return;
7720 else if (GET_MODE (written) == BLKmode)
7721 *writes_ptr = everything;
7722 else if (cse_rtx_addr_varies_p (written))
7724 /* A varying address that is a sum indicates an array element,
7725 and that's just as good as a structure element
7726 in implying that we need not invalidate scalar variables.
7727 However, we must allow QImode aliasing of scalars, because the
7728 ANSI C standard allows character pointers to alias anything.
7729 We must also allow AND addresses, because they may generate
7730 accesses outside the object being referenced. This is used to
7731 generate aligned addresses from unaligned addresses, for instance,
7732 the alpha storeqi_unaligned pattern. */
7733 if (! ((MEM_IN_STRUCT_P (written)
7734 || GET_CODE (XEXP (written, 0)) == PLUS)
7735 && GET_MODE (written) != QImode
7736 && GET_CODE (XEXP (written, 0)) != AND))
7737 writes_ptr->all = 1;
7738 writes_ptr->nonscalar = 1;
7740 writes_ptr->var = 1;
7744 /* Perform invalidation on the basis of everything about an insn
7745 except for invalidating the actual places that are SET in it.
7746 This includes the places CLOBBERed, and anything that might
7747 alias with something that is SET or CLOBBERed.
7749 W points to the writes_memory for this insn, a struct write_data
7750 saying which kinds of memory references must be invalidated.
7751 X is the pattern of the insn. */
7753 static void
7754 invalidate_from_clobbers (w, x)
7755 struct write_data *w;
7756 rtx x;
7758 /* If W->var is not set, W specifies no action.
7759 If W->all is set, this step gets all memory refs
7760 so they can be ignored in the rest of this function. */
7761 if (w->var)
7762 invalidate_memory (w);
7764 if (w->sp)
7766 if (reg_tick[STACK_POINTER_REGNUM] >= 0)
7767 reg_tick[STACK_POINTER_REGNUM]++;
7769 /* This should be *very* rare. */
7770 if (TEST_HARD_REG_BIT (hard_regs_in_table, STACK_POINTER_REGNUM))
7771 invalidate (stack_pointer_rtx, VOIDmode);
7774 if (GET_CODE (x) == CLOBBER)
7776 rtx ref = XEXP (x, 0);
7778 if (GET_CODE (ref) == REG || GET_CODE (ref) == SUBREG
7779 || (GET_CODE (ref) == MEM && ! w->all))
7780 invalidate (ref, VOIDmode);
7781 else if (GET_CODE (ref) == STRICT_LOW_PART
7782 || GET_CODE (ref) == ZERO_EXTRACT)
7783 invalidate (XEXP (ref, 0), GET_MODE (ref));
7785 else if (GET_CODE (x) == PARALLEL)
7787 register int i;
7788 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
7790 register rtx y = XVECEXP (x, 0, i);
7791 if (GET_CODE (y) == CLOBBER)
7793 rtx ref = XEXP (y, 0);
7794 if (ref)
7796 if (GET_CODE (ref) == REG || GET_CODE (ref) == SUBREG
7797 || (GET_CODE (ref) == MEM && !w->all))
7798 invalidate (ref, VOIDmode);
7799 else if (GET_CODE (ref) == STRICT_LOW_PART
7800 || GET_CODE (ref) == ZERO_EXTRACT)
7801 invalidate (XEXP (ref, 0), GET_MODE (ref));
7808 /* Process X, part of the REG_NOTES of an insn. Look at any REG_EQUAL notes
7809 and replace any registers in them with either an equivalent constant
7810 or the canonical form of the register. If we are inside an address,
7811 only do this if the address remains valid.
7813 OBJECT is 0 except when within a MEM, in which case it is the MEM.
7815 Return the replacement for X. */
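/* Example (hypothetical): if (reg:SI 70) currently holds
   (const_int 4), a note

     (expr_list:REG_EQUAL (plus:SI (reg:SI 70) (reg:SI 71)) ...)

   has (const_int 4) substituted for (reg:SI 70), while (reg:SI 71)
   is replaced by the canonical register of its class. */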
7817 static rtx
7818 cse_process_notes (x, object)
7819 rtx x;
7820 rtx object;
7822 enum rtx_code code = GET_CODE (x);
7823 char *fmt = GET_RTX_FORMAT (code);
7824 int i;
7826 switch (code)
7828 case CONST_INT:
7829 case CONST:
7830 case SYMBOL_REF:
7831 case LABEL_REF:
7832 case CONST_DOUBLE:
7833 case PC:
7834 case CC0:
7835 case LO_SUM:
7836 return x;
7838 case MEM:
7839 XEXP (x, 0) = cse_process_notes (XEXP (x, 0), x);
7840 return x;
7842 case EXPR_LIST:
7843 case INSN_LIST:
7844 if (REG_NOTE_KIND (x) == REG_EQUAL)
7845 XEXP (x, 0) = cse_process_notes (XEXP (x, 0), NULL_RTX);
7846 if (XEXP (x, 1))
7847 XEXP (x, 1) = cse_process_notes (XEXP (x, 1), NULL_RTX);
7848 return x;
7850 case SIGN_EXTEND:
7851 case ZERO_EXTEND:
7852 case SUBREG:
7854 rtx new = cse_process_notes (XEXP (x, 0), object);
7855 /* We don't substitute VOIDmode constants into these rtx,
7856 since they would impede folding. */
7857 if (GET_MODE (new) != VOIDmode)
7858 validate_change (object, &XEXP (x, 0), new, 0);
7859 return x;
7862 case REG:
7863 i = reg_qty[REGNO (x)];
7865 /* Return a constant or a constant register. */
7866 if (REGNO_QTY_VALID_P (REGNO (x))
7867 && qty_const[i] != 0
7868 && (CONSTANT_P (qty_const[i])
7869 || GET_CODE (qty_const[i]) == REG))
7871 rtx new = gen_lowpart_if_possible (GET_MODE (x), qty_const[i]);
7872 if (new)
7873 return new;
7876 /* Otherwise, canonicalize this register. */
7877 return canon_reg (x, NULL_RTX);
7879 default:
7880 break;
7883 for (i = 0; i < GET_RTX_LENGTH (code); i++)
7884 if (fmt[i] == 'e')
7885 validate_change (object, &XEXP (x, i),
7886 cse_process_notes (XEXP (x, i), object), 0);
7888 return x;
7891 /* Find common subexpressions between the end test of a loop and the beginning
7892 of the loop. LOOP_START is the CODE_LABEL at the start of a loop.
7894 Often we have a loop where an expression in the exit test is used
7895 in the body of the loop. For example "while (*p) *q++ = *p++;".
7896 Because of the way we duplicate the loop exit test in front of the loop,
7897 however, we don't detect that common subexpression. This will be caught
7898 when global cse is implemented, but this is quite a common case.
7900 This function handles the most common cases of these common expressions.
7901 It is called after we have processed the basic block ending with the
7902 NOTE_INSN_LOOP_END note that ends a loop and the previous JUMP_INSN
7903 jumps to a label used only once. */
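/* Roughly, the insn stream handled here looks like this (a sketch;
   the details vary):

        copy of the exit test, jumping past the loop   <- made by jump.c
        NOTE_INSN_LOOP_BEG
     L: body insns                                     <- scanned below
        ...
        exit test; conditional jump back to L
        NOTE_INSN_LOOP_END

   The backward scan below merely checks that NOTE_INSN_LOOP_BEG
   immediately precedes the label L (ignoring line-number notes). */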
7905 static void
7906 cse_around_loop (loop_start)
7907 rtx loop_start;
7909 rtx insn;
7910 int i;
7911 struct table_elt *p;
7913 /* If the jump at the end of the loop doesn't go to the start, we don't
7914 do anything. */
7915 for (insn = PREV_INSN (loop_start);
7916 insn && (GET_CODE (insn) == NOTE && NOTE_LINE_NUMBER (insn) >= 0);
7917 insn = PREV_INSN (insn))
7918 ;
7920 if (insn == 0
7921 || GET_CODE (insn) != NOTE
7922 || NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG)
7923 return;
7925 /* If the last insn of the loop (the end test) was an NE comparison,
7926 we will interpret it as an EQ comparison, since we fell through
7927 the loop. Any equivalences resulting from that comparison are
7928 therefore not valid and must be invalidated. */
7929 if (last_jump_equiv_class)
7930 for (p = last_jump_equiv_class->first_same_value; p;
7931 p = p->next_same_value)
7932 if (GET_CODE (p->exp) == MEM || GET_CODE (p->exp) == REG
7933 || (GET_CODE (p->exp) == SUBREG
7934 && GET_CODE (SUBREG_REG (p->exp)) == REG))
7935 invalidate (p->exp, VOIDmode);
7936 else if (GET_CODE (p->exp) == STRICT_LOW_PART
7937 || GET_CODE (p->exp) == ZERO_EXTRACT)
7938 invalidate (XEXP (p->exp, 0), GET_MODE (p->exp));
7940 /* Process insns starting after LOOP_START until we hit a CALL_INSN or
7941 a CODE_LABEL (we could handle a CALL_INSN, but it isn't worth it).
7943 The only thing we do with SET_DEST is invalidate entries, so we
7944 can safely process each SET in order. It is slightly less efficient
7945 to do so, but we only want to handle the most common cases. */
7947 for (insn = NEXT_INSN (loop_start);
7948 GET_CODE (insn) != CALL_INSN && GET_CODE (insn) != CODE_LABEL
7949 && ! (GET_CODE (insn) == NOTE
7950 && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END);
7951 insn = NEXT_INSN (insn))
7953 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
7954 && (GET_CODE (PATTERN (insn)) == SET
7955 || GET_CODE (PATTERN (insn)) == CLOBBER))
7956 cse_set_around_loop (PATTERN (insn), insn, loop_start);
7957 else if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
7958 && GET_CODE (PATTERN (insn)) == PARALLEL)
7959 for (i = XVECLEN (PATTERN (insn), 0) - 1; i >= 0; i--)
7960 if (GET_CODE (XVECEXP (PATTERN (insn), 0, i)) == SET
7961 || GET_CODE (XVECEXP (PATTERN (insn), 0, i)) == CLOBBER)
7962 cse_set_around_loop (XVECEXP (PATTERN (insn), 0, i), insn,
7963 loop_start);
7967 /* Variable used for communications between the next two routines. */
7969 static struct write_data skipped_writes_memory;
7971 /* Process one SET of an insn that was skipped. We ignore CLOBBERs
7972 since they are done elsewhere. This function is called via note_stores. */
7974 static void
7975 invalidate_skipped_set (dest, set)
7976 rtx set;
7977 rtx dest;
7979 if (GET_CODE (dest) == MEM)
7980 note_mem_written (dest, &skipped_writes_memory);
7982 /* There are times when an address can appear varying and be a PLUS
7983 during this scan when it would be a fixed address were we to know
7984 the proper equivalences. So promote "nonscalar" to be "all". */
7985 if (skipped_writes_memory.nonscalar)
7986 skipped_writes_memory.all = 1;
7988 if (GET_CODE (set) == CLOBBER
7989 #ifdef HAVE_cc0
7990 || dest == cc0_rtx
7991 #endif
7992 || dest == pc_rtx)
7993 return;
7995 if (GET_CODE (dest) == REG || GET_CODE (dest) == SUBREG
7996 || (! skipped_writes_memory.all && ! cse_rtx_addr_varies_p (dest)))
7997 invalidate (dest, VOIDmode);
7998 else if (GET_CODE (dest) == STRICT_LOW_PART
7999 || GET_CODE (dest) == ZERO_EXTRACT)
8000 invalidate (XEXP (dest, 0), GET_MODE (dest));
8003 /* Invalidate everything set by the insns from START up to the end of the
8004 function or the next label. This is called when we wish to CSE around
8005 a block that is conditionally executed. */
8007 static void
8008 invalidate_skipped_block (start)
8009 rtx start;
8011 rtx insn;
8012 static struct write_data init = {0, 0, 0, 0};
8013 static struct write_data everything = {0, 1, 1, 1};
8015 for (insn = start; insn && GET_CODE (insn) != CODE_LABEL;
8016 insn = NEXT_INSN (insn))
8018 if (GET_RTX_CLASS (GET_CODE (insn)) != 'i')
8019 continue;
8021 skipped_writes_memory = init;
8023 if (GET_CODE (insn) == CALL_INSN)
8025 invalidate_for_call ();
8026 skipped_writes_memory = everything;
8029 note_stores (PATTERN (insn), invalidate_skipped_set);
8030 invalidate_from_clobbers (&skipped_writes_memory, PATTERN (insn));
8034 /* Used for communication between the following two routines; contains a
8035 value to be checked for modification. */
8037 static rtx cse_check_loop_start_value;
8039 /* If modifying X will modify the value in CSE_CHECK_LOOP_START_VALUE,
8040 indicate that fact by setting CSE_CHECK_LOOP_START_VALUE to 0. */
8042 static void
8043 cse_check_loop_start (x, set)
8044 rtx x;
8045 rtx set;
8047 if (cse_check_loop_start_value == 0
8048 || GET_CODE (x) == CC0 || GET_CODE (x) == PC)
8049 return;
8051 if ((GET_CODE (x) == MEM && GET_CODE (cse_check_loop_start_value) == MEM)
8052 || reg_overlap_mentioned_p (x, cse_check_loop_start_value))
8053 cse_check_loop_start_value = 0;
8056 /* X is a SET or CLOBBER contained in INSN that was found near the start of
8057 a loop that starts with the label at LOOP_START.
8059 If X is a SET, we see if its SET_SRC is currently in our hash table.
8060 If so, we see if it has a value equal to some register used only in the
8061 loop exit code (as marked by jump.c).
8063 If those two conditions are true, we search backwards from the start of
8064 the loop to see if that same value was loaded into a register that still
8065 retains its value at the start of the loop.
8067 If so, we insert an insn after the load to copy the destination of that
8068 load into the equivalent register and (try to) replace our SET_SRC with that
8069 register.
8071 In any event, we invalidate whatever this SET or CLOBBER modifies. */
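/* An invented example: if X is

     (set (reg:SI 80) (mem:SI (reg:SI 70)))

   and (reg:SI 90) is a loop-exit-test register known equal to that
   MEM, and some insn P before the loop already loaded the same MEM
   into a register, we emit a copy of P's destination into
   (reg:SI 90) after P and use (reg:SI 90) as the new SET_SRC. */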
8073 static void
8074 cse_set_around_loop (x, insn, loop_start)
8075 rtx x;
8076 rtx insn;
8077 rtx loop_start;
8079 struct table_elt *src_elt;
8080 static struct write_data init = {0, 0, 0, 0};
8081 struct write_data writes_memory;
8083 writes_memory = init;
8085 /* If this is a SET, see if we can replace SET_SRC, but ignore SETs that
8086 are setting PC or CC0 or whose SET_SRC is already a register. */
8087 if (GET_CODE (x) == SET
8088 && GET_CODE (SET_DEST (x)) != PC && GET_CODE (SET_DEST (x)) != CC0
8089 && GET_CODE (SET_SRC (x)) != REG)
8091 src_elt = lookup (SET_SRC (x),
8092 HASH (SET_SRC (x), GET_MODE (SET_DEST (x))),
8093 GET_MODE (SET_DEST (x)));
8095 if (src_elt)
8096 for (src_elt = src_elt->first_same_value; src_elt;
8097 src_elt = src_elt->next_same_value)
8098 if (GET_CODE (src_elt->exp) == REG && REG_LOOP_TEST_P (src_elt->exp)
8099 && COST (src_elt->exp) < COST (SET_SRC (x)))
8101 rtx p, set;
8103 /* Look for an insn in front of LOOP_START that sets
8104 something in the desired mode to SET_SRC (x) before we hit
8105 a label or CALL_INSN. */
8107 for (p = prev_nonnote_insn (loop_start);
8108 p && GET_CODE (p) != CALL_INSN
8109 && GET_CODE (p) != CODE_LABEL;
8110 p = prev_nonnote_insn (p))
8111 if ((set = single_set (p)) != 0
8112 && GET_CODE (SET_DEST (set)) == REG
8113 && GET_MODE (SET_DEST (set)) == src_elt->mode
8114 && rtx_equal_p (SET_SRC (set), SET_SRC (x)))
8116 /* We now have to ensure that nothing between P
8117 and LOOP_START modified anything referenced in
8118 SET_SRC (x). We know that nothing within the loop
8119 can modify it, or we would have invalidated it in
8120 the hash table. */
8121 rtx q;
8123 cse_check_loop_start_value = SET_SRC (x);
8124 for (q = p; q != loop_start; q = NEXT_INSN (q))
8125 if (GET_RTX_CLASS (GET_CODE (q)) == 'i')
8126 note_stores (PATTERN (q), cse_check_loop_start);
8128 /* If nothing was changed and we can replace our
8129 SET_SRC, add an insn after P to copy its destination
8130 to what we will be replacing SET_SRC with. */
8131 if (cse_check_loop_start_value
8132 && validate_change (insn, &SET_SRC (x),
8133 src_elt->exp, 0))
8134 emit_insn_after (gen_move_insn (src_elt->exp,
8135 SET_DEST (set)),
8136 p);
8137 break;
8142 /* Now invalidate anything modified by X. */
8143 note_mem_written (SET_DEST (x), &writes_memory);
8145 if (writes_memory.var)
8146 invalidate_memory (&writes_memory);
8148 /* See comment on similar code in cse_insn for explanation of these
8149 tests. */
8150 if (GET_CODE (SET_DEST (x)) == REG || GET_CODE (SET_DEST (x)) == SUBREG
8151 || (GET_CODE (SET_DEST (x)) == MEM && ! writes_memory.all
8152 && ! cse_rtx_addr_varies_p (SET_DEST (x))))
8153 invalidate (SET_DEST (x), VOIDmode);
8154 else if (GET_CODE (SET_DEST (x)) == STRICT_LOW_PART
8155 || GET_CODE (SET_DEST (x)) == ZERO_EXTRACT)
8156 invalidate (XEXP (SET_DEST (x), 0), GET_MODE (SET_DEST (x)));
8159 /* Find the end of INSN's basic block and return its range,
8160 the total number of SETs in all the insns of the block, the last insn of the
8161 block, and the branch path.
8163 The branch path indicates which branches should be followed. If a non-zero
8164 path size is specified, the block should be rescanned and a different set
8165 of branches will be taken. The branch path is only used if
8166 FLAG_CSE_FOLLOW_JUMPS or FLAG_CSE_SKIP_BLOCKS is non-zero.
8168 DATA is a pointer to a struct cse_basic_block_data, defined below, that is
8169 used to describe the block. It is filled in with the information about
8170 the current block. The incoming structure's branch path, if any, is used
8171 to construct the output branch path. */
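/* For instance (a sketch): with -fcse-follow-jumps the recorded path
   might be

     data->path[0] = { branch = jump A, status = TAKEN  }
     data->path[1] = { branch = jump B, status = AROUND }

   and on a rescan the last TAKEN entry is flipped to NOT_TAKEN so
   that a different set of branches is explored. */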
8173 void
8174 cse_end_of_basic_block (insn, data, follow_jumps, after_loop, skip_blocks)
8175 rtx insn;
8176 struct cse_basic_block_data *data;
8177 int follow_jumps;
8178 int after_loop;
8179 int skip_blocks;
8181 rtx p = insn, q;
8182 int nsets = 0;
8183 int low_cuid = INSN_CUID (insn), high_cuid = INSN_CUID (insn);
8184 rtx next = GET_RTX_CLASS (GET_CODE (insn)) == 'i' ? insn : next_real_insn (insn);
8185 int path_size = data->path_size;
8186 int path_entry = 0;
8187 int i;
8189 /* Update the previous branch path, if any. If the last branch was
8190 previously TAKEN, mark it NOT_TAKEN. If it was previously NOT_TAKEN,
8191 shorten the path by one and look at the previous branch. We know that
8192 at least one branch must have been taken if PATH_SIZE is non-zero. */
8193 while (path_size > 0)
8195 if (data->path[path_size - 1].status != NOT_TAKEN)
8197 data->path[path_size - 1].status = NOT_TAKEN;
8198 break;
8200 else
8201 path_size--;
8204 /* Scan to end of this basic block. */
8205 while (p && GET_CODE (p) != CODE_LABEL)
8207 /* Don't cse out the end of a loop. This makes a difference
8208 only for the unusual loops that always execute at least once;
8209 all other loops have labels there so we will stop in any case.
8210 Cse'ing out the end of the loop is dangerous because it
8211 might cause an invariant expression inside the loop
8212 to be reused after the end of the loop. This would make it
8213 hard to move the expression out of the loop in loop.c,
8214 especially if it is one of several equivalent expressions
8215 and loop.c would like to eliminate it.
8217 If we are running after loop.c has finished, we can ignore
8218 the NOTE_INSN_LOOP_END. */
8220 if (! after_loop && GET_CODE (p) == NOTE
8221 && NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_END)
8222 break;
8224 /* Don't cse over a call to setjmp; on some machines (e.g., the VAX)
8225 the regs restored by the longjmp come from
8226 a later time than the setjmp. */
8227 if (GET_CODE (p) == NOTE
8228 && NOTE_LINE_NUMBER (p) == NOTE_INSN_SETJMP)
8229 break;
8231 /* A PARALLEL can have lots of SETs in it,
8232 especially if it is really an ASM_OPERANDS. */
8233 if (GET_RTX_CLASS (GET_CODE (p)) == 'i'
8234 && GET_CODE (PATTERN (p)) == PARALLEL)
8235 nsets += XVECLEN (PATTERN (p), 0);
8236 else if (GET_CODE (p) != NOTE)
8237 nsets += 1;
8239 /* Ignore insns made by CSE; they cannot affect the boundaries of
8240 the basic block. */
8242 if (INSN_UID (p) <= max_uid && INSN_CUID (p) > high_cuid)
8243 high_cuid = INSN_CUID (p);
8244 if (INSN_UID (p) <= max_uid && INSN_CUID (p) < low_cuid)
8245 low_cuid = INSN_CUID (p);
8247 /* See if this insn is in our branch path. If it is and we are to
8248 take it, do so. */
8249 if (path_entry < path_size && data->path[path_entry].branch == p)
8251 if (data->path[path_entry].status != NOT_TAKEN)
8252 p = JUMP_LABEL (p);
8254 /* Point to next entry in path, if any. */
8255 path_entry++;
8258 /* If this is a conditional jump, we can follow it if -fcse-follow-jumps
8259 was specified, we haven't reached our maximum path length, there are
8260 insns following the target of the jump, this is the only use of the
8261 jump label, and the target label is preceded by a BARRIER.
8263 Alternatively, we can follow the jump if it branches around a
8264 block of code and there are no other branches into the block.
8265 In this case invalidate_skipped_block will be called to invalidate any
8266 registers set in the block when following the jump. */
8268 else if ((follow_jumps || skip_blocks) && path_size < PATHLENGTH - 1
8269 && GET_CODE (p) == JUMP_INSN
8270 && GET_CODE (PATTERN (p)) == SET
8271 && GET_CODE (SET_SRC (PATTERN (p))) == IF_THEN_ELSE
8272 && LABEL_NUSES (JUMP_LABEL (p)) == 1
8273 && NEXT_INSN (JUMP_LABEL (p)) != 0)
8275 for (q = PREV_INSN (JUMP_LABEL (p)); q; q = PREV_INSN (q))
8276 if ((GET_CODE (q) != NOTE
8277 || NOTE_LINE_NUMBER (q) == NOTE_INSN_LOOP_END
8278 || NOTE_LINE_NUMBER (q) == NOTE_INSN_SETJMP)
8279 && (GET_CODE (q) != CODE_LABEL || LABEL_NUSES (q) != 0))
8280 break;
8282 /* If we ran into a BARRIER, this code is an extension of the
8283 basic block when the branch is taken. */
8284 if (follow_jumps && q != 0 && GET_CODE (q) == BARRIER)
8286 /* Don't allow ourselves to keep walking around an
8287 always-executed loop. */
8288 if (next_real_insn (q) == next)
8290 p = NEXT_INSN (p);
8291 continue;
8294 /* Similarly, don't put a branch in our path more than once. */
8295 for (i = 0; i < path_entry; i++)
8296 if (data->path[i].branch == p)
8297 break;
8299 if (i != path_entry)
8300 break;
8302 data->path[path_entry].branch = p;
8303 data->path[path_entry++].status = TAKEN;
8305 /* This branch now ends our path. It was possible that we
8306 didn't see this branch the last time around (when the
8307 insn in front of the target was a JUMP_INSN that was
8308 turned into a no-op). */
8309 path_size = path_entry;
8311 p = JUMP_LABEL (p);
8312 /* Mark block so we won't scan it again later. */
8313 PUT_MODE (NEXT_INSN (p), QImode);
8315 /* Detect a branch around a block of code. */
8316 else if (skip_blocks && q != 0 && GET_CODE (q) != CODE_LABEL)
8318 register rtx tmp;
8320 if (next_real_insn (q) == next)
8322 p = NEXT_INSN (p);
8323 continue;
8326 for (i = 0; i < path_entry; i++)
8327 if (data->path[i].branch == p)
8328 break;
8330 if (i != path_entry)
8331 break;
8333 /* This is no_labels_between_p (p, q) with an added check for
8334 reaching the end of a function (in case Q precedes P). */
8335 for (tmp = NEXT_INSN (p); tmp && tmp != q; tmp = NEXT_INSN (tmp))
8336 if (GET_CODE (tmp) == CODE_LABEL)
8337 break;
8339 if (tmp == q)
8341 data->path[path_entry].branch = p;
8342 data->path[path_entry++].status = AROUND;
8344 path_size = path_entry;
8346 p = JUMP_LABEL (p);
8347 /* Mark block so we won't scan it again later. */
8348 PUT_MODE (NEXT_INSN (p), QImode);
8352 p = NEXT_INSN (p);
8355 data->low_cuid = low_cuid;
8356 data->high_cuid = high_cuid;
8357 data->nsets = nsets;
8358 data->last = p;
8360 /* If none of the jumps in the path were taken, set our path length to zero
8361 so a rescan won't be done. */
8362 for (i = path_size - 1; i >= 0; i--)
8363 if (data->path[i].status != NOT_TAKEN)
8364 break;
8366 if (i == -1)
8367 data->path_size = 0;
8368 else
8369 data->path_size = path_size;
8371 /* End the current branch path. */
8372 data->path[path_size].branch = 0;
8375 /* Perform cse on the instructions of a function.
8376 F is the first instruction.
8377 NREGS is one plus the highest pseudo-reg number used in the instruction.
8379 AFTER_LOOP is 1 if this is the cse call done after loop optimization
8380 (only if -frerun-cse-after-loop).
8382 Returns 1 if jump_optimize should be redone due to simplifications
8383 in conditional jump instructions. */
8385 int
8386 cse_main (f, nregs, after_loop, file)
8387 rtx f;
8388 int nregs;
8389 int after_loop;
8390 FILE *file;
8392 struct cse_basic_block_data val;
8393 register rtx insn = f;
8394 register int i;
8396 cse_jumps_altered = 0;
8397 recorded_label_ref = 0;
8398 constant_pool_entries_cost = 0;
8399 val.path_size = 0;
8401 init_recog ();
8403 max_reg = nregs;
8405 all_minus_one = (int *) alloca (nregs * sizeof (int));
8406 consec_ints = (int *) alloca (nregs * sizeof (int));
8408 for (i = 0; i < nregs; i++)
8410 all_minus_one[i] = -1;
8411 consec_ints[i] = i;
8414 reg_next_eqv = (int *) alloca (nregs * sizeof (int));
8415 reg_prev_eqv = (int *) alloca (nregs * sizeof (int));
8416 reg_qty = (int *) alloca (nregs * sizeof (int));
8417 reg_in_table = (int *) alloca (nregs * sizeof (int));
8418 reg_tick = (int *) alloca (nregs * sizeof (int));
8420 #ifdef LOAD_EXTEND_OP
8422 /* Allocate scratch rtl here. cse_insn will fill in the memory reference
8423 and change the code and mode as appropriate. */
8424 memory_extend_rtx = gen_rtx (ZERO_EXTEND, VOIDmode, NULL_RTX);
8425 #endif
8427 /* Discard all the free elements of the previous function
8428 since they are allocated on the temporary obstack. */
8429 bzero ((char *) table, sizeof table);
8430 free_element_chain = 0;
8431 n_elements_made = 0;
8433 /* Find the largest uid. */
8435 max_uid = get_max_uid ();
8436 uid_cuid = (int *) alloca ((max_uid + 1) * sizeof (int));
8437 bzero ((char *) uid_cuid, (max_uid + 1) * sizeof (int));
8439 /* Compute the mapping from uids to cuids.
8440 CUIDs are numbers assigned to insns, like uids,
8441 except that cuids increase monotonically through the code.
8442 Don't assign cuids to line-number NOTEs, so that the distance in cuids
8443 between two insns is not affected by -g. */
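/* E.g. (made-up uids): an insn, then a line-number note, then
   another insn receive cuids 1, 1 and 2 respectively, so the note
   does not change the cuid distance between the two insns. */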
8445 for (insn = f, i = 0; insn; insn = NEXT_INSN (insn))
8447 if (GET_CODE (insn) != NOTE
8448 || NOTE_LINE_NUMBER (insn) < 0)
8449 INSN_CUID (insn) = ++i;
8450 else
8451 /* Give a line number note the same cuid as preceding insn. */
8452 INSN_CUID (insn) = i;
8455 /* Initialize which registers are clobbered by calls. */
8457 CLEAR_HARD_REG_SET (regs_invalidated_by_call);
8459 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
8460 if ((call_used_regs[i]
8461 /* Used to check !fixed_regs[i] here, but that isn't safe;
8462 fixed regs are still call-clobbered, and sched can get
8463 confused if they can "live across calls".
8465 The frame pointer is always preserved across calls. The arg
8466 pointer is if it is fixed. The stack pointer usually is, unless
8467 RETURN_POPS_ARGS, in which case an explicit CLOBBER
8468 will be present. If we are generating PIC code, the PIC offset
8469 table register is preserved across calls. */
8471 && i != STACK_POINTER_REGNUM
8472 && i != FRAME_POINTER_REGNUM
8473 #if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
8474 && i != HARD_FRAME_POINTER_REGNUM
8475 #endif
8476 #if ARG_POINTER_REGNUM != FRAME_POINTER_REGNUM
8477 && ! (i == ARG_POINTER_REGNUM && fixed_regs[i])
8478 #endif
8479 #if defined (PIC_OFFSET_TABLE_REGNUM) && !defined (PIC_OFFSET_TABLE_REG_CALL_CLOBBERED)
8480 && ! (i == PIC_OFFSET_TABLE_REGNUM && flag_pic)
8481 #endif
8483 || global_regs[i])
8484 SET_HARD_REG_BIT (regs_invalidated_by_call, i);
8486 /* Loop over basic blocks.
8487 Compute the maximum number of qty's needed for each basic block
8488 (which is 2 for each SET). */
8489 insn = f;
8490 while (insn)
8492 cse_end_of_basic_block (insn, &val, flag_cse_follow_jumps, after_loop,
8493 flag_cse_skip_blocks);
8495 /* If this basic block was already processed or has no sets, skip it. */
8496 if (val.nsets == 0 || GET_MODE (insn) == QImode)
8498 PUT_MODE (insn, VOIDmode);
8499 insn = (val.last ? NEXT_INSN (val.last) : 0);
8500 val.path_size = 0;
8501 continue;
8504 cse_basic_block_start = val.low_cuid;
8505 cse_basic_block_end = val.high_cuid;
8506 max_qty = val.nsets * 2;
8508 if (file)
8509 fprintf (file, ";; Processing block from %d to %d, %d sets.\n",
8510 INSN_UID (insn), val.last ? INSN_UID (val.last) : 0,
8511 val.nsets);
8513 /* Make MAX_QTY bigger to give us room to optimize
8514 past the end of this basic block, if that should prove useful. */
8515 if (max_qty < 500)
8516 max_qty = 500;
8518 max_qty += max_reg;
8520 /* If this basic block is being extended by following certain jumps,
8521 (see `cse_end_of_basic_block'), we reprocess the code from the start.
8522 Otherwise, we start after this basic block. */
8523 if (val.path_size > 0)
8524 cse_basic_block (insn, val.last, val.path, 0);
8525 else
8527 int old_cse_jumps_altered = cse_jumps_altered;
8528 rtx temp;
8530 /* When cse changes a conditional jump to an unconditional
8531 jump, we want to reprocess the block, since it will give
8532 us a new branch path to investigate. */
8533 cse_jumps_altered = 0;
8534 temp = cse_basic_block (insn, val.last, val.path, ! after_loop);
8535 if (cse_jumps_altered == 0
8536 || (flag_cse_follow_jumps == 0 && flag_cse_skip_blocks == 0))
8537 insn = temp;
8539 cse_jumps_altered |= old_cse_jumps_altered;
8542 #ifdef USE_C_ALLOCA
8543 alloca (0);
8544 #endif
8547 /* Tell refers_to_mem_p that qty_const info is not available. */
8548 qty_const = 0;
8550 if (max_elements_made < n_elements_made)
8551 max_elements_made = n_elements_made;
8553 return cse_jumps_altered || recorded_label_ref;
8556 /* Process a single basic block. FROM and TO are the limits of the basic
8557 block. NEXT_BRANCH points to the branch path when following jumps or
8558 a null path when not following jumps.
8560 AROUND_LOOP is non-zero if we are to try to cse around to the start of a
8561 loop. This is true when we are being called for the last time on a
8562 block and this CSE pass is before loop.c. */
8564 static rtx
8565 cse_basic_block (from, to, next_branch, around_loop)
8566 register rtx from, to;
8567 struct branch_path *next_branch;
8568 int around_loop;
8570 register rtx insn;
8571 int to_usage = 0;
8572 int in_libcall_block = 0;
8573 int num_insns = 0;
8575 /* Each of these arrays is undefined before max_reg, so only allocate
8576 the space actually needed and adjust the start below. */
8578 qty_first_reg = (int *) alloca ((max_qty - max_reg) * sizeof (int));
8579 qty_last_reg = (int *) alloca ((max_qty - max_reg) * sizeof (int));
8580 qty_mode = (enum machine_mode *) alloca ((max_qty - max_reg) * sizeof (enum machine_mode));
8581 qty_const = (rtx *) alloca ((max_qty - max_reg) * sizeof (rtx));
8582 qty_const_insn = (rtx *) alloca ((max_qty - max_reg) * sizeof (rtx));
8583 qty_comparison_code
8584 = (enum rtx_code *) alloca ((max_qty - max_reg) * sizeof (enum rtx_code));
8585 qty_comparison_qty = (int *) alloca ((max_qty - max_reg) * sizeof (int));
8586 qty_comparison_const = (rtx *) alloca ((max_qty - max_reg) * sizeof (rtx));
8588 qty_first_reg -= max_reg;
8589 qty_last_reg -= max_reg;
8590 qty_mode -= max_reg;
8591 qty_const -= max_reg;
8592 qty_const_insn -= max_reg;
8593 qty_comparison_code -= max_reg;
8594 qty_comparison_qty -= max_reg;
8595 qty_comparison_const -= max_reg;
8597 new_basic_block ();
8599 /* TO might be a label. If so, protect it from being deleted. */
8600 if (to != 0 && GET_CODE (to) == CODE_LABEL)
8601 ++LABEL_NUSES (to);
8603 for (insn = from; insn != to; insn = NEXT_INSN (insn))
8605 register enum rtx_code code;
8606 int i;
8607 struct table_elt *p, *next;
8609 /* If we have processed 1,000 insns, flush the hash table to avoid
8610 extreme quadratic behavior.
8612 ??? This is a real kludge and needs to be done some other way.
8613 Perhaps for 2.9. */
8614 if (num_insns++ > 1000)
8616 for (i = 0; i < NBUCKETS; i++)
8617 for (p = table[i]; p; p = next)
8619 next = p->next_same_hash;
8621 if (GET_CODE (p->exp) == REG)
8622 invalidate (p->exp, p->mode);
8623 else
8624 remove_from_table (p, i);
8627 num_insns = 0;
8630 /* See if this is a branch that is part of the path. If so, and it is
8631 to be taken, do so. */
8632 if (next_branch->branch == insn)
8634 enum taken status = next_branch++->status;
8635 if (status != NOT_TAKEN)
8637 if (status == TAKEN)
8638 record_jump_equiv (insn, 1);
8639 else
8640 invalidate_skipped_block (NEXT_INSN (insn));
8642 /* Set the last insn as the jump insn; it doesn't affect cc0.
8643 Then follow this branch. */
8644 #ifdef HAVE_cc0
8645 prev_insn_cc0 = 0;
8646 #endif
8647 prev_insn = insn;
8648 insn = JUMP_LABEL (insn);
8649 continue;
8653 code = GET_CODE (insn);
8654 if (GET_MODE (insn) == QImode)
8655 PUT_MODE (insn, VOIDmode);
8657 if (GET_RTX_CLASS (code) == 'i')
8659 /* Process notes first so we have all notes in canonical forms when
8660 looking for duplicate operations. */
8662 if (REG_NOTES (insn))
8663 REG_NOTES (insn) = cse_process_notes (REG_NOTES (insn), NULL_RTX);
8665 /* Track when we are inside a LIBCALL block. Inside such a block,
8666 we do not want to record destinations. The last insn of a
8667 LIBCALL block is not considered to be part of the block, since
8668 its destination is the result of the block and hence should be
8669 recorded. */
8671 if (find_reg_note (insn, REG_LIBCALL, NULL_RTX))
8672 in_libcall_block = 1;
8673 else if (find_reg_note (insn, REG_RETVAL, NULL_RTX))
8674 in_libcall_block = 0;
8676 cse_insn (insn, in_libcall_block);
8679 /* If INSN is now an unconditional jump, skip to the end of our
8680 basic block by pretending that we just did the last insn in the
8681 basic block. If we are jumping to the end of our block, show
8682 that we can have one usage of TO. */
8684 if (simplejump_p (insn))
8686 if (to == 0)
8687 return 0;
8689 if (JUMP_LABEL (insn) == to)
8690 to_usage = 1;
8692 /* Maybe TO was deleted because the jump is unconditional.
8693 If so, there is nothing left in this basic block. */
8694 /* ??? Perhaps it would be smarter to set TO
8695 to whatever follows this insn,
8696 and pretend the basic block had always ended here. */
8697 if (INSN_DELETED_P (to))
8698 break;
8700 insn = PREV_INSN (to);
8703 /* See if it is ok to keep on going past the label
8704 which used to end our basic block. Remember that we incremented
8705 the count of that label, so we decrement it here. If we made
8706 a jump unconditional, TO_USAGE will be one; in that case, we don't
8707 want to count the use in that jump. */
8709 if (to != 0 && NEXT_INSN (insn) == to
8710 && GET_CODE (to) == CODE_LABEL && --LABEL_NUSES (to) == to_usage)
8712 struct cse_basic_block_data val;
8713 rtx prev;
8715 insn = NEXT_INSN (to);
8717 if (LABEL_NUSES (to) == 0)
8718 insn = delete_insn (to);
8720 /* If TO was the last insn in the function, we are done. */
8721 if (insn == 0)
8722 return 0;
8724 /* If TO was preceded by a BARRIER we are done with this block
8725 because it has no continuation. */
8726 prev = prev_nonnote_insn (to);
8727 if (prev && GET_CODE (prev) == BARRIER)
8728 return insn;
8730 /* Find the end of the following block. Note that we won't be
8731 following branches in this case. */
8732 to_usage = 0;
8733 val.path_size = 0;
8734 cse_end_of_basic_block (insn, &val, 0, 0, 0);
8736 /* If the tables we allocated have enough space left
8737 to handle all the SETs in the next basic block,
8738 continue through it. Otherwise, return,
8739 and that block will be scanned individually. */
8740 if (val.nsets * 2 + next_qty > max_qty)
8741 break;
8743 cse_basic_block_start = val.low_cuid;
8744 cse_basic_block_end = val.high_cuid;
8745 to = val.last;
8747 /* Prevent TO from being deleted if it is a label. */
8748 if (to != 0 && GET_CODE (to) == CODE_LABEL)
8749 ++LABEL_NUSES (to);
8751 /* Back up so we process the first insn in the extension. */
8752 insn = PREV_INSN (insn);
8756 if (next_qty > max_qty)
8757 abort ();
8759 /* If we are running before loop.c, we stopped on a NOTE_INSN_LOOP_END, and
8760 the previous insn is the only insn that branches to the head of a loop,
8761 we can cse into the loop. Don't do this if we changed the jump
8762 structure of a loop unless we aren't going to be following jumps. */
8764 if ((cse_jumps_altered == 0
8765 || (flag_cse_follow_jumps == 0 && flag_cse_skip_blocks == 0))
8766 && around_loop && to != 0
8767 && GET_CODE (to) == NOTE && NOTE_LINE_NUMBER (to) == NOTE_INSN_LOOP_END
8768 && GET_CODE (PREV_INSN (to)) == JUMP_INSN
8769 && JUMP_LABEL (PREV_INSN (to)) != 0
8770 && LABEL_NUSES (JUMP_LABEL (PREV_INSN (to))) == 1)
8771 cse_around_loop (JUMP_LABEL (PREV_INSN (to)));
8773 return to ? NEXT_INSN (to) : 0;
8776 /* Count the number of times registers are used (not set) in X.
8777 COUNTS is an array in which we accumulate the count, INCR is how much
8778 we count each register usage.
8780 Don't count a usage of DEST, which is the SET_DEST of a SET which
8781 contains X in its SET_SRC. This is because such a SET does not
8782 modify the liveness of DEST. */
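/* Thus for the invented insn

     (set (reg:SI 70) (plus:SI (reg:SI 70) (const_int 1)))

   the use of (reg:SI 70) inside the PLUS is not counted, since the
   insn does not keep its own destination alive. */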
8784 static void
8785 count_reg_usage (x, counts, dest, incr)
8786 rtx x;
8787 int *counts;
8788 rtx dest;
8789 int incr;
8791 enum rtx_code code;
8792 char *fmt;
8793 int i, j;
8795 if (x == 0)
8796 return;
8798 switch (code = GET_CODE (x))
8800 case REG:
8801 if (x != dest)
8802 counts[REGNO (x)] += incr;
8803 return;
8805 case PC:
8806 case CC0:
8807 case CONST:
8808 case CONST_INT:
8809 case CONST_DOUBLE:
8810 case SYMBOL_REF:
8811 case LABEL_REF:
8812 case CLOBBER:
8813 return;
8815 case SET:
8816 /* Unless we are setting a REG, count everything in SET_DEST. */
8817 if (GET_CODE (SET_DEST (x)) != REG)
8818 count_reg_usage (SET_DEST (x), counts, NULL_RTX, incr);
8820 /* If SRC has side-effects, then we can't delete this insn, so the
8821 usage of SET_DEST inside SRC counts.
8823 ??? Strictly-speaking, we might be preserving this insn
8824 because some other SET has side-effects, but that's hard
8825 to do and can't happen now. */
8826 count_reg_usage (SET_SRC (x), counts,
8827 side_effects_p (SET_SRC (x)) ? NULL_RTX : SET_DEST (x),
8828 incr);
8829 return;
8831 case CALL_INSN:
8832 count_reg_usage (CALL_INSN_FUNCTION_USAGE (x), counts, NULL_RTX, incr);
8834 /* ... falls through ... */
8835 case INSN:
8836 case JUMP_INSN:
8837 count_reg_usage (PATTERN (x), counts, NULL_RTX, incr);
8839 /* Things used in a REG_EQUAL note aren't dead since loop may try to
8840 use them. */
8842 count_reg_usage (REG_NOTES (x), counts, NULL_RTX, incr);
8843 return;
8845 case EXPR_LIST:
8846 case INSN_LIST:
8847 if (REG_NOTE_KIND (x) == REG_EQUAL
8848 || GET_CODE (XEXP (x,0)) == USE)
8849 count_reg_usage (XEXP (x, 0), counts, NULL_RTX, incr);
8850 count_reg_usage (XEXP (x, 1), counts, NULL_RTX, incr);
8851 return;
8853 default:
8854 break;
8857 fmt = GET_RTX_FORMAT (code);
8858 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
8860 if (fmt[i] == 'e')
8861 count_reg_usage (XEXP (x, i), counts, dest, incr);
8862 else if (fmt[i] == 'E')
8863 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
8864 count_reg_usage (XVECEXP (x, i, j), counts, dest, incr);
8868 /* Scan all the insns and delete any that are dead; i.e., they store a register
8869 that is never used or they copy a register to itself.
8871 This is used to remove insns made obviously dead by cse. It improves the
8872 heuristics in loop since it won't try to move dead invariants out of loops
8873 or make givs for dead quantities. The remaining passes of the compilation
8874 are also sped up. */
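/* E.g. (hypothetical): once cse has replaced every use of
   (reg:SI 70), the insn

     (set (reg:SI 70) (plus:SI (reg:SI 71) (const_int 4)))

   stores a register with a use count of zero and is deleted, which
   in turn decrements the count for (reg:SI 71). */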
8876 void
8877 delete_dead_from_cse (insns, nreg)
8878 rtx insns;
8879 int nreg;
8881 int *counts = (int *) alloca (nreg * sizeof (int));
8882 rtx insn, prev;
8883 rtx tem;
8884 int i;
8885 int in_libcall = 0;
8887 /* First count the number of times each register is used. */
8888 bzero ((char *) counts, sizeof (int) * nreg);
8889 for (insn = next_real_insn (insns); insn; insn = next_real_insn (insn))
8890 count_reg_usage (insn, counts, NULL_RTX, 1);
8892 /* Go from the last insn to the first and delete insns that only set unused
8893 registers or copy a register to itself. As we delete an insn, remove
8894 usage counts for registers it uses. */
8895 for (insn = prev_real_insn (get_last_insn ()); insn; insn = prev)
8897 int live_insn = 0;
8899 prev = prev_real_insn (insn);
8901 /* Don't delete any insns that are part of a libcall block.
8902 Flow or loop might get confused if we did that. Remember
8903 that we are scanning backwards. */
8904 if (find_reg_note (insn, REG_RETVAL, NULL_RTX))
8905 in_libcall = 1;
8907 if (in_libcall)
8908 live_insn = 1;
8909 else if (GET_CODE (PATTERN (insn)) == SET)
8911 if (GET_CODE (SET_DEST (PATTERN (insn))) == REG
8912 && SET_DEST (PATTERN (insn)) == SET_SRC (PATTERN (insn)))
8913 ;
8915 #ifdef HAVE_cc0
8916 else if (GET_CODE (SET_DEST (PATTERN (insn))) == CC0
8917 && ! side_effects_p (SET_SRC (PATTERN (insn)))
8918 && ((tem = next_nonnote_insn (insn)) == 0
8919 || GET_RTX_CLASS (GET_CODE (tem)) != 'i'
8920 || ! reg_referenced_p (cc0_rtx, PATTERN (tem))))
8921 ;
8922 #endif
8923 else if (GET_CODE (SET_DEST (PATTERN (insn))) != REG
8924 || REGNO (SET_DEST (PATTERN (insn))) < FIRST_PSEUDO_REGISTER
8925 || counts[REGNO (SET_DEST (PATTERN (insn)))] != 0
8926 || side_effects_p (SET_SRC (PATTERN (insn))))
8927 live_insn = 1;
8929 else if (GET_CODE (PATTERN (insn)) == PARALLEL)
8930 for (i = XVECLEN (PATTERN (insn), 0) - 1; i >= 0; i--)
8932 rtx elt = XVECEXP (PATTERN (insn), 0, i);
8934 if (GET_CODE (elt) == SET)
8936 if (GET_CODE (SET_DEST (elt)) == REG
8937 && SET_DEST (elt) == SET_SRC (elt))
8938 ;
8940 #ifdef HAVE_cc0
8941 else if (GET_CODE (SET_DEST (elt)) == CC0
8942 && ! side_effects_p (SET_SRC (elt))
8943 && ((tem = next_nonnote_insn (insn)) == 0
8944 || GET_RTX_CLASS (GET_CODE (tem)) != 'i'
8945 || ! reg_referenced_p (cc0_rtx, PATTERN (tem))))
8946 ;
8947 #endif
8948 else if (GET_CODE (SET_DEST (elt)) != REG
8949 || REGNO (SET_DEST (elt)) < FIRST_PSEUDO_REGISTER
8950 || counts[REGNO (SET_DEST (elt))] != 0
8951 || side_effects_p (SET_SRC (elt)))
8952 live_insn = 1;
8954 else if (GET_CODE (elt) != CLOBBER && GET_CODE (elt) != USE)
8955 live_insn = 1;
8957 else
8958 live_insn = 1;
8960 /* If this is a dead insn, delete it and show registers in it aren't
8961 being used. */
8963 if (! live_insn)
8965 count_reg_usage (insn, counts, NULL_RTX, -1);
8966 delete_insn (insn);
8969 if (find_reg_note (insn, REG_LIBCALL, NULL_RTX))
8970 in_libcall = 0;