PR c++/54021
[official-gcc.git] / gcc / combine.c
blob a07c046503e3465f904c2bba477c0ae0a5521b1c
1 /* Optimize by combining instructions for GNU compiler.
2 Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
4 2011, 2012 Free Software Foundation, Inc.
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
22 /* This module is essentially the "combiner" phase of the U. of Arizona
23 Portable Optimizer, but redone to work on our list-structured
24 representation for RTL instead of their string representation.
26 The LOG_LINKS of each insn identify the most recent assignment
27 to each REG used in the insn. It is a list of previous insns,
28 each of which contains a SET for a REG that is used in this insn
29 and not used or set in between. LOG_LINKs never cross basic blocks.
30 They were set up by the preceding pass (lifetime analysis).
32 We try to combine each pair of insns joined by a logical link.
33 We also try to combine triples of insns A, B and C when
34 C has a link back to B and B has a link back to A.
36 LOG_LINKS does not have links for use of the CC0. They don't
37 need to, because the insn that sets the CC0 is always immediately
38 before the insn that tests it. So we always regard a branch
39 insn as having a logical link to the preceding insn. The same is true
40 for an insn explicitly using CC0.
42 We check (with use_crosses_set_p) to avoid combining in such a way
43 as to move a computation to a place where its value would be different.
45 Combination is done by mathematically substituting the previous
46 insn(s) values for the regs they set into the expressions in
47 the later insns that refer to these regs. If the result is a valid insn
48 for our target machine, according to the machine description,
49 we install it, delete the earlier insns, and update the data flow
50 information (LOG_LINKS and REG_NOTES) for what we did.
52 There are a few exceptions where the dataflow information isn't
53 completely updated (however this is only a local issue since it is
54 regenerated before the next pass that uses it):
56 - reg_live_length is not updated
57 - reg_n_refs is not adjusted in the rare case when a register is
58 no longer required in a computation
59 - there are extremely rare cases (see distribute_notes) when a
60 REG_DEAD note is lost
61 - a LOG_LINKS entry that refers to an insn with multiple SETs may be
62 removed because there is no way to know which register it was
63 linking
65 To simplify substitution, we combine only when the earlier insn(s)
66 consist of only a single assignment. To simplify updating afterward,
67 we never combine when a subroutine call appears in the middle.
69 Since we do not represent assignments to CC0 explicitly except when that
70 is all an insn does, there is no LOG_LINKS entry in an insn that uses
71 the condition code for the insn that set the condition code.
72 Fortunately, these two insns must be consecutive.
73 Therefore, every JUMP_INSN is taken to have an implicit logical link
74 to the preceding insn. This is not quite right, since non-jumps can
75 also use the condition code; but in practice such insns would not
76 combine anyway. */
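/* As an illustrative sketch only (hypothetical RTL, not taken from any
   particular target): given the linked pair

	(set (reg 116) (const_int 5))
	(set (reg 117) (plus (reg 118) (reg 116)))

   where reg 116 dies in the second insn, combination substitutes the
   constant into the second pattern, yielding

	(set (reg 117) (plus (reg 118) (const_int 5)))

   and, if the machine description recognizes the result, installs it and
   deletes the first insn.  */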
78 #include "config.h"
79 #include "system.h"
80 #include "coretypes.h"
81 #include "tm.h"
82 #include "rtl.h"
83 #include "tree.h"
84 #include "tm_p.h"
85 #include "flags.h"
86 #include "regs.h"
87 #include "hard-reg-set.h"
88 #include "basic-block.h"
89 #include "insn-config.h"
90 #include "function.h"
91 /* Include expr.h after insn-config.h so we get HAVE_conditional_move. */
92 #include "expr.h"
93 #include "insn-attr.h"
94 #include "recog.h"
95 #include "diagnostic-core.h"
96 #include "target.h"
97 #include "optabs.h"
98 #include "insn-codes.h"
99 #include "rtlhooks-def.h"
100 #include "params.h"
101 #include "tree-pass.h"
102 #include "df.h"
103 #include "cgraph.h"
104 #include "obstack.h"
106 /* Number of attempts to combine instructions in this function. */
108 static int combine_attempts;
110 /* Number of attempts that got as far as substitution in this function. */
112 static int combine_merges;
114 /* Number of instructions combined with added SETs in this function. */
116 static int combine_extras;
118 /* Number of instructions combined in this function. */
120 static int combine_successes;
122 /* Totals over entire compilation. */
124 static int total_attempts, total_merges, total_extras, total_successes;
126 /* combine_instructions may try to replace the right hand side of the
127 second instruction with the value of an associated REG_EQUAL note
128 before throwing it at try_combine. That is problematic when there
129 is a REG_DEAD note for a register used in the old right hand side
130 and can cause distribute_notes to do wrong things. This is the
131 second instruction if it has been so modified, null otherwise. */
133 static rtx i2mod;
135 /* When I2MOD is nonnull, this is a copy of the old right hand side. */
137 static rtx i2mod_old_rhs;
139 /* When I2MOD is nonnull, this is a copy of the new right hand side. */
141 static rtx i2mod_new_rhs;
143 typedef struct reg_stat_struct {
144 /* Record last point of death of (hard or pseudo) register n. */
145 rtx last_death;
147 /* Record last point of modification of (hard or pseudo) register n. */
148 rtx last_set;
150 /* The next group of fields allows the recording of the last value assigned
151 to (hard or pseudo) register n. We use this information to see if an
152 operation being processed is redundant given a prior operation performed
153 on the register. For example, an `and' with a constant is redundant if
154 all the zero bits are already known to be turned off.
156 We use an approach similar to that used by cse, but change it in the
157 following ways:
159 (1) We do not want to reinitialize at each label.
160 (2) It is useful, but not critical, to know the actual value assigned
161 to a register. Often just its form is helpful.
163 Therefore, we maintain the following fields:
165 last_set_value the last value assigned
166 last_set_label records the value of label_tick when the
167 register was assigned
168 last_set_table_tick records the value of label_tick when a
169 value using the register is assigned
170 last_set_invalid set to nonzero when it is not valid
171 to use the value of this register in some
172 register's value
174 To understand the usage of these tables, it is important to understand
175 the distinction between the value in last_set_value being valid and
176 the register being validly contained in some other expression in the
177 table.
179 (The next two parameters are out of date).
181 reg_stat[i].last_set_value is valid if it is nonzero, and either
182 reg_n_sets[i] is 1 or reg_stat[i].last_set_label == label_tick.
184 Register I may validly appear in any expression returned for the value
185 of another register if reg_n_sets[i] is 1. It may also appear in the
186 value for register J if reg_stat[j].last_set_invalid is zero, or
187 reg_stat[i].last_set_label < reg_stat[j].last_set_label.
189 If an expression is found in the table containing a register which may
190 not validly appear in an expression, the register is replaced by
191 something that won't match, (clobber (const_int 0)). */
193 /* Record last value assigned to (hard or pseudo) register n. */
195 rtx last_set_value;
197 /* Record the value of label_tick when an expression involving register n
198 is placed in last_set_value. */
200 int last_set_table_tick;
202 /* Record the value of label_tick when the value for register n is placed in
203 last_set_value. */
205 int last_set_label;
207 /* These fields are maintained in parallel with last_set_value and are
208 used to store the mode in which the register was last set, the bits
209 that were known to be zero when it was last set, and the number of
210 sign bits copies it was known to have when it was last set. */
212 unsigned HOST_WIDE_INT last_set_nonzero_bits;
213 char last_set_sign_bit_copies;
214 ENUM_BITFIELD(machine_mode) last_set_mode : 8;
216 /* Set nonzero if references to register n in expressions should not be
217 used. last_set_invalid is set nonzero when this register is being
218 assigned to and last_set_table_tick == label_tick. */
220 char last_set_invalid;
222 /* Some registers that are set more than once and used in more than one
223 basic block are nevertheless always set in similar ways. For example,
224 a QImode register may be loaded from memory in two places on a machine
225 where byte loads zero extend.
227 We record in the following fields if a register has some leading bits
228 that are always equal to the sign bit, and what we know about the
229 nonzero bits of a register, specifically which bits are known to be
230 zero.
232 If an entry is zero, it means that we don't know anything special. */
234 unsigned char sign_bit_copies;
236 unsigned HOST_WIDE_INT nonzero_bits;
238 /* Record the value of the label_tick when the last truncation
239 happened. The field truncated_to_mode is only valid if
240 truncation_label == label_tick. */
242 int truncation_label;
244 /* Record the last truncation seen for this register. If truncation
245 is not a nop to this mode we might be able to save an explicit
246 truncation if we know that value already contains a truncated
247 value. */
249 ENUM_BITFIELD(machine_mode) truncated_to_mode : 8;
250 } reg_stat_type;
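/* Two illustrative examples (assumptions noted inline) of how the fields
   above are used: if a QImode pseudo is only ever loaded on a machine whose
   byte loads zero extend, its nonzero_bits entry records that only the low
   eight bits can be nonzero, so a later `and' with 255 can be recognized as
   redundant; if a pseudo is only ever set from (sign_extend:SI (reg:QI ...))
   on a target where SImode is 32 bits wide, its sign_bit_copies entry is at
   least 25 (the 24 copied bits plus the sign bit itself).  */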
252 DEF_VEC_O(reg_stat_type);
253 DEF_VEC_ALLOC_O(reg_stat_type,heap);
255 static VEC(reg_stat_type,heap) *reg_stat;
257 /* Record the luid of the last insn that invalidated memory
258 (anything that writes memory, and subroutine calls, but not pushes). */
260 static int mem_last_set;
262 /* Record the luid of the last CALL_INSN
263 so we can tell whether a potential combination crosses any calls. */
265 static int last_call_luid;
267 /* When `subst' is called, this is the insn that is being modified
268 (by combining in a previous insn). The PATTERN of this insn
269 is still the old pattern partially modified and it should not be
270 looked at, but this may be used to examine the successors of the insn
271 to judge whether a simplification is valid. */
273 static rtx subst_insn;
275 /* This is the lowest LUID that `subst' is currently dealing with.
276 get_last_value will not return a value if the register was set at or
277 after this LUID. If not for this mechanism, we could get confused if
278 I2 or I1 in try_combine were an insn that used the old value of a register
279 to obtain a new value. In that case, we might erroneously get the
280 new value of the register when we wanted the old one. */
282 static int subst_low_luid;
284 /* This contains any hard registers that are used in newpat; reg_dead_at_p
285 must consider all these registers to be always live. */
287 static HARD_REG_SET newpat_used_regs;
289 /* This is an insn to which a LOG_LINKS entry has been added. If this
290 insn is earlier than I2 or I3, combine should rescan starting at
291 that location. */
293 static rtx added_links_insn;
295 /* Basic block in which we are performing combines. */
296 static basic_block this_basic_block;
297 static bool optimize_this_for_speed_p;
300 /* Length of the currently allocated uid_insn_cost array. */
302 static int max_uid_known;
304 /* The following array records the insn_rtx_cost for every insn
305 in the instruction stream. */
307 static int *uid_insn_cost;
309 /* The following array records the LOG_LINKS for every insn in the
310 instruction stream as struct insn_link pointers. */
312 struct insn_link {
313 rtx insn;
314 struct insn_link *next;
317 static struct insn_link **uid_log_links;
319 #define INSN_COST(INSN) (uid_insn_cost[INSN_UID (INSN)])
320 #define LOG_LINKS(INSN) (uid_log_links[INSN_UID (INSN)])
322 #define FOR_EACH_LOG_LINK(L, INSN) \
323 for ((L) = LOG_LINKS (INSN); (L); (L) = (L)->next)
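/* A minimal usage sketch: to test whether the insn OTHER feeds INSN through
   a recorded link, one can write

	struct insn_link *link;
	FOR_EACH_LOG_LINK (link, insn)
	  if (link->insn == other)
	    break;

   and then check whether LINK is nonnull; this is essentially the test made
   by find_single_use and insn_a_feeds_b below.  */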
325 /* Links for LOG_LINKS are allocated from this obstack. */
327 static struct obstack insn_link_obstack;
329 /* Allocate a link. */
331 static inline struct insn_link *
332 alloc_insn_link (rtx insn, struct insn_link *next)
334 struct insn_link *l
335 = (struct insn_link *) obstack_alloc (&insn_link_obstack,
336 sizeof (struct insn_link));
337 l->insn = insn;
338 l->next = next;
339 return l;
342 /* Incremented for each basic block. */
344 static int label_tick;
346 /* Reset to label_tick for each extended basic block in scanning order. */
348 static int label_tick_ebb_start;
350 /* Mode used to compute significance in reg_stat[].nonzero_bits. It is the
351 largest integer mode that can fit in HOST_BITS_PER_WIDE_INT. */
353 static enum machine_mode nonzero_bits_mode;
355 /* Nonzero when reg_stat[].nonzero_bits and reg_stat[].sign_bit_copies can
356 be safely used. It is zero while computing them and after combine has
357 completed. This former test prevents propagating values based on
358 previously set values, which can be incorrect if a variable is modified
359 in a loop. */
361 static int nonzero_sign_valid;
364 /* Record one modification to rtl structure
365 to be undone by storing old_contents into *where. */
367 enum undo_kind { UNDO_RTX, UNDO_INT, UNDO_MODE, UNDO_LINKS };
369 struct undo
371 struct undo *next;
372 enum undo_kind kind;
373 union { rtx r; int i; enum machine_mode m; struct insn_link *l; } old_contents;
374 union { rtx *r; int *i; struct insn_link **l; } where;
377 /* Record a bunch of changes to be undone, up to MAX_UNDO of them.
378 num_undo says how many are currently recorded.
380 other_insn is nonzero if we have modified some other insn in the process
381 of working on subst_insn. It must be verified too. */
383 struct undobuf
385 struct undo *undos;
386 struct undo *frees;
387 rtx other_insn;
390 static struct undobuf undobuf;
392 /* Number of times the pseudo being substituted for
393 was found and replaced. */
395 static int n_occurrences;
397 static rtx reg_nonzero_bits_for_combine (const_rtx, enum machine_mode, const_rtx,
398 enum machine_mode,
399 unsigned HOST_WIDE_INT,
400 unsigned HOST_WIDE_INT *);
401 static rtx reg_num_sign_bit_copies_for_combine (const_rtx, enum machine_mode, const_rtx,
402 enum machine_mode,
403 unsigned int, unsigned int *);
404 static void do_SUBST (rtx *, rtx);
405 static void do_SUBST_INT (int *, int);
406 static void init_reg_last (void);
407 static void setup_incoming_promotions (rtx);
408 static void set_nonzero_bits_and_sign_copies (rtx, const_rtx, void *);
409 static int cant_combine_insn_p (rtx);
410 static int can_combine_p (rtx, rtx, rtx, rtx, rtx, rtx, rtx *, rtx *);
411 static int combinable_i3pat (rtx, rtx *, rtx, rtx, rtx, int, int, rtx *);
412 static int contains_muldiv (rtx);
413 static rtx try_combine (rtx, rtx, rtx, rtx, int *, rtx);
414 static void undo_all (void);
415 static void undo_commit (void);
416 static rtx *find_split_point (rtx *, rtx, bool);
417 static rtx subst (rtx, rtx, rtx, int, int, int);
418 static rtx combine_simplify_rtx (rtx, enum machine_mode, int, int);
419 static rtx simplify_if_then_else (rtx);
420 static rtx simplify_set (rtx);
421 static rtx simplify_logical (rtx);
422 static rtx expand_compound_operation (rtx);
423 static const_rtx expand_field_assignment (const_rtx);
424 static rtx make_extraction (enum machine_mode, rtx, HOST_WIDE_INT,
425 rtx, unsigned HOST_WIDE_INT, int, int, int);
426 static rtx extract_left_shift (rtx, int);
427 static rtx make_compound_operation (rtx, enum rtx_code);
428 static int get_pos_from_mask (unsigned HOST_WIDE_INT,
429 unsigned HOST_WIDE_INT *);
430 static rtx canon_reg_for_combine (rtx, rtx);
431 static rtx force_to_mode (rtx, enum machine_mode,
432 unsigned HOST_WIDE_INT, int);
433 static rtx if_then_else_cond (rtx, rtx *, rtx *);
434 static rtx known_cond (rtx, enum rtx_code, rtx, rtx);
435 static int rtx_equal_for_field_assignment_p (rtx, rtx);
436 static rtx make_field_assignment (rtx);
437 static rtx apply_distributive_law (rtx);
438 static rtx distribute_and_simplify_rtx (rtx, int);
439 static rtx simplify_and_const_int_1 (enum machine_mode, rtx,
440 unsigned HOST_WIDE_INT);
441 static rtx simplify_and_const_int (rtx, enum machine_mode, rtx,
442 unsigned HOST_WIDE_INT);
443 static int merge_outer_ops (enum rtx_code *, HOST_WIDE_INT *, enum rtx_code,
444 HOST_WIDE_INT, enum machine_mode, int *);
445 static rtx simplify_shift_const_1 (enum rtx_code, enum machine_mode, rtx, int);
446 static rtx simplify_shift_const (rtx, enum rtx_code, enum machine_mode, rtx,
447 int);
448 static int recog_for_combine (rtx *, rtx, rtx *);
449 static rtx gen_lowpart_for_combine (enum machine_mode, rtx);
450 static enum rtx_code simplify_compare_const (enum rtx_code, rtx, rtx *);
451 static enum rtx_code simplify_comparison (enum rtx_code, rtx *, rtx *);
452 static void update_table_tick (rtx);
453 static void record_value_for_reg (rtx, rtx, rtx);
454 static void check_promoted_subreg (rtx, rtx);
455 static void record_dead_and_set_regs_1 (rtx, const_rtx, void *);
456 static void record_dead_and_set_regs (rtx);
457 static int get_last_value_validate (rtx *, rtx, int, int);
458 static rtx get_last_value (const_rtx);
459 static int use_crosses_set_p (const_rtx, int);
460 static void reg_dead_at_p_1 (rtx, const_rtx, void *);
461 static int reg_dead_at_p (rtx, rtx);
462 static void move_deaths (rtx, rtx, int, rtx, rtx *);
463 static int reg_bitfield_target_p (rtx, rtx);
464 static void distribute_notes (rtx, rtx, rtx, rtx, rtx, rtx, rtx);
465 static void distribute_links (struct insn_link *);
466 static void mark_used_regs_combine (rtx);
467 static void record_promoted_value (rtx, rtx);
468 static int unmentioned_reg_p_1 (rtx *, void *);
469 static bool unmentioned_reg_p (rtx, rtx);
470 static int record_truncated_value (rtx *, void *);
471 static void record_truncated_values (rtx *, void *);
472 static bool reg_truncated_to_mode (enum machine_mode, const_rtx);
473 static rtx gen_lowpart_or_truncate (enum machine_mode, rtx);
476 /* It is not safe to use ordinary gen_lowpart in combine.
477 See comments in gen_lowpart_for_combine. */
478 #undef RTL_HOOKS_GEN_LOWPART
479 #define RTL_HOOKS_GEN_LOWPART gen_lowpart_for_combine
481 /* Our implementation of gen_lowpart never emits a new pseudo. */
482 #undef RTL_HOOKS_GEN_LOWPART_NO_EMIT
483 #define RTL_HOOKS_GEN_LOWPART_NO_EMIT gen_lowpart_for_combine
485 #undef RTL_HOOKS_REG_NONZERO_REG_BITS
486 #define RTL_HOOKS_REG_NONZERO_REG_BITS reg_nonzero_bits_for_combine
488 #undef RTL_HOOKS_REG_NUM_SIGN_BIT_COPIES
489 #define RTL_HOOKS_REG_NUM_SIGN_BIT_COPIES reg_num_sign_bit_copies_for_combine
491 #undef RTL_HOOKS_REG_TRUNCATED_TO_MODE
492 #define RTL_HOOKS_REG_TRUNCATED_TO_MODE reg_truncated_to_mode
494 static const struct rtl_hooks combine_rtl_hooks = RTL_HOOKS_INITIALIZER;
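/* With the redefinitions above, RTL_HOOKS_INITIALIZER (from rtlhooks-def.h)
   expands so that combine_rtl_hooks points at combine's own callbacks.
   combine_instructions installs them with `rtl_hooks = combine_rtl_hooks'
   and restores general_rtl_hooks when it finishes, so the rest of the
   compiler is unaffected.  */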
497 /* Try to split PATTERN found in INSN. This returns NULL_RTX if
498 PATTERN cannot be split. Otherwise, it returns an insn sequence.
499 This is a wrapper around split_insns which ensures that the
500 reg_stat vector is made larger if the splitter creates a new
501 register. */
503 static rtx
504 combine_split_insns (rtx pattern, rtx insn)
506 rtx ret;
507 unsigned int nregs;
509 ret = split_insns (pattern, insn);
510 nregs = max_reg_num ();
511 if (nregs > VEC_length (reg_stat_type, reg_stat))
512 VEC_safe_grow_cleared (reg_stat_type, heap, reg_stat, nregs);
513 return ret;
516 /* This is used by find_single_use to locate an rtx in LOC that
517 contains exactly one use of DEST, which is typically either a REG
518 or CC0. It returns a pointer to the innermost rtx expression
519 containing DEST. Appearances of DEST that are being used to
520 totally replace it are not counted. */
522 static rtx *
523 find_single_use_1 (rtx dest, rtx *loc)
525 rtx x = *loc;
526 enum rtx_code code = GET_CODE (x);
527 rtx *result = NULL;
528 rtx *this_result;
529 int i;
530 const char *fmt;
532 switch (code)
534 case CONST_INT:
535 case CONST:
536 case LABEL_REF:
537 case SYMBOL_REF:
538 case CONST_DOUBLE:
539 case CONST_VECTOR:
540 case CLOBBER:
541 return 0;
543 case SET:
544 /* If the destination is anything other than CC0, PC, a REG or a SUBREG
545 of a REG that occupies all of the REG, the insn uses DEST if
546 it is mentioned in the destination or the source. Otherwise, we
547 just need to check the source. */
548 if (GET_CODE (SET_DEST (x)) != CC0
549 && GET_CODE (SET_DEST (x)) != PC
550 && !REG_P (SET_DEST (x))
551 && ! (GET_CODE (SET_DEST (x)) == SUBREG
552 && REG_P (SUBREG_REG (SET_DEST (x)))
553 && (((GET_MODE_SIZE (GET_MODE (SUBREG_REG (SET_DEST (x))))
554 + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)
555 == ((GET_MODE_SIZE (GET_MODE (SET_DEST (x)))
556 + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD))))
557 break;
559 return find_single_use_1 (dest, &SET_SRC (x));
561 case MEM:
562 case SUBREG:
563 return find_single_use_1 (dest, &XEXP (x, 0));
565 default:
566 break;
569 /* If it wasn't one of the common cases above, check each expression and
570 vector of this code. Look for a unique usage of DEST. */
572 fmt = GET_RTX_FORMAT (code);
573 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
575 if (fmt[i] == 'e')
577 if (dest == XEXP (x, i)
578 || (REG_P (dest) && REG_P (XEXP (x, i))
579 && REGNO (dest) == REGNO (XEXP (x, i))))
580 this_result = loc;
581 else
582 this_result = find_single_use_1 (dest, &XEXP (x, i));
584 if (result == NULL)
585 result = this_result;
586 else if (this_result)
587 /* Duplicate usage. */
588 return NULL;
590 else if (fmt[i] == 'E')
592 int j;
594 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
596 if (XVECEXP (x, i, j) == dest
597 || (REG_P (dest)
598 && REG_P (XVECEXP (x, i, j))
599 && REGNO (XVECEXP (x, i, j)) == REGNO (dest)))
600 this_result = loc;
601 else
602 this_result = find_single_use_1 (dest, &XVECEXP (x, i, j));
604 if (result == NULL)
605 result = this_result;
606 else if (this_result)
607 return NULL;
612 return result;
616 /* See if DEST, produced in INSN, is used only a single time in the
617 sequel. If so, return a pointer to the innermost rtx expression in which
618 it is used.
620 If PLOC is nonzero, *PLOC is set to the insn containing the single use.
622 If DEST is cc0_rtx, we look only at the next insn. In that case, we don't
623 care about REG_DEAD notes or LOG_LINKS.
625 Otherwise, we find the single use by finding an insn that has a
626 LOG_LINKS pointing at INSN and has a REG_DEAD note for DEST. If DEST is
627 only referenced once in that insn, we know that it must be the first
628 and last insn referencing DEST. */
630 static rtx *
631 find_single_use (rtx dest, rtx insn, rtx *ploc)
633 basic_block bb;
634 rtx next;
635 rtx *result;
636 struct insn_link *link;
638 #ifdef HAVE_cc0
639 if (dest == cc0_rtx)
641 next = NEXT_INSN (insn);
642 if (next == 0
643 || (!NONJUMP_INSN_P (next) && !JUMP_P (next)))
644 return 0;
646 result = find_single_use_1 (dest, &PATTERN (next));
647 if (result && ploc)
648 *ploc = next;
649 return result;
651 #endif
653 if (!REG_P (dest))
654 return 0;
656 bb = BLOCK_FOR_INSN (insn);
657 for (next = NEXT_INSN (insn);
658 next && BLOCK_FOR_INSN (next) == bb;
659 next = NEXT_INSN (next))
660 if (INSN_P (next) && dead_or_set_p (next, dest))
662 FOR_EACH_LOG_LINK (link, next)
663 if (link->insn == insn)
664 break;
666 if (link)
668 result = find_single_use_1 (dest, &PATTERN (next));
669 if (ploc)
670 *ploc = next;
671 return result;
675 return 0;
678 /* Substitute NEWVAL, an rtx expression, into INTO, a place in some
679 insn. The substitution can be undone by undo_all. If INTO is already
680 set to NEWVAL, do not record this change. Because computing NEWVAL might
681 also call SUBST, we have to compute it before we put anything into
682 the undo table. */
684 static void
685 do_SUBST (rtx *into, rtx newval)
687 struct undo *buf;
688 rtx oldval = *into;
690 if (oldval == newval)
691 return;
693 /* We'd like to catch as many invalid transformations here as
694 possible. Unfortunately, there are way too many mode changes
695 that are perfectly valid, so we'd waste too much effort for
696 little gain doing the checks here. Focus on catching invalid
697 transformations involving integer constants. */
698 if (GET_MODE_CLASS (GET_MODE (oldval)) == MODE_INT
699 && CONST_INT_P (newval))
701 /* Sanity check that we're replacing oldval with a CONST_INT
702 that is a valid sign-extension for the original mode. */
703 gcc_assert (INTVAL (newval)
704 == trunc_int_for_mode (INTVAL (newval), GET_MODE (oldval)));
706 /* Replacing the operand of a SUBREG or a ZERO_EXTEND with a
707 CONST_INT is not valid, because after the replacement, the
708 original mode would be gone. Unfortunately, we can't tell
709 when do_SUBST is called to replace the operand thereof, so we
710 perform this test on oldval instead, checking whether an
711 invalid replacement took place before we got here. */
712 gcc_assert (!(GET_CODE (oldval) == SUBREG
713 && CONST_INT_P (SUBREG_REG (oldval))));
714 gcc_assert (!(GET_CODE (oldval) == ZERO_EXTEND
715 && CONST_INT_P (XEXP (oldval, 0))));
718 if (undobuf.frees)
719 buf = undobuf.frees, undobuf.frees = buf->next;
720 else
721 buf = XNEW (struct undo);
723 buf->kind = UNDO_RTX;
724 buf->where.r = into;
725 buf->old_contents.r = oldval;
726 *into = newval;
728 buf->next = undobuf.undos, undobuf.undos = buf;
731 #define SUBST(INTO, NEWVAL) do_SUBST(&(INTO), (NEWVAL))
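/* Sketch of typical use: a caller such as simplify_set writes

	SUBST (SET_SRC (x), src);

   which records the old SET_SRC in the undo buffer before installing SRC;
   if the rewritten insn is later rejected, undo_all walks undobuf.undos and
   puts every replaced field back.  */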
733 /* Similar to SUBST, but NEWVAL is an int expression. Note that substitution
734 for a HOST_WIDE_INT value (including CONST_INT) is
735 not safe. */
737 static void
738 do_SUBST_INT (int *into, int newval)
740 struct undo *buf;
741 int oldval = *into;
743 if (oldval == newval)
744 return;
746 if (undobuf.frees)
747 buf = undobuf.frees, undobuf.frees = buf->next;
748 else
749 buf = XNEW (struct undo);
751 buf->kind = UNDO_INT;
752 buf->where.i = into;
753 buf->old_contents.i = oldval;
754 *into = newval;
756 buf->next = undobuf.undos, undobuf.undos = buf;
759 #define SUBST_INT(INTO, NEWVAL) do_SUBST_INT(&(INTO), (NEWVAL))
761 /* Similar to SUBST, but just substitute the mode. This is used when
762 changing the mode of a pseudo-register, so that any other
763 references to the entry in the regno_reg_rtx array will change as
764 well. */
766 static void
767 do_SUBST_MODE (rtx *into, enum machine_mode newval)
769 struct undo *buf;
770 enum machine_mode oldval = GET_MODE (*into);
772 if (oldval == newval)
773 return;
775 if (undobuf.frees)
776 buf = undobuf.frees, undobuf.frees = buf->next;
777 else
778 buf = XNEW (struct undo);
780 buf->kind = UNDO_MODE;
781 buf->where.r = into;
782 buf->old_contents.m = oldval;
783 adjust_reg_mode (*into, newval);
785 buf->next = undobuf.undos, undobuf.undos = buf;
788 #define SUBST_MODE(INTO, NEWVAL) do_SUBST_MODE(&(INTO), (NEWVAL))
790 #ifndef HAVE_cc0
791 /* Similar to SUBST, but NEWVAL is a LOG_LINKS expression. */
793 static void
794 do_SUBST_LINK (struct insn_link **into, struct insn_link *newval)
796 struct undo *buf;
797 struct insn_link * oldval = *into;
799 if (oldval == newval)
800 return;
802 if (undobuf.frees)
803 buf = undobuf.frees, undobuf.frees = buf->next;
804 else
805 buf = XNEW (struct undo);
807 buf->kind = UNDO_LINKS;
808 buf->where.l = into;
809 buf->old_contents.l = oldval;
810 *into = newval;
812 buf->next = undobuf.undos, undobuf.undos = buf;
815 #define SUBST_LINK(oldval, newval) do_SUBST_LINK (&oldval, newval)
816 #endif
818 /* Subroutine of try_combine. Determine whether the replacement patterns
819 NEWPAT, NEWI2PAT and NEWOTHERPAT are cheaper according to insn_rtx_cost
820 than the original sequence I0, I1, I2, I3 and undobuf.other_insn. Note
821 that I0, I1 and/or NEWI2PAT may be NULL_RTX. Similarly, NEWOTHERPAT and
822 undobuf.other_insn may also both be NULL_RTX. Return false if the cost
823 of all the instructions can be estimated and the replacements are more
824 expensive than the original sequence. */
826 static bool
827 combine_validate_cost (rtx i0, rtx i1, rtx i2, rtx i3, rtx newpat,
828 rtx newi2pat, rtx newotherpat)
830 int i0_cost, i1_cost, i2_cost, i3_cost;
831 int new_i2_cost, new_i3_cost;
832 int old_cost, new_cost;
834 /* Lookup the original insn_rtx_costs. */
835 i2_cost = INSN_COST (i2);
836 i3_cost = INSN_COST (i3);
838 if (i1)
840 i1_cost = INSN_COST (i1);
841 if (i0)
843 i0_cost = INSN_COST (i0);
844 old_cost = (i0_cost > 0 && i1_cost > 0 && i2_cost > 0 && i3_cost > 0
845 ? i0_cost + i1_cost + i2_cost + i3_cost : 0);
847 else
849 old_cost = (i1_cost > 0 && i2_cost > 0 && i3_cost > 0
850 ? i1_cost + i2_cost + i3_cost : 0);
851 i0_cost = 0;
854 else
856 old_cost = (i2_cost > 0 && i3_cost > 0) ? i2_cost + i3_cost : 0;
857 i1_cost = i0_cost = 0;
860 /* Calculate the replacement insn_rtx_costs. */
861 new_i3_cost = insn_rtx_cost (newpat, optimize_this_for_speed_p);
862 if (newi2pat)
864 new_i2_cost = insn_rtx_cost (newi2pat, optimize_this_for_speed_p);
865 new_cost = (new_i2_cost > 0 && new_i3_cost > 0)
866 ? new_i2_cost + new_i3_cost : 0;
868 else
870 new_cost = new_i3_cost;
871 new_i2_cost = 0;
874 if (undobuf.other_insn)
876 int old_other_cost, new_other_cost;
878 old_other_cost = INSN_COST (undobuf.other_insn);
879 new_other_cost = insn_rtx_cost (newotherpat, optimize_this_for_speed_p);
880 if (old_other_cost > 0 && new_other_cost > 0)
882 old_cost += old_other_cost;
883 new_cost += new_other_cost;
885 else
886 old_cost = 0;
889 /* Disallow this combination if both new_cost and old_cost are greater than
890 zero, and new_cost is greater than old_cost. */
891 if (old_cost > 0 && new_cost > old_cost)
893 if (dump_file)
895 if (i0)
897 fprintf (dump_file,
898 "rejecting combination of insns %d, %d, %d and %d\n",
899 INSN_UID (i0), INSN_UID (i1), INSN_UID (i2),
900 INSN_UID (i3));
901 fprintf (dump_file, "original costs %d + %d + %d + %d = %d\n",
902 i0_cost, i1_cost, i2_cost, i3_cost, old_cost);
904 else if (i1)
906 fprintf (dump_file,
907 "rejecting combination of insns %d, %d and %d\n",
908 INSN_UID (i1), INSN_UID (i2), INSN_UID (i3));
909 fprintf (dump_file, "original costs %d + %d + %d = %d\n",
910 i1_cost, i2_cost, i3_cost, old_cost);
912 else
914 fprintf (dump_file,
915 "rejecting combination of insns %d and %d\n",
916 INSN_UID (i2), INSN_UID (i3));
917 fprintf (dump_file, "original costs %d + %d = %d\n",
918 i2_cost, i3_cost, old_cost);
921 if (newi2pat)
923 fprintf (dump_file, "replacement costs %d + %d = %d\n",
924 new_i2_cost, new_i3_cost, new_cost);
926 else
927 fprintf (dump_file, "replacement cost %d\n", new_cost);
930 return false;
933 /* Update the uid_insn_cost array with the replacement costs. */
934 INSN_COST (i2) = new_i2_cost;
935 INSN_COST (i3) = new_i3_cost;
936 if (i1)
938 INSN_COST (i1) = 0;
939 if (i0)
940 INSN_COST (i0) = 0;
943 return true;
947 /* Delete any insns that copy a register to itself. */
949 static void
950 delete_noop_moves (void)
952 rtx insn, next;
953 basic_block bb;
955 FOR_EACH_BB (bb)
957 for (insn = BB_HEAD (bb); insn != NEXT_INSN (BB_END (bb)); insn = next)
959 next = NEXT_INSN (insn);
960 if (INSN_P (insn) && noop_move_p (insn))
962 if (dump_file)
963 fprintf (dump_file, "deleting noop move %d\n", INSN_UID (insn));
965 delete_insn_and_edges (insn);
972 /* Fill in log links field for all insns. */
974 static void
975 create_log_links (void)
977 basic_block bb;
978 rtx *next_use, insn;
979 df_ref *def_vec, *use_vec;
981 next_use = XCNEWVEC (rtx, max_reg_num ());
983 /* Pass through each block from the end, recording the uses of each
984 register and establishing log links when def is encountered.
985 Note that we do not clear next_use array in order to save time,
986 so we have to test whether the use is in the same basic block as def.
988 There are a few cases below when we do not consider the definition or
989 usage -- these are taken from what the original flow.c did. Don't ask me why it is
990 done this way; I don't know and if it works, I don't want to know. */
992 FOR_EACH_BB (bb)
994 FOR_BB_INSNS_REVERSE (bb, insn)
996 if (!NONDEBUG_INSN_P (insn))
997 continue;
999 /* Log links are created only once. */
1000 gcc_assert (!LOG_LINKS (insn));
1002 for (def_vec = DF_INSN_DEFS (insn); *def_vec; def_vec++)
1004 df_ref def = *def_vec;
1005 int regno = DF_REF_REGNO (def);
1006 rtx use_insn;
1008 if (!next_use[regno])
1009 continue;
1011 /* Do not consider if it is pre/post modification in MEM. */
1012 if (DF_REF_FLAGS (def) & DF_REF_PRE_POST_MODIFY)
1013 continue;
1015 /* Do not make the log link for the frame pointer. */
1016 if ((regno == FRAME_POINTER_REGNUM
1017 && (! reload_completed || frame_pointer_needed))
1018 #if !HARD_FRAME_POINTER_IS_FRAME_POINTER
1019 || (regno == HARD_FRAME_POINTER_REGNUM
1020 && (! reload_completed || frame_pointer_needed))
1021 #endif
1022 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
1023 || (regno == ARG_POINTER_REGNUM && fixed_regs[regno])
1024 #endif
1026 continue;
1028 use_insn = next_use[regno];
1029 if (BLOCK_FOR_INSN (use_insn) == bb)
1031 /* flow.c claimed:
1033 We don't build a LOG_LINK for hard registers contained
1034 in ASM_OPERANDs. If these registers get replaced,
1035 we might wind up changing the semantics of the insn,
1036 even if reload can make what appear to be valid
1037 assignments later. */
1038 if (regno >= FIRST_PSEUDO_REGISTER
1039 || asm_noperands (PATTERN (use_insn)) < 0)
1041 /* Don't add duplicate links between instructions. */
1042 struct insn_link *links;
1043 FOR_EACH_LOG_LINK (links, use_insn)
1044 if (insn == links->insn)
1045 break;
1047 if (!links)
1048 LOG_LINKS (use_insn)
1049 = alloc_insn_link (insn, LOG_LINKS (use_insn));
1052 next_use[regno] = NULL_RTX;
1055 for (use_vec = DF_INSN_USES (insn); *use_vec; use_vec++)
1057 df_ref use = *use_vec;
1058 int regno = DF_REF_REGNO (use);
1060 /* Do not consider the usage of the stack pointer
1061 by a function call. */
1062 if (DF_REF_FLAGS (use) & DF_REF_CALL_STACK_USAGE)
1063 continue;
1065 next_use[regno] = insn;
1070 free (next_use);
1073 /* Walk the LOG_LINKS of insn B to see if we find a reference to A. Return
1074 true if we found a LOG_LINK that proves that A feeds B. This only works
1075 if there are no instructions between A and B which could have a link
1076 depending on A, since in that case we would not record a link for B.
1077 We also check the implicit dependency created by a cc0 setter/user
1078 pair. */
1080 static bool
1081 insn_a_feeds_b (rtx a, rtx b)
1083 struct insn_link *links;
1084 FOR_EACH_LOG_LINK (links, b)
1085 if (links->insn == a)
1086 return true;
1087 #ifdef HAVE_cc0
1088 if (sets_cc0_p (a))
1089 return true;
1090 #endif
1091 return false;
1094 /* Main entry point for combiner. F is the first insn of the function.
1095 NREGS is the first unused pseudo-reg number.
1097 Return nonzero if the combiner has turned an indirect jump
1098 instruction into a direct jump. */
1099 static int
1100 combine_instructions (rtx f, unsigned int nregs)
1102 rtx insn, next;
1103 #ifdef HAVE_cc0
1104 rtx prev;
1105 #endif
1106 struct insn_link *links, *nextlinks;
1107 rtx first;
1108 basic_block last_bb;
1110 int new_direct_jump_p = 0;
1112 for (first = f; first && !INSN_P (first); )
1113 first = NEXT_INSN (first);
1114 if (!first)
1115 return 0;
1117 combine_attempts = 0;
1118 combine_merges = 0;
1119 combine_extras = 0;
1120 combine_successes = 0;
1122 rtl_hooks = combine_rtl_hooks;
1124 VEC_safe_grow_cleared (reg_stat_type, heap, reg_stat, nregs);
1126 init_recog_no_volatile ();
1128 /* Allocate array for insn info. */
1129 max_uid_known = get_max_uid ();
1130 uid_log_links = XCNEWVEC (struct insn_link *, max_uid_known + 1);
1131 uid_insn_cost = XCNEWVEC (int, max_uid_known + 1);
1132 gcc_obstack_init (&insn_link_obstack);
1134 nonzero_bits_mode = mode_for_size (HOST_BITS_PER_WIDE_INT, MODE_INT, 0);
1136 /* Don't use reg_stat[].nonzero_bits when computing it. This can cause
1137 problems when, for example, we have j <<= 1 in a loop. */
1139 nonzero_sign_valid = 0;
1140 label_tick = label_tick_ebb_start = 1;
1142 /* Scan all SETs and see if we can deduce anything about what
1143 bits are known to be zero for some registers and how many copies
1144 of the sign bit are known to exist for those registers.
1146 Also set any known values so that we can use them while searching
1147 for what bits are known to be set. */
1149 setup_incoming_promotions (first);
1150 /* Allow the entry block and the first block to fall into the same EBB.
1151 Conceptually the incoming promotions are assigned to the entry block. */
1152 last_bb = ENTRY_BLOCK_PTR;
1154 create_log_links ();
1155 FOR_EACH_BB (this_basic_block)
1157 optimize_this_for_speed_p = optimize_bb_for_speed_p (this_basic_block);
1158 last_call_luid = 0;
1159 mem_last_set = -1;
1161 label_tick++;
1162 if (!single_pred_p (this_basic_block)
1163 || single_pred (this_basic_block) != last_bb)
1164 label_tick_ebb_start = label_tick;
1165 last_bb = this_basic_block;
1167 FOR_BB_INSNS (this_basic_block, insn)
1168 if (INSN_P (insn) && BLOCK_FOR_INSN (insn))
1170 #ifdef AUTO_INC_DEC
1171 rtx links;
1172 #endif
1174 subst_low_luid = DF_INSN_LUID (insn);
1175 subst_insn = insn;
1177 note_stores (PATTERN (insn), set_nonzero_bits_and_sign_copies,
1178 insn);
1179 record_dead_and_set_regs (insn);
1181 #ifdef AUTO_INC_DEC
1182 for (links = REG_NOTES (insn); links; links = XEXP (links, 1))
1183 if (REG_NOTE_KIND (links) == REG_INC)
1184 set_nonzero_bits_and_sign_copies (XEXP (links, 0), NULL_RTX,
1185 insn);
1186 #endif
1188 /* Record the current insn_rtx_cost of this instruction. */
1189 if (NONJUMP_INSN_P (insn))
1190 INSN_COST (insn) = insn_rtx_cost (PATTERN (insn),
1191 optimize_this_for_speed_p);
1192 if (dump_file)
1193 fprintf(dump_file, "insn_cost %d: %d\n",
1194 INSN_UID (insn), INSN_COST (insn));
1198 nonzero_sign_valid = 1;
1200 /* Now scan all the insns in forward order. */
1201 label_tick = label_tick_ebb_start = 1;
1202 init_reg_last ();
1203 setup_incoming_promotions (first);
1204 last_bb = ENTRY_BLOCK_PTR;
1206 FOR_EACH_BB (this_basic_block)
1208 rtx last_combined_insn = NULL_RTX;
1209 optimize_this_for_speed_p = optimize_bb_for_speed_p (this_basic_block);
1210 last_call_luid = 0;
1211 mem_last_set = -1;
1213 label_tick++;
1214 if (!single_pred_p (this_basic_block)
1215 || single_pred (this_basic_block) != last_bb)
1216 label_tick_ebb_start = label_tick;
1217 last_bb = this_basic_block;
1219 rtl_profile_for_bb (this_basic_block);
1220 for (insn = BB_HEAD (this_basic_block);
1221 insn != NEXT_INSN (BB_END (this_basic_block));
1222 insn = next ? next : NEXT_INSN (insn))
1224 next = 0;
1225 if (NONDEBUG_INSN_P (insn))
1227 while (last_combined_insn
1228 && INSN_DELETED_P (last_combined_insn))
1229 last_combined_insn = PREV_INSN (last_combined_insn);
1230 if (last_combined_insn == NULL_RTX
1231 || BARRIER_P (last_combined_insn)
1232 || BLOCK_FOR_INSN (last_combined_insn) != this_basic_block
1233 || DF_INSN_LUID (last_combined_insn) <= DF_INSN_LUID (insn))
1234 last_combined_insn = insn;
1236 /* See if we know about function return values before this
1237 insn based upon SUBREG flags. */
1238 check_promoted_subreg (insn, PATTERN (insn));
1240 /* See if we can find hardregs and subreg of pseudos in
1241 narrower modes. This could help turn TRUNCATEs
1242 into SUBREGs. */
1243 note_uses (&PATTERN (insn), record_truncated_values, NULL);
1245 /* Try this insn with each insn it links back to. */
1247 FOR_EACH_LOG_LINK (links, insn)
1248 if ((next = try_combine (insn, links->insn, NULL_RTX,
1249 NULL_RTX, &new_direct_jump_p,
1250 last_combined_insn)) != 0)
1251 goto retry;
1253 /* Try each sequence of three linked insns ending with this one. */
1255 FOR_EACH_LOG_LINK (links, insn)
1257 rtx link = links->insn;
1259 /* If the linked insn has been replaced by a note, then there
1260 is no point in pursuing this chain any further. */
1261 if (NOTE_P (link))
1262 continue;
1264 FOR_EACH_LOG_LINK (nextlinks, link)
1265 if ((next = try_combine (insn, link, nextlinks->insn,
1266 NULL_RTX, &new_direct_jump_p,
1267 last_combined_insn)) != 0)
1268 goto retry;
1271 #ifdef HAVE_cc0
1272 /* Try to combine a jump insn that uses CC0
1273 with a preceding insn that sets CC0, and maybe with its
1274 logical predecessor as well.
1275 This is how we make decrement-and-branch insns.
1276 We need this special code because data flow connections
1277 via CC0 do not get entered in LOG_LINKS. */
1279 if (JUMP_P (insn)
1280 && (prev = prev_nonnote_insn (insn)) != 0
1281 && NONJUMP_INSN_P (prev)
1282 && sets_cc0_p (PATTERN (prev)))
1284 if ((next = try_combine (insn, prev, NULL_RTX, NULL_RTX,
1285 &new_direct_jump_p,
1286 last_combined_insn)) != 0)
1287 goto retry;
1289 FOR_EACH_LOG_LINK (nextlinks, prev)
1290 if ((next = try_combine (insn, prev, nextlinks->insn,
1291 NULL_RTX, &new_direct_jump_p,
1292 last_combined_insn)) != 0)
1293 goto retry;
1296 /* Do the same for an insn that explicitly references CC0. */
1297 if (NONJUMP_INSN_P (insn)
1298 && (prev = prev_nonnote_insn (insn)) != 0
1299 && NONJUMP_INSN_P (prev)
1300 && sets_cc0_p (PATTERN (prev))
1301 && GET_CODE (PATTERN (insn)) == SET
1302 && reg_mentioned_p (cc0_rtx, SET_SRC (PATTERN (insn))))
1304 if ((next = try_combine (insn, prev, NULL_RTX, NULL_RTX,
1305 &new_direct_jump_p,
1306 last_combined_insn)) != 0)
1307 goto retry;
1309 FOR_EACH_LOG_LINK (nextlinks, prev)
1310 if ((next = try_combine (insn, prev, nextlinks->insn,
1311 NULL_RTX, &new_direct_jump_p,
1312 last_combined_insn)) != 0)
1313 goto retry;
1316 /* Finally, see if any of the insns that this insn links to
1317 explicitly references CC0. If so, try this insn, that insn,
1318 and its predecessor if it sets CC0. */
1319 FOR_EACH_LOG_LINK (links, insn)
1320 if (NONJUMP_INSN_P (links->insn)
1321 && GET_CODE (PATTERN (links->insn)) == SET
1322 && reg_mentioned_p (cc0_rtx, SET_SRC (PATTERN (links->insn)))
1323 && (prev = prev_nonnote_insn (links->insn)) != 0
1324 && NONJUMP_INSN_P (prev)
1325 && sets_cc0_p (PATTERN (prev))
1326 && (next = try_combine (insn, links->insn,
1327 prev, NULL_RTX, &new_direct_jump_p,
1328 last_combined_insn)) != 0)
1329 goto retry;
1330 #endif
1332 /* Try combining an insn with two different insns whose results it
1333 uses. */
1334 FOR_EACH_LOG_LINK (links, insn)
1335 for (nextlinks = links->next; nextlinks;
1336 nextlinks = nextlinks->next)
1337 if ((next = try_combine (insn, links->insn,
1338 nextlinks->insn, NULL_RTX,
1339 &new_direct_jump_p,
1340 last_combined_insn)) != 0)
1341 goto retry;
1343 /* Try four-instruction combinations. */
1344 FOR_EACH_LOG_LINK (links, insn)
1346 struct insn_link *next1;
1347 rtx link = links->insn;
1349 /* If the linked insn has been replaced by a note, then there
1350 is no point in pursuing this chain any further. */
1351 if (NOTE_P (link))
1352 continue;
1354 FOR_EACH_LOG_LINK (next1, link)
1356 rtx link1 = next1->insn;
1357 if (NOTE_P (link1))
1358 continue;
1359 /* I0 -> I1 -> I2 -> I3. */
1360 FOR_EACH_LOG_LINK (nextlinks, link1)
1361 if ((next = try_combine (insn, link, link1,
1362 nextlinks->insn,
1363 &new_direct_jump_p,
1364 last_combined_insn)) != 0)
1365 goto retry;
1366 /* I0, I1 -> I2, I2 -> I3. */
1367 for (nextlinks = next1->next; nextlinks;
1368 nextlinks = nextlinks->next)
1369 if ((next = try_combine (insn, link, link1,
1370 nextlinks->insn,
1371 &new_direct_jump_p,
1372 last_combined_insn)) != 0)
1373 goto retry;
1376 for (next1 = links->next; next1; next1 = next1->next)
1378 rtx link1 = next1->insn;
1379 if (NOTE_P (link1))
1380 continue;
1381 /* I0 -> I2; I1, I2 -> I3. */
1382 FOR_EACH_LOG_LINK (nextlinks, link)
1383 if ((next = try_combine (insn, link, link1,
1384 nextlinks->insn,
1385 &new_direct_jump_p,
1386 last_combined_insn)) != 0)
1387 goto retry;
1388 /* I0 -> I1; I1, I2 -> I3. */
1389 FOR_EACH_LOG_LINK (nextlinks, link1)
1390 if ((next = try_combine (insn, link, link1,
1391 nextlinks->insn,
1392 &new_direct_jump_p,
1393 last_combined_insn)) != 0)
1394 goto retry;
1398 /* Try this insn with each REG_EQUAL note it links back to. */
1399 FOR_EACH_LOG_LINK (links, insn)
1401 rtx set, note;
1402 rtx temp = links->insn;
1403 if ((set = single_set (temp)) != 0
1404 && (note = find_reg_equal_equiv_note (temp)) != 0
1405 && (note = XEXP (note, 0), GET_CODE (note)) != EXPR_LIST
1406 /* Avoid using a register that may have already been marked
1407 dead by an earlier instruction. */
1408 && ! unmentioned_reg_p (note, SET_SRC (set))
1409 && (GET_MODE (note) == VOIDmode
1410 ? SCALAR_INT_MODE_P (GET_MODE (SET_DEST (set)))
1411 : GET_MODE (SET_DEST (set)) == GET_MODE (note)))
1413 /* Temporarily replace the set's source with the
1414 contents of the REG_EQUAL note. The insn will
1415 be deleted or recognized by try_combine. */
1416 rtx orig = SET_SRC (set);
1417 SET_SRC (set) = note;
1418 i2mod = temp;
1419 i2mod_old_rhs = copy_rtx (orig);
1420 i2mod_new_rhs = copy_rtx (note);
1421 next = try_combine (insn, i2mod, NULL_RTX, NULL_RTX,
1422 &new_direct_jump_p,
1423 last_combined_insn);
1424 i2mod = NULL_RTX;
1425 if (next)
1426 goto retry;
1427 SET_SRC (set) = orig;
1431 if (!NOTE_P (insn))
1432 record_dead_and_set_regs (insn);
1434 retry:
1440 default_rtl_profile ();
1441 clear_bb_flags ();
1442 new_direct_jump_p |= purge_all_dead_edges ();
1443 delete_noop_moves ();
1445 /* Clean up. */
1446 obstack_free (&insn_link_obstack, NULL);
1447 free (uid_log_links);
1448 free (uid_insn_cost);
1449 VEC_free (reg_stat_type, heap, reg_stat);
1452 struct undo *undo, *next;
1453 for (undo = undobuf.frees; undo; undo = next)
1455 next = undo->next;
1456 free (undo);
1458 undobuf.frees = 0;
1461 total_attempts += combine_attempts;
1462 total_merges += combine_merges;
1463 total_extras += combine_extras;
1464 total_successes += combine_successes;
1466 nonzero_sign_valid = 0;
1467 rtl_hooks = general_rtl_hooks;
1469 /* Make recognizer allow volatile MEMs again. */
1470 init_recog ();
1472 return new_direct_jump_p;
1475 /* Wipe the last_xxx fields of reg_stat in preparation for another pass. */
1477 static void
1478 init_reg_last (void)
1480 unsigned int i;
1481 reg_stat_type *p;
1483 FOR_EACH_VEC_ELT (reg_stat_type, reg_stat, i, p)
1484 memset (p, 0, offsetof (reg_stat_type, sign_bit_copies));
1487 /* Set up any promoted values for incoming argument registers. */
1489 static void
1490 setup_incoming_promotions (rtx first)
1492 tree arg;
1493 bool strictly_local = false;
1495 for (arg = DECL_ARGUMENTS (current_function_decl); arg;
1496 arg = DECL_CHAIN (arg))
1498 rtx x, reg = DECL_INCOMING_RTL (arg);
1499 int uns1, uns3;
1500 enum machine_mode mode1, mode2, mode3, mode4;
1502 /* Only continue if the incoming argument is in a register. */
1503 if (!REG_P (reg))
1504 continue;
1506 /* Determine, if possible, whether all call sites of the current
1507 function lie within the current compilation unit. (This does
1508 take into account the exporting of a function via taking its
1509 address, and so forth.) */
1510 strictly_local = cgraph_local_info (current_function_decl)->local;
1512 /* The mode and signedness of the argument before any promotions happen
1513 (equal to the mode of the pseudo holding it at that stage). */
1514 mode1 = TYPE_MODE (TREE_TYPE (arg));
1515 uns1 = TYPE_UNSIGNED (TREE_TYPE (arg));
1517 /* The mode and signedness of the argument after any source language and
1518 TARGET_PROMOTE_PROTOTYPES-driven promotions. */
1519 mode2 = TYPE_MODE (DECL_ARG_TYPE (arg));
1520 uns3 = TYPE_UNSIGNED (DECL_ARG_TYPE (arg));
1522 /* The mode and signedness of the argument as it is actually passed,
1523 after any TARGET_PROMOTE_FUNCTION_ARGS-driven ABI promotions. */
1524 mode3 = promote_function_mode (DECL_ARG_TYPE (arg), mode2, &uns3,
1525 TREE_TYPE (cfun->decl), 0);
1527 /* The mode of the register in which the argument is being passed. */
1528 mode4 = GET_MODE (reg);
1530 /* Eliminate sign extensions in the callee when:
1531 (a) A mode promotion has occurred; */
1532 if (mode1 == mode3)
1533 continue;
1534 /* (b) The mode of the register is the same as the mode of
1535 the argument as it is passed; */
1536 if (mode3 != mode4)
1537 continue;
1538 /* (c) There's no language level extension; */
1539 if (mode1 == mode2)
1541 /* (c.1) All callers are from the current compilation unit. If that's
1542 the case we don't have to rely on an ABI, we only have to know
1543 what we're generating right now, and we know that we will do the
1544 mode1 to mode2 promotion with the given sign. */
1545 else if (!strictly_local)
1546 continue;
1547 /* (c.2) The combination of the two promotions is useful. This is
1548 true when the signs match, or if the first promotion is unsigned.
1549 In the latter case, (sign_extend (zero_extend x)) is the same as
1550 (zero_extend (zero_extend x)), so make sure to force UNS3 true. */
1551 else if (uns1)
1552 uns3 = true;
1553 else if (uns3)
1554 continue;
1556 /* Record that the value was promoted from mode1 to mode3,
1557 so that any sign extension at the head of the current
1558 function may be eliminated. */
1559 x = gen_rtx_CLOBBER (mode1, const0_rtx);
1560 x = gen_rtx_fmt_e ((uns3 ? ZERO_EXTEND : SIGN_EXTEND), mode3, x);
1561 record_value_for_reg (reg, first, x);
1565 /* Called via note_stores. If X is a pseudo that is narrower than
1566 HOST_BITS_PER_WIDE_INT and is being set, record what bits are known zero.
1568 If we are setting only a portion of X and we can't figure out what
1569 portion, assume all bits will be used since we don't know what will
1570 be happening.
1572 Similarly, set how many bits of X are known to be copies of the sign bit
1573 at all locations in the function. This is the smallest number implied
1574 by any set of X. */
1576 static void
1577 set_nonzero_bits_and_sign_copies (rtx x, const_rtx set, void *data)
1579 rtx insn = (rtx) data;
1580 unsigned int num;
1582 if (REG_P (x)
1583 && REGNO (x) >= FIRST_PSEUDO_REGISTER
1584 /* If this register is undefined at the start of the file, we can't
1585 say what its contents were. */
1586 && ! REGNO_REG_SET_P
1587 (DF_LR_IN (ENTRY_BLOCK_PTR->next_bb), REGNO (x))
1588 && HWI_COMPUTABLE_MODE_P (GET_MODE (x)))
1590 reg_stat_type *rsp = VEC_index (reg_stat_type, reg_stat, REGNO (x));
1592 if (set == 0 || GET_CODE (set) == CLOBBER)
1594 rsp->nonzero_bits = GET_MODE_MASK (GET_MODE (x));
1595 rsp->sign_bit_copies = 1;
1596 return;
1599 /* If this register is being initialized using itself, and the
1600 register is uninitialized in this basic block, and there are
1601 no LOG_LINKS which set the register, then part of the
1602 register is uninitialized. In that case we can't assume
1603 anything about the number of nonzero bits.
1605 ??? We could do better if we checked this in
1606 reg_{nonzero_bits,num_sign_bit_copies}_for_combine. Then we
1607 could avoid making assumptions about the insn which initially
1608 sets the register, while still using the information in other
1609 insns. We would have to be careful to check every insn
1610 involved in the combination. */
1612 if (insn
1613 && reg_referenced_p (x, PATTERN (insn))
1614 && !REGNO_REG_SET_P (DF_LR_IN (BLOCK_FOR_INSN (insn)),
1615 REGNO (x)))
1617 struct insn_link *link;
1619 FOR_EACH_LOG_LINK (link, insn)
1620 if (dead_or_set_p (link->insn, x))
1621 break;
1622 if (!link)
1624 rsp->nonzero_bits = GET_MODE_MASK (GET_MODE (x));
1625 rsp->sign_bit_copies = 1;
1626 return;
1630 /* If this is a complex assignment, see if we can convert it into a
1631 simple assignment. */
1632 set = expand_field_assignment (set);
1634 /* If this is a simple assignment, or we have a paradoxical SUBREG,
1635 set what we know about X. */
1637 if (SET_DEST (set) == x
1638 || (paradoxical_subreg_p (SET_DEST (set))
1639 && SUBREG_REG (SET_DEST (set)) == x))
1641 rtx src = SET_SRC (set);
1643 #ifdef SHORT_IMMEDIATES_SIGN_EXTEND
1644 /* If X is narrower than a word and SRC is a non-negative
1645 constant that would appear negative in the mode of X,
1646 sign-extend it for use in reg_stat[].nonzero_bits because some
1647 machines (maybe most) will actually do the sign-extension
1648 and this is the conservative approach.
1650 ??? For 2.5, try to tighten up the MD files in this regard
1651 instead of this kludge. */
1653 if (GET_MODE_PRECISION (GET_MODE (x)) < BITS_PER_WORD
1654 && CONST_INT_P (src)
1655 && INTVAL (src) > 0
1656 && val_signbit_known_set_p (GET_MODE (x), INTVAL (src)))
1657 src = GEN_INT (INTVAL (src) | ~GET_MODE_MASK (GET_MODE (x)));
1658 #endif
1660 /* Don't call nonzero_bits if it cannot change anything. */
1661 if (rsp->nonzero_bits != ~(unsigned HOST_WIDE_INT) 0)
1662 rsp->nonzero_bits |= nonzero_bits (src, nonzero_bits_mode);
1663 num = num_sign_bit_copies (SET_SRC (set), GET_MODE (x));
1664 if (rsp->sign_bit_copies == 0
1665 || rsp->sign_bit_copies > num)
1666 rsp->sign_bit_copies = num;
1668 else
1670 rsp->nonzero_bits = GET_MODE_MASK (GET_MODE (x));
1671 rsp->sign_bit_copies = 1;
1676 /* See if INSN can be combined into I3. PRED, PRED2, SUCC and SUCC2 are
1677 optionally insns that were previously combined into I3 or that will be
1678 combined into the merger of INSN and I3. The order is PRED, PRED2,
1679 INSN, SUCC, SUCC2, I3.
1681 Return 0 if the combination is not allowed for any reason.
1683 If the combination is allowed, *PDEST will be set to the single
1684 destination of INSN and *PSRC to the single source, and this function
1685 will return 1. */
1687 static int
1688 can_combine_p (rtx insn, rtx i3, rtx pred ATTRIBUTE_UNUSED,
1689 rtx pred2 ATTRIBUTE_UNUSED, rtx succ, rtx succ2,
1690 rtx *pdest, rtx *psrc)
1692 int i;
1693 const_rtx set = 0;
1694 rtx src, dest;
1695 rtx p;
1696 #ifdef AUTO_INC_DEC
1697 rtx link;
1698 #endif
1699 bool all_adjacent = true;
1700 int (*is_volatile_p) (const_rtx);
1702 if (succ)
1704 if (succ2)
1706 if (next_active_insn (succ2) != i3)
1707 all_adjacent = false;
1708 if (next_active_insn (succ) != succ2)
1709 all_adjacent = false;
1711 else if (next_active_insn (succ) != i3)
1712 all_adjacent = false;
1713 if (next_active_insn (insn) != succ)
1714 all_adjacent = false;
1716 else if (next_active_insn (insn) != i3)
1717 all_adjacent = false;
1719 /* Can combine only if previous insn is a SET of a REG, a SUBREG or CC0,
1720 or a PARALLEL consisting of such a SET and CLOBBERs.
1722 If INSN has CLOBBER parallel parts, ignore them for our processing.
1723 By definition, these happen during the execution of the insn. When it
1724 is merged with another insn, all bets are off. If they are, in fact,
1725 needed and aren't also supplied in I3, they may be added by
1726 recog_for_combine. Otherwise, it won't match.
1728 We can also ignore a SET whose SET_DEST is mentioned in a REG_UNUSED
1729 note.
1731 Get the source and destination of INSN. If more than one, can't
1732 combine. */
1734 if (GET_CODE (PATTERN (insn)) == SET)
1735 set = PATTERN (insn);
1736 else if (GET_CODE (PATTERN (insn)) == PARALLEL
1737 && GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == SET)
1739 for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
1741 rtx elt = XVECEXP (PATTERN (insn), 0, i);
1743 switch (GET_CODE (elt))
1745 /* This is important to combine floating point insns
1746 for the SH4 port. */
1747 case USE:
1748 /* Combining an isolated USE doesn't make sense.
1749 We depend here on combinable_i3pat to reject them. */
1750 /* The code below this loop only verifies that the inputs of
1751 the SET in INSN do not change. We call reg_set_between_p
1752 to verify that the REG in the USE does not change between
1753 I3 and INSN.
1754 If the USE in INSN was for a pseudo register, the matching
1755 insn pattern will likely match any register; combining this
1756 with any other USE would only be safe if we knew that the
1757 used registers have identical values, or if there was
1758 something to tell them apart, e.g. different modes. For
1759 now, we forgo such complicated tests and simply disallow
1760 combining of USES of pseudo registers with any other USE. */
1761 if (REG_P (XEXP (elt, 0))
1762 && GET_CODE (PATTERN (i3)) == PARALLEL)
1764 rtx i3pat = PATTERN (i3);
1765 int i = XVECLEN (i3pat, 0) - 1;
1766 unsigned int regno = REGNO (XEXP (elt, 0));
1770 rtx i3elt = XVECEXP (i3pat, 0, i);
1772 if (GET_CODE (i3elt) == USE
1773 && REG_P (XEXP (i3elt, 0))
1774 && (REGNO (XEXP (i3elt, 0)) == regno
1775 ? reg_set_between_p (XEXP (elt, 0),
1776 PREV_INSN (insn), i3)
1777 : regno >= FIRST_PSEUDO_REGISTER))
1778 return 0;
1780 while (--i >= 0);
1782 break;
1784 /* We can ignore CLOBBERs. */
1785 case CLOBBER:
1786 break;
1788 case SET:
1789 /* Ignore SETs whose result isn't used but not those that
1790 have side-effects. */
1791 if (find_reg_note (insn, REG_UNUSED, SET_DEST (elt))
1792 && insn_nothrow_p (insn)
1793 && !side_effects_p (elt))
1794 break;
1796 /* If we have already found a SET, this is a second one and
1797 so we cannot combine with this insn. */
1798 if (set)
1799 return 0;
1801 set = elt;
1802 break;
1804 default:
1805 /* Anything else means we can't combine. */
1806 return 0;
1810 if (set == 0
1811 /* If SET_SRC is an ASM_OPERANDS we can't throw away these CLOBBERs,
1812 so don't do anything with it. */
1813 || GET_CODE (SET_SRC (set)) == ASM_OPERANDS)
1814 return 0;
1816 else
1817 return 0;
1819 if (set == 0)
1820 return 0;
1822 /* The simplification in expand_field_assignment may call back to
1823 get_last_value, so set safe guard here. */
1824 subst_low_luid = DF_INSN_LUID (insn);
1826 set = expand_field_assignment (set);
1827 src = SET_SRC (set), dest = SET_DEST (set);
1829 /* Don't eliminate a store in the stack pointer. */
1830 if (dest == stack_pointer_rtx
1831 /* Don't combine with an insn that sets a register to itself if it has
1832 a REG_EQUAL note. This may be part of a LIBCALL sequence. */
1833 || (rtx_equal_p (src, dest) && find_reg_note (insn, REG_EQUAL, NULL_RTX))
1834 /* Can't merge an ASM_OPERANDS. */
1835 || GET_CODE (src) == ASM_OPERANDS
1836 /* Can't merge a function call. */
1837 || GET_CODE (src) == CALL
1838 /* Don't eliminate a function call argument. */
1839 || (CALL_P (i3)
1840 && (find_reg_fusage (i3, USE, dest)
1841 || (REG_P (dest)
1842 && REGNO (dest) < FIRST_PSEUDO_REGISTER
1843 && global_regs[REGNO (dest)])))
1844 /* Don't substitute into an incremented register. */
1845 || FIND_REG_INC_NOTE (i3, dest)
1846 || (succ && FIND_REG_INC_NOTE (succ, dest))
1847 || (succ2 && FIND_REG_INC_NOTE (succ2, dest))
1848 /* Don't substitute into a non-local goto, this confuses CFG. */
1849 || (JUMP_P (i3) && find_reg_note (i3, REG_NON_LOCAL_GOTO, NULL_RTX))
1850 /* Make sure that DEST is not used after SUCC but before I3. */
1851 || (!all_adjacent
1852 && ((succ2
1853 && (reg_used_between_p (dest, succ2, i3)
1854 || reg_used_between_p (dest, succ, succ2)))
1855 || (!succ2 && succ && reg_used_between_p (dest, succ, i3))))
1856 /* Make sure that the value that is to be substituted for the register
1857 does not use any registers whose values alter in between. However,
1858 if the insns are adjacent, a use can't cross a set even though we
1859 think it might (this can happen for a sequence of insns each setting
1860 the same destination; last_set of that register might point to
1861 a NOTE). If INSN has a REG_EQUIV note, the register is always
1862 equivalent to the memory so the substitution is valid even if there
1863 are intervening stores. Also, don't move a volatile asm or
1864 UNSPEC_VOLATILE across any other insns. */
1865 || (! all_adjacent
1866 && (((!MEM_P (src)
1867 || ! find_reg_note (insn, REG_EQUIV, src))
1868 && use_crosses_set_p (src, DF_INSN_LUID (insn)))
1869 || (GET_CODE (src) == ASM_OPERANDS && MEM_VOLATILE_P (src))
1870 || GET_CODE (src) == UNSPEC_VOLATILE))
1871 /* Don't combine across a CALL_INSN, because that would possibly
1872 change whether the life span of some REGs crosses calls or not,
1873 and it is a pain to update that information.
1874 Exception: if source is a constant, moving it later can't hurt.
1875 Accept that as a special case. */
1876 || (DF_INSN_LUID (insn) < last_call_luid && ! CONSTANT_P (src)))
1877 return 0;
1879 /* DEST must either be a REG or CC0. */
1880 if (REG_P (dest))
1882 /* If register alignment is being enforced for multi-word items in all
1883 cases except for parameters, it is possible to have a register copy
1884 insn referencing a hard register that is not allowed to contain the
1885 mode being copied and which would not be valid as an operand of most
1886 insns. Eliminate this problem by not combining with such an insn.
1888 Also, on some machines we don't want to extend the life of a hard
1889 register. */
1891 if (REG_P (src)
1892 && ((REGNO (dest) < FIRST_PSEUDO_REGISTER
1893 && ! HARD_REGNO_MODE_OK (REGNO (dest), GET_MODE (dest)))
1894 /* Don't extend the life of a hard register unless it is
1895 a user variable (if we have few registers) or it can't
1896 fit into the desired register (meaning something special
1897 is going on).
1898 Also avoid substituting a return register into I3, because
1899 reload can't handle a conflict with constraints of other
1900 inputs. */
1901 || (REGNO (src) < FIRST_PSEUDO_REGISTER
1902 && ! HARD_REGNO_MODE_OK (REGNO (src), GET_MODE (src)))))
1903 return 0;
1905 else if (GET_CODE (dest) != CC0)
1906 return 0;
1909 if (GET_CODE (PATTERN (i3)) == PARALLEL)
1910 for (i = XVECLEN (PATTERN (i3), 0) - 1; i >= 0; i--)
1911 if (GET_CODE (XVECEXP (PATTERN (i3), 0, i)) == CLOBBER)
1913 /* Don't substitute for a register intended as a clobberable
1914 operand. */
1915 rtx reg = XEXP (XVECEXP (PATTERN (i3), 0, i), 0);
1916 if (rtx_equal_p (reg, dest))
1917 return 0;
1919 /* If the clobber represents an earlyclobber operand, we must not
1920 substitute an expression containing the clobbered register.
1921 As we do not analyze the constraint strings here, we have to
1922 make the conservative assumption. However, if the register is
1923 a fixed hard reg, the clobber cannot represent any operand;
1924 we leave it up to the machine description to either accept or
1925 reject use-and-clobber patterns. */
1926 if (!REG_P (reg)
1927 || REGNO (reg) >= FIRST_PSEUDO_REGISTER
1928 || !fixed_regs[REGNO (reg)])
1929 if (reg_overlap_mentioned_p (reg, src))
1930 return 0;
1933 /* If INSN contains anything volatile, or is an `asm' (whether volatile
1934 or not), reject, unless nothing volatile comes between it and I3.  */
1936 if (GET_CODE (src) == ASM_OPERANDS || volatile_refs_p (src))
1938 /* Make sure neither succ nor succ2 contains a volatile reference. */
1939 if (succ2 != 0 && volatile_refs_p (PATTERN (succ2)))
1940 return 0;
1941 if (succ != 0 && volatile_refs_p (PATTERN (succ)))
1942 return 0;
1943 /* We'll check insns between INSN and I3 below. */
1946 /* If INSN is an asm, and DEST is a hard register, reject, since it has
1947 to be an explicit register variable, and was chosen for a reason. */
1949 if (GET_CODE (src) == ASM_OPERANDS
1950 && REG_P (dest) && REGNO (dest) < FIRST_PSEUDO_REGISTER)
1951 return 0;
1953 /* If INSN contains volatile references (specifically volatile MEMs),
1954 we cannot combine across any other volatile references.
1955 Even if INSN doesn't contain volatile references, any intervening
1956 volatile insn might affect machine state. */
1958 is_volatile_p = volatile_refs_p (PATTERN (insn))
1959 ? volatile_refs_p
1960 : volatile_insn_p;
1962 for (p = NEXT_INSN (insn); p != i3; p = NEXT_INSN (p))
1963 if (INSN_P (p) && p != succ && p != succ2 && is_volatile_p (PATTERN (p)))
1964 return 0;
1966 /* If INSN contains an autoincrement or autodecrement, make sure that
1967 register is not used between there and I3, and not already used in
1968 I3 either. Neither must it be used in PRED or SUCC, if they exist.
1969 Also insist that I3 not be a jump; if it were one
1970 and the incremented register were spilled, we would lose. */
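/* Hypothetical example of the check below (register numbers invented):
   if INSN is
     (set (reg:SI 100) (mem:SI (post_inc:SI (reg:SI 200))))
   carrying a REG_INC note for (reg:SI 200), then any use of register 200
   in I3, PRED, PRED2, SUCC or SUCC2, or between INSN and I3, and likewise
   the case where I3 is a jump, causes the combination to be rejected.  */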
1972 #ifdef AUTO_INC_DEC
1973 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
1974 if (REG_NOTE_KIND (link) == REG_INC
1975 && (JUMP_P (i3)
1976 || reg_used_between_p (XEXP (link, 0), insn, i3)
1977 || (pred != NULL_RTX
1978 && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (pred)))
1979 || (pred2 != NULL_RTX
1980 && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (pred2)))
1981 || (succ != NULL_RTX
1982 && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (succ)))
1983 || (succ2 != NULL_RTX
1984 && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (succ2)))
1985 || reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (i3))))
1986 return 0;
1987 #endif
1989 #ifdef HAVE_cc0
1990 /* Don't combine an insn that follows a CC0-setting insn.
1991 An insn that uses CC0 must not be separated from the one that sets it.
1992 We do, however, allow I2 to follow a CC0-setting insn if that insn
1993 is passed as I1; in that case it will be deleted also.
1994 We also allow combining in this case if all the insns are adjacent
1995 because that would leave the two CC0 insns adjacent as well.
1996 It would be more logical to test whether CC0 occurs inside I1 or I2,
1997 but that would be much slower, and this ought to be equivalent. */
1999 p = prev_nonnote_insn (insn);
2000 if (p && p != pred && NONJUMP_INSN_P (p) && sets_cc0_p (PATTERN (p))
2001 && ! all_adjacent)
2002 return 0;
2003 #endif
2005 /* If we get here, we have passed all the tests and the combination is
2006 to be allowed. */
2008 *pdest = dest;
2009 *psrc = src;
2011 return 1;
2014 /* LOC is the location within I3 that contains its pattern or the component
2015 of a PARALLEL of the pattern. We validate that it is valid for combining.
2017 One problem is that if I3 modifies its output, as opposed to replacing it
2018 entirely, we can't allow the output to contain I2DEST, I1DEST or I0DEST, as
2019 doing so would produce an insn that is not equivalent to the original insns.
2021 Consider:
2023 (set (reg:DI 101) (reg:DI 100))
2024 (set (subreg:SI (reg:DI 101) 0) <foo>)
2026 This is NOT equivalent to:
2028 (parallel [(set (subreg:SI (reg:DI 100) 0) <foo>)
2029 (set (reg:DI 101) (reg:DI 100))])
2031 Not only does this modify 100 (in which case it might still be valid
2032 if 100 were dead in I2), it sets 101 to the ORIGINAL value of 100.
2034 We can also run into a problem if I2 sets a register that I1
2035 uses and I1 gets directly substituted into I3 (not via I2). In that
2036 case, we would be getting the wrong value of I2DEST into I3, so we
2037 must reject the combination. This case occurs when I2 and I1 both
2038 feed into I3, rather than when I1 feeds into I2, which feeds into I3.
2039 If I1_NOT_IN_SRC is nonzero, it means that finding I1 in the source
2040 of a SET must prevent combination from occurring. The same situation
2041 can occur for I0, in which case I0_NOT_IN_SRC is set.
2043 Before doing the above check, we first try to expand a field assignment
2044 into a set of logical operations.
2046 If PI3_DEST_KILLED is nonzero, it is a pointer to a location in which
2047 we place a register that is both set and used within I3. If more than one
2048 such register is detected, we fail.
2050 Return 1 if the combination is valid, zero otherwise. */
2052 static int
2053 combinable_i3pat (rtx i3, rtx *loc, rtx i2dest, rtx i1dest, rtx i0dest,
2054 int i1_not_in_src, int i0_not_in_src, rtx *pi3dest_killed)
2056 rtx x = *loc;
2058 if (GET_CODE (x) == SET)
2060 rtx set = x;
2061 rtx dest = SET_DEST (set);
2062 rtx src = SET_SRC (set);
2063 rtx inner_dest = dest;
2064 rtx subdest;
2066 while (GET_CODE (inner_dest) == STRICT_LOW_PART
2067 || GET_CODE (inner_dest) == SUBREG
2068 || GET_CODE (inner_dest) == ZERO_EXTRACT)
2069 inner_dest = XEXP (inner_dest, 0);
2071 /* Check for the case where I3 modifies its output, as discussed
2072 above. We don't want to prevent pseudos from being combined
2073 into the address of a MEM, so only prevent the combination if
2074 i1 or i2 set the same MEM. */
2075 if ((inner_dest != dest &&
2076 (!MEM_P (inner_dest)
2077 || rtx_equal_p (i2dest, inner_dest)
2078 || (i1dest && rtx_equal_p (i1dest, inner_dest))
2079 || (i0dest && rtx_equal_p (i0dest, inner_dest)))
2080 && (reg_overlap_mentioned_p (i2dest, inner_dest)
2081 || (i1dest && reg_overlap_mentioned_p (i1dest, inner_dest))
2082 || (i0dest && reg_overlap_mentioned_p (i0dest, inner_dest))))
2084 /* This is the same test done in can_combine_p except we can't test
2085 all_adjacent; we don't have to, since this instruction will stay
2086 in place, thus we are not considering increasing the lifetime of
2087 INNER_DEST.
2089 Also, if this insn sets a function argument, combining it with
2090 something that might need a spill could clobber a previous
2091 function argument; the all_adjacent test in can_combine_p also
2092 checks this; here, we do a more specific test for this case. */
2094 || (REG_P (inner_dest)
2095 && REGNO (inner_dest) < FIRST_PSEUDO_REGISTER
2096 && (! HARD_REGNO_MODE_OK (REGNO (inner_dest),
2097 GET_MODE (inner_dest))))
2098 || (i1_not_in_src && reg_overlap_mentioned_p (i1dest, src))
2099 || (i0_not_in_src && reg_overlap_mentioned_p (i0dest, src)))
2100 return 0;
2102 /* If DEST is used in I3, it is being killed in this insn, so
2103 record that for later. We have to consider paradoxical
2104 subregs here, since they kill the whole register, but we
2105 ignore partial subregs, STRICT_LOW_PART, etc.
2106 Never add REG_DEAD notes for the FRAME_POINTER_REGNUM or the
2107 STACK_POINTER_REGNUM, since these are always considered to be
2108 live. Similarly for ARG_POINTER_REGNUM if it is fixed. */
2109 subdest = dest;
2110 if (GET_CODE (subdest) == SUBREG
2111 && (GET_MODE_SIZE (GET_MODE (subdest))
2112 >= GET_MODE_SIZE (GET_MODE (SUBREG_REG (subdest)))))
2113 subdest = SUBREG_REG (subdest);
2114 if (pi3dest_killed
2115 && REG_P (subdest)
2116 && reg_referenced_p (subdest, PATTERN (i3))
2117 && REGNO (subdest) != FRAME_POINTER_REGNUM
2118 #if !HARD_FRAME_POINTER_IS_FRAME_POINTER
2119 && REGNO (subdest) != HARD_FRAME_POINTER_REGNUM
2120 #endif
2121 #if ARG_POINTER_REGNUM != FRAME_POINTER_REGNUM
2122 && (REGNO (subdest) != ARG_POINTER_REGNUM
2123 || ! fixed_regs [REGNO (subdest)])
2124 #endif
2125 && REGNO (subdest) != STACK_POINTER_REGNUM)
2127 if (*pi3dest_killed)
2128 return 0;
2130 *pi3dest_killed = subdest;
2134 else if (GET_CODE (x) == PARALLEL)
2136 int i;
2138 for (i = 0; i < XVECLEN (x, 0); i++)
2139 if (! combinable_i3pat (i3, &XVECEXP (x, 0, i), i2dest, i1dest, i0dest,
2140 i1_not_in_src, i0_not_in_src, pi3dest_killed))
2141 return 0;
2144 return 1;
2147 /* Return 1 if X is an arithmetic expression that contains a multiplication
2148 or division. We don't count multiplications by powers of two here. */
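/* Illustrative examples (not part of the original sources):
     (plus:SI (mult:SI (reg:SI 1) (const_int 5)) (reg:SI 2))  -> 1
     (mult:SI (reg:SI 1) (const_int 8))                       -> 0
        (8 is a power of two, so this is really a shift)
     (udiv:SI (reg:SI 1) (reg:SI 2))                          -> 1  */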
2150 static int
2151 contains_muldiv (rtx x)
2153 switch (GET_CODE (x))
2155 case MOD: case DIV: case UMOD: case UDIV:
2156 return 1;
2158 case MULT:
2159 return ! (CONST_INT_P (XEXP (x, 1))
2160 && exact_log2 (UINTVAL (XEXP (x, 1))) >= 0);
2161 default:
2162 if (BINARY_P (x))
2163 return contains_muldiv (XEXP (x, 0))
2164 || contains_muldiv (XEXP (x, 1));
2166 if (UNARY_P (x))
2167 return contains_muldiv (XEXP (x, 0));
2169 return 0;
2173 /* Determine whether INSN can be used in a combination. Return nonzero if
2174 not. This is used in try_combine to detect early some cases where we
2175 can't perform combinations. */
2177 static int
2178 cant_combine_insn_p (rtx insn)
2180 rtx set;
2181 rtx src, dest;
2183 /* If this isn't really an insn, we can't do anything.
2184 This can occur when flow deletes an insn that it has merged into an
2185 auto-increment address. */
2186 if (! INSN_P (insn))
2187 return 1;
2189 /* Never combine loads and stores involving hard regs that are likely
2190 to be spilled. The register allocator can usually handle such
2191 reg-reg moves by tying. If we allow the combiner to make
2192 substitutions of likely-spilled regs, reload might die.
2193 As an exception, we allow combinations involving fixed regs; these are
2194 not available to the register allocator so there's no risk involved. */
2196 set = single_set (insn);
2197 if (! set)
2198 return 0;
2199 src = SET_SRC (set);
2200 dest = SET_DEST (set);
2201 if (GET_CODE (src) == SUBREG)
2202 src = SUBREG_REG (src);
2203 if (GET_CODE (dest) == SUBREG)
2204 dest = SUBREG_REG (dest);
2205 if (REG_P (src) && REG_P (dest)
2206 && ((HARD_REGISTER_P (src)
2207 && ! TEST_HARD_REG_BIT (fixed_reg_set, REGNO (src))
2208 && targetm.class_likely_spilled_p (REGNO_REG_CLASS (REGNO (src))))
2209 || (HARD_REGISTER_P (dest)
2210 && ! TEST_HARD_REG_BIT (fixed_reg_set, REGNO (dest))
2211 && targetm.class_likely_spilled_p (REGNO_REG_CLASS (REGNO (dest))))))
2212 return 1;
2214 return 0;
2217 struct likely_spilled_retval_info
2219 unsigned regno, nregs;
2220 unsigned mask;
2223 /* Called via note_stores by likely_spilled_retval_p. Remove from info->mask
2224 hard registers that are known to be written to / clobbered in full. */
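/* Worked example with invented numbers: if info->regno is 4 and
   info->nregs is 4, the caller starts with mask 0b1111.  A store that
   fully covers a 2-register value starting at hard reg 5 gives
   new_mask = (2U << 1) - 1 = 0b11, shifted left by 5 - 4 = 1 to 0b110,
   so info->mask becomes 0b1111 & ~0b110 = 0b1001: only parts 4 and 7
   of the return value remain possibly live.  */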
2225 static void
2226 likely_spilled_retval_1 (rtx x, const_rtx set, void *data)
2228 struct likely_spilled_retval_info *const info =
2229 (struct likely_spilled_retval_info *) data;
2230 unsigned regno, nregs;
2231 unsigned new_mask;
2233 if (!REG_P (XEXP (set, 0)))
2234 return;
2235 regno = REGNO (x);
2236 if (regno >= info->regno + info->nregs)
2237 return;
2238 nregs = hard_regno_nregs[regno][GET_MODE (x)];
2239 if (regno + nregs <= info->regno)
2240 return;
2241 new_mask = (2U << (nregs - 1)) - 1;
2242 if (regno < info->regno)
2243 new_mask >>= info->regno - regno;
2244 else
2245 new_mask <<= regno - info->regno;
2246 info->mask &= ~new_mask;
2249 /* Return nonzero iff part of the return value is live during INSN, and
2250 it is likely spilled. This can happen when more than one insn is needed
2251 to copy the return value, e.g. when we consider combining into the
2252 second copy insn for a complex value. */
2254 static int
2255 likely_spilled_retval_p (rtx insn)
2257 rtx use = BB_END (this_basic_block);
2258 rtx reg, p;
2259 unsigned regno, nregs;
2260 /* We assume here that no machine mode needs more than
2261 32 hard registers when the value overlaps with a register
2262 for which TARGET_FUNCTION_VALUE_REGNO_P is true. */
2263 unsigned mask;
2264 struct likely_spilled_retval_info info;
2266 if (!NONJUMP_INSN_P (use) || GET_CODE (PATTERN (use)) != USE || insn == use)
2267 return 0;
2268 reg = XEXP (PATTERN (use), 0);
2269 if (!REG_P (reg) || !targetm.calls.function_value_regno_p (REGNO (reg)))
2270 return 0;
2271 regno = REGNO (reg);
2272 nregs = hard_regno_nregs[regno][GET_MODE (reg)];
2273 if (nregs == 1)
2274 return 0;
2275 mask = (2U << (nregs - 1)) - 1;
2277 /* Disregard parts of the return value that are set later. */
2278 info.regno = regno;
2279 info.nregs = nregs;
2280 info.mask = mask;
2281 for (p = PREV_INSN (use); info.mask && p != insn; p = PREV_INSN (p))
2282 if (INSN_P (p))
2283 note_stores (PATTERN (p), likely_spilled_retval_1, &info);
2284 mask = info.mask;
2286 /* Check if any of the (probably) live return value registers is
2287 likely spilled. */
2288 nregs --;
2291 if ((mask & 1 << nregs)
2292 && targetm.class_likely_spilled_p (REGNO_REG_CLASS (regno + nregs)))
2293 return 1;
2294 } while (nregs--);
2295 return 0;
2298 /* Adjust INSN after we made a change to its destination.
2300 Changing the destination can invalidate notes that say something about
2301 the results of the insn and a LOG_LINK pointing to the insn. */
2303 static void
2304 adjust_for_new_dest (rtx insn)
2306 /* For notes, be conservative and simply remove them. */
2307 remove_reg_equal_equiv_notes (insn);
2309 /* The new insn will have a destination that was previously the destination
2310 of an insn just above it. Call distribute_links to make a LOG_LINK from
2311 the next use of that destination. */
2312 distribute_links (alloc_insn_link (insn, NULL));
2314 df_insn_rescan (insn);
2317 /* Return TRUE if combine can reuse reg X in mode MODE.
2318 ADDED_SETS is nonzero if the original set is still required. */
2319 static bool
2320 can_change_dest_mode (rtx x, int added_sets, enum machine_mode mode)
2322 unsigned int regno;
2324 if (!REG_P(x))
2325 return false;
2327 regno = REGNO (x);
2328 /* Allow hard registers if the new mode is legal, and occupies no more
2329 registers than the old mode. */
2330 if (regno < FIRST_PSEUDO_REGISTER)
2331 return (HARD_REGNO_MODE_OK (regno, mode)
2332 && (hard_regno_nregs[regno][GET_MODE (x)]
2333 >= hard_regno_nregs[regno][mode]));
2335 /* Or a pseudo that is only used once. */
2336 return (REG_N_SETS (regno) == 1 && !added_sets
2337 && !REG_USERVAR_P (x));
2341 /* Check whether X, the destination of a set, refers to part of
2342 the register specified by REG. */
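/* Purely illustrative: with REG being (reg:DI 100), both
     (subreg:SI (reg:DI 100) 0)
   and
     (strict_low_part (subreg:SI (reg:DI 100) 0))
   satisfy this predicate, while (reg:DI 100) itself does not.  */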
2344 static bool
2345 reg_subword_p (rtx x, rtx reg)
2347 /* Check that reg is an integer mode register. */
2348 if (!REG_P (reg) || GET_MODE_CLASS (GET_MODE (reg)) != MODE_INT)
2349 return false;
2351 if (GET_CODE (x) == STRICT_LOW_PART
2352 || GET_CODE (x) == ZERO_EXTRACT)
2353 x = XEXP (x, 0);
2355 return GET_CODE (x) == SUBREG
2356 && SUBREG_REG (x) == reg
2357 && GET_MODE_CLASS (GET_MODE (x)) == MODE_INT;
2360 #ifdef AUTO_INC_DEC
2361 /* Replace auto-increment addressing modes with explicit operations to access
2362 the same addresses without modifying the corresponding registers. */
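/* For example (illustrative, assuming SImode is 4 bytes wide):
     (mem:SI (pre_inc:SI (reg:SI 100)))
   is rewritten as
     (mem:SI (plus:SI (reg:SI 100) (const_int 4)))
   while a POST_INC or POST_DEC address simply becomes the unmodified
   register, since the access uses the old value of the register.  */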
2364 static rtx
2365 cleanup_auto_inc_dec (rtx src, enum machine_mode mem_mode)
2367 rtx x = src;
2368 const RTX_CODE code = GET_CODE (x);
2369 int i;
2370 const char *fmt;
2372 switch (code)
2374 case REG:
2375 case CONST_INT:
2376 case CONST_DOUBLE:
2377 case CONST_FIXED:
2378 case CONST_VECTOR:
2379 case SYMBOL_REF:
2380 case CODE_LABEL:
2381 case PC:
2382 case CC0:
2383 case SCRATCH:
2384 /* A SCRATCH must be shared because distinct SCRATCHes represent distinct values. */
2385 return x;
2386 case CLOBBER:
2387 if (REG_P (XEXP (x, 0)) && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER)
2388 return x;
2389 break;
2391 case CONST:
2392 if (shared_const_p (x))
2393 return x;
2394 break;
2396 case MEM:
2397 mem_mode = GET_MODE (x);
2398 break;
2400 case PRE_INC:
2401 case PRE_DEC:
2402 gcc_assert (mem_mode != VOIDmode && mem_mode != BLKmode);
2403 return gen_rtx_PLUS (GET_MODE (x),
2404 cleanup_auto_inc_dec (XEXP (x, 0), mem_mode),
2405 GEN_INT (code == PRE_INC
2406 ? GET_MODE_SIZE (mem_mode)
2407 : -GET_MODE_SIZE (mem_mode)));
2409 case POST_INC:
2410 case POST_DEC:
2411 case PRE_MODIFY:
2412 case POST_MODIFY:
2413 return cleanup_auto_inc_dec (code == PRE_MODIFY
2414 ? XEXP (x, 1) : XEXP (x, 0),
2415 mem_mode);
2417 default:
2418 break;
2421 /* Copy the various flags, fields, and other information. We assume
2422 that all fields need copying, and then clear the fields that should
2423 not be copied. That is the sensible default behavior, and forces
2424 us to explicitly document why we are *not* copying a flag. */
2425 x = shallow_copy_rtx (x);
2427 /* We do not copy the USED flag, which is used as a mark bit during
2428 walks over the RTL. */
2429 RTX_FLAG (x, used) = 0;
2431 /* We do not copy FRAME_RELATED for INSNs. */
2432 if (INSN_P (x))
2433 RTX_FLAG (x, frame_related) = 0;
2435 fmt = GET_RTX_FORMAT (code);
2436 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2437 if (fmt[i] == 'e')
2438 XEXP (x, i) = cleanup_auto_inc_dec (XEXP (x, i), mem_mode);
2439 else if (fmt[i] == 'E' || fmt[i] == 'V')
2441 int j;
2442 XVEC (x, i) = rtvec_alloc (XVECLEN (x, i));
2443 for (j = 0; j < XVECLEN (x, i); j++)
2444 XVECEXP (x, i, j)
2445 = cleanup_auto_inc_dec (XVECEXP (src, i, j), mem_mode);
2448 return x;
2450 #endif
2452 /* Auxiliary data structure for propagate_for_debug_stmt. */
2454 struct rtx_subst_pair
2456 rtx to;
2457 bool adjusted;
2460 /* DATA points to an rtx_subst_pair. Return the value that should be
2461 substituted. */
2463 static rtx
2464 propagate_for_debug_subst (rtx from, const_rtx old_rtx, void *data)
2466 struct rtx_subst_pair *pair = (struct rtx_subst_pair *)data;
2468 if (!rtx_equal_p (from, old_rtx))
2469 return NULL_RTX;
2470 if (!pair->adjusted)
2472 pair->adjusted = true;
2473 #ifdef AUTO_INC_DEC
2474 pair->to = cleanup_auto_inc_dec (pair->to, VOIDmode);
2475 #else
2476 pair->to = copy_rtx (pair->to);
2477 #endif
2478 pair->to = make_compound_operation (pair->to, SET);
2479 return pair->to;
2481 return copy_rtx (pair->to);
2484 /* Replace all the occurrences of DEST with SRC in DEBUG_INSNs between INSN
2485 and LAST, not including INSN, but including LAST. Also stop at the end
2486 of THIS_BASIC_BLOCK. */
2488 static void
2489 propagate_for_debug (rtx insn, rtx last, rtx dest, rtx src)
2491 rtx next, loc, end = NEXT_INSN (BB_END (this_basic_block));
2493 struct rtx_subst_pair p;
2494 p.to = src;
2495 p.adjusted = false;
2497 next = NEXT_INSN (insn);
2498 last = NEXT_INSN (last);
2499 while (next != last && next != end)
2501 insn = next;
2502 next = NEXT_INSN (insn);
2503 if (DEBUG_INSN_P (insn))
2505 loc = simplify_replace_fn_rtx (INSN_VAR_LOCATION_LOC (insn),
2506 dest, propagate_for_debug_subst, &p);
2507 if (loc == INSN_VAR_LOCATION_LOC (insn))
2508 continue;
2509 INSN_VAR_LOCATION_LOC (insn) = loc;
2510 df_insn_rescan (insn);
2515 /* Delete the unconditional jump INSN and adjust the CFG correspondingly.
2516 Note that the INSN should be deleted *after* removing dead edges, so
2517 that the kept edge is the fallthrough edge for a (set (pc) (pc))
2518 but not for a (set (pc) (label_ref FOO)). */
2520 static void
2521 update_cfg_for_uncondjump (rtx insn)
2523 basic_block bb = BLOCK_FOR_INSN (insn);
2524 gcc_assert (BB_END (bb) == insn);
2526 purge_dead_edges (bb);
2528 delete_insn (insn);
2529 if (EDGE_COUNT (bb->succs) == 1)
2531 rtx insn;
2533 single_succ_edge (bb)->flags |= EDGE_FALLTHRU;
2535 /* Remove barriers from the footer if there are any. */
2536 for (insn = BB_FOOTER (bb); insn; insn = NEXT_INSN (insn))
2537 if (BARRIER_P (insn))
2539 if (PREV_INSN (insn))
2540 NEXT_INSN (PREV_INSN (insn)) = NEXT_INSN (insn);
2541 else
2542 BB_FOOTER (bb) = NEXT_INSN (insn);
2543 if (NEXT_INSN (insn))
2544 PREV_INSN (NEXT_INSN (insn)) = PREV_INSN (insn);
2546 else if (LABEL_P (insn))
2547 break;
2551 /* Try to combine the insns I0, I1 and I2 into I3.
2552 Here I0, I1 and I2 appear earlier than I3.
2553 I0 and I1 can be zero; then we combine just I2 into I3, or I1 and I2 into I3.
2556 If we are combining more than two insns and the resulting insn is not
2557 recognized, try splitting it into two insns. If that happens, I2 and I3
2558 are retained and I1/I0 are pseudo-deleted by turning them into a NOTE.
2559 Otherwise, I0, I1 and I2 are pseudo-deleted.
2561 Return 0 if the combination does not work. Then nothing is changed.
2562 If we did the combination, return the insn at which combine should
2563 resume scanning.
2565 Set NEW_DIRECT_JUMP_P to a nonzero value if try_combine creates a
2566 new direct jump instruction.
2568 LAST_COMBINED_INSN is either I3, or some insn after I3 that has
2569 been I3 passed to an earlier try_combine within the same basic
2570 block. */
2572 static rtx
2573 try_combine (rtx i3, rtx i2, rtx i1, rtx i0, int *new_direct_jump_p,
2574 rtx last_combined_insn)
2576 /* New patterns for I3 and I2, respectively. */
2577 rtx newpat, newi2pat = 0;
2578 rtvec newpat_vec_with_clobbers = 0;
2579 int substed_i2 = 0, substed_i1 = 0, substed_i0 = 0;
2580 /* Indicates need to preserve SET in I0, I1 or I2 in I3 if it is not
2581 dead. */
2582 int added_sets_0, added_sets_1, added_sets_2;
2583 /* Total number of SETs to put into I3. */
2584 int total_sets;
2585 /* Nonzero if I2's or I1's body now appears in I3. */
2586 int i2_is_used = 0, i1_is_used = 0;
2587 /* INSN_CODEs for new I3, new I2, and user of condition code. */
2588 int insn_code_number, i2_code_number = 0, other_code_number = 0;
2589 /* Contains I3 if the destination of I3 is used in its source, which means
2590 that the old life of I3 is being killed. If that usage is placed into
2591 I2 and not in I3, a REG_DEAD note must be made. */
2592 rtx i3dest_killed = 0;
2593 /* SET_DEST and SET_SRC of I2, I1 and I0. */
2594 rtx i2dest = 0, i2src = 0, i1dest = 0, i1src = 0, i0dest = 0, i0src = 0;
2595 /* Copy of SET_SRC of I1 and I0, if needed. */
2596 rtx i1src_copy = 0, i0src_copy = 0, i0src_copy2 = 0;
2597 /* Set if I2DEST was reused as a scratch register. */
2598 bool i2scratch = false;
2599 /* The PATTERNs of I0, I1, and I2, or a copy of them in certain cases. */
2600 rtx i0pat = 0, i1pat = 0, i2pat = 0;
2601 /* Indicates if I2DEST or I1DEST is in I2SRC or I1SRC. */
2602 int i2dest_in_i2src = 0, i1dest_in_i1src = 0, i2dest_in_i1src = 0;
2603 int i0dest_in_i0src = 0, i1dest_in_i0src = 0, i2dest_in_i0src = 0;
2604 int i2dest_killed = 0, i1dest_killed = 0, i0dest_killed = 0;
2605 int i1_feeds_i2_n = 0, i0_feeds_i2_n = 0, i0_feeds_i1_n = 0;
2606 /* Notes that must be added to REG_NOTES in I3 and I2. */
2607 rtx new_i3_notes, new_i2_notes;
2608 /* Notes that we substituted I3 into I2 instead of the normal case. */
2609 int i3_subst_into_i2 = 0;
2610 /* Notes that I1, I2 or I3 is a MULT operation. */
2611 int have_mult = 0;
2612 int swap_i2i3 = 0;
2613 int changed_i3_dest = 0;
2615 int maxreg;
2616 rtx temp;
2617 struct insn_link *link;
2618 rtx other_pat = 0;
2619 rtx new_other_notes;
2620 int i;
2622 /* Only try four-insn combinations when there's high likelihood of
2623 success. Look for simple insns, such as loads of constants or
2624 binary operations involving a constant. */
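/* Scoring sketch (illustrative): a constant load such as
     (set (reg:SI 100) (const_int 5))
   counts as two "good" insns by itself, a binary operation with a
   constant operand such as
     (set (reg:SI 100) (plus:SI (reg:SI 101) (const_int 4)))
   counts as one, and shifts are tallied separately; the four-insn
   attempt proceeds only with at least two good insns or two shifts.  */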
2625 if (i0)
2627 int i;
2628 int ngood = 0;
2629 int nshift = 0;
2631 if (!flag_expensive_optimizations)
2632 return 0;
2634 for (i = 0; i < 4; i++)
2636 rtx insn = i == 0 ? i0 : i == 1 ? i1 : i == 2 ? i2 : i3;
2637 rtx set = single_set (insn);
2638 rtx src;
2639 if (!set)
2640 continue;
2641 src = SET_SRC (set);
2642 if (CONSTANT_P (src))
2644 ngood += 2;
2645 break;
2647 else if (BINARY_P (src) && CONSTANT_P (XEXP (src, 1)))
2648 ngood++;
2649 else if (GET_CODE (src) == ASHIFT || GET_CODE (src) == ASHIFTRT
2650 || GET_CODE (src) == LSHIFTRT)
2651 nshift++;
2653 if (ngood < 2 && nshift < 2)
2654 return 0;
2657 /* Exit early if one of the insns involved can't be used for
2658 combinations. */
2659 if (cant_combine_insn_p (i3)
2660 || cant_combine_insn_p (i2)
2661 || (i1 && cant_combine_insn_p (i1))
2662 || (i0 && cant_combine_insn_p (i0))
2663 || likely_spilled_retval_p (i3))
2664 return 0;
2666 combine_attempts++;
2667 undobuf.other_insn = 0;
2669 /* Reset the hard register usage information. */
2670 CLEAR_HARD_REG_SET (newpat_used_regs);
2672 if (dump_file && (dump_flags & TDF_DETAILS))
2674 if (i0)
2675 fprintf (dump_file, "\nTrying %d, %d, %d -> %d:\n",
2676 INSN_UID (i0), INSN_UID (i1), INSN_UID (i2), INSN_UID (i3));
2677 else if (i1)
2678 fprintf (dump_file, "\nTrying %d, %d -> %d:\n",
2679 INSN_UID (i1), INSN_UID (i2), INSN_UID (i3));
2680 else
2681 fprintf (dump_file, "\nTrying %d -> %d:\n",
2682 INSN_UID (i2), INSN_UID (i3));
2685 /* If multiple insns feed into one of I2 or I3, they can be in any
2686 order. To simplify the code below, reorder them in sequence. */
2687 if (i0 && DF_INSN_LUID (i0) > DF_INSN_LUID (i2))
2688 temp = i2, i2 = i0, i0 = temp;
2689 if (i0 && DF_INSN_LUID (i0) > DF_INSN_LUID (i1))
2690 temp = i1, i1 = i0, i0 = temp;
2691 if (i1 && DF_INSN_LUID (i1) > DF_INSN_LUID (i2))
2692 temp = i1, i1 = i2, i2 = temp;
2694 added_links_insn = 0;
2696 /* First check for one important special case that the code below will
2697 not handle. Namely, the case where I1 is zero, I2 is a PARALLEL
2698 and I3 is a SET whose SET_SRC is a SET_DEST in I2. In that case,
2699 we may be able to replace that destination with the destination of I3.
2700 This occurs in the common code where we compute both a quotient and
2701 remainder into a structure, in which case we want to do the computation
2702 directly into the structure to avoid register-register copies.
2704 Note that this case handles both multiple sets in I2 and also cases
2705 where I2 has a number of CLOBBERs inside the PARALLEL.
2707 We make very conservative checks below and only try to handle the
2708 most common cases of this. For example, we only handle the case
2709 where I2 and I3 are adjacent to avoid making difficult register
2710 usage tests. */
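/* Sketch of the situation (register numbers invented):
     I2: (parallel [(set (reg:SI 100) (div:SI (reg:SI 102) (reg:SI 103)))
                    (set (reg:SI 101) (mod:SI (reg:SI 102) (reg:SI 103)))])
     I3: (set (mem:SI (reg:SI 110)) (reg:SI 101))
   If reg 101 dies in I3 and the other checks below pass, the second SET
   of I2 can store directly into the memory destination of I3.  */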
2712 if (i1 == 0 && NONJUMP_INSN_P (i3) && GET_CODE (PATTERN (i3)) == SET
2713 && REG_P (SET_SRC (PATTERN (i3)))
2714 && REGNO (SET_SRC (PATTERN (i3))) >= FIRST_PSEUDO_REGISTER
2715 && find_reg_note (i3, REG_DEAD, SET_SRC (PATTERN (i3)))
2716 && GET_CODE (PATTERN (i2)) == PARALLEL
2717 && ! side_effects_p (SET_DEST (PATTERN (i3)))
2718 /* If the dest of I3 is a ZERO_EXTRACT or STRICT_LOW_PART, the code
2719 below would need to check what is inside (and reg_overlap_mentioned_p
2720 doesn't support those codes anyway). Don't allow those destinations;
2721 the resulting insn isn't likely to be recognized anyway. */
2722 && GET_CODE (SET_DEST (PATTERN (i3))) != ZERO_EXTRACT
2723 && GET_CODE (SET_DEST (PATTERN (i3))) != STRICT_LOW_PART
2724 && ! reg_overlap_mentioned_p (SET_SRC (PATTERN (i3)),
2725 SET_DEST (PATTERN (i3)))
2726 && next_active_insn (i2) == i3)
2728 rtx p2 = PATTERN (i2);
2730 /* Make sure that the destination of I3,
2731 which we are going to substitute into one output of I2,
2732 is not used within another output of I2. We must avoid making this:
2733 (parallel [(set (mem (reg 69)) ...)
2734 (set (reg 69) ...)])
2735 which is not well-defined as to order of actions.
2736 (Besides, reload can't handle output reloads for this.)
2738 The problem can also happen if the dest of I3 is a memory ref,
2739 if another dest in I2 is an indirect memory ref. */
2740 for (i = 0; i < XVECLEN (p2, 0); i++)
2741 if ((GET_CODE (XVECEXP (p2, 0, i)) == SET
2742 || GET_CODE (XVECEXP (p2, 0, i)) == CLOBBER)
2743 && reg_overlap_mentioned_p (SET_DEST (PATTERN (i3)),
2744 SET_DEST (XVECEXP (p2, 0, i))))
2745 break;
2747 if (i == XVECLEN (p2, 0))
2748 for (i = 0; i < XVECLEN (p2, 0); i++)
2749 if (GET_CODE (XVECEXP (p2, 0, i)) == SET
2750 && SET_DEST (XVECEXP (p2, 0, i)) == SET_SRC (PATTERN (i3)))
2752 combine_merges++;
2754 subst_insn = i3;
2755 subst_low_luid = DF_INSN_LUID (i2);
2757 added_sets_2 = added_sets_1 = added_sets_0 = 0;
2758 i2src = SET_SRC (XVECEXP (p2, 0, i));
2759 i2dest = SET_DEST (XVECEXP (p2, 0, i));
2760 i2dest_killed = dead_or_set_p (i2, i2dest);
2762 /* Replace the dest in I2 with our dest and make the resulting
2763 insn the new pattern for I3. Then skip to where we validate
2764 the pattern. Everything was set up above. */
2765 SUBST (SET_DEST (XVECEXP (p2, 0, i)), SET_DEST (PATTERN (i3)));
2766 newpat = p2;
2767 i3_subst_into_i2 = 1;
2768 goto validate_replacement;
2772 /* If I2 is setting a pseudo to a constant and I3 is setting some
2773 sub-part of it to another constant, merge them by making a new
2774 constant. */
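/* E.g. (illustrative):
     I2: (set (reg:DI 100) (const_int 0))
     I3: (set (subreg:SI (reg:DI 100) 0) (const_int 5))
   If the SUBREG is the low part of reg 100, the two can be merged into
     (set (reg:DI 100) (const_int 5))
   by masking the new constant into the right bits of the old one.  */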
2775 if (i1 == 0
2776 && (temp = single_set (i2)) != 0
2777 && (CONST_INT_P (SET_SRC (temp))
2778 || GET_CODE (SET_SRC (temp)) == CONST_DOUBLE)
2779 && GET_CODE (PATTERN (i3)) == SET
2780 && (CONST_INT_P (SET_SRC (PATTERN (i3)))
2781 || GET_CODE (SET_SRC (PATTERN (i3))) == CONST_DOUBLE)
2782 && reg_subword_p (SET_DEST (PATTERN (i3)), SET_DEST (temp)))
2784 rtx dest = SET_DEST (PATTERN (i3));
2785 int offset = -1;
2786 int width = 0;
2788 if (GET_CODE (dest) == ZERO_EXTRACT)
2790 if (CONST_INT_P (XEXP (dest, 1))
2791 && CONST_INT_P (XEXP (dest, 2)))
2793 width = INTVAL (XEXP (dest, 1));
2794 offset = INTVAL (XEXP (dest, 2));
2795 dest = XEXP (dest, 0);
2796 if (BITS_BIG_ENDIAN)
2797 offset = GET_MODE_PRECISION (GET_MODE (dest)) - width - offset;
2800 else
2802 if (GET_CODE (dest) == STRICT_LOW_PART)
2803 dest = XEXP (dest, 0);
2804 width = GET_MODE_PRECISION (GET_MODE (dest));
2805 offset = 0;
2808 if (offset >= 0)
2810 /* If this is the low part, we're done. */
2811 if (subreg_lowpart_p (dest))
2813 /* Handle the case where inner is twice the size of outer. */
2814 else if (GET_MODE_PRECISION (GET_MODE (SET_DEST (temp)))
2815 == 2 * GET_MODE_PRECISION (GET_MODE (dest)))
2816 offset += GET_MODE_PRECISION (GET_MODE (dest));
2817 /* Otherwise give up for now. */
2818 else
2819 offset = -1;
2822 if (offset >= 0
2823 && (GET_MODE_PRECISION (GET_MODE (SET_DEST (temp)))
2824 <= HOST_BITS_PER_DOUBLE_INT))
2826 double_int m, o, i;
2827 rtx inner = SET_SRC (PATTERN (i3));
2828 rtx outer = SET_SRC (temp);
2830 o = rtx_to_double_int (outer);
2831 i = rtx_to_double_int (inner);
2833 m = double_int_mask (width);
2834 i = double_int_and (i, m);
2835 m = double_int_lshift (m, offset, HOST_BITS_PER_DOUBLE_INT, false);
2836 i = double_int_lshift (i, offset, HOST_BITS_PER_DOUBLE_INT, false);
2837 o = double_int_ior (double_int_and_not (o, m), i);
2839 combine_merges++;
2840 subst_insn = i3;
2841 subst_low_luid = DF_INSN_LUID (i2);
2842 added_sets_2 = added_sets_1 = added_sets_0 = 0;
2843 i2dest = SET_DEST (temp);
2844 i2dest_killed = dead_or_set_p (i2, i2dest);
2846 /* Replace the source in I2 with the new constant and make the
2847 resulting insn the new pattern for I3. Then skip to where we
2848 validate the pattern. Everything was set up above. */
2849 SUBST (SET_SRC (temp),
2850 immed_double_int_const (o, GET_MODE (SET_DEST (temp))));
2852 newpat = PATTERN (i2);
2854 /* The dest of I3 has been replaced with the dest of I2. */
2855 changed_i3_dest = 1;
2856 goto validate_replacement;
2860 #ifndef HAVE_cc0
2861 /* If we have no I1 and I2 looks like:
2862 (parallel [(set (reg:CC X) (compare:CC OP (const_int 0)))
2863 (set Y OP)])
2864 make up a dummy I1 that is
2865 (set Y OP)
2866 and change I2 to be
2867 (set (reg:CC X) (compare:CC Y (const_int 0)))
2869 (We can ignore any trailing CLOBBERs.)
2871 This undoes a previous combination and allows us to match a branch-and-
2872 decrement insn. */
2874 if (i1 == 0 && GET_CODE (PATTERN (i2)) == PARALLEL
2875 && XVECLEN (PATTERN (i2), 0) >= 2
2876 && GET_CODE (XVECEXP (PATTERN (i2), 0, 0)) == SET
2877 && (GET_MODE_CLASS (GET_MODE (SET_DEST (XVECEXP (PATTERN (i2), 0, 0))))
2878 == MODE_CC)
2879 && GET_CODE (SET_SRC (XVECEXP (PATTERN (i2), 0, 0))) == COMPARE
2880 && XEXP (SET_SRC (XVECEXP (PATTERN (i2), 0, 0)), 1) == const0_rtx
2881 && GET_CODE (XVECEXP (PATTERN (i2), 0, 1)) == SET
2882 && REG_P (SET_DEST (XVECEXP (PATTERN (i2), 0, 1)))
2883 && rtx_equal_p (XEXP (SET_SRC (XVECEXP (PATTERN (i2), 0, 0)), 0),
2884 SET_SRC (XVECEXP (PATTERN (i2), 0, 1))))
2886 for (i = XVECLEN (PATTERN (i2), 0) - 1; i >= 2; i--)
2887 if (GET_CODE (XVECEXP (PATTERN (i2), 0, i)) != CLOBBER)
2888 break;
2890 if (i == 1)
2892 /* We make I1 with the same INSN_UID as I2. This gives it
2893 the same DF_INSN_LUID for value tracking. Our fake I1 will
2894 never appear in the insn stream so giving it the same INSN_UID
2895 as I2 will not cause a problem. */
2897 i1 = gen_rtx_INSN (VOIDmode, INSN_UID (i2), NULL_RTX, i2,
2898 BLOCK_FOR_INSN (i2), XVECEXP (PATTERN (i2), 0, 1),
2899 INSN_LOCATOR (i2), -1, NULL_RTX);
2901 SUBST (PATTERN (i2), XVECEXP (PATTERN (i2), 0, 0));
2902 SUBST (XEXP (SET_SRC (PATTERN (i2)), 0),
2903 SET_DEST (PATTERN (i1)));
2904 SUBST_LINK (LOG_LINKS (i2), alloc_insn_link (i1, LOG_LINKS (i2)));
2907 #endif
2909 /* Verify that I2 and I1 are valid for combining. */
2910 if (! can_combine_p (i2, i3, i0, i1, NULL_RTX, NULL_RTX, &i2dest, &i2src)
2911 || (i1 && ! can_combine_p (i1, i3, i0, NULL_RTX, i2, NULL_RTX,
2912 &i1dest, &i1src))
2913 || (i0 && ! can_combine_p (i0, i3, NULL_RTX, NULL_RTX, i1, i2,
2914 &i0dest, &i0src)))
2916 undo_all ();
2917 return 0;
2920 /* Record whether I2DEST is used in I2SRC and similarly for the other
2921 cases. Knowing this will help in register status updating below. */
2922 i2dest_in_i2src = reg_overlap_mentioned_p (i2dest, i2src);
2923 i1dest_in_i1src = i1 && reg_overlap_mentioned_p (i1dest, i1src);
2924 i2dest_in_i1src = i1 && reg_overlap_mentioned_p (i2dest, i1src);
2925 i0dest_in_i0src = i0 && reg_overlap_mentioned_p (i0dest, i0src);
2926 i1dest_in_i0src = i0 && reg_overlap_mentioned_p (i1dest, i0src);
2927 i2dest_in_i0src = i0 && reg_overlap_mentioned_p (i2dest, i0src);
2928 i2dest_killed = dead_or_set_p (i2, i2dest);
2929 i1dest_killed = i1 && dead_or_set_p (i1, i1dest);
2930 i0dest_killed = i0 && dead_or_set_p (i0, i0dest);
2932 /* For the earlier insns, determine which of the subsequent ones they
2933 feed. */
2934 i1_feeds_i2_n = i1 && insn_a_feeds_b (i1, i2);
2935 i0_feeds_i1_n = i0 && insn_a_feeds_b (i0, i1);
2936 i0_feeds_i2_n = (i0 && (!i0_feeds_i1_n ? insn_a_feeds_b (i0, i2)
2937 : (!reg_overlap_mentioned_p (i1dest, i0dest)
2938 && reg_overlap_mentioned_p (i0dest, i2src))));
2940 /* Ensure that I3's pattern can be the destination of combines. */
2941 if (! combinable_i3pat (i3, &PATTERN (i3), i2dest, i1dest, i0dest,
2942 i1 && i2dest_in_i1src && !i1_feeds_i2_n,
2943 i0 && ((i2dest_in_i0src && !i0_feeds_i2_n)
2944 || (i1dest_in_i0src && !i0_feeds_i1_n)),
2945 &i3dest_killed))
2947 undo_all ();
2948 return 0;
2951 /* See if any of the insns is a MULT operation. Unless one is, we will
2952 reject a combination that is, since it must be slower. Be conservative
2953 here. */
2954 if (GET_CODE (i2src) == MULT
2955 || (i1 != 0 && GET_CODE (i1src) == MULT)
2956 || (i0 != 0 && GET_CODE (i0src) == MULT)
2957 || (GET_CODE (PATTERN (i3)) == SET
2958 && GET_CODE (SET_SRC (PATTERN (i3))) == MULT))
2959 have_mult = 1;
2961 /* If I3 has an inc, then give up if I1 or I2 uses the reg that is inc'd.
2962 We used to do this EXCEPT in one case: I3 has a post-inc in an
2963 output operand. However, that exception can give rise to insns like
2964 mov r3,(r3)+
2965 which is a famous insn on the PDP-11 where the value of r3 used as the
2966 source was model-dependent. Avoid this sort of thing. */
2968 #if 0
2969 if (!(GET_CODE (PATTERN (i3)) == SET
2970 && REG_P (SET_SRC (PATTERN (i3)))
2971 && MEM_P (SET_DEST (PATTERN (i3)))
2972 && (GET_CODE (XEXP (SET_DEST (PATTERN (i3)), 0)) == POST_INC
2973 || GET_CODE (XEXP (SET_DEST (PATTERN (i3)), 0)) == POST_DEC)))
2974 /* It's not the exception. */
2975 #endif
2976 #ifdef AUTO_INC_DEC
2978 rtx link;
2979 for (link = REG_NOTES (i3); link; link = XEXP (link, 1))
2980 if (REG_NOTE_KIND (link) == REG_INC
2981 && (reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (i2))
2982 || (i1 != 0
2983 && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (i1)))))
2985 undo_all ();
2986 return 0;
2989 #endif
2991 /* See if the SETs in I1 or I2 need to be kept around in the merged
2992 instruction: whenever the value set there is still needed past I3.
2993 For the SETs in I2, this is easy: we see if I2DEST dies or is set in I3.
2995 For the SET in I1, we have two cases: If I1 and I2 independently
2996 feed into I3, the set in I1 needs to be kept around if I1DEST dies
2997 or is set in I3. Otherwise (if I1 feeds I2 which feeds I3), the set
2998 in I1 needs to be kept around unless I1DEST dies or is set in either
2999 I2 or I3. The same consideration applies to I0. */
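/* For instance (illustrative): if I1 feeds I2 which feeds I3, and
   I1DEST is a pseudo that is also used by some insn after I3, then
   I1DEST neither dies nor is set in I2 or I3, so added_sets_1 below is
   nonzero and the SET from I1 must be kept in the merged pattern.  */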
3001 added_sets_2 = !dead_or_set_p (i3, i2dest);
3003 if (i1)
3004 added_sets_1 = !(dead_or_set_p (i3, i1dest)
3005 || (i1_feeds_i2_n && dead_or_set_p (i2, i1dest)));
3006 else
3007 added_sets_1 = 0;
3009 if (i0)
3010 added_sets_0 = !(dead_or_set_p (i3, i0dest)
3011 || (i0_feeds_i2_n && dead_or_set_p (i2, i0dest))
3012 || (i0_feeds_i1_n && dead_or_set_p (i1, i0dest)));
3013 else
3014 added_sets_0 = 0;
3016 /* We are about to copy insns for the case where they need to be kept
3017 around. Check that they can be copied in the merged instruction. */
3019 if (targetm.cannot_copy_insn_p
3020 && ((added_sets_2 && targetm.cannot_copy_insn_p (i2))
3021 || (i1 && added_sets_1 && targetm.cannot_copy_insn_p (i1))
3022 || (i0 && added_sets_0 && targetm.cannot_copy_insn_p (i0))))
3024 undo_all ();
3025 return 0;
3028 /* If the set in I2 needs to be kept around, we must make a copy of
3029 PATTERN (I2), so that when we substitute I1SRC for I1DEST in
3030 PATTERN (I2), we are only substituting for the original I1DEST, not into
3031 an already-substituted copy. This also prevents making self-referential
3032 rtx. If I2 is a PARALLEL, we just need the piece that assigns I2SRC to
3033 I2DEST. */
3035 if (added_sets_2)
3037 if (GET_CODE (PATTERN (i2)) == PARALLEL)
3038 i2pat = gen_rtx_SET (VOIDmode, i2dest, copy_rtx (i2src));
3039 else
3040 i2pat = copy_rtx (PATTERN (i2));
3043 if (added_sets_1)
3045 if (GET_CODE (PATTERN (i1)) == PARALLEL)
3046 i1pat = gen_rtx_SET (VOIDmode, i1dest, copy_rtx (i1src));
3047 else
3048 i1pat = copy_rtx (PATTERN (i1));
3051 if (added_sets_0)
3053 if (GET_CODE (PATTERN (i0)) == PARALLEL)
3054 i0pat = gen_rtx_SET (VOIDmode, i0dest, copy_rtx (i0src));
3055 else
3056 i0pat = copy_rtx (PATTERN (i0));
3059 combine_merges++;
3061 /* Substitute in the latest insn for the regs set by the earlier ones. */
3063 maxreg = max_reg_num ();
3065 subst_insn = i3;
3067 #ifndef HAVE_cc0
3068 /* Many machines that don't use CC0 have insns that can both perform an
3069 arithmetic operation and set the condition code. These operations will
3070 be represented as a PARALLEL with the first element of the vector
3071 being a COMPARE of an arithmetic operation with the constant zero.
3072 The second element of the vector will set some pseudo to the result
3073 of the same arithmetic operation. If we simplify the COMPARE, we won't
3074 match such a pattern and so will generate an extra insn. Here we test
3075 for this case, where both the comparison and the operation result are
3076 needed, and make the PARALLEL by just replacing I2DEST in I3SRC with
3077 I2SRC. Later we will make the PARALLEL that contains I2. */
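/* Sketch (illustrative, register numbers invented):
     I2: (set (reg:SI 100) (plus:SI (reg:SI 101) (const_int -1)))
     I3: (set (reg:CC 17) (compare:CC (reg:SI 100) (const_int 0)))
   with reg 100 still needed after I3.  The code below rewrites I3's
   source as (compare:CC (plus:SI (reg:SI 101) (const_int -1))
   (const_int 0)); the PARALLEL that also re-sets reg 100 is built
   later from I2PAT.  */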
3079 if (i1 == 0 && added_sets_2 && GET_CODE (PATTERN (i3)) == SET
3080 && GET_CODE (SET_SRC (PATTERN (i3))) == COMPARE
3081 && CONST_INT_P (XEXP (SET_SRC (PATTERN (i3)), 1))
3082 && rtx_equal_p (XEXP (SET_SRC (PATTERN (i3)), 0), i2dest))
3084 rtx newpat_dest;
3085 rtx *cc_use_loc = NULL, cc_use_insn = NULL_RTX;
3086 rtx op0 = i2src, op1 = XEXP (SET_SRC (PATTERN (i3)), 1);
3087 enum machine_mode compare_mode, orig_compare_mode;
3088 enum rtx_code compare_code = UNKNOWN, orig_compare_code = UNKNOWN;
3090 newpat = PATTERN (i3);
3091 newpat_dest = SET_DEST (newpat);
3092 compare_mode = orig_compare_mode = GET_MODE (newpat_dest);
3094 if (undobuf.other_insn == 0
3095 && (cc_use_loc = find_single_use (SET_DEST (newpat), i3,
3096 &cc_use_insn)))
3098 compare_code = orig_compare_code = GET_CODE (*cc_use_loc);
3099 compare_code = simplify_compare_const (compare_code,
3100 op0, &op1);
3101 #ifdef CANONICALIZE_COMPARISON
3102 CANONICALIZE_COMPARISON (compare_code, op0, op1);
3103 #endif
3106 /* Do the rest only if op1 is const0_rtx, which may be the
3107 result of simplification. */
3108 if (op1 == const0_rtx)
3110 /* If a single use of the CC is found, prepare to modify it
3111 when SELECT_CC_MODE returns a new CC-class mode, or when
3112 the above simplify_compare_const() returned a new comparison
3113 operator. undobuf.other_insn is assigned the CC use insn
3114 when modifying it. */
3115 if (cc_use_loc)
3117 #ifdef SELECT_CC_MODE
3118 enum machine_mode new_mode
3119 = SELECT_CC_MODE (compare_code, op0, op1);
3120 if (new_mode != orig_compare_mode
3121 && can_change_dest_mode (SET_DEST (newpat),
3122 added_sets_2, new_mode))
3124 unsigned int regno = REGNO (newpat_dest);
3125 compare_mode = new_mode;
3126 if (regno < FIRST_PSEUDO_REGISTER)
3127 newpat_dest = gen_rtx_REG (compare_mode, regno);
3128 else
3130 SUBST_MODE (regno_reg_rtx[regno], compare_mode);
3131 newpat_dest = regno_reg_rtx[regno];
3134 #endif
3135 /* Cases for modifying the CC-using comparison. */
3136 if (compare_code != orig_compare_code
3137 /* ??? Do we need to verify the zero rtx? */
3138 && XEXP (*cc_use_loc, 1) == const0_rtx)
3140 /* Replace cc_use_loc with entire new RTX. */
3141 SUBST (*cc_use_loc,
3142 gen_rtx_fmt_ee (compare_code, compare_mode,
3143 newpat_dest, const0_rtx));
3144 undobuf.other_insn = cc_use_insn;
3146 else if (compare_mode != orig_compare_mode)
3148 /* Just replace the CC reg with a new mode. */
3149 SUBST (XEXP (*cc_use_loc, 0), newpat_dest);
3150 undobuf.other_insn = cc_use_insn;
3154 /* Now we modify the current newpat:
3155 First, SET_DEST(newpat) is updated if the CC mode has been
3156 altered. For targets without SELECT_CC_MODE, this should be
3157 optimized away. */
3158 if (compare_mode != orig_compare_mode)
3159 SUBST (SET_DEST (newpat), newpat_dest);
3160 /* This is always done to propagate i2src into newpat. */
3161 SUBST (SET_SRC (newpat),
3162 gen_rtx_COMPARE (compare_mode, op0, op1));
3163 /* Create new version of i2pat if needed; the below PARALLEL
3164 creation needs this to work correctly. */
3165 if (! rtx_equal_p (i2src, op0))
3166 i2pat = gen_rtx_SET (VOIDmode, i2dest, op0);
3167 i2_is_used = 1;
3170 #endif
3172 if (i2_is_used == 0)
3174 /* It is possible that the source of I2 or I1 may be performing
3175 an unneeded operation, such as a ZERO_EXTEND of something
3176 that is known to have the high part zero. Handle that case
3177 by letting subst look at the inner insns.
3179 Another way to do this would be to have a function that tries
3180 to simplify a single insn instead of merging two or more
3181 insns. We don't do this because of the potential of infinite
3182 loops and because of the potential extra memory required.
3183 However, doing it the way we are is a bit of a kludge and
3184 doesn't catch all cases.
3186 But only do this if -fexpensive-optimizations since it slows
3187 things down and doesn't usually win.
3189 This is not done in the COMPARE case above because the
3190 unmodified I2PAT is used in the PARALLEL and so a pattern
3191 with a modified I2SRC would not match. */
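/* E.g. (illustrative): if I2SRC is
     (zero_extend:DI (subreg:SI (reg:DI 100) 0))
   and the recorded nonzero bits show the high half of reg 100 is
   already zero, the simplification below may reduce it to (reg:DI 100),
   which can let the merged pattern match an existing insn.  */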
3193 if (flag_expensive_optimizations)
3195 /* Pass pc_rtx so no substitutions are done, just
3196 simplifications. */
3197 if (i1)
3199 subst_low_luid = DF_INSN_LUID (i1);
3200 i1src = subst (i1src, pc_rtx, pc_rtx, 0, 0, 0);
3203 subst_low_luid = DF_INSN_LUID (i2);
3204 i2src = subst (i2src, pc_rtx, pc_rtx, 0, 0, 0);
3207 n_occurrences = 0; /* `subst' counts here */
3208 subst_low_luid = DF_INSN_LUID (i2);
3210 /* If I1 feeds into I2 and I1DEST is in I1SRC, we need to make a unique
3211 copy of I2SRC each time we substitute it, in order to avoid creating
3212 self-referential RTL when we will be substituting I1SRC for I1DEST
3213 later. Likewise if I0 feeds into I2, either directly or indirectly
3214 through I1, and I0DEST is in I0SRC. */
3215 newpat = subst (PATTERN (i3), i2dest, i2src, 0, 0,
3216 (i1_feeds_i2_n && i1dest_in_i1src)
3217 || ((i0_feeds_i2_n || (i0_feeds_i1_n && i1_feeds_i2_n))
3218 && i0dest_in_i0src));
3219 substed_i2 = 1;
3221 /* Record whether I2's body now appears within I3's body. */
3222 i2_is_used = n_occurrences;
3225 /* If we already got a failure, don't try to do more. Otherwise, try to
3226 substitute I1 if we have it. */
3228 if (i1 && GET_CODE (newpat) != CLOBBER)
3230 /* Check that an autoincrement side-effect on I1 has not been lost.
3231 This happens if I1DEST is mentioned in I2 and dies there, and
3232 has disappeared from the new pattern. */
3233 if ((FIND_REG_INC_NOTE (i1, NULL_RTX) != 0
3234 && i1_feeds_i2_n
3235 && dead_or_set_p (i2, i1dest)
3236 && !reg_overlap_mentioned_p (i1dest, newpat))
3237 /* Before we can do this substitution, we must redo the test done
3238 above (see detailed comments there) that ensures I1DEST isn't
3239 mentioned in any SETs in NEWPAT that are field assignments. */
3240 || !combinable_i3pat (NULL_RTX, &newpat, i1dest, NULL_RTX, NULL_RTX,
3241 0, 0, 0))
3243 undo_all ();
3244 return 0;
3247 n_occurrences = 0;
3248 subst_low_luid = DF_INSN_LUID (i1);
3250 /* If the following substitution will modify I1SRC, make a copy of it
3251 for the case where it is substituted for I1DEST in I2PAT later. */
3252 if (added_sets_2 && i1_feeds_i2_n)
3253 i1src_copy = copy_rtx (i1src);
3255 /* If I0 feeds into I1 and I0DEST is in I0SRC, we need to make a unique
3256 copy of I1SRC each time we substitute it, in order to avoid creating
3257 self-referential RTL when we will be substituting I0SRC for I0DEST
3258 later. */
3259 newpat = subst (newpat, i1dest, i1src, 0, 0,
3260 i0_feeds_i1_n && i0dest_in_i0src);
3261 substed_i1 = 1;
3263 /* Record whether I1's body now appears within I3's body. */
3264 i1_is_used = n_occurrences;
3267 /* Likewise for I0 if we have it. */
3269 if (i0 && GET_CODE (newpat) != CLOBBER)
3271 if ((FIND_REG_INC_NOTE (i0, NULL_RTX) != 0
3272 && ((i0_feeds_i2_n && dead_or_set_p (i2, i0dest))
3273 || (i0_feeds_i1_n && dead_or_set_p (i1, i0dest)))
3274 && !reg_overlap_mentioned_p (i0dest, newpat))
3275 || !combinable_i3pat (NULL_RTX, &newpat, i0dest, NULL_RTX, NULL_RTX,
3276 0, 0, 0))
3278 undo_all ();
3279 return 0;
3282 /* If the following substitution will modify I0SRC, make a copy of it
3283 for the case where it is substituted for I0DEST in I1PAT later. */
3284 if (added_sets_1 && i0_feeds_i1_n)
3285 i0src_copy = copy_rtx (i0src);
3286 /* And a copy for I0DEST in I2PAT substitution. */
3287 if (added_sets_2 && ((i0_feeds_i1_n && i1_feeds_i2_n)
3288 || (i0_feeds_i2_n)))
3289 i0src_copy2 = copy_rtx (i0src);
3291 n_occurrences = 0;
3292 subst_low_luid = DF_INSN_LUID (i0);
3293 newpat = subst (newpat, i0dest, i0src, 0, 0, 0);
3294 substed_i0 = 1;
3297 /* Fail if an autoincrement side-effect has been duplicated. Be careful
3298 to count all the ways that I2SRC and I1SRC can be used. */
3299 if ((FIND_REG_INC_NOTE (i2, NULL_RTX) != 0
3300 && i2_is_used + added_sets_2 > 1)
3301 || (i1 != 0 && FIND_REG_INC_NOTE (i1, NULL_RTX) != 0
3302 && (i1_is_used + added_sets_1 + (added_sets_2 && i1_feeds_i2_n)
3303 > 1))
3304 || (i0 != 0 && FIND_REG_INC_NOTE (i0, NULL_RTX) != 0
3305 && (n_occurrences + added_sets_0
3306 + (added_sets_1 && i0_feeds_i1_n)
3307 + (added_sets_2 && i0_feeds_i2_n)
3308 > 1))
3309 /* Fail if we tried to make a new register. */
3310 || max_reg_num () != maxreg
3311 /* Fail if we couldn't do something and have a CLOBBER. */
3312 || GET_CODE (newpat) == CLOBBER
3313 /* Fail if this new pattern is a MULT and we didn't have one before
3314 at the outer level. */
3315 || (GET_CODE (newpat) == SET && GET_CODE (SET_SRC (newpat)) == MULT
3316 && ! have_mult))
3318 undo_all ();
3319 return 0;
3322 /* If the actions of the earlier insns must be kept
3323 in addition to substituting them into the latest one,
3324 we must make a new PARALLEL for the latest insn
3325 to hold the additional SETs.
3327 if (added_sets_0 || added_sets_1 || added_sets_2)
3329 int extra_sets = added_sets_0 + added_sets_1 + added_sets_2;
3330 combine_extras++;
3332 if (GET_CODE (newpat) == PARALLEL)
3334 rtvec old = XVEC (newpat, 0);
3335 total_sets = XVECLEN (newpat, 0) + extra_sets;
3336 newpat = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (total_sets));
3337 memcpy (XVEC (newpat, 0)->elem, &old->elem[0],
3338 sizeof (old->elem[0]) * old->num_elem);
3340 else
3342 rtx old = newpat;
3343 total_sets = 1 + extra_sets;
3344 newpat = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (total_sets));
3345 XVECEXP (newpat, 0, 0) = old;
3348 if (added_sets_0)
3349 XVECEXP (newpat, 0, --total_sets) = i0pat;
3351 if (added_sets_1)
3353 rtx t = i1pat;
3354 if (i0_feeds_i1_n)
3355 t = subst (t, i0dest, i0src_copy ? i0src_copy : i0src, 0, 0, 0);
3357 XVECEXP (newpat, 0, --total_sets) = t;
3359 if (added_sets_2)
3361 rtx t = i2pat;
3362 if (i1_feeds_i2_n)
3363 t = subst (t, i1dest, i1src_copy ? i1src_copy : i1src, 0, 0,
3364 i0_feeds_i1_n && i0dest_in_i0src);
3365 if ((i0_feeds_i1_n && i1_feeds_i2_n) || i0_feeds_i2_n)
3366 t = subst (t, i0dest, i0src_copy2 ? i0src_copy2 : i0src, 0, 0, 0);
3368 XVECEXP (newpat, 0, --total_sets) = t;
3372 validate_replacement:
3374 /* Note which hard regs this insn has as inputs. */
3375 mark_used_regs_combine (newpat);
3377 /* If recog_for_combine fails, it strips existing clobbers. If we'll
3378 consider splitting this pattern, we might need these clobbers. */
3379 if (i1 && GET_CODE (newpat) == PARALLEL
3380 && GET_CODE (XVECEXP (newpat, 0, XVECLEN (newpat, 0) - 1)) == CLOBBER)
3382 int len = XVECLEN (newpat, 0);
3384 newpat_vec_with_clobbers = rtvec_alloc (len);
3385 for (i = 0; i < len; i++)
3386 RTVEC_ELT (newpat_vec_with_clobbers, i) = XVECEXP (newpat, 0, i);
3389 /* Is the result of combination a valid instruction? */
3390 insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
3392 /* If the result isn't valid, see if it is a PARALLEL of two SETs where
3393 the second SET's destination is a register that is unused and isn't
3394 marked as an instruction that might trap in an EH region. In that case,
3395 we just need the first SET. This can occur when simplifying a divmod
3396 insn. We *must* test for this case here because the code below that
3397 splits two independent SETs doesn't handle this case correctly when it
3398 updates the register status.
3400 It's pointless doing this if we originally had two sets, one from
3401 i3, and one from i2. Combining then splitting the parallel results
3402 in the original i2 again plus an invalid insn (which we delete).
3403 The net effect is only to move instructions around, which makes
3404 debug info less accurate.
3406 Also check the case where the first SET's destination is unused.
3407 That would not cause incorrect code, but does cause an unneeded
3408 insn to remain. */
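/* Sketch (illustrative): NEWPAT might be
     (parallel [(set (reg:SI 100) (div:SI (reg:SI 102) (reg:SI 103)))
                (set (reg:SI 101) (mod:SI (reg:SI 102) (reg:SI 103)))])
   with a REG_UNUSED note in I3 for reg 101; keeping only the first SET
   leaves a plain division that the target is likely to recognize.  */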
3410 if (insn_code_number < 0
3411 && !(added_sets_2 && i1 == 0)
3412 && GET_CODE (newpat) == PARALLEL
3413 && XVECLEN (newpat, 0) == 2
3414 && GET_CODE (XVECEXP (newpat, 0, 0)) == SET
3415 && GET_CODE (XVECEXP (newpat, 0, 1)) == SET
3416 && asm_noperands (newpat) < 0)
3418 rtx set0 = XVECEXP (newpat, 0, 0);
3419 rtx set1 = XVECEXP (newpat, 0, 1);
3421 if (((REG_P (SET_DEST (set1))
3422 && find_reg_note (i3, REG_UNUSED, SET_DEST (set1)))
3423 || (GET_CODE (SET_DEST (set1)) == SUBREG
3424 && find_reg_note (i3, REG_UNUSED, SUBREG_REG (SET_DEST (set1)))))
3425 && insn_nothrow_p (i3)
3426 && !side_effects_p (SET_SRC (set1)))
3428 newpat = set0;
3429 insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
3432 else if (((REG_P (SET_DEST (set0))
3433 && find_reg_note (i3, REG_UNUSED, SET_DEST (set0)))
3434 || (GET_CODE (SET_DEST (set0)) == SUBREG
3435 && find_reg_note (i3, REG_UNUSED,
3436 SUBREG_REG (SET_DEST (set0)))))
3437 && insn_nothrow_p (i3)
3438 && !side_effects_p (SET_SRC (set0)))
3440 newpat = set1;
3441 insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
3443 if (insn_code_number >= 0)
3444 changed_i3_dest = 1;
3448 /* If we were combining three insns and the result is a simple SET
3449 with no ASM_OPERANDS that wasn't recognized, try to split it into two
3450 insns. There are two ways to do this. It can be split using a
3451 machine-specific method (like when you have an addition of a large
3452 constant) or by combine in the function find_split_point. */
3454 if (i1 && insn_code_number < 0 && GET_CODE (newpat) == SET
3455 && asm_noperands (newpat) < 0)
3457 rtx parallel, m_split, *split;
3459 /* See if the MD file can split NEWPAT. If it can't, see if letting it
3460 use I2DEST as a scratch register will help. In the latter case,
3461 convert I2DEST to the mode of the source of NEWPAT if we can. */
3463 m_split = combine_split_insns (newpat, i3);
3465 /* We can only use I2DEST as a scratch reg if it doesn't overlap any
3466 inputs of NEWPAT. */
3468 /* ??? If I2DEST is not safe, and I1DEST exists, then it would be
3469 possible to try that as a scratch reg. This would require adding
3470 more code to make it work though. */
3472 if (m_split == 0 && ! reg_overlap_mentioned_p (i2dest, newpat))
3474 enum machine_mode new_mode = GET_MODE (SET_DEST (newpat));
3476 /* First try to split using the original register as a
3477 scratch register. */
3478 parallel = gen_rtx_PARALLEL (VOIDmode,
3479 gen_rtvec (2, newpat,
3480 gen_rtx_CLOBBER (VOIDmode,
3481 i2dest)));
3482 m_split = combine_split_insns (parallel, i3);
3484 /* If that didn't work, try changing the mode of I2DEST if
3485 we can. */
3486 if (m_split == 0
3487 && new_mode != GET_MODE (i2dest)
3488 && new_mode != VOIDmode
3489 && can_change_dest_mode (i2dest, added_sets_2, new_mode))
3491 enum machine_mode old_mode = GET_MODE (i2dest);
3492 rtx ni2dest;
3494 if (REGNO (i2dest) < FIRST_PSEUDO_REGISTER)
3495 ni2dest = gen_rtx_REG (new_mode, REGNO (i2dest));
3496 else
3498 SUBST_MODE (regno_reg_rtx[REGNO (i2dest)], new_mode);
3499 ni2dest = regno_reg_rtx[REGNO (i2dest)];
3502 parallel = (gen_rtx_PARALLEL
3503 (VOIDmode,
3504 gen_rtvec (2, newpat,
3505 gen_rtx_CLOBBER (VOIDmode,
3506 ni2dest))));
3507 m_split = combine_split_insns (parallel, i3);
3509 if (m_split == 0
3510 && REGNO (i2dest) >= FIRST_PSEUDO_REGISTER)
3512 struct undo *buf;
3514 adjust_reg_mode (regno_reg_rtx[REGNO (i2dest)], old_mode);
3515 buf = undobuf.undos;
3516 undobuf.undos = buf->next;
3517 buf->next = undobuf.frees;
3518 undobuf.frees = buf;
3522 i2scratch = m_split != 0;
3525 /* If recog_for_combine has discarded clobbers, try to use them
3526 again for the split. */
3527 if (m_split == 0 && newpat_vec_with_clobbers)
3529 parallel = gen_rtx_PARALLEL (VOIDmode, newpat_vec_with_clobbers);
3530 m_split = combine_split_insns (parallel, i3);
3533 if (m_split && NEXT_INSN (m_split) == NULL_RTX)
3535 m_split = PATTERN (m_split);
3536 insn_code_number = recog_for_combine (&m_split, i3, &new_i3_notes);
3537 if (insn_code_number >= 0)
3538 newpat = m_split;
3540 else if (m_split && NEXT_INSN (NEXT_INSN (m_split)) == NULL_RTX
3541 && (next_nonnote_nondebug_insn (i2) == i3
3542 || ! use_crosses_set_p (PATTERN (m_split), DF_INSN_LUID (i2))))
3544 rtx i2set, i3set;
3545 rtx newi3pat = PATTERN (NEXT_INSN (m_split));
3546 newi2pat = PATTERN (m_split);
3548 i3set = single_set (NEXT_INSN (m_split));
3549 i2set = single_set (m_split);
3551 i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);
3553 /* If I2 or I3 has multiple SETs, we won't know how to track
3554 register status, so don't use these insns. If I2's destination
3555 is used between I2 and I3, we also can't use these insns. */
3557 if (i2_code_number >= 0 && i2set && i3set
3558 && (next_nonnote_nondebug_insn (i2) == i3
3559 || ! reg_used_between_p (SET_DEST (i2set), i2, i3)))
3560 insn_code_number = recog_for_combine (&newi3pat, i3,
3561 &new_i3_notes);
3562 if (insn_code_number >= 0)
3563 newpat = newi3pat;
3565 /* It is possible that both insns now set the destination of I3.
3566 If so, we must show an extra use of it. */
3568 if (insn_code_number >= 0)
3570 rtx new_i3_dest = SET_DEST (i3set);
3571 rtx new_i2_dest = SET_DEST (i2set);
3573 while (GET_CODE (new_i3_dest) == ZERO_EXTRACT
3574 || GET_CODE (new_i3_dest) == STRICT_LOW_PART
3575 || GET_CODE (new_i3_dest) == SUBREG)
3576 new_i3_dest = XEXP (new_i3_dest, 0);
3578 while (GET_CODE (new_i2_dest) == ZERO_EXTRACT
3579 || GET_CODE (new_i2_dest) == STRICT_LOW_PART
3580 || GET_CODE (new_i2_dest) == SUBREG)
3581 new_i2_dest = XEXP (new_i2_dest, 0);
3583 if (REG_P (new_i3_dest)
3584 && REG_P (new_i2_dest)
3585 && REGNO (new_i3_dest) == REGNO (new_i2_dest))
3586 INC_REG_N_SETS (REGNO (new_i2_dest), 1);
3590 /* If we can split it and use I2DEST, go ahead and see if that
3591 helps things be recognized. Verify that none of the registers
3592 are set between I2 and I3. */
3593 if (insn_code_number < 0
3594 && (split = find_split_point (&newpat, i3, false)) != 0
3595 #ifdef HAVE_cc0
3596 && REG_P (i2dest)
3597 #endif
3598 /* We need I2DEST in the proper mode. If it is a hard register
3599 or the only use of a pseudo, we can change its mode.
3600 Make sure we don't change a hard register to have a mode that
3601 isn't valid for it, or change the number of registers. */
3602 && (GET_MODE (*split) == GET_MODE (i2dest)
3603 || GET_MODE (*split) == VOIDmode
3604 || can_change_dest_mode (i2dest, added_sets_2,
3605 GET_MODE (*split)))
3606 && (next_nonnote_nondebug_insn (i2) == i3
3607 || ! use_crosses_set_p (*split, DF_INSN_LUID (i2)))
3608 /* We can't overwrite I2DEST if its value is still used by
3609 NEWPAT. */
3610 && ! reg_referenced_p (i2dest, newpat))
3612 rtx newdest = i2dest;
3613 enum rtx_code split_code = GET_CODE (*split);
3614 enum machine_mode split_mode = GET_MODE (*split);
3615 bool subst_done = false;
3616 newi2pat = NULL_RTX;
3618 i2scratch = true;
3620 /* *SPLIT may be part of I2SRC, so make sure we have the
3621 original expression around for later debug processing.
3622 We should not need I2SRC any more in other cases. */
3623 if (MAY_HAVE_DEBUG_INSNS)
3624 i2src = copy_rtx (i2src);
3625 else
3626 i2src = NULL;
3628 /* Get NEWDEST as a register in the proper mode. We have already
3629 validated that we can do this. */
3630 if (GET_MODE (i2dest) != split_mode && split_mode != VOIDmode)
3632 if (REGNO (i2dest) < FIRST_PSEUDO_REGISTER)
3633 newdest = gen_rtx_REG (split_mode, REGNO (i2dest));
3634 else
3636 SUBST_MODE (regno_reg_rtx[REGNO (i2dest)], split_mode);
3637 newdest = regno_reg_rtx[REGNO (i2dest)];
3641 /* If *SPLIT is a (mult FOO (const_int pow2)), convert it to
3642 an ASHIFT. This can occur if it was inside a PLUS and hence
3643 appeared to be a memory address. This is a kludge. */
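/* For example, (mult FOO (const_int 8)) is rewritten here as
   (ashift FOO (const_int 3)).  */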
3644 if (split_code == MULT
3645 && CONST_INT_P (XEXP (*split, 1))
3646 && INTVAL (XEXP (*split, 1)) > 0
3647 && (i = exact_log2 (UINTVAL (XEXP (*split, 1)))) >= 0)
3649 SUBST (*split, gen_rtx_ASHIFT (split_mode,
3650 XEXP (*split, 0), GEN_INT (i)));
3651 /* Update split_code because we may not have a multiply
3652 anymore. */
3653 split_code = GET_CODE (*split);
3656 #ifdef INSN_SCHEDULING
3657 /* If *SPLIT is a paradoxical SUBREG, when we split it, it should
3658 be written as a ZERO_EXTEND. */
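/* For example, (subreg:SI (mem:QI ADDR)) would be rewritten as
   (zero_extend:SI (mem:QI ADDR)), or as a SIGN_EXTEND per the
   LOAD_EXTEND_OP check below.  */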
3659 if (split_code == SUBREG && MEM_P (SUBREG_REG (*split)))
3661 #ifdef LOAD_EXTEND_OP
3662 /* Or as a SIGN_EXTEND if LOAD_EXTEND_OP says that that's
3663 what it really is. */
3664 if (LOAD_EXTEND_OP (GET_MODE (SUBREG_REG (*split)))
3665 == SIGN_EXTEND)
3666 SUBST (*split, gen_rtx_SIGN_EXTEND (split_mode,
3667 SUBREG_REG (*split)));
3668 else
3669 #endif
3670 SUBST (*split, gen_rtx_ZERO_EXTEND (split_mode,
3671 SUBREG_REG (*split)));
3673 #endif
3675 /* Attempt to split binary operators using arithmetic identities. */
3676 if (BINARY_P (SET_SRC (newpat))
3677 && split_mode == GET_MODE (SET_SRC (newpat))
3678 && ! side_effects_p (SET_SRC (newpat)))
3680 rtx setsrc = SET_SRC (newpat);
3681 enum machine_mode mode = GET_MODE (setsrc);
3682 enum rtx_code code = GET_CODE (setsrc);
3683 rtx src_op0 = XEXP (setsrc, 0);
3684 rtx src_op1 = XEXP (setsrc, 1);
3686 /* Split "X = Y op Y" as "Z = Y; X = Z op Z". */
3687 if (rtx_equal_p (src_op0, src_op1))
3689 newi2pat = gen_rtx_SET (VOIDmode, newdest, src_op0);
3690 SUBST (XEXP (setsrc, 0), newdest);
3691 SUBST (XEXP (setsrc, 1), newdest);
3692 subst_done = true;
3694 /* Split "((P op Q) op R) op S" where op is PLUS or MULT. */
3695 else if ((code == PLUS || code == MULT)
3696 && GET_CODE (src_op0) == code
3697 && GET_CODE (XEXP (src_op0, 0)) == code
3698 && (INTEGRAL_MODE_P (mode)
3699 || (FLOAT_MODE_P (mode)
3700 && flag_unsafe_math_optimizations)))
3702 rtx p = XEXP (XEXP (src_op0, 0), 0);
3703 rtx q = XEXP (XEXP (src_op0, 0), 1);
3704 rtx r = XEXP (src_op0, 1);
3705 rtx s = src_op1;
3707 /* Split both "((X op Y) op X) op Y" and
3708 "((X op Y) op Y) op X" as "T op T" where T is
3709 "X op Y". */
3710 if ((rtx_equal_p (p,r) && rtx_equal_p (q,s))
3711 || (rtx_equal_p (p,s) && rtx_equal_p (q,r)))
3713 newi2pat = gen_rtx_SET (VOIDmode, newdest,
3714 XEXP (src_op0, 0));
3715 SUBST (XEXP (setsrc, 0), newdest);
3716 SUBST (XEXP (setsrc, 1), newdest);
3717 subst_done = true;
3719 /* Split "((X op X) op Y) op Y" as "T op T" where
3720 T is "X op Y". */
3721 else if (rtx_equal_p (p,q) && rtx_equal_p (r,s))
3723 rtx tmp = simplify_gen_binary (code, mode, p, r);
3724 newi2pat = gen_rtx_SET (VOIDmode, newdest, tmp);
3725 SUBST (XEXP (setsrc, 0), newdest);
3726 SUBST (XEXP (setsrc, 1), newdest);
3727 subst_done = true;
3732 if (!subst_done)
3734 newi2pat = gen_rtx_SET (VOIDmode, newdest, *split);
3735 SUBST (*split, newdest);
3738 i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);
3740 /* recog_for_combine might have added CLOBBERs to newi2pat.
3741 Make sure NEWPAT does not depend on the clobbered regs. */
3742 if (GET_CODE (newi2pat) == PARALLEL)
3743 for (i = XVECLEN (newi2pat, 0) - 1; i >= 0; i--)
3744 if (GET_CODE (XVECEXP (newi2pat, 0, i)) == CLOBBER)
3746 rtx reg = XEXP (XVECEXP (newi2pat, 0, i), 0);
3747 if (reg_overlap_mentioned_p (reg, newpat))
3749 undo_all ();
3750 return 0;
3754 /* If the split point was a MULT and we didn't have one before,
3755 don't use one now. */
3756 if (i2_code_number >= 0 && ! (split_code == MULT && ! have_mult))
3757 insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
3761 /* Check for a case where we loaded from memory in a narrow mode and
3762 then sign extended it, but we need both registers. In that case,
3763 we have a PARALLEL with both loads from the same memory location.
3764 We can split this into a load from memory followed by a register-register
3765 copy. This saves at least one insn, more if register allocation can
3766 eliminate the copy.
3768 We cannot do this if the destination of the first assignment is a
3769 condition code register or cc0. We eliminate this case by making sure
3770 the SET_DEST and SET_SRC have the same mode.
3772 We cannot do this if the destination of the second assignment is
3773 a register that we have already assumed is zero-extended. Similarly
3774 for a SUBREG of such a register. */
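/* For example, (parallel [(set A (sign_extend:SI (mem:HI X)))
                           (set B (mem:HI X))])
   is split below into a new I2 doing (set A (sign_extend:SI (mem:HI X)))
   and an I3 doing (set B (lowpart of A)), a register-register copy.  */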
3776 else if (i1 && insn_code_number < 0 && asm_noperands (newpat) < 0
3777 && GET_CODE (newpat) == PARALLEL
3778 && XVECLEN (newpat, 0) == 2
3779 && GET_CODE (XVECEXP (newpat, 0, 0)) == SET
3780 && GET_CODE (SET_SRC (XVECEXP (newpat, 0, 0))) == SIGN_EXTEND
3781 && (GET_MODE (SET_DEST (XVECEXP (newpat, 0, 0)))
3782 == GET_MODE (SET_SRC (XVECEXP (newpat, 0, 0))))
3783 && GET_CODE (XVECEXP (newpat, 0, 1)) == SET
3784 && rtx_equal_p (SET_SRC (XVECEXP (newpat, 0, 1)),
3785 XEXP (SET_SRC (XVECEXP (newpat, 0, 0)), 0))
3786 && ! use_crosses_set_p (SET_SRC (XVECEXP (newpat, 0, 1)),
3787 DF_INSN_LUID (i2))
3788 && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != ZERO_EXTRACT
3789 && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != STRICT_LOW_PART
3790 && ! (temp = SET_DEST (XVECEXP (newpat, 0, 1)),
3791 (REG_P (temp)
3792 && VEC_index (reg_stat_type, reg_stat,
3793 REGNO (temp))->nonzero_bits != 0
3794 && GET_MODE_PRECISION (GET_MODE (temp)) < BITS_PER_WORD
3795 && GET_MODE_PRECISION (GET_MODE (temp)) < HOST_BITS_PER_INT
3796 && (VEC_index (reg_stat_type, reg_stat,
3797 REGNO (temp))->nonzero_bits
3798 != GET_MODE_MASK (word_mode))))
3799 && ! (GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) == SUBREG
3800 && (temp = SUBREG_REG (SET_DEST (XVECEXP (newpat, 0, 1))),
3801 (REG_P (temp)
3802 && VEC_index (reg_stat_type, reg_stat,
3803 REGNO (temp))->nonzero_bits != 0
3804 && GET_MODE_PRECISION (GET_MODE (temp)) < BITS_PER_WORD
3805 && GET_MODE_PRECISION (GET_MODE (temp)) < HOST_BITS_PER_INT
3806 && (VEC_index (reg_stat_type, reg_stat,
3807 REGNO (temp))->nonzero_bits
3808 != GET_MODE_MASK (word_mode)))))
3809 && ! reg_overlap_mentioned_p (SET_DEST (XVECEXP (newpat, 0, 1)),
3810 SET_SRC (XVECEXP (newpat, 0, 1)))
3811 && ! find_reg_note (i3, REG_UNUSED,
3812 SET_DEST (XVECEXP (newpat, 0, 0))))
3814 rtx ni2dest;
3816 newi2pat = XVECEXP (newpat, 0, 0);
3817 ni2dest = SET_DEST (XVECEXP (newpat, 0, 0));
3818 newpat = XVECEXP (newpat, 0, 1);
3819 SUBST (SET_SRC (newpat),
3820 gen_lowpart (GET_MODE (SET_SRC (newpat)), ni2dest));
3821 i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);
3823 if (i2_code_number >= 0)
3824 insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
3826 if (insn_code_number >= 0)
3827 swap_i2i3 = 1;
3830 /* Similarly, check for a case where we have a PARALLEL of two independent
3831 SETs but we started with three insns. In this case, we can do the sets
3832 as two separate insns. This case occurs when some SET allows two
3833 other insns to combine, but the destination of that SET is still live. */
3835 else if (i1 && insn_code_number < 0 && asm_noperands (newpat) < 0
3836 && GET_CODE (newpat) == PARALLEL
3837 && XVECLEN (newpat, 0) == 2
3838 && GET_CODE (XVECEXP (newpat, 0, 0)) == SET
3839 && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 0))) != ZERO_EXTRACT
3840 && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 0))) != STRICT_LOW_PART
3841 && GET_CODE (XVECEXP (newpat, 0, 1)) == SET
3842 && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != ZERO_EXTRACT
3843 && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != STRICT_LOW_PART
3844 && ! reg_referenced_p (SET_DEST (XVECEXP (newpat, 0, 1)),
3845 XVECEXP (newpat, 0, 0))
3846 && ! reg_referenced_p (SET_DEST (XVECEXP (newpat, 0, 0)),
3847 XVECEXP (newpat, 0, 1))
3848 && ! (contains_muldiv (SET_SRC (XVECEXP (newpat, 0, 0)))
3849 && contains_muldiv (SET_SRC (XVECEXP (newpat, 0, 1)))))
3851 /* Normally, it doesn't matter which of the two is done first,
3852 but the one that references cc0 can't be the second, and
3853 one which uses any regs/memory set in between i2 and i3 can't
3854 be first. */
3855 if (!use_crosses_set_p (SET_SRC (XVECEXP (newpat, 0, 1)),
3856 DF_INSN_LUID (i2))
3857 #ifdef HAVE_cc0
3858 && !reg_referenced_p (cc0_rtx, XVECEXP (newpat, 0, 0))
3859 #endif
3862 newi2pat = XVECEXP (newpat, 0, 1);
3863 newpat = XVECEXP (newpat, 0, 0);
3865 else if (!use_crosses_set_p (SET_SRC (XVECEXP (newpat, 0, 0)),
3866 DF_INSN_LUID (i2))
3867 #ifdef HAVE_cc0
3868 && !reg_referenced_p (cc0_rtx, XVECEXP (newpat, 0, 1))
3869 #endif
3872 newi2pat = XVECEXP (newpat, 0, 0);
3873 newpat = XVECEXP (newpat, 0, 1);
3875 else
3877 undo_all ();
3878 return 0;
3881 i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);
3883 if (i2_code_number >= 0)
3885 /* recog_for_combine might have added CLOBBERs to newi2pat.
3886 Make sure NEWPAT does not depend on the clobbered regs. */
3887 if (GET_CODE (newi2pat) == PARALLEL)
3889 for (i = XVECLEN (newi2pat, 0) - 1; i >= 0; i--)
3890 if (GET_CODE (XVECEXP (newi2pat, 0, i)) == CLOBBER)
3892 rtx reg = XEXP (XVECEXP (newi2pat, 0, i), 0);
3893 if (reg_overlap_mentioned_p (reg, newpat))
3895 undo_all ();
3896 return 0;
3901 insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
3905 /* If it still isn't recognized, fail and change things back the way they
3906 were. */
3907 if ((insn_code_number < 0
3908 /* Is the result a reasonable ASM_OPERANDS? */
3909 && (! check_asm_operands (newpat) || added_sets_1 || added_sets_2)))
3911 undo_all ();
3912 return 0;
3915 /* If we had to change another insn, make sure it is valid also. */
3916 if (undobuf.other_insn)
3918 CLEAR_HARD_REG_SET (newpat_used_regs);
3920 other_pat = PATTERN (undobuf.other_insn);
3921 other_code_number = recog_for_combine (&other_pat, undobuf.other_insn,
3922 &new_other_notes);
3924 if (other_code_number < 0 && ! check_asm_operands (other_pat))
3926 undo_all ();
3927 return 0;
3931 #ifdef HAVE_cc0
3932 /* If I2 is the CC0 setter and I3 is the CC0 user then check whether
3933 they are adjacent to each other or not. */
3935 rtx p = prev_nonnote_insn (i3);
3936 if (p && p != i2 && NONJUMP_INSN_P (p) && newi2pat
3937 && sets_cc0_p (newi2pat))
3939 undo_all ();
3940 return 0;
3943 #endif
3945 /* Only allow this combination if insn_rtx_costs reports that the
3946 replacement instructions are cheaper than the originals. */
3947 if (!combine_validate_cost (i0, i1, i2, i3, newpat, newi2pat, other_pat))
3949 undo_all ();
3950 return 0;
3953 if (MAY_HAVE_DEBUG_INSNS)
3955 struct undo *undo;
3957 for (undo = undobuf.undos; undo; undo = undo->next)
3958 if (undo->kind == UNDO_MODE)
3960 rtx reg = *undo->where.r;
3961 enum machine_mode new_mode = GET_MODE (reg);
3962 enum machine_mode old_mode = undo->old_contents.m;
3964 /* Temporarily revert mode back. */
3965 adjust_reg_mode (reg, old_mode);
3967 if (reg == i2dest && i2scratch)
3969 /* If we used i2dest as a scratch register with a
3970 different mode, substitute it for the original
3971 i2src while its original mode is temporarily
3972 restored, and then clear i2scratch so that we don't
3973 do it again later. */
3974 propagate_for_debug (i2, last_combined_insn, reg, i2src);
3975 i2scratch = false;
3976 /* Put back the new mode. */
3977 adjust_reg_mode (reg, new_mode);
3979 else
3981 rtx tempreg = gen_raw_REG (old_mode, REGNO (reg));
3982 rtx first, last;
3984 if (reg == i2dest)
3986 first = i2;
3987 last = last_combined_insn;
3989 else
3991 first = i3;
3992 last = undobuf.other_insn;
3993 gcc_assert (last);
3994 if (DF_INSN_LUID (last)
3995 < DF_INSN_LUID (last_combined_insn))
3996 last = last_combined_insn;
3999 /* We're dealing with a reg that changed mode but not
4000 meaning, so we want to turn it into a subreg for
4001 the new mode. However, because of REG sharing and
4002 because its mode had already changed, we have to do
4003 it in two steps. First, replace any debug uses of
4004 reg, with its original mode temporarily restored,
4005 with this copy we have created; then, replace the
4006 copy with the SUBREG of the original shared reg,
4007 once again changed to the new mode. */
4008 propagate_for_debug (first, last, reg, tempreg);
4009 adjust_reg_mode (reg, new_mode);
4010 propagate_for_debug (first, last, tempreg,
4011 lowpart_subreg (old_mode, reg, new_mode));
4016 /* If we will be able to accept this, we have made a
4017 change to the destination of I3. This requires us to
4018 do a few adjustments. */
4020 if (changed_i3_dest)
4022 PATTERN (i3) = newpat;
4023 adjust_for_new_dest (i3);
4026 /* We now know that we can do this combination. Merge the insns and
4027 update the status of registers and LOG_LINKS. */
4029 if (undobuf.other_insn)
4031 rtx note, next;
4033 PATTERN (undobuf.other_insn) = other_pat;
4035 /* If any of the notes in OTHER_INSN were REG_UNUSED, ensure that they
4036 are still valid. Then add any non-duplicate notes added by
4037 recog_for_combine. */
4038 for (note = REG_NOTES (undobuf.other_insn); note; note = next)
4040 next = XEXP (note, 1);
4042 if (REG_NOTE_KIND (note) == REG_UNUSED
4043 && ! reg_set_p (XEXP (note, 0), PATTERN (undobuf.other_insn)))
4044 remove_note (undobuf.other_insn, note);
4047 distribute_notes (new_other_notes, undobuf.other_insn,
4048 undobuf.other_insn, NULL_RTX, NULL_RTX, NULL_RTX,
4049 NULL_RTX);
4052 if (swap_i2i3)
4054 rtx insn;
4055 struct insn_link *link;
4056 rtx ni2dest;
4058 /* I3 now uses what used to be its destination and which is now
4059 I2's destination. This requires us to do a few adjustments. */
4060 PATTERN (i3) = newpat;
4061 adjust_for_new_dest (i3);
4063 /* We need a LOG_LINK from I3 to I2. But we used to have one,
4064 so we still will.
4066 However, some later insn might be using I2's dest and have
4067 a LOG_LINK pointing at I3. We must remove this link.
4068 The simplest way to remove the link is to point it at I1,
4069 which we know will be a NOTE. */
4071 /* newi2pat is usually a SET here; however, recog_for_combine might
4072 have added some clobbers. */
4073 if (GET_CODE (newi2pat) == PARALLEL)
4074 ni2dest = SET_DEST (XVECEXP (newi2pat, 0, 0));
4075 else
4076 ni2dest = SET_DEST (newi2pat);
4078 for (insn = NEXT_INSN (i3);
4079 insn && (this_basic_block->next_bb == EXIT_BLOCK_PTR
4080 || insn != BB_HEAD (this_basic_block->next_bb));
4081 insn = NEXT_INSN (insn))
4083 if (INSN_P (insn) && reg_referenced_p (ni2dest, PATTERN (insn)))
4085 FOR_EACH_LOG_LINK (link, insn)
4086 if (link->insn == i3)
4087 link->insn = i1;
4089 break;
4095 rtx i3notes, i2notes, i1notes = 0, i0notes = 0;
4096 struct insn_link *i3links, *i2links, *i1links = 0, *i0links = 0;
4097 rtx midnotes = 0;
4098 int from_luid;
4099 /* Compute which registers we expect to eliminate. newi2pat may be setting
4100 either i3dest or i2dest, so we must check it. Also, i1dest may be the
4101 same as i3dest, in which case newi2pat may be setting i1dest. */
4102 rtx elim_i2 = ((newi2pat && reg_set_p (i2dest, newi2pat))
4103 || i2dest_in_i2src || i2dest_in_i1src || i2dest_in_i0src
4104 || !i2dest_killed
4105 ? 0 : i2dest);
4106 rtx elim_i1 = (i1 == 0 || i1dest_in_i1src || i1dest_in_i0src
4107 || (newi2pat && reg_set_p (i1dest, newi2pat))
4108 || !i1dest_killed
4109 ? 0 : i1dest);
4110 rtx elim_i0 = (i0 == 0 || i0dest_in_i0src
4111 || (newi2pat && reg_set_p (i0dest, newi2pat))
4112 || !i0dest_killed
4113 ? 0 : i0dest);
4115 /* Get the old REG_NOTES and LOG_LINKS from all our insns and
4116 clear them. */
4117 i3notes = REG_NOTES (i3), i3links = LOG_LINKS (i3);
4118 i2notes = REG_NOTES (i2), i2links = LOG_LINKS (i2);
4119 if (i1)
4120 i1notes = REG_NOTES (i1), i1links = LOG_LINKS (i1);
4121 if (i0)
4122 i0notes = REG_NOTES (i0), i0links = LOG_LINKS (i0);
4124 /* Ensure that we do not have something that should not be shared but
4125 occurs multiple times in the new insns. Check this by first
4126 resetting all the `used' flags and then copying anything that is shared.
4128 reset_used_flags (i3notes);
4129 reset_used_flags (i2notes);
4130 reset_used_flags (i1notes);
4131 reset_used_flags (i0notes);
4132 reset_used_flags (newpat);
4133 reset_used_flags (newi2pat);
4134 if (undobuf.other_insn)
4135 reset_used_flags (PATTERN (undobuf.other_insn));
4137 i3notes = copy_rtx_if_shared (i3notes);
4138 i2notes = copy_rtx_if_shared (i2notes);
4139 i1notes = copy_rtx_if_shared (i1notes);
4140 i0notes = copy_rtx_if_shared (i0notes);
4141 newpat = copy_rtx_if_shared (newpat);
4142 newi2pat = copy_rtx_if_shared (newi2pat);
4143 if (undobuf.other_insn)
4144 reset_used_flags (PATTERN (undobuf.other_insn));
4146 INSN_CODE (i3) = insn_code_number;
4147 PATTERN (i3) = newpat;
4149 if (CALL_P (i3) && CALL_INSN_FUNCTION_USAGE (i3))
4151 rtx call_usage = CALL_INSN_FUNCTION_USAGE (i3);
4153 reset_used_flags (call_usage);
4154 call_usage = copy_rtx (call_usage);
4156 if (substed_i2)
4158 /* I2SRC must still be meaningful at this point. Some splitting
4159 operations can invalidate I2SRC, but those operations do not
4160 apply to calls. */
4161 gcc_assert (i2src);
4162 replace_rtx (call_usage, i2dest, i2src);
4165 if (substed_i1)
4166 replace_rtx (call_usage, i1dest, i1src);
4167 if (substed_i0)
4168 replace_rtx (call_usage, i0dest, i0src);
4170 CALL_INSN_FUNCTION_USAGE (i3) = call_usage;
4173 if (undobuf.other_insn)
4174 INSN_CODE (undobuf.other_insn) = other_code_number;
4176 /* We had one special case above where I2 had more than one set and
4177 we replaced a destination of one of those sets with the destination
4178 of I3. In that case, we have to update LOG_LINKS of insns later
4179 in this basic block. Note that this (expensive) case is rare.
4181 Also, in this case, we must pretend that all REG_NOTEs for I2
4182 actually came from I3, so that REG_UNUSED notes from I2 will be
4183 properly handled. */
4185 if (i3_subst_into_i2)
4187 for (i = 0; i < XVECLEN (PATTERN (i2), 0); i++)
4188 if ((GET_CODE (XVECEXP (PATTERN (i2), 0, i)) == SET
4189 || GET_CODE (XVECEXP (PATTERN (i2), 0, i)) == CLOBBER)
4190 && REG_P (SET_DEST (XVECEXP (PATTERN (i2), 0, i)))
4191 && SET_DEST (XVECEXP (PATTERN (i2), 0, i)) != i2dest
4192 && ! find_reg_note (i2, REG_UNUSED,
4193 SET_DEST (XVECEXP (PATTERN (i2), 0, i))))
4194 for (temp = NEXT_INSN (i2);
4195 temp && (this_basic_block->next_bb == EXIT_BLOCK_PTR
4196 || BB_HEAD (this_basic_block) != temp);
4197 temp = NEXT_INSN (temp))
4198 if (temp != i3 && INSN_P (temp))
4199 FOR_EACH_LOG_LINK (link, temp)
4200 if (link->insn == i2)
4201 link->insn = i3;
4203 if (i3notes)
4205 rtx link = i3notes;
4206 while (XEXP (link, 1))
4207 link = XEXP (link, 1);
4208 XEXP (link, 1) = i2notes;
4210 else
4211 i3notes = i2notes;
4212 i2notes = 0;
4215 LOG_LINKS (i3) = NULL;
4216 REG_NOTES (i3) = 0;
4217 LOG_LINKS (i2) = NULL;
4218 REG_NOTES (i2) = 0;
4220 if (newi2pat)
4222 if (MAY_HAVE_DEBUG_INSNS && i2scratch)
4223 propagate_for_debug (i2, last_combined_insn, i2dest, i2src);
4224 INSN_CODE (i2) = i2_code_number;
4225 PATTERN (i2) = newi2pat;
4227 else
4229 if (MAY_HAVE_DEBUG_INSNS && i2src)
4230 propagate_for_debug (i2, last_combined_insn, i2dest, i2src);
4231 SET_INSN_DELETED (i2);
4234 if (i1)
4236 LOG_LINKS (i1) = NULL;
4237 REG_NOTES (i1) = 0;
4238 if (MAY_HAVE_DEBUG_INSNS)
4239 propagate_for_debug (i1, last_combined_insn, i1dest, i1src);
4240 SET_INSN_DELETED (i1);
4243 if (i0)
4245 LOG_LINKS (i0) = NULL;
4246 REG_NOTES (i0) = 0;
4247 if (MAY_HAVE_DEBUG_INSNS)
4248 propagate_for_debug (i0, last_combined_insn, i0dest, i0src);
4249 SET_INSN_DELETED (i0);
4252 /* Get death notes for everything that is now used in either I3 or
4253 I2 and used to die in a previous insn. If we built two new
4254 patterns, move from I1 to I2 then I2 to I3 so that we get the
4255 proper movement on registers that I2 modifies. */
4257 if (i0)
4258 from_luid = DF_INSN_LUID (i0);
4259 else if (i1)
4260 from_luid = DF_INSN_LUID (i1);
4261 else
4262 from_luid = DF_INSN_LUID (i2);
4263 if (newi2pat)
4264 move_deaths (newi2pat, NULL_RTX, from_luid, i2, &midnotes);
4265 move_deaths (newpat, newi2pat, from_luid, i3, &midnotes);
4267 /* Distribute all the LOG_LINKS and REG_NOTES from I1, I2, and I3. */
4268 if (i3notes)
4269 distribute_notes (i3notes, i3, i3, newi2pat ? i2 : NULL_RTX,
4270 elim_i2, elim_i1, elim_i0);
4271 if (i2notes)
4272 distribute_notes (i2notes, i2, i3, newi2pat ? i2 : NULL_RTX,
4273 elim_i2, elim_i1, elim_i0);
4274 if (i1notes)
4275 distribute_notes (i1notes, i1, i3, newi2pat ? i2 : NULL_RTX,
4276 elim_i2, elim_i1, elim_i0);
4277 if (i0notes)
4278 distribute_notes (i0notes, i0, i3, newi2pat ? i2 : NULL_RTX,
4279 elim_i2, elim_i1, elim_i0);
4280 if (midnotes)
4281 distribute_notes (midnotes, NULL_RTX, i3, newi2pat ? i2 : NULL_RTX,
4282 elim_i2, elim_i1, elim_i0);
4284 /* Distribute any notes added to I2 or I3 by recog_for_combine. We
4285 know these are REG_UNUSED and want them to go to the desired insn,
4286 so we always pass it as i3. */
4288 if (newi2pat && new_i2_notes)
4289 distribute_notes (new_i2_notes, i2, i2, NULL_RTX, NULL_RTX, NULL_RTX,
4290 NULL_RTX);
4292 if (new_i3_notes)
4293 distribute_notes (new_i3_notes, i3, i3, NULL_RTX, NULL_RTX, NULL_RTX,
4294 NULL_RTX);
4296 /* If I3DEST was used in I3SRC, it really died in I3. We may need to
4297 put a REG_DEAD note for it somewhere. If NEWI2PAT exists and sets
4298 I3DEST, the death must be somewhere before I2, not I3. If we passed I3
4299 in that case, it might delete I2. Similarly for I2 and I1.
4300 Show an additional death due to the REG_DEAD note we make here. If
4301 we discard it in distribute_notes, we will decrement it again. */
4303 if (i3dest_killed)
4305 if (newi2pat && reg_set_p (i3dest_killed, newi2pat))
4306 distribute_notes (alloc_reg_note (REG_DEAD, i3dest_killed,
4307 NULL_RTX),
4308 NULL_RTX, i2, NULL_RTX, elim_i2, elim_i1, elim_i0);
4309 else
4310 distribute_notes (alloc_reg_note (REG_DEAD, i3dest_killed,
4311 NULL_RTX),
4312 NULL_RTX, i3, newi2pat ? i2 : NULL_RTX,
4313 elim_i2, elim_i1, elim_i0);
4316 if (i2dest_in_i2src)
4318 rtx new_note = alloc_reg_note (REG_DEAD, i2dest, NULL_RTX);
4319 if (newi2pat && reg_set_p (i2dest, newi2pat))
4320 distribute_notes (new_note, NULL_RTX, i2, NULL_RTX, NULL_RTX,
4321 NULL_RTX, NULL_RTX);
4322 else
4323 distribute_notes (new_note, NULL_RTX, i3, newi2pat ? i2 : NULL_RTX,
4324 NULL_RTX, NULL_RTX, NULL_RTX);
4327 if (i1dest_in_i1src)
4329 rtx new_note = alloc_reg_note (REG_DEAD, i1dest, NULL_RTX);
4330 if (newi2pat && reg_set_p (i1dest, newi2pat))
4331 distribute_notes (new_note, NULL_RTX, i2, NULL_RTX, NULL_RTX,
4332 NULL_RTX, NULL_RTX);
4333 else
4334 distribute_notes (new_note, NULL_RTX, i3, newi2pat ? i2 : NULL_RTX,
4335 NULL_RTX, NULL_RTX, NULL_RTX);
4338 if (i0dest_in_i0src)
4340 rtx new_note = alloc_reg_note (REG_DEAD, i0dest, NULL_RTX);
4341 if (newi2pat && reg_set_p (i0dest, newi2pat))
4342 distribute_notes (new_note, NULL_RTX, i2, NULL_RTX, NULL_RTX,
4343 NULL_RTX, NULL_RTX);
4344 else
4345 distribute_notes (new_note, NULL_RTX, i3, newi2pat ? i2 : NULL_RTX,
4346 NULL_RTX, NULL_RTX, NULL_RTX);
4349 distribute_links (i3links);
4350 distribute_links (i2links);
4351 distribute_links (i1links);
4352 distribute_links (i0links);
4354 if (REG_P (i2dest))
4356 struct insn_link *link;
4357 rtx i2_insn = 0, i2_val = 0, set;
4359 /* The insn that used to set this register doesn't exist, and
4360 this life of the register may not exist either. See if one of
4361 I3's links points to an insn that sets I2DEST. If it does,
4362 that is now the last known value for I2DEST. If we don't update
4363 this and I2 set the register to a value that depended on its old
4364 contents, we will get confused. If this insn is used, things
4365 will be set correctly in combine_instructions. */
4366 FOR_EACH_LOG_LINK (link, i3)
4367 if ((set = single_set (link->insn)) != 0
4368 && rtx_equal_p (i2dest, SET_DEST (set)))
4369 i2_insn = link->insn, i2_val = SET_SRC (set);
4371 record_value_for_reg (i2dest, i2_insn, i2_val);
4373 /* If the reg formerly set in I2 died only once and that was in I3,
4374 zero its use count so it won't make `reload' do any work. */
4375 if (! added_sets_2
4376 && (newi2pat == 0 || ! reg_mentioned_p (i2dest, newi2pat))
4377 && ! i2dest_in_i2src)
4378 INC_REG_N_SETS (REGNO (i2dest), -1);
4381 if (i1 && REG_P (i1dest))
4383 struct insn_link *link;
4384 rtx i1_insn = 0, i1_val = 0, set;
4386 FOR_EACH_LOG_LINK (link, i3)
4387 if ((set = single_set (link->insn)) != 0
4388 && rtx_equal_p (i1dest, SET_DEST (set)))
4389 i1_insn = link->insn, i1_val = SET_SRC (set);
4391 record_value_for_reg (i1dest, i1_insn, i1_val);
4393 if (! added_sets_1 && ! i1dest_in_i1src)
4394 INC_REG_N_SETS (REGNO (i1dest), -1);
4397 if (i0 && REG_P (i0dest))
4399 struct insn_link *link;
4400 rtx i0_insn = 0, i0_val = 0, set;
4402 FOR_EACH_LOG_LINK (link, i3)
4403 if ((set = single_set (link->insn)) != 0
4404 && rtx_equal_p (i0dest, SET_DEST (set)))
4405 i0_insn = link->insn, i0_val = SET_SRC (set);
4407 record_value_for_reg (i0dest, i0_insn, i0_val);
4409 if (! added_sets_0 && ! i0dest_in_i0src)
4410 INC_REG_N_SETS (REGNO (i0dest), -1);
4413 /* Update reg_stat[].nonzero_bits et al for any changes that may have
4414 been made to this insn. The order of the calls to
4415 set_nonzero_bits_and_sign_copies () is important because newi2pat
4416 can affect the nonzero_bits of newpat.  */
4417 if (newi2pat)
4418 note_stores (newi2pat, set_nonzero_bits_and_sign_copies, NULL);
4419 note_stores (newpat, set_nonzero_bits_and_sign_copies, NULL);
4422 if (undobuf.other_insn != NULL_RTX)
4424 if (dump_file)
4426 fprintf (dump_file, "modifying other_insn ");
4427 dump_insn_slim (dump_file, undobuf.other_insn);
4429 df_insn_rescan (undobuf.other_insn);
4432 if (i0 && !(NOTE_P(i0) && (NOTE_KIND (i0) == NOTE_INSN_DELETED)))
4434 if (dump_file)
4436 fprintf (dump_file, "modifying insn i0 ");
4437 dump_insn_slim (dump_file, i0);
4439 df_insn_rescan (i0);
4442 if (i1 && !(NOTE_P(i1) && (NOTE_KIND (i1) == NOTE_INSN_DELETED)))
4444 if (dump_file)
4446 fprintf (dump_file, "modifying insn i1 ");
4447 dump_insn_slim (dump_file, i1);
4449 df_insn_rescan (i1);
4452 if (i2 && !(NOTE_P(i2) && (NOTE_KIND (i2) == NOTE_INSN_DELETED)))
4454 if (dump_file)
4456 fprintf (dump_file, "modifying insn i2 ");
4457 dump_insn_slim (dump_file, i2);
4459 df_insn_rescan (i2);
4462 if (i3 && !(NOTE_P(i3) && (NOTE_KIND (i3) == NOTE_INSN_DELETED)))
4464 if (dump_file)
4466 fprintf (dump_file, "modifying insn i3 ");
4467 dump_insn_slim (dump_file, i3);
4469 df_insn_rescan (i3);
4472 /* Set new_direct_jump_p if a new return or simple jump instruction
4473 has been created. Adjust the CFG accordingly. */
4475 if (returnjump_p (i3) || any_uncondjump_p (i3))
4477 *new_direct_jump_p = 1;
4478 mark_jump_label (PATTERN (i3), i3, 0);
4479 update_cfg_for_uncondjump (i3);
4482 if (undobuf.other_insn != NULL_RTX
4483 && (returnjump_p (undobuf.other_insn)
4484 || any_uncondjump_p (undobuf.other_insn)))
4486 *new_direct_jump_p = 1;
4487 update_cfg_for_uncondjump (undobuf.other_insn);
4490 /* A noop might also need cleaning up of CFG, if it comes from the
4491 simplification of a jump. */
4492 if (JUMP_P (i3)
4493 && GET_CODE (newpat) == SET
4494 && SET_SRC (newpat) == pc_rtx
4495 && SET_DEST (newpat) == pc_rtx)
4497 *new_direct_jump_p = 1;
4498 update_cfg_for_uncondjump (i3);
4501 if (undobuf.other_insn != NULL_RTX
4502 && JUMP_P (undobuf.other_insn)
4503 && GET_CODE (PATTERN (undobuf.other_insn)) == SET
4504 && SET_SRC (PATTERN (undobuf.other_insn)) == pc_rtx
4505 && SET_DEST (PATTERN (undobuf.other_insn)) == pc_rtx)
4507 *new_direct_jump_p = 1;
4508 update_cfg_for_uncondjump (undobuf.other_insn);
4511 combine_successes++;
4512 undo_commit ();
4514 if (added_links_insn
4515 && (newi2pat == 0 || DF_INSN_LUID (added_links_insn) < DF_INSN_LUID (i2))
4516 && DF_INSN_LUID (added_links_insn) < DF_INSN_LUID (i3))
4517 return added_links_insn;
4518 else
4519 return newi2pat ? i2 : i3;
4522 /* Undo all the modifications recorded in undobuf. */
4524 static void
4525 undo_all (void)
4527 struct undo *undo, *next;
4529 for (undo = undobuf.undos; undo; undo = next)
4531 next = undo->next;
4532 switch (undo->kind)
4534 case UNDO_RTX:
4535 *undo->where.r = undo->old_contents.r;
4536 break;
4537 case UNDO_INT:
4538 *undo->where.i = undo->old_contents.i;
4539 break;
4540 case UNDO_MODE:
4541 adjust_reg_mode (*undo->where.r, undo->old_contents.m);
4542 break;
4543 case UNDO_LINKS:
4544 *undo->where.l = undo->old_contents.l;
4545 break;
4546 default:
4547 gcc_unreachable ();
4550 undo->next = undobuf.frees;
4551 undobuf.frees = undo;
4554 undobuf.undos = 0;
4557 /* We've committed to accepting the changes we made. Move all
4558 of the undos to the free list. */
4560 static void
4561 undo_commit (void)
4563 struct undo *undo, *next;
4565 for (undo = undobuf.undos; undo; undo = next)
4567 next = undo->next;
4568 undo->next = undobuf.frees;
4569 undobuf.frees = undo;
4571 undobuf.undos = 0;
4574 /* Find the innermost point within the rtx at LOC, possibly LOC itself,
4575 where we have an arithmetic expression and return that point. LOC will
4576 be inside INSN.
4578 try_combine will call this function to see if an insn can be split into
4579 two insns. */
4581 static rtx *
4582 find_split_point (rtx *loc, rtx insn, bool set_src)
4584 rtx x = *loc;
4585 enum rtx_code code = GET_CODE (x);
4586 rtx *split;
4587 unsigned HOST_WIDE_INT len = 0;
4588 HOST_WIDE_INT pos = 0;
4589 int unsignedp = 0;
4590 rtx inner = NULL_RTX;
4592 /* First special-case some codes. */
4593 switch (code)
4595 case SUBREG:
4596 #ifdef INSN_SCHEDULING
4597 /* If we are making a paradoxical SUBREG invalid, it becomes a split
4598 point. */
4599 if (MEM_P (SUBREG_REG (x)))
4600 return loc;
4601 #endif
4602 return find_split_point (&SUBREG_REG (x), insn, false);
4604 case MEM:
4605 #ifdef HAVE_lo_sum
4606 /* If we have (mem (const ..)) or (mem (symbol_ref ...)), split it
4607 using LO_SUM and HIGH. */
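/* For example, (mem (symbol_ref X)) becomes
   (mem (lo_sum (high (symbol_ref X)) (symbol_ref X))),
   and the HIGH expression is returned as the split point.  */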
4608 if (GET_CODE (XEXP (x, 0)) == CONST
4609 || GET_CODE (XEXP (x, 0)) == SYMBOL_REF)
4611 enum machine_mode address_mode = get_address_mode (x);
4613 SUBST (XEXP (x, 0),
4614 gen_rtx_LO_SUM (address_mode,
4615 gen_rtx_HIGH (address_mode, XEXP (x, 0)),
4616 XEXP (x, 0)));
4617 return &XEXP (XEXP (x, 0), 0);
4619 #endif
4621 /* If we have a PLUS whose second operand is a constant and the
4622 address is not valid, perhaps we can split it up using
4623 the machine-specific way to split large constants. We use
4624 the first pseudo-reg (one of the virtual regs) as a placeholder;
4625 it will not remain in the result. */
4626 if (GET_CODE (XEXP (x, 0)) == PLUS
4627 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
4628 && ! memory_address_addr_space_p (GET_MODE (x), XEXP (x, 0),
4629 MEM_ADDR_SPACE (x)))
4631 rtx reg = regno_reg_rtx[FIRST_PSEUDO_REGISTER];
4632 rtx seq = combine_split_insns (gen_rtx_SET (VOIDmode, reg,
4633 XEXP (x, 0)),
4634 subst_insn);
4636 /* This should have produced two insns, each of which sets our
4637 placeholder. If the source of the second is a valid address,
4638 we can put both sources together and make a split point
4639 in the middle. */
4641 if (seq
4642 && NEXT_INSN (seq) != NULL_RTX
4643 && NEXT_INSN (NEXT_INSN (seq)) == NULL_RTX
4644 && NONJUMP_INSN_P (seq)
4645 && GET_CODE (PATTERN (seq)) == SET
4646 && SET_DEST (PATTERN (seq)) == reg
4647 && ! reg_mentioned_p (reg,
4648 SET_SRC (PATTERN (seq)))
4649 && NONJUMP_INSN_P (NEXT_INSN (seq))
4650 && GET_CODE (PATTERN (NEXT_INSN (seq))) == SET
4651 && SET_DEST (PATTERN (NEXT_INSN (seq))) == reg
4652 && memory_address_addr_space_p
4653 (GET_MODE (x), SET_SRC (PATTERN (NEXT_INSN (seq))),
4654 MEM_ADDR_SPACE (x)))
4656 rtx src1 = SET_SRC (PATTERN (seq));
4657 rtx src2 = SET_SRC (PATTERN (NEXT_INSN (seq)));
4659 /* Replace the placeholder in SRC2 with SRC1. If we can
4660 find where in SRC2 it was placed, that can become our
4661 split point and we can replace this address with SRC2.
4662 Just try two obvious places. */
4664 src2 = replace_rtx (src2, reg, src1);
4665 split = 0;
4666 if (XEXP (src2, 0) == src1)
4667 split = &XEXP (src2, 0);
4668 else if (GET_RTX_FORMAT (GET_CODE (XEXP (src2, 0)))[0] == 'e'
4669 && XEXP (XEXP (src2, 0), 0) == src1)
4670 split = &XEXP (XEXP (src2, 0), 0);
4672 if (split)
4674 SUBST (XEXP (x, 0), src2);
4675 return split;
4679 /* If that didn't work, perhaps the first operand is complex and
4680 needs to be computed separately, so make a split point there.
4681 This will occur on machines that just support REG + CONST
4682 and have a constant moved through some previous computation. */
4684 else if (!OBJECT_P (XEXP (XEXP (x, 0), 0))
4685 && ! (GET_CODE (XEXP (XEXP (x, 0), 0)) == SUBREG
4686 && OBJECT_P (SUBREG_REG (XEXP (XEXP (x, 0), 0)))))
4687 return &XEXP (XEXP (x, 0), 0);
4690 /* If we have a PLUS whose first operand is complex, try computing it
4691 separately by making a split there. */
4692 if (GET_CODE (XEXP (x, 0)) == PLUS
4693 && ! memory_address_addr_space_p (GET_MODE (x), XEXP (x, 0),
4694 MEM_ADDR_SPACE (x))
4695 && ! OBJECT_P (XEXP (XEXP (x, 0), 0))
4696 && ! (GET_CODE (XEXP (XEXP (x, 0), 0)) == SUBREG
4697 && OBJECT_P (SUBREG_REG (XEXP (XEXP (x, 0), 0)))))
4698 return &XEXP (XEXP (x, 0), 0);
4699 break;
4701 case SET:
4702 #ifdef HAVE_cc0
4703 /* If SET_DEST is CC0 and SET_SRC is not an operand, a COMPARE, or a
4704 ZERO_EXTRACT, the most likely reason why this doesn't match is that
4705 we need to put the operand into a register. So split at that
4706 point. */
4708 if (SET_DEST (x) == cc0_rtx
4709 && GET_CODE (SET_SRC (x)) != COMPARE
4710 && GET_CODE (SET_SRC (x)) != ZERO_EXTRACT
4711 && !OBJECT_P (SET_SRC (x))
4712 && ! (GET_CODE (SET_SRC (x)) == SUBREG
4713 && OBJECT_P (SUBREG_REG (SET_SRC (x)))))
4714 return &SET_SRC (x);
4715 #endif
4717 /* See if we can split SET_SRC as it stands. */
4718 split = find_split_point (&SET_SRC (x), insn, true);
4719 if (split && split != &SET_SRC (x))
4720 return split;
4722 /* See if we can split SET_DEST as it stands. */
4723 split = find_split_point (&SET_DEST (x), insn, false);
4724 if (split && split != &SET_DEST (x))
4725 return split;
4727 /* See if this is a bitfield assignment with everything constant. If
4728 so, this is an IOR of an AND, so split it into that. */
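/* For example, with !BITS_BIG_ENDIAN,
   (set (zero_extract X (const_int 3) (const_int 4)) (const_int 5))
   is rewritten as
   (set X (ior (and X (const_int ~0x70)) (const_int 0x50))).  */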
4729 if (GET_CODE (SET_DEST (x)) == ZERO_EXTRACT
4730 && HWI_COMPUTABLE_MODE_P (GET_MODE (XEXP (SET_DEST (x), 0)))
4731 && CONST_INT_P (XEXP (SET_DEST (x), 1))
4732 && CONST_INT_P (XEXP (SET_DEST (x), 2))
4733 && CONST_INT_P (SET_SRC (x))
4734 && ((INTVAL (XEXP (SET_DEST (x), 1))
4735 + INTVAL (XEXP (SET_DEST (x), 2)))
4736 <= GET_MODE_PRECISION (GET_MODE (XEXP (SET_DEST (x), 0))))
4737 && ! side_effects_p (XEXP (SET_DEST (x), 0)))
4739 HOST_WIDE_INT pos = INTVAL (XEXP (SET_DEST (x), 2));
4740 unsigned HOST_WIDE_INT len = INTVAL (XEXP (SET_DEST (x), 1));
4741 unsigned HOST_WIDE_INT src = INTVAL (SET_SRC (x));
4742 rtx dest = XEXP (SET_DEST (x), 0);
4743 enum machine_mode mode = GET_MODE (dest);
4744 unsigned HOST_WIDE_INT mask
4745 = ((unsigned HOST_WIDE_INT) 1 << len) - 1;
4746 rtx or_mask;
4748 if (BITS_BIG_ENDIAN)
4749 pos = GET_MODE_PRECISION (mode) - len - pos;
4751 or_mask = gen_int_mode (src << pos, mode);
4752 if (src == mask)
4753 SUBST (SET_SRC (x),
4754 simplify_gen_binary (IOR, mode, dest, or_mask));
4755 else
4757 rtx negmask = gen_int_mode (~(mask << pos), mode);
4758 SUBST (SET_SRC (x),
4759 simplify_gen_binary (IOR, mode,
4760 simplify_gen_binary (AND, mode,
4761 dest, negmask),
4762 or_mask));
4765 SUBST (SET_DEST (x), dest);
4767 split = find_split_point (&SET_SRC (x), insn, true);
4768 if (split && split != &SET_SRC (x))
4769 return split;
4772 /* Otherwise, see if this is an operation that we can split into two.
4773 If so, try to split that. */
4774 code = GET_CODE (SET_SRC (x));
4776 switch (code)
4778 case AND:
4779 /* If we are AND'ing with a large constant that is only a single
4780 bit and the result is only being used in a context where we
4781 need to know if it is zero or nonzero, replace it with a bit
4782 extraction. This will avoid the large constant, which might
4783 have taken more than one insn to make. If the constant were
4784 not a valid argument to the AND but took only one insn to make,
4785 this is no worse, but if it took more than one insn, it will
4786 be better. */
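/* For example, if D in (set D (and X (const_int 0x100000))) is used
   only in (ne D (const_int 0)), the AND is replaced by a one-bit
   extraction of bit 20 of X.  */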
4788 if (CONST_INT_P (XEXP (SET_SRC (x), 1))
4789 && REG_P (XEXP (SET_SRC (x), 0))
4790 && (pos = exact_log2 (UINTVAL (XEXP (SET_SRC (x), 1)))) >= 7
4791 && REG_P (SET_DEST (x))
4792 && (split = find_single_use (SET_DEST (x), insn, (rtx*) 0)) != 0
4793 && (GET_CODE (*split) == EQ || GET_CODE (*split) == NE)
4794 && XEXP (*split, 0) == SET_DEST (x)
4795 && XEXP (*split, 1) == const0_rtx)
4797 rtx extraction = make_extraction (GET_MODE (SET_DEST (x)),
4798 XEXP (SET_SRC (x), 0),
4799 pos, NULL_RTX, 1, 1, 0, 0);
4800 if (extraction != 0)
4802 SUBST (SET_SRC (x), extraction);
4803 return find_split_point (loc, insn, false);
4806 break;
4808 case NE:
4809 /* If STORE_FLAG_VALUE is -1, this is (NE X 0) and only one bit of X
4810 is known to be on, this can be converted into a NEG of a shift. */
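/* For example, if only bit 5 of X can be nonzero,
   (ne X (const_int 0)) becomes (neg (lshiftrt X (const_int 5))).  */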
4811 if (STORE_FLAG_VALUE == -1 && XEXP (SET_SRC (x), 1) == const0_rtx
4812 && GET_MODE (SET_SRC (x)) == GET_MODE (XEXP (SET_SRC (x), 0))
4813 && 1 <= (pos = exact_log2
4814 (nonzero_bits (XEXP (SET_SRC (x), 0),
4815 GET_MODE (XEXP (SET_SRC (x), 0))))))
4817 enum machine_mode mode = GET_MODE (XEXP (SET_SRC (x), 0));
4819 SUBST (SET_SRC (x),
4820 gen_rtx_NEG (mode,
4821 gen_rtx_LSHIFTRT (mode,
4822 XEXP (SET_SRC (x), 0),
4823 GEN_INT (pos))));
4825 split = find_split_point (&SET_SRC (x), insn, true);
4826 if (split && split != &SET_SRC (x))
4827 return split;
4829 break;
4831 case SIGN_EXTEND:
4832 inner = XEXP (SET_SRC (x), 0);
4834 /* We can't optimize if either mode is a partial integer
4835 mode as we don't know how many bits are significant
4836 in those modes. */
4837 if (GET_MODE_CLASS (GET_MODE (inner)) == MODE_PARTIAL_INT
4838 || GET_MODE_CLASS (GET_MODE (SET_SRC (x))) == MODE_PARTIAL_INT)
4839 break;
4841 pos = 0;
4842 len = GET_MODE_PRECISION (GET_MODE (inner));
4843 unsignedp = 0;
4844 break;
4846 case SIGN_EXTRACT:
4847 case ZERO_EXTRACT:
4848 if (CONST_INT_P (XEXP (SET_SRC (x), 1))
4849 && CONST_INT_P (XEXP (SET_SRC (x), 2)))
4851 inner = XEXP (SET_SRC (x), 0);
4852 len = INTVAL (XEXP (SET_SRC (x), 1));
4853 pos = INTVAL (XEXP (SET_SRC (x), 2));
4855 if (BITS_BIG_ENDIAN)
4856 pos = GET_MODE_PRECISION (GET_MODE (inner)) - len - pos;
4857 unsignedp = (code == ZERO_EXTRACT);
4859 break;
4861 default:
4862 break;
4865 if (len && pos >= 0
4866 && pos + len <= GET_MODE_PRECISION (GET_MODE (inner)))
4868 enum machine_mode mode = GET_MODE (SET_SRC (x));
4870 /* For unsigned, we have a choice of a shift followed by an
4871 AND or two shifts. Use two shifts for field sizes where the
4872 constant might be too large. We assume here that we can
4873 always at least get 8-bit constants in an AND insn, which is
4874 true for every current RISC. */
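/* For example, an unsigned 3-bit field at bit 4 becomes
   (and (lshiftrt ... (const_int 4)) (const_int 7)); wider or signed
   fields instead use an ASHIFT followed by an LSHIFTRT or ASHIFTRT.  */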
4876 if (unsignedp && len <= 8)
4878 SUBST (SET_SRC (x),
4879 gen_rtx_AND (mode,
4880 gen_rtx_LSHIFTRT
4881 (mode, gen_lowpart (mode, inner),
4882 GEN_INT (pos)),
4883 GEN_INT (((unsigned HOST_WIDE_INT) 1 << len)
4884 - 1)));
4886 split = find_split_point (&SET_SRC (x), insn, true);
4887 if (split && split != &SET_SRC (x))
4888 return split;
4890 else
4892 SUBST (SET_SRC (x),
4893 gen_rtx_fmt_ee
4894 (unsignedp ? LSHIFTRT : ASHIFTRT, mode,
4895 gen_rtx_ASHIFT (mode,
4896 gen_lowpart (mode, inner),
4897 GEN_INT (GET_MODE_PRECISION (mode)
4898 - len - pos)),
4899 GEN_INT (GET_MODE_PRECISION (mode) - len)));
4901 split = find_split_point (&SET_SRC (x), insn, true);
4902 if (split && split != &SET_SRC (x))
4903 return split;
4907 /* See if this is a simple operation with a constant as the second
4908 operand. It might be that this constant is out of range and hence
4909 could be used as a split point. */
4910 if (BINARY_P (SET_SRC (x))
4911 && CONSTANT_P (XEXP (SET_SRC (x), 1))
4912 && (OBJECT_P (XEXP (SET_SRC (x), 0))
4913 || (GET_CODE (XEXP (SET_SRC (x), 0)) == SUBREG
4914 && OBJECT_P (SUBREG_REG (XEXP (SET_SRC (x), 0))))))
4915 return &XEXP (SET_SRC (x), 1);
4917 /* Finally, see if this is a simple operation with its first operand
4918 not in a register. The operation might require this operand in a
4919 register, so return it as a split point. We can always do this
4920 because if the first operand were another operation, we would have
4921 already found it as a split point. */
4922 if ((BINARY_P (SET_SRC (x)) || UNARY_P (SET_SRC (x)))
4923 && ! register_operand (XEXP (SET_SRC (x), 0), VOIDmode))
4924 return &XEXP (SET_SRC (x), 0);
4926 return 0;
4928 case AND:
4929 case IOR:
4930 /* We write NOR as (and (not A) (not B)), but if we don't have a NOR,
4931 it is better to write this as (not (ior A B)) so we can split it.
4932 Similarly for IOR. */
4933 if (GET_CODE (XEXP (x, 0)) == NOT && GET_CODE (XEXP (x, 1)) == NOT)
4935 SUBST (*loc,
4936 gen_rtx_NOT (GET_MODE (x),
4937 gen_rtx_fmt_ee (code == IOR ? AND : IOR,
4938 GET_MODE (x),
4939 XEXP (XEXP (x, 0), 0),
4940 XEXP (XEXP (x, 1), 0))));
4941 return find_split_point (loc, insn, set_src);
4944 /* Many RISC machines have a large set of logical insns. If the
4945 second operand is a NOT, put it first so we will try to split the
4946 other operand first. */
4947 if (GET_CODE (XEXP (x, 1)) == NOT)
4949 rtx tem = XEXP (x, 0);
4950 SUBST (XEXP (x, 0), XEXP (x, 1));
4951 SUBST (XEXP (x, 1), tem);
4953 break;
4955 case PLUS:
4956 case MINUS:
4957 /* Canonicalization can produce (minus A (mult B C)), where C is a
4958 constant. It may be better to try splitting (plus (mult B -C) A)
4959 instead if this isn't a multiply by a power of two. */
4960 if (set_src && code == MINUS && GET_CODE (XEXP (x, 1)) == MULT
4961 && GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT
4962 && exact_log2 (INTVAL (XEXP (XEXP (x, 1), 1))) < 0)
4964 enum machine_mode mode = GET_MODE (x);
4965 unsigned HOST_WIDE_INT this_int = INTVAL (XEXP (XEXP (x, 1), 1));
4966 HOST_WIDE_INT other_int = trunc_int_for_mode (-this_int, mode);
4967 SUBST (*loc, gen_rtx_PLUS (mode, gen_rtx_MULT (mode,
4968 XEXP (XEXP (x, 1), 0),
4969 GEN_INT (other_int)),
4970 XEXP (x, 0)));
4971 return find_split_point (loc, insn, set_src);
4974 /* Split at a multiply-accumulate instruction. However if this is
4975 the SET_SRC, we likely do not have such an instruction and it's
4976 worthless to try this split. */
4977 if (!set_src && GET_CODE (XEXP (x, 0)) == MULT)
4978 return loc;
4980 default:
4981 break;
4984 /* Otherwise, select our actions depending on our rtx class. */
4985 switch (GET_RTX_CLASS (code))
4987 case RTX_BITFIELD_OPS: /* This is ZERO_EXTRACT and SIGN_EXTRACT. */
4988 case RTX_TERNARY:
4989 split = find_split_point (&XEXP (x, 2), insn, false);
4990 if (split)
4991 return split;
4992 /* ... fall through ... */
4993 case RTX_BIN_ARITH:
4994 case RTX_COMM_ARITH:
4995 case RTX_COMPARE:
4996 case RTX_COMM_COMPARE:
4997 split = find_split_point (&XEXP (x, 1), insn, false);
4998 if (split)
4999 return split;
5000 /* ... fall through ... */
5001 case RTX_UNARY:
5002 /* Some machines have (and (shift ...) ...) insns. If X is not
5003 an AND, but XEXP (X, 0) is, use it as our split point. */
5004 if (GET_CODE (x) != AND && GET_CODE (XEXP (x, 0)) == AND)
5005 return &XEXP (x, 0);
5007 split = find_split_point (&XEXP (x, 0), insn, false);
5008 if (split)
5009 return split;
5010 return loc;
5012 default:
5013 /* Otherwise, we don't have a split point. */
5014 return 0;
5018 /* Throughout X, replace FROM with TO, and return the result.
5019 The result is TO if X is FROM;
5020 otherwise the result is X, but its contents may have been modified.
5021 If they were modified, a record was made in undobuf so that
5022 undo_all will (among other things) return X to its original state.
5024 If the number of changes necessary is too much to record to undo,
5025 the excess changes are not made, so the result is invalid.
5026 The changes already made can still be undone.
5027 undobuf.num_undo is incremented for such changes, so by testing that,
5028 the caller can tell whether the result is valid.
5030 `n_occurrences' is incremented each time FROM is replaced.
5032 IN_DEST is nonzero if we are processing the SET_DEST of a SET.
5034 IN_COND is nonzero if we are at the top level of a condition.
5036 UNIQUE_COPY is nonzero if each substitution must be unique. We do this
5037 by copying if `n_occurrences' is nonzero. */
5039 static rtx
5040 subst (rtx x, rtx from, rtx to, int in_dest, int in_cond, int unique_copy)
5042 enum rtx_code code = GET_CODE (x);
5043 enum machine_mode op0_mode = VOIDmode;
5044 const char *fmt;
5045 int len, i;
5046 rtx new_rtx;
5048 /* Two expressions are equal if they are identical copies of a shared
5049 RTX or if they are both registers with the same register number
5050 and mode. */
5052 #define COMBINE_RTX_EQUAL_P(X,Y) \
5053 ((X) == (Y) \
5054 || (REG_P (X) && REG_P (Y) \
5055 && REGNO (X) == REGNO (Y) && GET_MODE (X) == GET_MODE (Y)))
5057 if (! in_dest && COMBINE_RTX_EQUAL_P (x, from))
5059 n_occurrences++;
5060 return (unique_copy && n_occurrences > 1 ? copy_rtx (to) : to);
5063 /* If X and FROM are the same register but different modes, they
5064 will not have been seen as equal above. However, the log links code
5065 will make a LOG_LINKS entry for that case. If we do nothing, we
5066 will try to rerecognize our original insn and, when it succeeds,
5067 we will delete the feeding insn, which is incorrect.
5069 So force this insn not to match in this (rare) case. */
5070 if (! in_dest && code == REG && REG_P (from)
5071 && reg_overlap_mentioned_p (x, from))
5072 return gen_rtx_CLOBBER (GET_MODE (x), const0_rtx);
5074 /* If this is an object, we are done unless it is a MEM or LO_SUM, both
5075 of which may contain things that can be combined. */
5076 if (code != MEM && code != LO_SUM && OBJECT_P (x))
5077 return x;
5079 /* It is possible to have a subexpression appear twice in the insn.
5080 Suppose that FROM is a register that appears within TO.
5081 Then, after that subexpression has been scanned once by `subst',
5082 the second time it is scanned, TO may be found. If we were
5083 to scan TO here, we would find FROM within it and create a
5084 self-referent rtl structure which is completely wrong. */
5085 if (COMBINE_RTX_EQUAL_P (x, to))
5086 return to;
5088 /* Parallel asm_operands need special attention because all of the
5089 inputs are shared across the arms. Furthermore, unsharing the
5090 rtl results in recognition failures. Failure to handle this case
5091 specially can result in circular rtl.
5093 Solve this by doing a normal pass across the first entry of the
5094 parallel, and only processing the SET_DESTs of the subsequent
5095 entries. Ug. */
5097 if (code == PARALLEL
5098 && GET_CODE (XVECEXP (x, 0, 0)) == SET
5099 && GET_CODE (SET_SRC (XVECEXP (x, 0, 0))) == ASM_OPERANDS)
5101 new_rtx = subst (XVECEXP (x, 0, 0), from, to, 0, 0, unique_copy);
5103 /* If this substitution failed, this whole thing fails. */
5104 if (GET_CODE (new_rtx) == CLOBBER
5105 && XEXP (new_rtx, 0) == const0_rtx)
5106 return new_rtx;
5108 SUBST (XVECEXP (x, 0, 0), new_rtx);
5110 for (i = XVECLEN (x, 0) - 1; i >= 1; i--)
5112 rtx dest = SET_DEST (XVECEXP (x, 0, i));
5114 if (!REG_P (dest)
5115 && GET_CODE (dest) != CC0
5116 && GET_CODE (dest) != PC)
5118 new_rtx = subst (dest, from, to, 0, 0, unique_copy);
5120 /* If this substitution failed, this whole thing fails. */
5121 if (GET_CODE (new_rtx) == CLOBBER
5122 && XEXP (new_rtx, 0) == const0_rtx)
5123 return new_rtx;
5125 SUBST (SET_DEST (XVECEXP (x, 0, i)), new_rtx);
5129 else
5131 len = GET_RTX_LENGTH (code);
5132 fmt = GET_RTX_FORMAT (code);
5134 /* We don't need to process a SET_DEST that is a register, CC0,
5135 or PC, so set up to skip this common case. All other cases
5136 where we want to suppress replacing something inside a
5137 SET_SRC are handled via the IN_DEST operand. */
5138 if (code == SET
5139 && (REG_P (SET_DEST (x))
5140 || GET_CODE (SET_DEST (x)) == CC0
5141 || GET_CODE (SET_DEST (x)) == PC))
5142 fmt = "ie";
5144 /* Get the mode of operand 0 in case X is now a SIGN_EXTEND of a
5145 constant. */
5146 if (fmt[0] == 'e')
5147 op0_mode = GET_MODE (XEXP (x, 0));
5149 for (i = 0; i < len; i++)
5151 if (fmt[i] == 'E')
5153 int j;
5154 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
5156 if (COMBINE_RTX_EQUAL_P (XVECEXP (x, i, j), from))
5158 new_rtx = (unique_copy && n_occurrences
5159 ? copy_rtx (to) : to);
5160 n_occurrences++;
5162 else
5164 new_rtx = subst (XVECEXP (x, i, j), from, to, 0, 0,
5165 unique_copy);
5167 /* If this substitution failed, this whole thing
5168 fails. */
5169 if (GET_CODE (new_rtx) == CLOBBER
5170 && XEXP (new_rtx, 0) == const0_rtx)
5171 return new_rtx;
5174 SUBST (XVECEXP (x, i, j), new_rtx);
5177 else if (fmt[i] == 'e')
5179 /* If this is a register being set, ignore it. */
5180 new_rtx = XEXP (x, i);
5181 if (in_dest
5182 && i == 0
5183 && (((code == SUBREG || code == ZERO_EXTRACT)
5184 && REG_P (new_rtx))
5185 || code == STRICT_LOW_PART))
5188 else if (COMBINE_RTX_EQUAL_P (XEXP (x, i), from))
5190 /* In general, don't install a subreg involving two
5191 modes not tieable. It can worsen register
5192 allocation, and can even make invalid reload
5193 insns, since the reg inside may need to be copied
5194 from in the outside mode, and that may be invalid
5195 if it is an fp reg copied in integer mode.
5197 We allow two exceptions to this: It is valid if
5198 it is inside another SUBREG and the mode of that
5199 SUBREG and the mode of the inside of TO is
5200 tieable and it is valid if X is a SET that copies
5201 FROM to CC0. */
5203 if (GET_CODE (to) == SUBREG
5204 && ! MODES_TIEABLE_P (GET_MODE (to),
5205 GET_MODE (SUBREG_REG (to)))
5206 && ! (code == SUBREG
5207 && MODES_TIEABLE_P (GET_MODE (x),
5208 GET_MODE (SUBREG_REG (to))))
5209 #ifdef HAVE_cc0
5210 && ! (code == SET && i == 1 && XEXP (x, 0) == cc0_rtx)
5211 #endif
5213 return gen_rtx_CLOBBER (VOIDmode, const0_rtx);
5215 #ifdef CANNOT_CHANGE_MODE_CLASS
5216 if (code == SUBREG
5217 && REG_P (to)
5218 && REGNO (to) < FIRST_PSEUDO_REGISTER
5219 && REG_CANNOT_CHANGE_MODE_P (REGNO (to),
5220 GET_MODE (to),
5221 GET_MODE (x)))
5222 return gen_rtx_CLOBBER (VOIDmode, const0_rtx);
5223 #endif
5225 new_rtx = (unique_copy && n_occurrences ? copy_rtx (to) : to);
5226 n_occurrences++;
5228 else
5229 /* If we are in a SET_DEST, suppress most cases unless we
5230 have gone inside a MEM, in which case we want to
5231 simplify the address. We assume here that things that
5232 are actually part of the destination have their inner
5233 parts in the first expression. This is true for SUBREG,
5234 STRICT_LOW_PART, and ZERO_EXTRACT, which are the only
5235 things aside from REG and MEM that should appear in a
5236 SET_DEST. */
5237 new_rtx = subst (XEXP (x, i), from, to,
5238 (((in_dest
5239 && (code == SUBREG || code == STRICT_LOW_PART
5240 || code == ZERO_EXTRACT))
5241 || code == SET)
5242 && i == 0),
5243 code == IF_THEN_ELSE && i == 0,
5244 unique_copy);
5246 /* If we found that we will have to reject this combination,
5247 indicate that by returning the CLOBBER ourselves, rather than
5248 an expression containing it. This will speed things up as
5249 well as prevent accidents where two CLOBBERs are considered
5250 to be equal, thus producing an incorrect simplification. */
5252 if (GET_CODE (new_rtx) == CLOBBER && XEXP (new_rtx, 0) == const0_rtx)
5253 return new_rtx;
5255 if (GET_CODE (x) == SUBREG
5256 && (CONST_INT_P (new_rtx)
5257 || GET_CODE (new_rtx) == CONST_DOUBLE))
5259 enum machine_mode mode = GET_MODE (x);
5261 x = simplify_subreg (GET_MODE (x), new_rtx,
5262 GET_MODE (SUBREG_REG (x)),
5263 SUBREG_BYTE (x));
5264 if (! x)
5265 x = gen_rtx_CLOBBER (mode, const0_rtx);
5267 else if (CONST_INT_P (new_rtx)
5268 && GET_CODE (x) == ZERO_EXTEND)
5270 x = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
5271 new_rtx, GET_MODE (XEXP (x, 0)));
5272 gcc_assert (x);
5274 else
5275 SUBST (XEXP (x, i), new_rtx);
5280 /* Check if we are loading something from the constant pool via float
5281 extension; in this case we would undo compress_float_constant
5282 optimization and degenerate constant load to an immediate value. */
5283 if (GET_CODE (x) == FLOAT_EXTEND
5284 && MEM_P (XEXP (x, 0))
5285 && MEM_READONLY_P (XEXP (x, 0)))
5287 rtx tmp = avoid_constant_pool_reference (x);
5288 if (x != tmp)
5289 return x;
5292 /* Try to simplify X. If the simplification changed the code, it is likely
5293 that further simplification will help, so loop, but limit the number
5294 of repetitions that will be performed. */
5296 for (i = 0; i < 4; i++)
5298 /* If X is sufficiently simple, don't bother trying to do anything
5299 with it. */
5300 if (code != CONST_INT && code != REG && code != CLOBBER)
5301 x = combine_simplify_rtx (x, op0_mode, in_dest, in_cond);
5303 if (GET_CODE (x) == code)
5304 break;
5306 code = GET_CODE (x);
5308 /* We no longer know the original mode of operand 0 since we
5309 have changed the form of X.  */
5310 op0_mode = VOIDmode;
5313 return x;
5316 /* Simplify X, a piece of RTL. We just operate on the expression at the
5317 outer level; call `subst' to simplify recursively. Return the new
5318 expression.
5320 OP0_MODE is the original mode of XEXP (x, 0). IN_DEST is nonzero
5321 if we are inside a SET_DEST. IN_COND is nonzero if we are at the top level
5322 of a condition. */
5324 static rtx
5325 combine_simplify_rtx (rtx x, enum machine_mode op0_mode, int in_dest,
5326 int in_cond)
5328 enum rtx_code code = GET_CODE (x);
5329 enum machine_mode mode = GET_MODE (x);
5330 rtx temp;
5331 int i;
5333 /* If this is a commutative operation, put a constant last and a complex
5334 expression first. We don't need to do this for comparisons here. */
5335 if (COMMUTATIVE_ARITH_P (x)
5336 && swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
5338 temp = XEXP (x, 0);
5339 SUBST (XEXP (x, 0), XEXP (x, 1));
5340 SUBST (XEXP (x, 1), temp);
5343 /* If this is a simple operation applied to an IF_THEN_ELSE, try
5344 applying it to the arms of the IF_THEN_ELSE. This often simplifies
5345 things. Check for cases where both arms are testing the same
5346 condition.
5348 Don't do anything if all operands are very simple. */
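/* As an illustrative sketch: if X is (neg Y) and Y acts like
   (if_then_else COND A B), the code below considers
   (if_then_else COND (neg A) (neg B)) and keeps that form only when the
   arms collapse to something simpler.  */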
5350 if ((BINARY_P (x)
5351 && ((!OBJECT_P (XEXP (x, 0))
5352 && ! (GET_CODE (XEXP (x, 0)) == SUBREG
5353 && OBJECT_P (SUBREG_REG (XEXP (x, 0)))))
5354 || (!OBJECT_P (XEXP (x, 1))
5355 && ! (GET_CODE (XEXP (x, 1)) == SUBREG
5356 && OBJECT_P (SUBREG_REG (XEXP (x, 1)))))))
5357 || (UNARY_P (x)
5358 && (!OBJECT_P (XEXP (x, 0))
5359 && ! (GET_CODE (XEXP (x, 0)) == SUBREG
5360 && OBJECT_P (SUBREG_REG (XEXP (x, 0)))))))
5362 rtx cond, true_rtx, false_rtx;
5364 cond = if_then_else_cond (x, &true_rtx, &false_rtx);
5365 if (cond != 0
5366 /* If everything is a comparison, what we have is highly unlikely
5367 to be simpler, so don't use it. */
5368 && ! (COMPARISON_P (x)
5369 && (COMPARISON_P (true_rtx) || COMPARISON_P (false_rtx))))
5371 rtx cop1 = const0_rtx;
5372 enum rtx_code cond_code = simplify_comparison (NE, &cond, &cop1);
5374 if (cond_code == NE && COMPARISON_P (cond))
5375 return x;
5377 /* Simplify the alternative arms; this may collapse the true and
5378 false arms to store-flag values. Be careful to use copy_rtx
5379 here since true_rtx or false_rtx might share RTL with x as a
5380 result of the if_then_else_cond call above. */
5381 true_rtx = subst (copy_rtx (true_rtx), pc_rtx, pc_rtx, 0, 0, 0);
5382 false_rtx = subst (copy_rtx (false_rtx), pc_rtx, pc_rtx, 0, 0, 0);
5384 /* If true_rtx and false_rtx are not general_operands, an if_then_else
5385 is unlikely to be simpler. */
5386 if (general_operand (true_rtx, VOIDmode)
5387 && general_operand (false_rtx, VOIDmode))
5389 enum rtx_code reversed;
5391 /* Restarting if we generate a store-flag expression will cause
5392 us to loop. Just drop through in this case. */
5394 /* If the result values are STORE_FLAG_VALUE and zero, we can
5395 just make the comparison operation. */
5396 if (true_rtx == const_true_rtx && false_rtx == const0_rtx)
5397 x = simplify_gen_relational (cond_code, mode, VOIDmode,
5398 cond, cop1);
5399 else if (true_rtx == const0_rtx && false_rtx == const_true_rtx
5400 && ((reversed = reversed_comparison_code_parts
5401 (cond_code, cond, cop1, NULL))
5402 != UNKNOWN))
5403 x = simplify_gen_relational (reversed, mode, VOIDmode,
5404 cond, cop1);
5406 /* Likewise, we can make the negate of a comparison operation
5407 if the result values are - STORE_FLAG_VALUE and zero. */
5408 else if (CONST_INT_P (true_rtx)
5409 && INTVAL (true_rtx) == - STORE_FLAG_VALUE
5410 && false_rtx == const0_rtx)
5411 x = simplify_gen_unary (NEG, mode,
5412 simplify_gen_relational (cond_code,
5413 mode, VOIDmode,
5414 cond, cop1),
5415 mode);
5416 else if (CONST_INT_P (false_rtx)
5417 && INTVAL (false_rtx) == - STORE_FLAG_VALUE
5418 && true_rtx == const0_rtx
5419 && ((reversed = reversed_comparison_code_parts
5420 (cond_code, cond, cop1, NULL))
5421 != UNKNOWN))
5422 x = simplify_gen_unary (NEG, mode,
5423 simplify_gen_relational (reversed,
5424 mode, VOIDmode,
5425 cond, cop1),
5426 mode);
5427 else
5428 return gen_rtx_IF_THEN_ELSE (mode,
5429 simplify_gen_relational (cond_code,
5430 mode,
5431 VOIDmode,
5432 cond,
5433 cop1),
5434 true_rtx, false_rtx);
5436 code = GET_CODE (x);
5437 op0_mode = VOIDmode;
5442 /* Try to fold this expression in case we have constants that weren't
5443 present before. */
5444 temp = 0;
5445 switch (GET_RTX_CLASS (code))
5447 case RTX_UNARY:
5448 if (op0_mode == VOIDmode)
5449 op0_mode = GET_MODE (XEXP (x, 0));
5450 temp = simplify_unary_operation (code, mode, XEXP (x, 0), op0_mode);
5451 break;
5452 case RTX_COMPARE:
5453 case RTX_COMM_COMPARE:
5455 enum machine_mode cmp_mode = GET_MODE (XEXP (x, 0));
5456 if (cmp_mode == VOIDmode)
5458 cmp_mode = GET_MODE (XEXP (x, 1));
5459 if (cmp_mode == VOIDmode)
5460 cmp_mode = op0_mode;
5462 temp = simplify_relational_operation (code, mode, cmp_mode,
5463 XEXP (x, 0), XEXP (x, 1));
5465 break;
5466 case RTX_COMM_ARITH:
5467 case RTX_BIN_ARITH:
5468 temp = simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
5469 break;
5470 case RTX_BITFIELD_OPS:
5471 case RTX_TERNARY:
5472 temp = simplify_ternary_operation (code, mode, op0_mode, XEXP (x, 0),
5473 XEXP (x, 1), XEXP (x, 2));
5474 break;
5475 default:
5476 break;
5479 if (temp)
5481 x = temp;
5482 code = GET_CODE (temp);
5483 op0_mode = VOIDmode;
5484 mode = GET_MODE (temp);
5487 /* First see if we can apply the inverse distributive law. */
5488 if (code == PLUS || code == MINUS
5489 || code == AND || code == IOR || code == XOR)
5491 x = apply_distributive_law (x);
5492 code = GET_CODE (x);
5493 op0_mode = VOIDmode;
5496 /* If CODE is an associative operation not otherwise handled, see if we
5497 can associate some operands. This can win if they are constants or
5498 if they are logically related (i.e. (a & b) & a). */
5499 if ((code == PLUS || code == MINUS || code == MULT || code == DIV
5500 || code == AND || code == IOR || code == XOR
5501 || code == SMAX || code == SMIN || code == UMAX || code == UMIN)
5502 && ((INTEGRAL_MODE_P (mode) && code != DIV)
5503 || (flag_associative_math && FLOAT_MODE_P (mode))))
5505 if (GET_CODE (XEXP (x, 0)) == code)
5507 rtx other = XEXP (XEXP (x, 0), 0);
5508 rtx inner_op0 = XEXP (XEXP (x, 0), 1);
5509 rtx inner_op1 = XEXP (x, 1);
5510 rtx inner;
5512 /* Make sure we pass the constant operand if any as the second
5513 one if this is a commutative operation. */
5514 if (CONSTANT_P (inner_op0) && COMMUTATIVE_ARITH_P (x))
5516 rtx tem = inner_op0;
5517 inner_op0 = inner_op1;
5518 inner_op1 = tem;
5520 inner = simplify_binary_operation (code == MINUS ? PLUS
5521 : code == DIV ? MULT
5522 : code,
5523 mode, inner_op0, inner_op1);
5525 /* For commutative operations, try the other pair if that one
5526 didn't simplify. */
5527 if (inner == 0 && COMMUTATIVE_ARITH_P (x))
5529 other = XEXP (XEXP (x, 0), 1);
5530 inner = simplify_binary_operation (code, mode,
5531 XEXP (XEXP (x, 0), 0),
5532 XEXP (x, 1));
5535 if (inner)
5536 return simplify_gen_binary (code, mode, other, inner);
5540 /* A little bit of algebraic simplification here. */
5541 switch (code)
5543 case MEM:
5544 /* Ensure that our address has any ASHIFTs converted to MULT in case
5545 address-recognizing predicates are called later. */
5546 temp = make_compound_operation (XEXP (x, 0), MEM);
5547 SUBST (XEXP (x, 0), temp);
5548 break;
5550 case SUBREG:
5551 if (op0_mode == VOIDmode)
5552 op0_mode = GET_MODE (SUBREG_REG (x));
5554 /* See if this can be moved to simplify_subreg. */
5555 if (CONSTANT_P (SUBREG_REG (x))
5556 && subreg_lowpart_offset (mode, op0_mode) == SUBREG_BYTE (x)
5557 /* Don't call gen_lowpart if the inner mode
5558 is VOIDmode and we cannot simplify it, as SUBREG without
5559 inner mode is invalid. */
5560 && (GET_MODE (SUBREG_REG (x)) != VOIDmode
5561 || gen_lowpart_common (mode, SUBREG_REG (x))))
5562 return gen_lowpart (mode, SUBREG_REG (x));
5564 if (GET_MODE_CLASS (GET_MODE (SUBREG_REG (x))) == MODE_CC)
5565 break;
5567 rtx temp;
5568 temp = simplify_subreg (mode, SUBREG_REG (x), op0_mode,
5569 SUBREG_BYTE (x));
5570 if (temp)
5571 return temp;
5574 /* Don't change the mode of the MEM if that would change the meaning
5575 of the address. */
5576 if (MEM_P (SUBREG_REG (x))
5577 && (MEM_VOLATILE_P (SUBREG_REG (x))
5578 || mode_dependent_address_p (XEXP (SUBREG_REG (x), 0))))
5579 return gen_rtx_CLOBBER (mode, const0_rtx);
5581 /* Note that we cannot do any narrowing for non-constants since
5582 we might have been counting on using the fact that some bits were
5583 zero. We now do this in the SET. */
5585 break;
5587 case NEG:
5588 temp = expand_compound_operation (XEXP (x, 0));
5590 /* For C equal to the width of MODE minus 1, (neg (ashiftrt X C)) can be
5591 replaced by (lshiftrt X C). This will convert
5592 (neg (sign_extract X 1 Y)) to (zero_extract X 1 Y). */
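/* Worked example (assuming 32-bit SImode): (ashiftrt X 31) is 0 or -1
   depending on the sign of X, so (neg (ashiftrt X 31)) is 0 or 1, which
   is exactly (lshiftrt X 31).  */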
5594 if (GET_CODE (temp) == ASHIFTRT
5595 && CONST_INT_P (XEXP (temp, 1))
5596 && INTVAL (XEXP (temp, 1)) == GET_MODE_PRECISION (mode) - 1)
5597 return simplify_shift_const (NULL_RTX, LSHIFTRT, mode, XEXP (temp, 0),
5598 INTVAL (XEXP (temp, 1)));
5600 /* If X has only a single bit that might be nonzero, say, bit I, convert
5601 (neg X) to (ashiftrt (ashift X C-I) C-I) where C is the bitsize of
5602 MODE minus 1. This will convert (neg (zero_extract X 1 Y)) to
5603 (sign_extract X 1 Y). But only do this if TEMP isn't a register
5604 or a SUBREG of one since we'd be making the expression more
5605 complex if it was just a register. */
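/* Worked example (assuming 32-bit SImode): if only bit 3 of TEMP can be
   nonzero, TEMP is 0 or 8, so (neg TEMP) is 0 or -8, which equals
   (ashiftrt (ashift TEMP 28) 28).  */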
5607 if (!REG_P (temp)
5608 && ! (GET_CODE (temp) == SUBREG
5609 && REG_P (SUBREG_REG (temp)))
5610 && (i = exact_log2 (nonzero_bits (temp, mode))) >= 0)
5612 rtx temp1 = simplify_shift_const
5613 (NULL_RTX, ASHIFTRT, mode,
5614 simplify_shift_const (NULL_RTX, ASHIFT, mode, temp,
5615 GET_MODE_PRECISION (mode) - 1 - i),
5616 GET_MODE_PRECISION (mode) - 1 - i);
5618 /* If all we did was surround TEMP with the two shifts, we
5619 haven't improved anything, so don't use it. Otherwise,
5620 we are better off with TEMP1. */
5621 if (GET_CODE (temp1) != ASHIFTRT
5622 || GET_CODE (XEXP (temp1, 0)) != ASHIFT
5623 || XEXP (XEXP (temp1, 0), 0) != temp)
5624 return temp1;
5626 break;
5628 case TRUNCATE:
5629 /* We can't handle truncation to a partial integer mode here
5630 because we don't know the real bitsize of the partial
5631 integer mode. */
5632 if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
5633 break;
5635 if (HWI_COMPUTABLE_MODE_P (mode))
5636 SUBST (XEXP (x, 0),
5637 force_to_mode (XEXP (x, 0), GET_MODE (XEXP (x, 0)),
5638 GET_MODE_MASK (mode), 0));
5640 /* We can truncate a constant value and return it. */
5641 if (CONST_INT_P (XEXP (x, 0)))
5642 return gen_int_mode (INTVAL (XEXP (x, 0)), mode);
5644 /* Similarly to what we do in simplify-rtx.c, a truncate of a register
5645 whose value is a comparison can be replaced with a subreg if
5646 STORE_FLAG_VALUE permits. */
5647 if (HWI_COMPUTABLE_MODE_P (mode)
5648 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0
5649 && (temp = get_last_value (XEXP (x, 0)))
5650 && COMPARISON_P (temp))
5651 return gen_lowpart (mode, XEXP (x, 0));
5652 break;
5654 case CONST:
5655 /* (const (const X)) can become (const X). Do it this way rather than
5656 returning the inner CONST since CONST can be shared with a
5657 REG_EQUAL note. */
5658 if (GET_CODE (XEXP (x, 0)) == CONST)
5659 SUBST (XEXP (x, 0), XEXP (XEXP (x, 0), 0));
5660 break;
5662 #ifdef HAVE_lo_sum
5663 case LO_SUM:
5664 /* Convert (lo_sum (high FOO) FOO) to FOO. This is necessary so we
5665 can add in an offset. find_split_point will split this address up
5666 again if it doesn't match. */
5667 if (GET_CODE (XEXP (x, 0)) == HIGH
5668 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
5669 return XEXP (x, 1);
5670 break;
5671 #endif
5673 case PLUS:
5674 /* (plus (xor (and <foo> (const_int pow2 - 1)) <c>) <-c>)
5675 when c is (const_int (pow2 + 1) / 2) is a sign extension of a
5676 bit-field and can be replaced by either a sign_extend or a
5677 sign_extract. The `and' may be a zero_extend and the two
5678 <c>, -<c> constants may be reversed. */
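/* Worked example (assuming 32-bit SImode): with an 8-bit field,
   (plus (xor (and X 255) 128) -128) sign-extends the low byte of X, so it
   becomes (ashiftrt (ashift X 24) 24).  */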
5679 if (GET_CODE (XEXP (x, 0)) == XOR
5680 && CONST_INT_P (XEXP (x, 1))
5681 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
5682 && INTVAL (XEXP (x, 1)) == -INTVAL (XEXP (XEXP (x, 0), 1))
5683 && ((i = exact_log2 (UINTVAL (XEXP (XEXP (x, 0), 1)))) >= 0
5684 || (i = exact_log2 (UINTVAL (XEXP (x, 1)))) >= 0)
5685 && HWI_COMPUTABLE_MODE_P (mode)
5686 && ((GET_CODE (XEXP (XEXP (x, 0), 0)) == AND
5687 && CONST_INT_P (XEXP (XEXP (XEXP (x, 0), 0), 1))
5688 && (UINTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1))
5689 == ((unsigned HOST_WIDE_INT) 1 << (i + 1)) - 1))
5690 || (GET_CODE (XEXP (XEXP (x, 0), 0)) == ZERO_EXTEND
5691 && (GET_MODE_PRECISION (GET_MODE (XEXP (XEXP (XEXP (x, 0), 0), 0)))
5692 == (unsigned int) i + 1))))
5693 return simplify_shift_const
5694 (NULL_RTX, ASHIFTRT, mode,
5695 simplify_shift_const (NULL_RTX, ASHIFT, mode,
5696 XEXP (XEXP (XEXP (x, 0), 0), 0),
5697 GET_MODE_PRECISION (mode) - (i + 1)),
5698 GET_MODE_PRECISION (mode) - (i + 1));
5700 /* If only the low-order bit of X is possibly nonzero, (plus x -1)
5701 can become (ashiftrt (ashift (xor x 1) C) C) where C is
5702 the bitsize of the mode - 1. This allows simplification of
5703 "a = (b & 8) == 0;" */
5704 if (XEXP (x, 1) == constm1_rtx
5705 && !REG_P (XEXP (x, 0))
5706 && ! (GET_CODE (XEXP (x, 0)) == SUBREG
5707 && REG_P (SUBREG_REG (XEXP (x, 0))))
5708 && nonzero_bits (XEXP (x, 0), mode) == 1)
5709 return simplify_shift_const (NULL_RTX, ASHIFTRT, mode,
5710 simplify_shift_const (NULL_RTX, ASHIFT, mode,
5711 gen_rtx_XOR (mode, XEXP (x, 0), const1_rtx),
5712 GET_MODE_PRECISION (mode) - 1),
5713 GET_MODE_PRECISION (mode) - 1);
5715 /* If we are adding two things that have no bits in common, convert
5716 the addition into an IOR. This will often be further simplified,
5717 for example in cases like ((a & 1) + (a & 2)), which can
5718 become a & 3. */
5720 if (HWI_COMPUTABLE_MODE_P (mode)
5721 && (nonzero_bits (XEXP (x, 0), mode)
5722 & nonzero_bits (XEXP (x, 1), mode)) == 0)
5724 /* Try to simplify the expression further. */
5725 rtx tor = simplify_gen_binary (IOR, mode, XEXP (x, 0), XEXP (x, 1));
5726 temp = combine_simplify_rtx (tor, VOIDmode, in_dest, 0);
5728 /* If we could, great. If not, do not go ahead with the IOR
5729 replacement, since PLUS appears in many special purpose
5730 address arithmetic instructions. */
5731 if (GET_CODE (temp) != CLOBBER
5732 && (GET_CODE (temp) != IOR
5733 || ((XEXP (temp, 0) != XEXP (x, 0)
5734 || XEXP (temp, 1) != XEXP (x, 1))
5735 && (XEXP (temp, 0) != XEXP (x, 1)
5736 || XEXP (temp, 1) != XEXP (x, 0)))))
5737 return temp;
5739 break;
5741 case MINUS:
5742 /* (minus <foo> (and <foo> (const_int -pow2))) becomes
5743 (and <foo> (const_int pow2-1)) */
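/* Worked example: with pow2 == 8, (minus FOO (and FOO -8)) keeps only the
   low three bits of FOO, i.e. (and FOO 7).  */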
5744 if (GET_CODE (XEXP (x, 1)) == AND
5745 && CONST_INT_P (XEXP (XEXP (x, 1), 1))
5746 && exact_log2 (-UINTVAL (XEXP (XEXP (x, 1), 1))) >= 0
5747 && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
5748 return simplify_and_const_int (NULL_RTX, mode, XEXP (x, 0),
5749 -INTVAL (XEXP (XEXP (x, 1), 1)) - 1);
5750 break;
5752 case MULT:
5753 /* If we have (mult (plus A B) C), apply the distributive law and then
5754 the inverse distributive law to see if things simplify. This
5755 occurs mostly in addresses, often when unrolling loops. */
5757 if (GET_CODE (XEXP (x, 0)) == PLUS)
5759 rtx result = distribute_and_simplify_rtx (x, 0);
5760 if (result)
5761 return result;
5764 /* Try to simplify a*(b/c) as (a*b)/c.  */
5765 if (FLOAT_MODE_P (mode) && flag_associative_math
5766 && GET_CODE (XEXP (x, 0)) == DIV)
5768 rtx tem = simplify_binary_operation (MULT, mode,
5769 XEXP (XEXP (x, 0), 0),
5770 XEXP (x, 1));
5771 if (tem)
5772 return simplify_gen_binary (DIV, mode, tem, XEXP (XEXP (x, 0), 1));
5774 break;
5776 case UDIV:
5777 /* If this is a divide by a power of two, treat it as a shift if
5778 its first operand is a shift. */
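/* Worked example: (udiv (lshiftrt X 2) 4) is rewritten as a further
   logical right shift by log2 (4) of the inner shift, which should fold
   to (lshiftrt X 4).  */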
5779 if (CONST_INT_P (XEXP (x, 1))
5780 && (i = exact_log2 (UINTVAL (XEXP (x, 1)))) >= 0
5781 && (GET_CODE (XEXP (x, 0)) == ASHIFT
5782 || GET_CODE (XEXP (x, 0)) == LSHIFTRT
5783 || GET_CODE (XEXP (x, 0)) == ASHIFTRT
5784 || GET_CODE (XEXP (x, 0)) == ROTATE
5785 || GET_CODE (XEXP (x, 0)) == ROTATERT))
5786 return simplify_shift_const (NULL_RTX, LSHIFTRT, mode, XEXP (x, 0), i);
5787 break;
5789 case EQ: case NE:
5790 case GT: case GTU: case GE: case GEU:
5791 case LT: case LTU: case LE: case LEU:
5792 case UNEQ: case LTGT:
5793 case UNGT: case UNGE:
5794 case UNLT: case UNLE:
5795 case UNORDERED: case ORDERED:
5796 /* If the first operand is a condition code, we can't do anything
5797 with it. */
5798 if (GET_CODE (XEXP (x, 0)) == COMPARE
5799 || (GET_MODE_CLASS (GET_MODE (XEXP (x, 0))) != MODE_CC
5800 && ! CC0_P (XEXP (x, 0))))
5802 rtx op0 = XEXP (x, 0);
5803 rtx op1 = XEXP (x, 1);
5804 enum rtx_code new_code;
5806 if (GET_CODE (op0) == COMPARE)
5807 op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);
5809 /* Simplify our comparison, if possible. */
5810 new_code = simplify_comparison (code, &op0, &op1);
5812 /* If STORE_FLAG_VALUE is 1, we can convert (ne x 0) to simply X
5813 if only the low-order bit is possibly nonzero in X (such as when
5814 X is a ZERO_EXTRACT of one bit). Similarly, we can convert EQ to
5815 (xor X 1) or (minus 1 X); we use the former. Finally, if X is
5816 known to be either 0 or -1, NE becomes a NEG and EQ becomes
5817 (plus X 1).
5819 Remove any ZERO_EXTRACT we made when thinking this was a
5820 comparison. It may now be simpler to use, e.g., an AND. If a
5821 ZERO_EXTRACT is indeed appropriate, it will be placed back by
5822 the call to make_compound_operation in the SET case.
5824 Don't apply these optimizations if the caller would
5825 prefer a comparison rather than a value.
5826 E.g., for the condition in an IF_THEN_ELSE most targets need
5827 an explicit comparison. */
5829 if (in_cond)
5832 else if (STORE_FLAG_VALUE == 1
5833 && new_code == NE && GET_MODE_CLASS (mode) == MODE_INT
5834 && op1 == const0_rtx
5835 && mode == GET_MODE (op0)
5836 && nonzero_bits (op0, mode) == 1)
5837 return gen_lowpart (mode,
5838 expand_compound_operation (op0));
5840 else if (STORE_FLAG_VALUE == 1
5841 && new_code == NE && GET_MODE_CLASS (mode) == MODE_INT
5842 && op1 == const0_rtx
5843 && mode == GET_MODE (op0)
5844 && (num_sign_bit_copies (op0, mode)
5845 == GET_MODE_PRECISION (mode)))
5847 op0 = expand_compound_operation (op0);
5848 return simplify_gen_unary (NEG, mode,
5849 gen_lowpart (mode, op0),
5850 mode);
5853 else if (STORE_FLAG_VALUE == 1
5854 && new_code == EQ && GET_MODE_CLASS (mode) == MODE_INT
5855 && op1 == const0_rtx
5856 && mode == GET_MODE (op0)
5857 && nonzero_bits (op0, mode) == 1)
5859 op0 = expand_compound_operation (op0);
5860 return simplify_gen_binary (XOR, mode,
5861 gen_lowpart (mode, op0),
5862 const1_rtx);
5865 else if (STORE_FLAG_VALUE == 1
5866 && new_code == EQ && GET_MODE_CLASS (mode) == MODE_INT
5867 && op1 == const0_rtx
5868 && mode == GET_MODE (op0)
5869 && (num_sign_bit_copies (op0, mode)
5870 == GET_MODE_PRECISION (mode)))
5872 op0 = expand_compound_operation (op0);
5873 return plus_constant (mode, gen_lowpart (mode, op0), 1);
5876 /* If STORE_FLAG_VALUE is -1, we have cases similar to
5877 those above. */
5878 if (in_cond)
5881 else if (STORE_FLAG_VALUE == -1
5882 && new_code == NE && GET_MODE_CLASS (mode) == MODE_INT
5883 && op1 == const0_rtx
5884 && (num_sign_bit_copies (op0, mode)
5885 == GET_MODE_PRECISION (mode)))
5886 return gen_lowpart (mode,
5887 expand_compound_operation (op0));
5889 else if (STORE_FLAG_VALUE == -1
5890 && new_code == NE && GET_MODE_CLASS (mode) == MODE_INT
5891 && op1 == const0_rtx
5892 && mode == GET_MODE (op0)
5893 && nonzero_bits (op0, mode) == 1)
5895 op0 = expand_compound_operation (op0);
5896 return simplify_gen_unary (NEG, mode,
5897 gen_lowpart (mode, op0),
5898 mode);
5901 else if (STORE_FLAG_VALUE == -1
5902 && new_code == EQ && GET_MODE_CLASS (mode) == MODE_INT
5903 && op1 == const0_rtx
5904 && mode == GET_MODE (op0)
5905 && (num_sign_bit_copies (op0, mode)
5906 == GET_MODE_PRECISION (mode)))
5908 op0 = expand_compound_operation (op0);
5909 return simplify_gen_unary (NOT, mode,
5910 gen_lowpart (mode, op0),
5911 mode);
5914 /* If X is 0/1, (eq X 0) is X-1. */
5915 else if (STORE_FLAG_VALUE == -1
5916 && new_code == EQ && GET_MODE_CLASS (mode) == MODE_INT
5917 && op1 == const0_rtx
5918 && mode == GET_MODE (op0)
5919 && nonzero_bits (op0, mode) == 1)
5921 op0 = expand_compound_operation (op0);
5922 return plus_constant (mode, gen_lowpart (mode, op0), -1);
5925 /* If STORE_FLAG_VALUE says to just test the sign bit and X has just
5926 one bit that might be nonzero, we can convert (ne x 0) to
5927 (ashift x c) where C puts the bit in the sign bit. Remove any
5928 AND with STORE_FLAG_VALUE when we are done, since we are only
5929 going to test the sign bit. */
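/* Worked example (assuming 32-bit SImode and a STORE_FLAG_VALUE of
   0x80000000): if only bit 3 of X can be nonzero, (ne X 0) is represented
   as (ashift X 28), which puts that bit into the sign position.  */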
5930 if (new_code == NE && GET_MODE_CLASS (mode) == MODE_INT
5931 && HWI_COMPUTABLE_MODE_P (mode)
5932 && val_signbit_p (mode, STORE_FLAG_VALUE)
5933 && op1 == const0_rtx
5934 && mode == GET_MODE (op0)
5935 && (i = exact_log2 (nonzero_bits (op0, mode))) >= 0)
5937 x = simplify_shift_const (NULL_RTX, ASHIFT, mode,
5938 expand_compound_operation (op0),
5939 GET_MODE_PRECISION (mode) - 1 - i);
5940 if (GET_CODE (x) == AND && XEXP (x, 1) == const_true_rtx)
5941 return XEXP (x, 0);
5942 else
5943 return x;
5946 /* If the code changed, return a whole new comparison. */
5947 if (new_code != code)
5948 return gen_rtx_fmt_ee (new_code, mode, op0, op1);
5950 /* Otherwise, keep this operation, but maybe change its operands.
5951 This also converts (ne (compare FOO BAR) 0) to (ne FOO BAR). */
5952 SUBST (XEXP (x, 0), op0);
5953 SUBST (XEXP (x, 1), op1);
5955 break;
5957 case IF_THEN_ELSE:
5958 return simplify_if_then_else (x);
5960 case ZERO_EXTRACT:
5961 case SIGN_EXTRACT:
5962 case ZERO_EXTEND:
5963 case SIGN_EXTEND:
5964 /* If we are processing SET_DEST, we are done. */
5965 if (in_dest)
5966 return x;
5968 return expand_compound_operation (x);
5970 case SET:
5971 return simplify_set (x);
5973 case AND:
5974 case IOR:
5975 return simplify_logical (x);
5977 case ASHIFT:
5978 case LSHIFTRT:
5979 case ASHIFTRT:
5980 case ROTATE:
5981 case ROTATERT:
5982 /* If this is a shift by a constant amount, simplify it. */
5983 if (CONST_INT_P (XEXP (x, 1)))
5984 return simplify_shift_const (x, code, mode, XEXP (x, 0),
5985 INTVAL (XEXP (x, 1)));
5987 else if (SHIFT_COUNT_TRUNCATED && !REG_P (XEXP (x, 1)))
5988 SUBST (XEXP (x, 1),
5989 force_to_mode (XEXP (x, 1), GET_MODE (XEXP (x, 1)),
5990 ((unsigned HOST_WIDE_INT) 1
5991 << exact_log2 (GET_MODE_BITSIZE (GET_MODE (x))))
5992 - 1,
5993 0));
5994 break;
5996 default:
5997 break;
6000 return x;
6003 /* Simplify X, an IF_THEN_ELSE expression. Return the new expression. */
6005 static rtx
6006 simplify_if_then_else (rtx x)
6008 enum machine_mode mode = GET_MODE (x);
6009 rtx cond = XEXP (x, 0);
6010 rtx true_rtx = XEXP (x, 1);
6011 rtx false_rtx = XEXP (x, 2);
6012 enum rtx_code true_code = GET_CODE (cond);
6013 int comparison_p = COMPARISON_P (cond);
6014 rtx temp;
6015 int i;
6016 enum rtx_code false_code;
6017 rtx reversed;
6019 /* Simplify storing of the truth value. */
6020 if (comparison_p && true_rtx == const_true_rtx && false_rtx == const0_rtx)
6021 return simplify_gen_relational (true_code, mode, VOIDmode,
6022 XEXP (cond, 0), XEXP (cond, 1));
6024 /* Also when the truth value has to be reversed. */
6025 if (comparison_p
6026 && true_rtx == const0_rtx && false_rtx == const_true_rtx
6027 && (reversed = reversed_comparison (cond, mode)))
6028 return reversed;
6030 /* Sometimes we can simplify the arm of an IF_THEN_ELSE if a register used
6031 in it is being compared against certain values. Get the true and false
6032 comparisons and see if that says anything about the value of each arm. */
6034 if (comparison_p
6035 && ((false_code = reversed_comparison_code (cond, NULL))
6036 != UNKNOWN)
6037 && REG_P (XEXP (cond, 0)))
6039 HOST_WIDE_INT nzb;
6040 rtx from = XEXP (cond, 0);
6041 rtx true_val = XEXP (cond, 1);
6042 rtx false_val = true_val;
6043 int swapped = 0;
6045 /* If FALSE_CODE is EQ, swap the codes and arms. */
6047 if (false_code == EQ)
6049 swapped = 1, true_code = EQ, false_code = NE;
6050 temp = true_rtx, true_rtx = false_rtx, false_rtx = temp;
6053 /* If we are comparing against zero and the expression being tested has
6054 only a single bit that might be nonzero, that is its value when it is
6055 not equal to zero. Similarly if it is known to be -1 or 0. */
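/* Worked example: if nonzero_bits (FROM) is 4, then FROM must be 4 on the
   (ne FROM 0) branch and 0 on the (eq FROM 0) branch, so each arm can be
   simplified with that known value.  */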
6057 if (true_code == EQ && true_val == const0_rtx
6058 && exact_log2 (nzb = nonzero_bits (from, GET_MODE (from))) >= 0)
6060 false_code = EQ;
6061 false_val = gen_int_mode (nzb, GET_MODE (from));
6063 else if (true_code == EQ && true_val == const0_rtx
6064 && (num_sign_bit_copies (from, GET_MODE (from))
6065 == GET_MODE_PRECISION (GET_MODE (from))))
6067 false_code = EQ;
6068 false_val = constm1_rtx;
6071 /* Now simplify an arm if we know the value of the register in the
6072 branch and it is used in the arm. Be careful due to the potential
6073 of locally-shared RTL. */
6075 if (reg_mentioned_p (from, true_rtx))
6076 true_rtx = subst (known_cond (copy_rtx (true_rtx), true_code,
6077 from, true_val),
6078 pc_rtx, pc_rtx, 0, 0, 0);
6079 if (reg_mentioned_p (from, false_rtx))
6080 false_rtx = subst (known_cond (copy_rtx (false_rtx), false_code,
6081 from, false_val),
6082 pc_rtx, pc_rtx, 0, 0, 0);
6084 SUBST (XEXP (x, 1), swapped ? false_rtx : true_rtx);
6085 SUBST (XEXP (x, 2), swapped ? true_rtx : false_rtx);
6087 true_rtx = XEXP (x, 1);
6088 false_rtx = XEXP (x, 2);
6089 true_code = GET_CODE (cond);
6092 /* If we have (if_then_else FOO (pc) (label_ref BAR)) and FOO can be
6093 reversed, do so to avoid needing two sets of patterns for
6094 subtract-and-branch insns. Similarly if we have a constant in the true
6095 arm, the false arm is the same as the first operand of the comparison, or
6096 the false arm is more complicated than the true arm. */
6098 if (comparison_p
6099 && reversed_comparison_code (cond, NULL) != UNKNOWN
6100 && (true_rtx == pc_rtx
6101 || (CONSTANT_P (true_rtx)
6102 && !CONST_INT_P (false_rtx) && false_rtx != pc_rtx)
6103 || true_rtx == const0_rtx
6104 || (OBJECT_P (true_rtx) && !OBJECT_P (false_rtx))
6105 || (GET_CODE (true_rtx) == SUBREG && OBJECT_P (SUBREG_REG (true_rtx))
6106 && !OBJECT_P (false_rtx))
6107 || reg_mentioned_p (true_rtx, false_rtx)
6108 || rtx_equal_p (false_rtx, XEXP (cond, 0))))
6110 true_code = reversed_comparison_code (cond, NULL);
6111 SUBST (XEXP (x, 0), reversed_comparison (cond, GET_MODE (cond)));
6112 SUBST (XEXP (x, 1), false_rtx);
6113 SUBST (XEXP (x, 2), true_rtx);
6115 temp = true_rtx, true_rtx = false_rtx, false_rtx = temp;
6116 cond = XEXP (x, 0);
6118 /* It is possible that the conditional has been simplified out. */
6119 true_code = GET_CODE (cond);
6120 comparison_p = COMPARISON_P (cond);
6123 /* If the two arms are identical, we don't need the comparison. */
6125 if (rtx_equal_p (true_rtx, false_rtx) && ! side_effects_p (cond))
6126 return true_rtx;
6128 /* Convert a == b ? b : a to "a". */
6129 if (true_code == EQ && ! side_effects_p (cond)
6130 && !HONOR_NANS (mode)
6131 && rtx_equal_p (XEXP (cond, 0), false_rtx)
6132 && rtx_equal_p (XEXP (cond, 1), true_rtx))
6133 return false_rtx;
6134 else if (true_code == NE && ! side_effects_p (cond)
6135 && !HONOR_NANS (mode)
6136 && rtx_equal_p (XEXP (cond, 0), true_rtx)
6137 && rtx_equal_p (XEXP (cond, 1), false_rtx))
6138 return true_rtx;
6140 /* Look for cases where we have (abs x) or (neg (abs X)). */
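/* Worked example: (if_then_else (ge X 0) X (neg X)) is (abs X), and
   (if_then_else (lt X 0) X (neg X)) is (neg (abs X)).  */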
6142 if (GET_MODE_CLASS (mode) == MODE_INT
6143 && comparison_p
6144 && XEXP (cond, 1) == const0_rtx
6145 && GET_CODE (false_rtx) == NEG
6146 && rtx_equal_p (true_rtx, XEXP (false_rtx, 0))
6147 && rtx_equal_p (true_rtx, XEXP (cond, 0))
6148 && ! side_effects_p (true_rtx))
6149 switch (true_code)
6151 case GT:
6152 case GE:
6153 return simplify_gen_unary (ABS, mode, true_rtx, mode);
6154 case LT:
6155 case LE:
6156 return
6157 simplify_gen_unary (NEG, mode,
6158 simplify_gen_unary (ABS, mode, true_rtx, mode),
6159 mode);
6160 default:
6161 break;
6164 /* Look for MIN or MAX. */
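/* Worked example: (if_then_else (gt A B) A B) is (smax A B), and
   (if_then_else (ltu A B) A B) is (umin A B).  */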
6166 if ((! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
6167 && comparison_p
6168 && rtx_equal_p (XEXP (cond, 0), true_rtx)
6169 && rtx_equal_p (XEXP (cond, 1), false_rtx)
6170 && ! side_effects_p (cond))
6171 switch (true_code)
6173 case GE:
6174 case GT:
6175 return simplify_gen_binary (SMAX, mode, true_rtx, false_rtx);
6176 case LE:
6177 case LT:
6178 return simplify_gen_binary (SMIN, mode, true_rtx, false_rtx);
6179 case GEU:
6180 case GTU:
6181 return simplify_gen_binary (UMAX, mode, true_rtx, false_rtx);
6182 case LEU:
6183 case LTU:
6184 return simplify_gen_binary (UMIN, mode, true_rtx, false_rtx);
6185 default:
6186 break;
6189 /* If we have (if_then_else COND (OP Z C1) Z) and OP is an identity when its
6190 second operand is zero, this can be done as (OP Z (mult COND C2)) where
6191 C2 = C1 * STORE_FLAG_VALUE. Similarly if OP has an outer ZERO_EXTEND or
6192 SIGN_EXTEND as long as Z is already extended (so we don't destroy it).
6193 We can do this kind of thing in some cases when STORE_FLAG_VALUE is
6194 neither 1 nor -1, but it isn't worth checking for. */
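/* Worked example (assuming STORE_FLAG_VALUE of 1):
   (if_then_else COND (plus Z 4) Z) becomes (plus Z (mult COND 4));
   since COND is 1 or 0, the multiply contributes 4 or nothing.  */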
6196 if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
6197 && comparison_p
6198 && GET_MODE_CLASS (mode) == MODE_INT
6199 && ! side_effects_p (x))
6201 rtx t = make_compound_operation (true_rtx, SET);
6202 rtx f = make_compound_operation (false_rtx, SET);
6203 rtx cond_op0 = XEXP (cond, 0);
6204 rtx cond_op1 = XEXP (cond, 1);
6205 enum rtx_code op = UNKNOWN, extend_op = UNKNOWN;
6206 enum machine_mode m = mode;
6207 rtx z = 0, c1 = NULL_RTX;
6209 if ((GET_CODE (t) == PLUS || GET_CODE (t) == MINUS
6210 || GET_CODE (t) == IOR || GET_CODE (t) == XOR
6211 || GET_CODE (t) == ASHIFT
6212 || GET_CODE (t) == LSHIFTRT || GET_CODE (t) == ASHIFTRT)
6213 && rtx_equal_p (XEXP (t, 0), f))
6214 c1 = XEXP (t, 1), op = GET_CODE (t), z = f;
6216 /* If an identity-zero op is commutative, check whether there
6217 would be a match if we swapped the operands. */
6218 else if ((GET_CODE (t) == PLUS || GET_CODE (t) == IOR
6219 || GET_CODE (t) == XOR)
6220 && rtx_equal_p (XEXP (t, 1), f))
6221 c1 = XEXP (t, 0), op = GET_CODE (t), z = f;
6222 else if (GET_CODE (t) == SIGN_EXTEND
6223 && (GET_CODE (XEXP (t, 0)) == PLUS
6224 || GET_CODE (XEXP (t, 0)) == MINUS
6225 || GET_CODE (XEXP (t, 0)) == IOR
6226 || GET_CODE (XEXP (t, 0)) == XOR
6227 || GET_CODE (XEXP (t, 0)) == ASHIFT
6228 || GET_CODE (XEXP (t, 0)) == LSHIFTRT
6229 || GET_CODE (XEXP (t, 0)) == ASHIFTRT)
6230 && GET_CODE (XEXP (XEXP (t, 0), 0)) == SUBREG
6231 && subreg_lowpart_p (XEXP (XEXP (t, 0), 0))
6232 && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 0)), f)
6233 && (num_sign_bit_copies (f, GET_MODE (f))
6234 > (unsigned int)
6235 (GET_MODE_PRECISION (mode)
6236 - GET_MODE_PRECISION (GET_MODE (XEXP (XEXP (t, 0), 0))))))
6238 c1 = XEXP (XEXP (t, 0), 1); z = f; op = GET_CODE (XEXP (t, 0));
6239 extend_op = SIGN_EXTEND;
6240 m = GET_MODE (XEXP (t, 0));
6242 else if (GET_CODE (t) == SIGN_EXTEND
6243 && (GET_CODE (XEXP (t, 0)) == PLUS
6244 || GET_CODE (XEXP (t, 0)) == IOR
6245 || GET_CODE (XEXP (t, 0)) == XOR)
6246 && GET_CODE (XEXP (XEXP (t, 0), 1)) == SUBREG
6247 && subreg_lowpart_p (XEXP (XEXP (t, 0), 1))
6248 && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 1)), f)
6249 && (num_sign_bit_copies (f, GET_MODE (f))
6250 > (unsigned int)
6251 (GET_MODE_PRECISION (mode)
6252 - GET_MODE_PRECISION (GET_MODE (XEXP (XEXP (t, 0), 1))))))
6254 c1 = XEXP (XEXP (t, 0), 0); z = f; op = GET_CODE (XEXP (t, 0));
6255 extend_op = SIGN_EXTEND;
6256 m = GET_MODE (XEXP (t, 0));
6258 else if (GET_CODE (t) == ZERO_EXTEND
6259 && (GET_CODE (XEXP (t, 0)) == PLUS
6260 || GET_CODE (XEXP (t, 0)) == MINUS
6261 || GET_CODE (XEXP (t, 0)) == IOR
6262 || GET_CODE (XEXP (t, 0)) == XOR
6263 || GET_CODE (XEXP (t, 0)) == ASHIFT
6264 || GET_CODE (XEXP (t, 0)) == LSHIFTRT
6265 || GET_CODE (XEXP (t, 0)) == ASHIFTRT)
6266 && GET_CODE (XEXP (XEXP (t, 0), 0)) == SUBREG
6267 && HWI_COMPUTABLE_MODE_P (mode)
6268 && subreg_lowpart_p (XEXP (XEXP (t, 0), 0))
6269 && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 0)), f)
6270 && ((nonzero_bits (f, GET_MODE (f))
6271 & ~GET_MODE_MASK (GET_MODE (XEXP (XEXP (t, 0), 0))))
6272 == 0))
6274 c1 = XEXP (XEXP (t, 0), 1); z = f; op = GET_CODE (XEXP (t, 0));
6275 extend_op = ZERO_EXTEND;
6276 m = GET_MODE (XEXP (t, 0));
6278 else if (GET_CODE (t) == ZERO_EXTEND
6279 && (GET_CODE (XEXP (t, 0)) == PLUS
6280 || GET_CODE (XEXP (t, 0)) == IOR
6281 || GET_CODE (XEXP (t, 0)) == XOR)
6282 && GET_CODE (XEXP (XEXP (t, 0), 1)) == SUBREG
6283 && HWI_COMPUTABLE_MODE_P (mode)
6284 && subreg_lowpart_p (XEXP (XEXP (t, 0), 1))
6285 && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 1)), f)
6286 && ((nonzero_bits (f, GET_MODE (f))
6287 & ~GET_MODE_MASK (GET_MODE (XEXP (XEXP (t, 0), 1))))
6288 == 0))
6290 c1 = XEXP (XEXP (t, 0), 0); z = f; op = GET_CODE (XEXP (t, 0));
6291 extend_op = ZERO_EXTEND;
6292 m = GET_MODE (XEXP (t, 0));
6295 if (z)
6297 temp = subst (simplify_gen_relational (true_code, m, VOIDmode,
6298 cond_op0, cond_op1),
6299 pc_rtx, pc_rtx, 0, 0, 0);
6300 temp = simplify_gen_binary (MULT, m, temp,
6301 simplify_gen_binary (MULT, m, c1,
6302 const_true_rtx));
6303 temp = subst (temp, pc_rtx, pc_rtx, 0, 0, 0);
6304 temp = simplify_gen_binary (op, m, gen_lowpart (m, z), temp);
6306 if (extend_op != UNKNOWN)
6307 temp = simplify_gen_unary (extend_op, mode, temp, m);
6309 return temp;
6313 /* If we have (if_then_else (ne A 0) C1 0) and either A is known to be 0 or
6314 1 and C1 is a single bit or A is known to be 0 or -1 and C1 is the
6315 negation of a single bit, we can convert this operation to a shift. We
6316 can actually do this more generally, but it doesn't seem worth it. */
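/* Worked example: if A is known to be 0 or 1, then
   (if_then_else (ne A 0) 8 0) is just (ashift A 3).  */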
6318 if (true_code == NE && XEXP (cond, 1) == const0_rtx
6319 && false_rtx == const0_rtx && CONST_INT_P (true_rtx)
6320 && ((1 == nonzero_bits (XEXP (cond, 0), mode)
6321 && (i = exact_log2 (UINTVAL (true_rtx))) >= 0)
6322 || ((num_sign_bit_copies (XEXP (cond, 0), mode)
6323 == GET_MODE_PRECISION (mode))
6324 && (i = exact_log2 (-UINTVAL (true_rtx))) >= 0)))
6325 return
6326 simplify_shift_const (NULL_RTX, ASHIFT, mode,
6327 gen_lowpart (mode, XEXP (cond, 0)), i);
6329 /* (IF_THEN_ELSE (NE REG 0) (8) (0)) is REG for nonzero_bits (REG) == 8. */
6330 if (true_code == NE && XEXP (cond, 1) == const0_rtx
6331 && false_rtx == const0_rtx && CONST_INT_P (true_rtx)
6332 && GET_MODE (XEXP (cond, 0)) == mode
6333 && (UINTVAL (true_rtx) & GET_MODE_MASK (mode))
6334 == nonzero_bits (XEXP (cond, 0), mode)
6335 && (i = exact_log2 (UINTVAL (true_rtx) & GET_MODE_MASK (mode))) >= 0)
6336 return XEXP (cond, 0);
6338 return x;
6341 /* Simplify X, a SET expression. Return the new expression. */
6343 static rtx
6344 simplify_set (rtx x)
6346 rtx src = SET_SRC (x);
6347 rtx dest = SET_DEST (x);
6348 enum machine_mode mode
6349 = GET_MODE (src) != VOIDmode ? GET_MODE (src) : GET_MODE (dest);
6350 rtx other_insn;
6351 rtx *cc_use;
6353 /* (set (pc) (return)) gets written as (return). */
6354 if (GET_CODE (dest) == PC && ANY_RETURN_P (src))
6355 return src;
6357 /* Now that we know for sure which bits of SRC we are using, see if we can
6358 simplify the expression for the object knowing that we only need the
6359 low-order bits. */
6361 if (GET_MODE_CLASS (mode) == MODE_INT && HWI_COMPUTABLE_MODE_P (mode))
6363 src = force_to_mode (src, mode, ~(unsigned HOST_WIDE_INT) 0, 0);
6364 SUBST (SET_SRC (x), src);
6367 /* If we are setting CC0 or if the source is a COMPARE, look for the use of
6368 the comparison result and try to simplify it unless we already have used
6369 undobuf.other_insn. */
6370 if ((GET_MODE_CLASS (mode) == MODE_CC
6371 || GET_CODE (src) == COMPARE
6372 || CC0_P (dest))
6373 && (cc_use = find_single_use (dest, subst_insn, &other_insn)) != 0
6374 && (undobuf.other_insn == 0 || other_insn == undobuf.other_insn)
6375 && COMPARISON_P (*cc_use)
6376 && rtx_equal_p (XEXP (*cc_use, 0), dest))
6378 enum rtx_code old_code = GET_CODE (*cc_use);
6379 enum rtx_code new_code;
6380 rtx op0, op1, tmp;
6381 int other_changed = 0;
6382 rtx inner_compare = NULL_RTX;
6383 enum machine_mode compare_mode = GET_MODE (dest);
6385 if (GET_CODE (src) == COMPARE)
6387 op0 = XEXP (src, 0), op1 = XEXP (src, 1);
6388 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
6390 inner_compare = op0;
6391 op0 = XEXP (inner_compare, 0), op1 = XEXP (inner_compare, 1);
6394 else
6395 op0 = src, op1 = CONST0_RTX (GET_MODE (src));
6397 tmp = simplify_relational_operation (old_code, compare_mode, VOIDmode,
6398 op0, op1);
6399 if (!tmp)
6400 new_code = old_code;
6401 else if (!CONSTANT_P (tmp))
6403 new_code = GET_CODE (tmp);
6404 op0 = XEXP (tmp, 0);
6405 op1 = XEXP (tmp, 1);
6407 else
6409 rtx pat = PATTERN (other_insn);
6410 undobuf.other_insn = other_insn;
6411 SUBST (*cc_use, tmp);
6413 /* Attempt to simplify CC user. */
6414 if (GET_CODE (pat) == SET)
6416 rtx new_rtx = simplify_rtx (SET_SRC (pat));
6417 if (new_rtx != NULL_RTX)
6418 SUBST (SET_SRC (pat), new_rtx);
6421 /* Convert X into a no-op move. */
6422 SUBST (SET_DEST (x), pc_rtx);
6423 SUBST (SET_SRC (x), pc_rtx);
6424 return x;
6427 /* Simplify our comparison, if possible. */
6428 new_code = simplify_comparison (new_code, &op0, &op1);
6430 #ifdef SELECT_CC_MODE
6431 /* If this machine has CC modes other than CCmode, check to see if we
6432 need to use a different CC mode here. */
6433 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
6434 compare_mode = GET_MODE (op0);
6435 else if (inner_compare
6436 && GET_MODE_CLASS (GET_MODE (inner_compare)) == MODE_CC
6437 && new_code == old_code
6438 && op0 == XEXP (inner_compare, 0)
6439 && op1 == XEXP (inner_compare, 1))
6440 compare_mode = GET_MODE (inner_compare);
6441 else
6442 compare_mode = SELECT_CC_MODE (new_code, op0, op1);
6444 #ifndef HAVE_cc0
6445 /* If the mode changed, we have to change SET_DEST, the mode in the
6446 compare, and the mode in the place SET_DEST is used. If SET_DEST is
6447 a hard register, just build new versions with the proper mode. If it
6448 is a pseudo, we lose unless it is the only time we set the pseudo, in
6449 which case we can safely change its mode. */
6450 if (compare_mode != GET_MODE (dest))
6452 if (can_change_dest_mode (dest, 0, compare_mode))
6454 unsigned int regno = REGNO (dest);
6455 rtx new_dest;
6457 if (regno < FIRST_PSEUDO_REGISTER)
6458 new_dest = gen_rtx_REG (compare_mode, regno);
6459 else
6461 SUBST_MODE (regno_reg_rtx[regno], compare_mode);
6462 new_dest = regno_reg_rtx[regno];
6465 SUBST (SET_DEST (x), new_dest);
6466 SUBST (XEXP (*cc_use, 0), new_dest);
6467 other_changed = 1;
6469 dest = new_dest;
6472 #endif /* cc0 */
6473 #endif /* SELECT_CC_MODE */
6475 /* If the code changed, we have to build a new comparison in
6476 undobuf.other_insn. */
6477 if (new_code != old_code)
6479 int other_changed_previously = other_changed;
6480 unsigned HOST_WIDE_INT mask;
6481 rtx old_cc_use = *cc_use;
6483 SUBST (*cc_use, gen_rtx_fmt_ee (new_code, GET_MODE (*cc_use),
6484 dest, const0_rtx));
6485 other_changed = 1;
6487 /* If the only change we made was to change an EQ into an NE or
6488 vice versa, OP0 has only one bit that might be nonzero, and OP1
6489 is zero, check if changing the user of the condition code will
6490 produce a valid insn. If it won't, we can keep the original code
6491 in that insn by surrounding our operation with an XOR. */
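/* Worked example: if OP0 is known to be 0 or 1 and the user insn only
   accepts the original NE, the desired (eq OP0 0) can be kept as
   (ne (xor OP0 1) 0), since xoring with the single nonzero bit flips the
   sense of the test.  */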
6493 if (((old_code == NE && new_code == EQ)
6494 || (old_code == EQ && new_code == NE))
6495 && ! other_changed_previously && op1 == const0_rtx
6496 && HWI_COMPUTABLE_MODE_P (GET_MODE (op0))
6497 && exact_log2 (mask = nonzero_bits (op0, GET_MODE (op0))) >= 0)
6499 rtx pat = PATTERN (other_insn), note = 0;
6501 if ((recog_for_combine (&pat, other_insn, &note) < 0
6502 && ! check_asm_operands (pat)))
6504 *cc_use = old_cc_use;
6505 other_changed = 0;
6507 op0 = simplify_gen_binary (XOR, GET_MODE (op0),
6508 op0, GEN_INT (mask));
6513 if (other_changed)
6514 undobuf.other_insn = other_insn;
6516 /* Otherwise, if we didn't previously have a COMPARE in the
6517 correct mode, we need one. */
6518 if (GET_CODE (src) != COMPARE || GET_MODE (src) != compare_mode)
6520 SUBST (SET_SRC (x), gen_rtx_COMPARE (compare_mode, op0, op1));
6521 src = SET_SRC (x);
6523 else if (GET_MODE (op0) == compare_mode && op1 == const0_rtx)
6525 SUBST (SET_SRC (x), op0);
6526 src = SET_SRC (x);
6528 /* Otherwise, update the COMPARE if needed. */
6529 else if (XEXP (src, 0) != op0 || XEXP (src, 1) != op1)
6531 SUBST (SET_SRC (x), gen_rtx_COMPARE (compare_mode, op0, op1));
6532 src = SET_SRC (x);
6535 else
6537 /* Get SET_SRC in a form where we have placed back any
6538 compound expressions. Then do the checks below. */
6539 src = make_compound_operation (src, SET);
6540 SUBST (SET_SRC (x), src);
6543 /* If we have (set x (subreg:m1 (op:m2 ...) 0)) with OP being some operation,
6544 and X being a REG or (subreg (reg)), we may be able to convert this to
6545 (set (subreg:m2 x) (op)).
6547 We can always do this if M1 is narrower than M2 because that means that
6548 we only care about the low bits of the result.
6550 However, on machines without WORD_REGISTER_OPERATIONS defined, we cannot
6551 perform a narrower operation than requested since the high-order bits will
6552 be undefined. On machines where it is defined, this transformation is safe
6553 as long as M1 and M2 have the same number of words. */
6555 if (GET_CODE (src) == SUBREG && subreg_lowpart_p (src)
6556 && !OBJECT_P (SUBREG_REG (src))
6557 && (((GET_MODE_SIZE (GET_MODE (src)) + (UNITS_PER_WORD - 1))
6558 / UNITS_PER_WORD)
6559 == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (src)))
6560 + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD))
6561 #ifndef WORD_REGISTER_OPERATIONS
6562 && (GET_MODE_SIZE (GET_MODE (src))
6563 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (src))))
6564 #endif
6565 #ifdef CANNOT_CHANGE_MODE_CLASS
6566 && ! (REG_P (dest) && REGNO (dest) < FIRST_PSEUDO_REGISTER
6567 && REG_CANNOT_CHANGE_MODE_P (REGNO (dest),
6568 GET_MODE (SUBREG_REG (src)),
6569 GET_MODE (src)))
6570 #endif
6571 && (REG_P (dest)
6572 || (GET_CODE (dest) == SUBREG
6573 && REG_P (SUBREG_REG (dest)))))
6575 SUBST (SET_DEST (x),
6576 gen_lowpart (GET_MODE (SUBREG_REG (src)),
6577 dest));
6578 SUBST (SET_SRC (x), SUBREG_REG (src));
6580 src = SET_SRC (x), dest = SET_DEST (x);
6583 #ifdef HAVE_cc0
6584 /* If we have (set (cc0) (subreg ...)), we try to remove the subreg
6585 in SRC. */
6586 if (dest == cc0_rtx
6587 && GET_CODE (src) == SUBREG
6588 && subreg_lowpart_p (src)
6589 && (GET_MODE_PRECISION (GET_MODE (src))
6590 < GET_MODE_PRECISION (GET_MODE (SUBREG_REG (src)))))
6592 rtx inner = SUBREG_REG (src);
6593 enum machine_mode inner_mode = GET_MODE (inner);
6595 /* Here we make sure that we don't have a sign bit on. */
6596 if (val_signbit_known_clear_p (GET_MODE (src),
6597 nonzero_bits (inner, inner_mode)))
6599 SUBST (SET_SRC (x), inner);
6600 src = SET_SRC (x);
6603 #endif
6605 #ifdef LOAD_EXTEND_OP
6606 /* If we have (set FOO (subreg:M (mem:N BAR) 0)) with M wider than N, this
6607 would require a paradoxical subreg. Replace the subreg with a
6608 zero_extend to avoid the reload that would otherwise be required. */
6610 if (GET_CODE (src) == SUBREG && subreg_lowpart_p (src)
6611 && INTEGRAL_MODE_P (GET_MODE (SUBREG_REG (src)))
6612 && LOAD_EXTEND_OP (GET_MODE (SUBREG_REG (src))) != UNKNOWN
6613 && SUBREG_BYTE (src) == 0
6614 && paradoxical_subreg_p (src)
6615 && MEM_P (SUBREG_REG (src)))
6617 SUBST (SET_SRC (x),
6618 gen_rtx_fmt_e (LOAD_EXTEND_OP (GET_MODE (SUBREG_REG (src))),
6619 GET_MODE (src), SUBREG_REG (src)));
6621 src = SET_SRC (x);
6623 #endif
6625 /* If we don't have a conditional move, SET_SRC is an IF_THEN_ELSE, and we
6626 are comparing an item known to be 0 or -1 against 0, use a logical
6627 operation instead. Check for one of the arms being an IOR of the other
6628 arm with some value. We compute three terms to be IOR'ed together. In
6629 practice, at most two will be nonzero. Then we do the IOR's. */
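/* Worked example: with A known to be 0 or -1,
   (if_then_else (ne A 0) T F) can be computed without a branch as
   (ior (and A T) (and (not A) F)), since A acts as an all-ones or
   all-zeros mask.  */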
6631 if (GET_CODE (dest) != PC
6632 && GET_CODE (src) == IF_THEN_ELSE
6633 && GET_MODE_CLASS (GET_MODE (src)) == MODE_INT
6634 && (GET_CODE (XEXP (src, 0)) == EQ || GET_CODE (XEXP (src, 0)) == NE)
6635 && XEXP (XEXP (src, 0), 1) == const0_rtx
6636 && GET_MODE (src) == GET_MODE (XEXP (XEXP (src, 0), 0))
6637 #ifdef HAVE_conditional_move
6638 && ! can_conditionally_move_p (GET_MODE (src))
6639 #endif
6640 && (num_sign_bit_copies (XEXP (XEXP (src, 0), 0),
6641 GET_MODE (XEXP (XEXP (src, 0), 0)))
6642 == GET_MODE_PRECISION (GET_MODE (XEXP (XEXP (src, 0), 0))))
6643 && ! side_effects_p (src))
6645 rtx true_rtx = (GET_CODE (XEXP (src, 0)) == NE
6646 ? XEXP (src, 1) : XEXP (src, 2));
6647 rtx false_rtx = (GET_CODE (XEXP (src, 0)) == NE
6648 ? XEXP (src, 2) : XEXP (src, 1));
6649 rtx term1 = const0_rtx, term2, term3;
6651 if (GET_CODE (true_rtx) == IOR
6652 && rtx_equal_p (XEXP (true_rtx, 0), false_rtx))
6653 term1 = false_rtx, true_rtx = XEXP (true_rtx, 1), false_rtx = const0_rtx;
6654 else if (GET_CODE (true_rtx) == IOR
6655 && rtx_equal_p (XEXP (true_rtx, 1), false_rtx))
6656 term1 = false_rtx, true_rtx = XEXP (true_rtx, 0), false_rtx = const0_rtx;
6657 else if (GET_CODE (false_rtx) == IOR
6658 && rtx_equal_p (XEXP (false_rtx, 0), true_rtx))
6659 term1 = true_rtx, false_rtx = XEXP (false_rtx, 1), true_rtx = const0_rtx;
6660 else if (GET_CODE (false_rtx) == IOR
6661 && rtx_equal_p (XEXP (false_rtx, 1), true_rtx))
6662 term1 = true_rtx, false_rtx = XEXP (false_rtx, 0), true_rtx = const0_rtx;
6664 term2 = simplify_gen_binary (AND, GET_MODE (src),
6665 XEXP (XEXP (src, 0), 0), true_rtx);
6666 term3 = simplify_gen_binary (AND, GET_MODE (src),
6667 simplify_gen_unary (NOT, GET_MODE (src),
6668 XEXP (XEXP (src, 0), 0),
6669 GET_MODE (src)),
6670 false_rtx);
6672 SUBST (SET_SRC (x),
6673 simplify_gen_binary (IOR, GET_MODE (src),
6674 simplify_gen_binary (IOR, GET_MODE (src),
6675 term1, term2),
6676 term3));
6678 src = SET_SRC (x);
6681 /* If either SRC or DEST is a CLOBBER of (const_int 0), make this
6682 whole thing fail. */
6683 if (GET_CODE (src) == CLOBBER && XEXP (src, 0) == const0_rtx)
6684 return src;
6685 else if (GET_CODE (dest) == CLOBBER && XEXP (dest, 0) == const0_rtx)
6686 return dest;
6687 else
6688 /* Convert this into a field assignment operation, if possible. */
6689 return make_field_assignment (x);
6692 /* Simplify X, an AND, IOR, or XOR operation, and return the simplified
6693 result. */
6695 static rtx
6696 simplify_logical (rtx x)
6698 enum machine_mode mode = GET_MODE (x);
6699 rtx op0 = XEXP (x, 0);
6700 rtx op1 = XEXP (x, 1);
6702 switch (GET_CODE (x))
6704 case AND:
6705 /* We can call simplify_and_const_int only if we don't lose
6706 any (sign) bits when converting INTVAL (op1) to
6707 "unsigned HOST_WIDE_INT". */
6708 if (CONST_INT_P (op1)
6709 && (HWI_COMPUTABLE_MODE_P (mode)
6710 || INTVAL (op1) > 0))
6712 x = simplify_and_const_int (x, mode, op0, INTVAL (op1));
6713 if (GET_CODE (x) != AND)
6714 return x;
6716 op0 = XEXP (x, 0);
6717 op1 = XEXP (x, 1);
6720 /* If we have any of (and (ior A B) C) or (and (xor A B) C),
6721 apply the distributive law and then the inverse distributive
6722 law to see if things simplify. */
6723 if (GET_CODE (op0) == IOR || GET_CODE (op0) == XOR)
6725 rtx result = distribute_and_simplify_rtx (x, 0);
6726 if (result)
6727 return result;
6729 if (GET_CODE (op1) == IOR || GET_CODE (op1) == XOR)
6731 rtx result = distribute_and_simplify_rtx (x, 1);
6732 if (result)
6733 return result;
6735 break;
6737 case IOR:
6738 /* If we have (ior (and A B) C), apply the distributive law and then
6739 the inverse distributive law to see if things simplify. */
6741 if (GET_CODE (op0) == AND)
6743 rtx result = distribute_and_simplify_rtx (x, 0);
6744 if (result)
6745 return result;
6748 if (GET_CODE (op1) == AND)
6750 rtx result = distribute_and_simplify_rtx (x, 1);
6751 if (result)
6752 return result;
6754 break;
6756 default:
6757 gcc_unreachable ();
6760 return x;
6763 /* We consider ZERO_EXTRACT, SIGN_EXTRACT, and SIGN_EXTEND as "compound
6764 operations" because they can be replaced with two more basic operations.
6765 ZERO_EXTEND is also considered "compound" because it can be replaced with
6766 an AND operation, which is simpler, though only one operation.
6768 The function expand_compound_operation is called with an rtx expression
6769 and will convert it to the appropriate shifts and AND operations,
6770 simplifying at each stage.
6772 The function make_compound_operation is called to convert an expression
6773 consisting of shifts and ANDs into the equivalent compound expression.
6774 It is the inverse of this function, loosely speaking. */
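/* For illustration (assuming QImode inside SImode): the compound form
   (sign_extend:SI (reg:QI R)) expands to
   (ashiftrt:SI (ashift:SI (subreg:SI R) 24) 24), while
   (zero_extend:SI (reg:QI R)) corresponds to (and:SI (subreg:SI R) 255);
   make_compound_operation performs the reverse rewriting.  */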
6776 static rtx
6777 expand_compound_operation (rtx x)
6779 unsigned HOST_WIDE_INT pos = 0, len;
6780 int unsignedp = 0;
6781 unsigned int modewidth;
6782 rtx tem;
6784 switch (GET_CODE (x))
6786 case ZERO_EXTEND:
6787 unsignedp = 1;
6788 case SIGN_EXTEND:
6789 /* We can't necessarily use a const_int for a multiword mode;
6790 it depends on implicitly extending the value.
6791 Since we don't know the right way to extend it,
6792 we can't tell whether the implicit way is right.
6794 Even for a mode that is no wider than a const_int,
6795 we can't win, because we need to sign extend one of its bits through
6796 the rest of it, and we don't know which bit. */
6797 if (CONST_INT_P (XEXP (x, 0)))
6798 return x;
6800 /* Return if (subreg:MODE FROM 0) is not a safe replacement for
6801 (zero_extend:MODE FROM) or (sign_extend:MODE FROM). It is for any MEM
6802 because (SUBREG (MEM...)) is guaranteed to cause the MEM to be
6803 reloaded. If not for that, MEM's would very rarely be safe.
6805 Reject MODEs bigger than a word, because we might not be able
6806 to reference a two-register group starting with an arbitrary register
6807 (and currently gen_lowpart might crash for a SUBREG). */
6809 if (GET_MODE_SIZE (GET_MODE (XEXP (x, 0))) > UNITS_PER_WORD)
6810 return x;
6812 /* Reject MODEs that aren't scalar integers because turning vector
6813 or complex modes into shifts causes problems. */
6815 if (! SCALAR_INT_MODE_P (GET_MODE (XEXP (x, 0))))
6816 return x;
6818 len = GET_MODE_PRECISION (GET_MODE (XEXP (x, 0)));
6819 /* If the inner object has VOIDmode (the only way this can happen
6820 is if it is an ASM_OPERANDS), we can't do anything since we don't
6821 know how much masking to do. */
6822 if (len == 0)
6823 return x;
6825 break;
6827 case ZERO_EXTRACT:
6828 unsignedp = 1;
6830 /* ... fall through ... */
6832 case SIGN_EXTRACT:
6833 /* If the operand is a CLOBBER, just return it. */
6834 if (GET_CODE (XEXP (x, 0)) == CLOBBER)
6835 return XEXP (x, 0);
6837 if (!CONST_INT_P (XEXP (x, 1))
6838 || !CONST_INT_P (XEXP (x, 2))
6839 || GET_MODE (XEXP (x, 0)) == VOIDmode)
6840 return x;
6842 /* Reject MODEs that aren't scalar integers because turning vector
6843 or complex modes into shifts causes problems. */
6845 if (! SCALAR_INT_MODE_P (GET_MODE (XEXP (x, 0))))
6846 return x;
6848 len = INTVAL (XEXP (x, 1));
6849 pos = INTVAL (XEXP (x, 2));
6851 /* This should stay within the object being extracted, fail otherwise. */
6852 if (len + pos > GET_MODE_PRECISION (GET_MODE (XEXP (x, 0))))
6853 return x;
6855 if (BITS_BIG_ENDIAN)
6856 pos = GET_MODE_PRECISION (GET_MODE (XEXP (x, 0))) - len - pos;
6858 break;
6860 default:
6861 return x;
6863 /* Convert sign extension to zero extension, if we know that the high
6864 bit is not set, as this is easier to optimize. It will be converted
6865 back to cheaper alternative in make_extraction. */
6866 if (GET_CODE (x) == SIGN_EXTEND
6867 && (HWI_COMPUTABLE_MODE_P (GET_MODE (x))
6868 && ((nonzero_bits (XEXP (x, 0), GET_MODE (XEXP (x, 0)))
6869 & ~(((unsigned HOST_WIDE_INT)
6870 GET_MODE_MASK (GET_MODE (XEXP (x, 0))))
6871 >> 1))
6872 == 0)))
6874 rtx temp = gen_rtx_ZERO_EXTEND (GET_MODE (x), XEXP (x, 0));
6875 rtx temp2 = expand_compound_operation (temp);
6877 /* Make sure this is a profitable operation. */
6878 if (set_src_cost (x, optimize_this_for_speed_p)
6879 > set_src_cost (temp2, optimize_this_for_speed_p))
6880 return temp2;
6881 else if (set_src_cost (x, optimize_this_for_speed_p)
6882 > set_src_cost (temp, optimize_this_for_speed_p))
6883 return temp;
6884 else
6885 return x;
6888 /* We can optimize some special cases of ZERO_EXTEND. */
6889 if (GET_CODE (x) == ZERO_EXTEND)
6891 /* (zero_extend:DI (truncate:SI foo:DI)) is just foo:DI if we
6892 know that the last value didn't have any inappropriate bits
6893 set. */
6894 if (GET_CODE (XEXP (x, 0)) == TRUNCATE
6895 && GET_MODE (XEXP (XEXP (x, 0), 0)) == GET_MODE (x)
6896 && HWI_COMPUTABLE_MODE_P (GET_MODE (x))
6897 && (nonzero_bits (XEXP (XEXP (x, 0), 0), GET_MODE (x))
6898 & ~GET_MODE_MASK (GET_MODE (XEXP (x, 0)))) == 0)
6899 return XEXP (XEXP (x, 0), 0);
6901 /* Likewise for (zero_extend:DI (subreg:SI foo:DI 0)). */
6902 if (GET_CODE (XEXP (x, 0)) == SUBREG
6903 && GET_MODE (SUBREG_REG (XEXP (x, 0))) == GET_MODE (x)
6904 && subreg_lowpart_p (XEXP (x, 0))
6905 && HWI_COMPUTABLE_MODE_P (GET_MODE (x))
6906 && (nonzero_bits (SUBREG_REG (XEXP (x, 0)), GET_MODE (x))
6907 & ~GET_MODE_MASK (GET_MODE (XEXP (x, 0)))) == 0)
6908 return SUBREG_REG (XEXP (x, 0));
6910 /* (zero_extend:DI (truncate:SI foo:DI)) is just foo:DI when foo
6911 is a comparison and STORE_FLAG_VALUE permits. This is like
6912 the first case, but it works even when GET_MODE (x) is larger
6913 than HOST_WIDE_INT. */
6914 if (GET_CODE (XEXP (x, 0)) == TRUNCATE
6915 && GET_MODE (XEXP (XEXP (x, 0), 0)) == GET_MODE (x)
6916 && COMPARISON_P (XEXP (XEXP (x, 0), 0))
6917 && (GET_MODE_PRECISION (GET_MODE (XEXP (x, 0)))
6918 <= HOST_BITS_PER_WIDE_INT)
6919 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (GET_MODE (XEXP (x, 0)))) == 0)
6920 return XEXP (XEXP (x, 0), 0);
6922 /* Likewise for (zero_extend:DI (subreg:SI foo:DI 0)). */
6923 if (GET_CODE (XEXP (x, 0)) == SUBREG
6924 && GET_MODE (SUBREG_REG (XEXP (x, 0))) == GET_MODE (x)
6925 && subreg_lowpart_p (XEXP (x, 0))
6926 && COMPARISON_P (SUBREG_REG (XEXP (x, 0)))
6927 && (GET_MODE_PRECISION (GET_MODE (XEXP (x, 0)))
6928 <= HOST_BITS_PER_WIDE_INT)
6929 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (GET_MODE (XEXP (x, 0)))) == 0)
6930 return SUBREG_REG (XEXP (x, 0));
6934 /* If we reach here, we want to return a pair of shifts. The inner
6935 shift is a left shift of BITSIZE - POS - LEN bits. The outer
6936 shift is a right shift of BITSIZE - LEN bits. It is arithmetic or
6937 logical depending on the value of UNSIGNEDP.
6939 If this was a ZERO_EXTEND or ZERO_EXTRACT, this pair of shifts will be
6940 converted into an AND of a shift.
6942 We must check for the case where the left shift would have a negative
6943 count. This can happen in a case like (x >> 31) & 255 on machines
6944 that can't shift by a constant. On those machines, we would first
6945 combine the shift with the AND to produce a variable-position
6946 extraction. Then the constant of 31 would be substituted in
6947 to produce such a position. */
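/* For example, with a 32-bit mode, POS == 4 and LEN == 8 this produces
   (ashift X (const_int 20)) followed by a right shift by 24 bits.  */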
6949 modewidth = GET_MODE_PRECISION (GET_MODE (x));
6950 if (modewidth >= pos + len)
6952 enum machine_mode mode = GET_MODE (x);
6953 tem = gen_lowpart (mode, XEXP (x, 0));
6954 if (!tem || GET_CODE (tem) == CLOBBER)
6955 return x;
6956 tem = simplify_shift_const (NULL_RTX, ASHIFT, mode,
6957 tem, modewidth - pos - len);
6958 tem = simplify_shift_const (NULL_RTX, unsignedp ? LSHIFTRT : ASHIFTRT,
6959 mode, tem, modewidth - len);
6961 else if (unsignedp && len < HOST_BITS_PER_WIDE_INT)
6962 tem = simplify_and_const_int (NULL_RTX, GET_MODE (x),
6963 simplify_shift_const (NULL_RTX, LSHIFTRT,
6964 GET_MODE (x),
6965 XEXP (x, 0), pos),
6966 ((unsigned HOST_WIDE_INT) 1 << len) - 1);
6967 else
6968 /* Any other cases we can't handle. */
6969 return x;
6971 /* If we couldn't do this for some reason, return the original
6972 expression. */
6973 if (GET_CODE (tem) == CLOBBER)
6974 return x;
6976 return tem;
6979 /* X is a SET which contains an assignment of one object into
6980 a part of another (such as a bit-field assignment, STRICT_LOW_PART,
6981 or certain SUBREGS). If possible, convert it into a series of
6982 logical operations.
6984 We half-heartedly support variable positions, but do not at all
6985 support variable lengths. */
6987 static const_rtx
6988 expand_field_assignment (const_rtx x)
6990 rtx inner;
6991 rtx pos; /* Always counts from low bit. */
6992 int len;
6993 rtx mask, cleared, masked;
6994 enum machine_mode compute_mode;
6996 /* Loop until we find something we can't simplify. */
6997 while (1)
6999 if (GET_CODE (SET_DEST (x)) == STRICT_LOW_PART
7000 && GET_CODE (XEXP (SET_DEST (x), 0)) == SUBREG)
7002 inner = SUBREG_REG (XEXP (SET_DEST (x), 0));
7003 len = GET_MODE_PRECISION (GET_MODE (XEXP (SET_DEST (x), 0)));
7004 pos = GEN_INT (subreg_lsb (XEXP (SET_DEST (x), 0)));
7006 else if (GET_CODE (SET_DEST (x)) == ZERO_EXTRACT
7007 && CONST_INT_P (XEXP (SET_DEST (x), 1)))
7009 inner = XEXP (SET_DEST (x), 0);
7010 len = INTVAL (XEXP (SET_DEST (x), 1));
7011 pos = XEXP (SET_DEST (x), 2);
7013 /* A constant position should stay within the width of INNER. */
7014 if (CONST_INT_P (pos)
7015 && INTVAL (pos) + len > GET_MODE_PRECISION (GET_MODE (inner)))
7016 break;
7018 if (BITS_BIG_ENDIAN)
7020 if (CONST_INT_P (pos))
7021 pos = GEN_INT (GET_MODE_PRECISION (GET_MODE (inner)) - len
7022 - INTVAL (pos));
7023 else if (GET_CODE (pos) == MINUS
7024 && CONST_INT_P (XEXP (pos, 1))
7025 && (INTVAL (XEXP (pos, 1))
7026 == GET_MODE_PRECISION (GET_MODE (inner)) - len))
7027 /* If position is ADJUST - X, new position is X. */
7028 pos = XEXP (pos, 0);
7029 else
7030 pos = simplify_gen_binary (MINUS, GET_MODE (pos),
7031 GEN_INT (GET_MODE_PRECISION (
7032 GET_MODE (inner))
7033 - len),
7034 pos);
7038 /* A SUBREG between two modes that occupy the same numbers of words
7039 can be done by moving the SUBREG to the source. */
7040 else if (GET_CODE (SET_DEST (x)) == SUBREG
7041 /* We need SUBREGs to compute nonzero_bits properly. */
7042 && nonzero_sign_valid
7043 && (((GET_MODE_SIZE (GET_MODE (SET_DEST (x)))
7044 + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)
7045 == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (SET_DEST (x))))
7046 + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)))
7048 x = gen_rtx_SET (VOIDmode, SUBREG_REG (SET_DEST (x)),
7049 gen_lowpart
7050 (GET_MODE (SUBREG_REG (SET_DEST (x))),
7051 SET_SRC (x)));
7052 continue;
7054 else
7055 break;
7057 while (GET_CODE (inner) == SUBREG && subreg_lowpart_p (inner))
7058 inner = SUBREG_REG (inner);
7060 compute_mode = GET_MODE (inner);
7062 /* Don't attempt bitwise arithmetic on non scalar integer modes. */
7063 if (! SCALAR_INT_MODE_P (compute_mode))
7065 enum machine_mode imode;
7067 /* Don't do anything for vector or complex integral types. */
7068 if (! FLOAT_MODE_P (compute_mode))
7069 break;
7071 /* Try to find an integral mode to pun with. */
7072 imode = mode_for_size (GET_MODE_BITSIZE (compute_mode), MODE_INT, 0);
7073 if (imode == BLKmode)
7074 break;
7076 compute_mode = imode;
7077 inner = gen_lowpart (imode, inner);
7080 /* Compute a mask of LEN bits, if we can do this on the host machine. */
7081 if (len >= HOST_BITS_PER_WIDE_INT)
7082 break;
7084 /* Now compute the equivalent expression. Make a copy of INNER
7085 for the SET_DEST in case it is a MEM into which we will substitute;
7086 we don't want shared RTL in that case. */
7087 mask = GEN_INT (((unsigned HOST_WIDE_INT) 1 << len) - 1);
7088 cleared = simplify_gen_binary (AND, compute_mode,
7089 simplify_gen_unary (NOT, compute_mode,
7090 simplify_gen_binary (ASHIFT,
7091 compute_mode,
7092 mask, pos),
7093 compute_mode),
7094 inner);
7095 masked = simplify_gen_binary (ASHIFT, compute_mode,
7096 simplify_gen_binary (
7097 AND, compute_mode,
7098 gen_lowpart (compute_mode, SET_SRC (x)),
7099 mask),
7100 pos);
7102 x = gen_rtx_SET (VOIDmode, copy_rtx (inner),
7103 simplify_gen_binary (IOR, compute_mode,
7104 cleared, masked));
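/* The assignment now has the form
   (set INNER (ior (and (not (ashift MASK POS)) INNER)
   (ashift (and SRC MASK) POS))),
   i.e. clear the field in INNER, then OR in the masked source shifted
   into place.  */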
7107 return x;
7110 /* Return an RTX for a reference to LEN bits of INNER. If POS_RTX is nonzero,
7111 it is an RTX that represents a variable starting position; otherwise,
7112 POS is the (constant) starting bit position (counted from the LSB).
7114 UNSIGNEDP is nonzero for an unsigned reference and zero for a
7115 signed reference.
7117 IN_DEST is nonzero if this is a reference in the destination of a
7118 SET. This is used when a ZERO_ or SIGN_EXTRACT isn't needed. If nonzero,
7119 a STRICT_LOW_PART will be used, if zero, ZERO_EXTEND or SIGN_EXTEND will
7120 be used.
7122 IN_COMPARE is nonzero if we are in a COMPARE. This means that a
7123 ZERO_EXTRACT should be built even for bits starting at bit 0.
7125 MODE is the desired mode of the result (if IN_DEST == 0).
7127 The result is an RTX for the extraction or NULL_RTX if the target
7128 can't handle it. */
7130 static rtx
7131 make_extraction (enum machine_mode mode, rtx inner, HOST_WIDE_INT pos,
7132 rtx pos_rtx, unsigned HOST_WIDE_INT len, int unsignedp,
7133 int in_dest, int in_compare)
7135 /* This mode describes the size of the storage area
7136 to fetch the overall value from. Within that, we
7137 ignore the POS lowest bits, etc. */
7138 enum machine_mode is_mode = GET_MODE (inner);
7139 enum machine_mode inner_mode;
7140 enum machine_mode wanted_inner_mode;
7141 enum machine_mode wanted_inner_reg_mode = word_mode;
7142 enum machine_mode pos_mode = word_mode;
7143 enum machine_mode extraction_mode = word_mode;
7144 enum machine_mode tmode = mode_for_size (len, MODE_INT, 1);
7145 rtx new_rtx = 0;
7146 rtx orig_pos_rtx = pos_rtx;
7147 HOST_WIDE_INT orig_pos;
7149 if (GET_CODE (inner) == SUBREG && subreg_lowpart_p (inner))
7151 /* If going from (subreg:SI (mem:QI ...)) to (mem:QI ...),
7152 consider just the QI as the memory to extract from.
7153 The subreg adds or removes high bits; its mode is
7154 irrelevant to the meaning of this extraction,
7155 since POS and LEN count from the lsb. */
7156 if (MEM_P (SUBREG_REG (inner)))
7157 is_mode = GET_MODE (SUBREG_REG (inner));
7158 inner = SUBREG_REG (inner);
7160 else if (GET_CODE (inner) == ASHIFT
7161 && CONST_INT_P (XEXP (inner, 1))
7162 && pos_rtx == 0 && pos == 0
7163 && len > UINTVAL (XEXP (inner, 1)))
7165 /* We're extracting the least significant bits of an rtx
7166 (ashift X (const_int C)), where LEN > C. Extract the
7167 least significant (LEN - C) bits of X, giving an rtx
7168 whose mode is MODE, then shift it left C times. */
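/* For instance, with LEN == 8 and INNER == (ashift X (const_int 3)),
   we extract the low 5 bits of X and shift the result left by 3.  */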
7169 new_rtx = make_extraction (mode, XEXP (inner, 0),
7170 0, 0, len - INTVAL (XEXP (inner, 1)),
7171 unsignedp, in_dest, in_compare);
7172 if (new_rtx != 0)
7173 return gen_rtx_ASHIFT (mode, new_rtx, XEXP (inner, 1));
7176 inner_mode = GET_MODE (inner);
7178 if (pos_rtx && CONST_INT_P (pos_rtx))
7179 pos = INTVAL (pos_rtx), pos_rtx = 0;
7181 /* See if this can be done without an extraction. We never can if the
7182 width of the field is not the same as that of some integer mode. For
7183 registers, we can only avoid the extraction if the position is at the
7184 low-order bit and this is either not in the destination or we have the
7185 appropriate STRICT_LOW_PART operation available.
7187 For MEM, we can avoid an extract if the field starts on an appropriate
7188 boundary and we can change the mode of the memory reference. */
7190 if (tmode != BLKmode
7191 && ((pos_rtx == 0 && (pos % BITS_PER_WORD) == 0
7192 && !MEM_P (inner)
7193 && (inner_mode == tmode
7194 || !REG_P (inner)
7195 || TRULY_NOOP_TRUNCATION_MODES_P (tmode, inner_mode)
7196 || reg_truncated_to_mode (tmode, inner))
7197 && (! in_dest
7198 || (REG_P (inner)
7199 && have_insn_for (STRICT_LOW_PART, tmode))))
7200 || (MEM_P (inner) && pos_rtx == 0
7201 && (pos
7202 % (STRICT_ALIGNMENT ? GET_MODE_ALIGNMENT (tmode)
7203 : BITS_PER_UNIT)) == 0
7204 /* We can't do this if we are widening INNER_MODE (it
7205 may not be aligned, for one thing). */
7206 && GET_MODE_PRECISION (inner_mode) >= GET_MODE_PRECISION (tmode)
7207 && (inner_mode == tmode
7208 || (! mode_dependent_address_p (XEXP (inner, 0))
7209 && ! MEM_VOLATILE_P (inner))))))
7211 /* If INNER is a MEM, make a new MEM that encompasses just the desired
7212 field. If the original and current mode are the same, we need not
7213 adjust the offset. Otherwise, we do if bytes big endian.
7215 If INNER is not a MEM, get a piece consisting of just the field
7216 of interest (in this case POS % BITS_PER_WORD must be 0). */
7218 if (MEM_P (inner))
7220 HOST_WIDE_INT offset;
7222 /* POS counts from lsb, but make OFFSET count in memory order. */
7223 if (BYTES_BIG_ENDIAN)
7224 offset = (GET_MODE_PRECISION (is_mode) - len - pos) / BITS_PER_UNIT;
7225 else
7226 offset = pos / BITS_PER_UNIT;
7228 new_rtx = adjust_address_nv (inner, tmode, offset);
7230 else if (REG_P (inner))
7232 if (tmode != inner_mode)
7234 /* We can't call gen_lowpart in a DEST since we
7235 always want a SUBREG (see below) and it would sometimes
7236 return a new hard register. */
7237 if (pos || in_dest)
7239 HOST_WIDE_INT final_word = pos / BITS_PER_WORD;
7241 if (WORDS_BIG_ENDIAN
7242 && GET_MODE_SIZE (inner_mode) > UNITS_PER_WORD)
7243 final_word = ((GET_MODE_SIZE (inner_mode)
7244 - GET_MODE_SIZE (tmode))
7245 / UNITS_PER_WORD) - final_word;
7247 final_word *= UNITS_PER_WORD;
7248 if (BYTES_BIG_ENDIAN &&
7249 GET_MODE_SIZE (inner_mode) > GET_MODE_SIZE (tmode))
7250 final_word += (GET_MODE_SIZE (inner_mode)
7251 - GET_MODE_SIZE (tmode)) % UNITS_PER_WORD;
7253 /* Avoid creating invalid subregs, for example when
7254 simplifying (x>>32)&255. */
7255 if (!validate_subreg (tmode, inner_mode, inner, final_word))
7256 return NULL_RTX;
7258 new_rtx = gen_rtx_SUBREG (tmode, inner, final_word);
7260 else
7261 new_rtx = gen_lowpart (tmode, inner);
7263 else
7264 new_rtx = inner;
7266 else
7267 new_rtx = force_to_mode (inner, tmode,
7268 len >= HOST_BITS_PER_WIDE_INT
7269 ? ~(unsigned HOST_WIDE_INT) 0
7270 : ((unsigned HOST_WIDE_INT) 1 << len) - 1, 0);
7273 /* If this extraction is going into the destination of a SET,
7274 make a STRICT_LOW_PART unless we made a MEM. */
7276 if (in_dest)
7277 return (MEM_P (new_rtx) ? new_rtx
7278 : (GET_CODE (new_rtx) != SUBREG
7279 ? gen_rtx_CLOBBER (tmode, const0_rtx)
7280 : gen_rtx_STRICT_LOW_PART (VOIDmode, new_rtx)));
7282 if (mode == tmode)
7283 return new_rtx;
7285 if (CONST_INT_P (new_rtx)
7286 || GET_CODE (new_rtx) == CONST_DOUBLE)
7287 return simplify_unary_operation (unsignedp ? ZERO_EXTEND : SIGN_EXTEND,
7288 mode, new_rtx, tmode);
7290 /* If we know that no extraneous bits are set, and that the high
7291 bit is not set, convert the extraction to the cheaper of
7292 sign and zero extension, which are equivalent in these cases. */
7293 if (flag_expensive_optimizations
7294 && (HWI_COMPUTABLE_MODE_P (tmode)
7295 && ((nonzero_bits (new_rtx, tmode)
7296 & ~(((unsigned HOST_WIDE_INT)GET_MODE_MASK (tmode)) >> 1))
7297 == 0)))
7299 rtx temp = gen_rtx_ZERO_EXTEND (mode, new_rtx);
7300 rtx temp1 = gen_rtx_SIGN_EXTEND (mode, new_rtx);
7302 /* Prefer ZERO_EXTENSION, since it gives more information to
7303 backends. */
7304 if (set_src_cost (temp, optimize_this_for_speed_p)
7305 <= set_src_cost (temp1, optimize_this_for_speed_p))
7306 return temp;
7307 return temp1;
7310 /* Otherwise, sign- or zero-extend unless we already are in the
7311 proper mode. */
7313 return (gen_rtx_fmt_e (unsignedp ? ZERO_EXTEND : SIGN_EXTEND,
7314 mode, new_rtx));
7317 /* Unless this is a COMPARE or we have a funny memory reference,
7318 don't do anything with zero-extending field extracts starting at
7319 the low-order bit since they are simple AND operations. */
7320 if (pos_rtx == 0 && pos == 0 && ! in_dest
7321 && ! in_compare && unsignedp)
7322 return 0;
7324 /* If INNER is a MEM, reject this if we would be spanning bytes or
7325 if the position is not a constant and the length is not 1. In all
7326 other cases, we would only be going outside our object in cases when
7327 an original shift would have been undefined. */
7328 if (MEM_P (inner)
7329 && ((pos_rtx == 0 && pos + len > GET_MODE_PRECISION (is_mode))
7330 || (pos_rtx != 0 && len != 1)))
7331 return 0;
7333 /* Get the mode to use should INNER not be a MEM, the mode for the position,
7334 and the mode for the result. */
7335 if (in_dest && mode_for_extraction (EP_insv, -1) != MAX_MACHINE_MODE)
7337 wanted_inner_reg_mode = mode_for_extraction (EP_insv, 0);
7338 pos_mode = mode_for_extraction (EP_insv, 2);
7339 extraction_mode = mode_for_extraction (EP_insv, 3);
7342 if (! in_dest && unsignedp
7343 && mode_for_extraction (EP_extzv, -1) != MAX_MACHINE_MODE)
7345 wanted_inner_reg_mode = mode_for_extraction (EP_extzv, 1);
7346 pos_mode = mode_for_extraction (EP_extzv, 3);
7347 extraction_mode = mode_for_extraction (EP_extzv, 0);
7350 if (! in_dest && ! unsignedp
7351 && mode_for_extraction (EP_extv, -1) != MAX_MACHINE_MODE)
7353 wanted_inner_reg_mode = mode_for_extraction (EP_extv, 1);
7354 pos_mode = mode_for_extraction (EP_extv, 3);
7355 extraction_mode = mode_for_extraction (EP_extv, 0);
7358 /* Never narrow an object, since that might not be safe. */
7360 if (mode != VOIDmode
7361 && GET_MODE_SIZE (extraction_mode) < GET_MODE_SIZE (mode))
7362 extraction_mode = mode;
7364 if (pos_rtx && GET_MODE (pos_rtx) != VOIDmode
7365 && GET_MODE_SIZE (pos_mode) < GET_MODE_SIZE (GET_MODE (pos_rtx)))
7366 pos_mode = GET_MODE (pos_rtx);
7368 /* If this is not from memory, the desired mode is the preferred mode
7369 for an extraction pattern's first input operand, or word_mode if there
7370 is none. */
7371 if (!MEM_P (inner))
7372 wanted_inner_mode = wanted_inner_reg_mode;
7373 else
7375 /* Be careful not to go beyond the extracted object and maintain the
7376 natural alignment of the memory. */
7377 wanted_inner_mode = smallest_mode_for_size (len, MODE_INT);
7378 while (pos % GET_MODE_BITSIZE (wanted_inner_mode) + len
7379 > GET_MODE_BITSIZE (wanted_inner_mode))
7381 wanted_inner_mode = GET_MODE_WIDER_MODE (wanted_inner_mode);
7382 gcc_assert (wanted_inner_mode != VOIDmode);
7385 /* If we have to change the mode of memory and cannot, the desired mode
7386 is EXTRACTION_MODE. */
7387 if (inner_mode != wanted_inner_mode
7388 && (mode_dependent_address_p (XEXP (inner, 0))
7389 || MEM_VOLATILE_P (inner)
7390 || pos_rtx))
7391 wanted_inner_mode = extraction_mode;
7394 orig_pos = pos;
7396 if (BITS_BIG_ENDIAN)
7398 /* POS is passed as if BITS_BIG_ENDIAN == 0, so we need to convert it to
7399 BITS_BIG_ENDIAN style. If position is constant, compute new
7400 position. Otherwise, build subtraction.
7401 Note that POS is relative to the mode of the original argument.
7402 If it's a MEM we need to recompute POS relative to that.
7403 However, if we're extracting from (or inserting into) a register,
7404 we want to recompute POS relative to wanted_inner_mode. */
7405 int width = (MEM_P (inner)
7406 ? GET_MODE_BITSIZE (is_mode)
7407 : GET_MODE_BITSIZE (wanted_inner_mode));
7409 if (pos_rtx == 0)
7410 pos = width - len - pos;
7411 else
7412 pos_rtx
7413 = gen_rtx_MINUS (GET_MODE (pos_rtx), GEN_INT (width - len), pos_rtx);
7414 /* POS may be less than 0 now, but we check for that below.
7415 Note that it can only be less than 0 if !MEM_P (inner). */
7418 /* If INNER has a wider mode, and this is a constant extraction, try to
7419 make it smaller and adjust the byte to point to the byte containing
7420 the value. */
7421 if (wanted_inner_mode != VOIDmode
7422 && inner_mode != wanted_inner_mode
7423 && ! pos_rtx
7424 && GET_MODE_SIZE (wanted_inner_mode) < GET_MODE_SIZE (is_mode)
7425 && MEM_P (inner)
7426 && ! mode_dependent_address_p (XEXP (inner, 0))
7427 && ! MEM_VOLATILE_P (inner))
7429 int offset = 0;
7431 /* The computations below will be correct if the machine is big
7432 endian in both bits and bytes or little endian in bits and bytes.
7433 If it is mixed, we must adjust. */
7435 /* If bytes are big endian and we had a paradoxical SUBREG, we must
7436 adjust OFFSET to compensate. */
7437 if (BYTES_BIG_ENDIAN
7438 && GET_MODE_SIZE (inner_mode) < GET_MODE_SIZE (is_mode))
7439 offset -= GET_MODE_SIZE (is_mode) - GET_MODE_SIZE (inner_mode);
7441 /* We can now move to the desired byte. */
7442 offset += (pos / GET_MODE_BITSIZE (wanted_inner_mode))
7443 * GET_MODE_SIZE (wanted_inner_mode);
7444 pos %= GET_MODE_BITSIZE (wanted_inner_mode);
7446 if (BYTES_BIG_ENDIAN != BITS_BIG_ENDIAN
7447 && is_mode != wanted_inner_mode)
7448 offset = (GET_MODE_SIZE (is_mode)
7449 - GET_MODE_SIZE (wanted_inner_mode) - offset);
7451 inner = adjust_address_nv (inner, wanted_inner_mode, offset);
7454 /* If INNER is not memory, get it into the proper mode. If we are changing
7455 its mode, POS must be a constant and smaller than the size of the new
7456 mode. */
7457 else if (!MEM_P (inner))
7459 /* On the LHS, don't create paradoxical subregs implicitly truncating
7460 the register unless TRULY_NOOP_TRUNCATION. */
7461 if (in_dest
7462 && !TRULY_NOOP_TRUNCATION_MODES_P (GET_MODE (inner),
7463 wanted_inner_mode))
7464 return NULL_RTX;
7466 if (GET_MODE (inner) != wanted_inner_mode
7467 && (pos_rtx != 0
7468 || orig_pos + len > GET_MODE_BITSIZE (wanted_inner_mode)))
7469 return NULL_RTX;
7471 if (orig_pos < 0)
7472 return NULL_RTX;
7474 inner = force_to_mode (inner, wanted_inner_mode,
7475 pos_rtx
7476 || len + orig_pos >= HOST_BITS_PER_WIDE_INT
7477 ? ~(unsigned HOST_WIDE_INT) 0
7478 : ((((unsigned HOST_WIDE_INT) 1 << len) - 1)
7479 << orig_pos), 0);
7483 /* Adjust mode of POS_RTX, if needed. If we want a wider mode, we
7484 have to zero extend. Otherwise, we can just use a SUBREG. */
7485 if (pos_rtx != 0
7486 && GET_MODE_SIZE (pos_mode) > GET_MODE_SIZE (GET_MODE (pos_rtx)))
7488 rtx temp = gen_rtx_ZERO_EXTEND (pos_mode, pos_rtx);
7490 /* If we know that no extraneous bits are set, and that the high
7491 bit is not set, convert the extraction to the cheaper of
7492 SIGN_EXTEND or ZERO_EXTEND, which are equivalent in these
7493 cases. */
7494 if (flag_expensive_optimizations
7495 && (HWI_COMPUTABLE_MODE_P (GET_MODE (pos_rtx))
7496 && ((nonzero_bits (pos_rtx, GET_MODE (pos_rtx))
7497 & ~(((unsigned HOST_WIDE_INT)
7498 GET_MODE_MASK (GET_MODE (pos_rtx)))
7499 >> 1))
7500 == 0)))
7502 rtx temp1 = gen_rtx_SIGN_EXTEND (pos_mode, pos_rtx);
7504 /* Prefer ZERO_EXTENSION, since it gives more information to
7505 backends. */
7506 if (set_src_cost (temp1, optimize_this_for_speed_p)
7507 < set_src_cost (temp, optimize_this_for_speed_p))
7508 temp = temp1;
7510 pos_rtx = temp;
7512 else if (pos_rtx != 0
7513 && GET_MODE_SIZE (pos_mode) < GET_MODE_SIZE (GET_MODE (pos_rtx)))
7514 pos_rtx = gen_lowpart (pos_mode, pos_rtx);
7516 /* Make POS_RTX unless we already have it and it is correct. If we don't
7517 have a POS_RTX but we do have an ORIG_POS_RTX, the latter must
7518 be a CONST_INT. */
7519 if (pos_rtx == 0 && orig_pos_rtx != 0 && INTVAL (orig_pos_rtx) == pos)
7520 pos_rtx = orig_pos_rtx;
7522 else if (pos_rtx == 0)
7523 pos_rtx = GEN_INT (pos);
7525 /* Make the required operation. See if we can use an existing rtx. */
7526 new_rtx = gen_rtx_fmt_eee (unsignedp ? ZERO_EXTRACT : SIGN_EXTRACT,
7527 extraction_mode, inner, GEN_INT (len), pos_rtx);
7528 if (! in_dest)
7529 new_rtx = gen_lowpart (mode, new_rtx);
7531 return new_rtx;
7534 /* See if X contains an ASHIFT of COUNT or more bits that can be commuted
7535 with any other operations in X. Return X without that shift if so. */
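/* For example, with COUNT == 2, (plus (ashift X (const_int 2))
   (const_int 12)) is returned as (plus X (const_int 3)), since shifting
   that result left by 2 recreates the original value.  */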
7537 static rtx
7538 extract_left_shift (rtx x, int count)
7540 enum rtx_code code = GET_CODE (x);
7541 enum machine_mode mode = GET_MODE (x);
7542 rtx tem;
7544 switch (code)
7546 case ASHIFT:
7547 /* This is the shift itself. If it is wide enough, we will return
7548 either the value being shifted if the shift count is equal to
7549 COUNT or a shift for the difference. */
7550 if (CONST_INT_P (XEXP (x, 1))
7551 && INTVAL (XEXP (x, 1)) >= count)
7552 return simplify_shift_const (NULL_RTX, ASHIFT, mode, XEXP (x, 0),
7553 INTVAL (XEXP (x, 1)) - count);
7554 break;
7556 case NEG: case NOT:
7557 if ((tem = extract_left_shift (XEXP (x, 0), count)) != 0)
7558 return simplify_gen_unary (code, mode, tem, mode);
7560 break;
7562 case PLUS: case IOR: case XOR: case AND:
7563 /* If we can safely shift this constant and we find the inner shift,
7564 make a new operation. */
7565 if (CONST_INT_P (XEXP (x, 1))
7566 && (UINTVAL (XEXP (x, 1))
7567 & ((((unsigned HOST_WIDE_INT) 1 << count)) - 1)) == 0
7568 && (tem = extract_left_shift (XEXP (x, 0), count)) != 0)
7569 return simplify_gen_binary (code, mode, tem,
7570 GEN_INT (INTVAL (XEXP (x, 1)) >> count));
7572 break;
7574 default:
7575 break;
7578 return 0;
7581 /* Look at the expression rooted at X. Look for expressions
7582 equivalent to ZERO_EXTRACT, SIGN_EXTRACT, ZERO_EXTEND, SIGN_EXTEND.
7583 Form these expressions.
7585 Return the new rtx, usually just X.
7587 Also, for machines like the VAX that don't have logical shift insns,
7588 try to convert logical to arithmetic shift operations in cases where
7589 they are equivalent. This undoes the canonicalizations to logical
7590 shifts done elsewhere.
7592 We try, as much as possible, to re-use rtl expressions to save memory.
7594 IN_CODE says what kind of expression we are processing. Normally, it is
7595 SET. In a memory address (inside a MEM, PLUS or MINUS, the latter two
7596 being kludges), it is MEM. When processing the arguments of a comparison
7597 or a COMPARE against zero, it is COMPARE. */
7599 static rtx
7600 make_compound_operation (rtx x, enum rtx_code in_code)
7602 enum rtx_code code = GET_CODE (x);
7603 enum machine_mode mode = GET_MODE (x);
7604 int mode_width = GET_MODE_PRECISION (mode);
7605 rtx rhs, lhs;
7606 enum rtx_code next_code;
7607 int i, j;
7608 rtx new_rtx = 0;
7609 rtx tem;
7610 const char *fmt;
7612 /* Select the code to be used in recursive calls. Once we are inside an
7613 address, we stay there. If we have a comparison, set to COMPARE,
7614 but once inside, go back to our default of SET. */
7616 next_code = (code == MEM ? MEM
7617 : ((code == PLUS || code == MINUS)
7618 && SCALAR_INT_MODE_P (mode)) ? MEM
7619 : ((code == COMPARE || COMPARISON_P (x))
7620 && XEXP (x, 1) == const0_rtx) ? COMPARE
7621 : in_code == COMPARE ? SET : in_code);
7623 /* Process depending on the code of this operation. If NEW is set
7624 nonzero, it will be returned. */
7626 switch (code)
7628 case ASHIFT:
7629 /* Convert shifts by constants into multiplications if inside
7630 an address. */
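/* For example, (ashift X (const_int 2)) inside a MEM becomes
   (mult X (const_int 4)), the canonical form for addresses.  */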
7631 if (in_code == MEM && CONST_INT_P (XEXP (x, 1))
7632 && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT
7633 && INTVAL (XEXP (x, 1)) >= 0
7634 && SCALAR_INT_MODE_P (mode))
7636 HOST_WIDE_INT count = INTVAL (XEXP (x, 1));
7637 HOST_WIDE_INT multval = (HOST_WIDE_INT) 1 << count;
7639 new_rtx = make_compound_operation (XEXP (x, 0), next_code);
7640 if (GET_CODE (new_rtx) == NEG)
7642 new_rtx = XEXP (new_rtx, 0);
7643 multval = -multval;
7645 multval = trunc_int_for_mode (multval, mode);
7646 new_rtx = gen_rtx_MULT (mode, new_rtx, GEN_INT (multval));
7648 break;
7650 case PLUS:
7651 lhs = XEXP (x, 0);
7652 rhs = XEXP (x, 1);
7653 lhs = make_compound_operation (lhs, next_code);
7654 rhs = make_compound_operation (rhs, next_code);
7655 if (GET_CODE (lhs) == MULT && GET_CODE (XEXP (lhs, 0)) == NEG
7656 && SCALAR_INT_MODE_P (mode))
7658 tem = simplify_gen_binary (MULT, mode, XEXP (XEXP (lhs, 0), 0),
7659 XEXP (lhs, 1));
7660 new_rtx = simplify_gen_binary (MINUS, mode, rhs, tem);
7662 else if (GET_CODE (lhs) == MULT
7663 && (CONST_INT_P (XEXP (lhs, 1)) && INTVAL (XEXP (lhs, 1)) < 0))
7665 tem = simplify_gen_binary (MULT, mode, XEXP (lhs, 0),
7666 simplify_gen_unary (NEG, mode,
7667 XEXP (lhs, 1),
7668 mode));
7669 new_rtx = simplify_gen_binary (MINUS, mode, rhs, tem);
7671 else
7673 SUBST (XEXP (x, 0), lhs);
7674 SUBST (XEXP (x, 1), rhs);
7675 goto maybe_swap;
7677 x = gen_lowpart (mode, new_rtx);
7678 goto maybe_swap;
7680 case MINUS:
7681 lhs = XEXP (x, 0);
7682 rhs = XEXP (x, 1);
7683 lhs = make_compound_operation (lhs, next_code);
7684 rhs = make_compound_operation (rhs, next_code);
7685 if (GET_CODE (rhs) == MULT && GET_CODE (XEXP (rhs, 0)) == NEG
7686 && SCALAR_INT_MODE_P (mode))
7688 tem = simplify_gen_binary (MULT, mode, XEXP (XEXP (rhs, 0), 0),
7689 XEXP (rhs, 1));
7690 new_rtx = simplify_gen_binary (PLUS, mode, tem, lhs);
7692 else if (GET_CODE (rhs) == MULT
7693 && (CONST_INT_P (XEXP (rhs, 1)) && INTVAL (XEXP (rhs, 1)) < 0))
7695 tem = simplify_gen_binary (MULT, mode, XEXP (rhs, 0),
7696 simplify_gen_unary (NEG, mode,
7697 XEXP (rhs, 1),
7698 mode));
7699 new_rtx = simplify_gen_binary (PLUS, mode, tem, lhs);
7701 else
7703 SUBST (XEXP (x, 0), lhs);
7704 SUBST (XEXP (x, 1), rhs);
7705 return x;
7707 return gen_lowpart (mode, new_rtx);
7709 case AND:
7710 /* If the second operand is not a constant, we can't do anything
7711 with it. */
7712 if (!CONST_INT_P (XEXP (x, 1)))
7713 break;
7715 /* If the constant is a power of two minus one and the first operand
7716 is a logical right shift, make an extraction. */
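/* For example, (and (lshiftrt X (const_int 8)) (const_int 255)) becomes
   an extraction of the 8 bits of X starting at bit 8.  */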
7717 if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
7718 && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
7720 new_rtx = make_compound_operation (XEXP (XEXP (x, 0), 0), next_code);
7721 new_rtx = make_extraction (mode, new_rtx, 0, XEXP (XEXP (x, 0), 1), i, 1,
7722 0, in_code == COMPARE);
7725 /* Same as previous, but for (subreg (lshiftrt ...)) in first op. */
7726 else if (GET_CODE (XEXP (x, 0)) == SUBREG
7727 && subreg_lowpart_p (XEXP (x, 0))
7728 && GET_CODE (SUBREG_REG (XEXP (x, 0))) == LSHIFTRT
7729 && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
7731 new_rtx = make_compound_operation (XEXP (SUBREG_REG (XEXP (x, 0)), 0),
7732 next_code);
7733 new_rtx = make_extraction (GET_MODE (SUBREG_REG (XEXP (x, 0))), new_rtx, 0,
7734 XEXP (SUBREG_REG (XEXP (x, 0)), 1), i, 1,
7735 0, in_code == COMPARE);
7737 /* Same as previous, but for (xor/ior (lshiftrt...) (lshiftrt...)). */
7738 else if ((GET_CODE (XEXP (x, 0)) == XOR
7739 || GET_CODE (XEXP (x, 0)) == IOR)
7740 && GET_CODE (XEXP (XEXP (x, 0), 0)) == LSHIFTRT
7741 && GET_CODE (XEXP (XEXP (x, 0), 1)) == LSHIFTRT
7742 && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
7744 /* Apply the distributive law, and then try to make extractions. */
7745 new_rtx = gen_rtx_fmt_ee (GET_CODE (XEXP (x, 0)), mode,
7746 gen_rtx_AND (mode, XEXP (XEXP (x, 0), 0),
7747 XEXP (x, 1)),
7748 gen_rtx_AND (mode, XEXP (XEXP (x, 0), 1),
7749 XEXP (x, 1)));
7750 new_rtx = make_compound_operation (new_rtx, in_code);
7753 /* If we have (and (rotate X C) M) and C is larger than the number
7754 of bits in M, this is an extraction. */
7756 else if (GET_CODE (XEXP (x, 0)) == ROTATE
7757 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
7758 && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0
7759 && i <= INTVAL (XEXP (XEXP (x, 0), 1)))
7761 new_rtx = make_compound_operation (XEXP (XEXP (x, 0), 0), next_code);
7762 new_rtx = make_extraction (mode, new_rtx,
7763 (GET_MODE_PRECISION (mode)
7764 - INTVAL (XEXP (XEXP (x, 0), 1))),
7765 NULL_RTX, i, 1, 0, in_code == COMPARE);
7768 /* On machines without logical shifts, if the operand of the AND is
7769 a logical shift and our mask turns off all the propagated sign
7770 bits, we can replace the logical shift with an arithmetic shift. */
7771 else if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
7772 && !have_insn_for (LSHIFTRT, mode)
7773 && have_insn_for (ASHIFTRT, mode)
7774 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
7775 && INTVAL (XEXP (XEXP (x, 0), 1)) >= 0
7776 && INTVAL (XEXP (XEXP (x, 0), 1)) < HOST_BITS_PER_WIDE_INT
7777 && mode_width <= HOST_BITS_PER_WIDE_INT)
7779 unsigned HOST_WIDE_INT mask = GET_MODE_MASK (mode);
7781 mask >>= INTVAL (XEXP (XEXP (x, 0), 1));
7782 if ((INTVAL (XEXP (x, 1)) & ~mask) == 0)
7783 SUBST (XEXP (x, 0),
7784 gen_rtx_ASHIFTRT (mode,
7785 make_compound_operation
7786 (XEXP (XEXP (x, 0), 0), next_code),
7787 XEXP (XEXP (x, 0), 1)));
7790 /* If the constant is one less than a power of two, this might be
7791 representable by an extraction even if no shift is present.
7792 If it doesn't end up being a ZERO_EXTEND, we will ignore it unless
7793 we are in a COMPARE. */
7794 else if ((i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
7795 new_rtx = make_extraction (mode,
7796 make_compound_operation (XEXP (x, 0),
7797 next_code),
7798 0, NULL_RTX, i, 1, 0, in_code == COMPARE);
7800 /* If we are in a comparison and this is an AND with a power of two,
7801 convert this into the appropriate bit extract. */
7802 else if (in_code == COMPARE
7803 && (i = exact_log2 (UINTVAL (XEXP (x, 1)))) >= 0)
7804 new_rtx = make_extraction (mode,
7805 make_compound_operation (XEXP (x, 0),
7806 next_code),
7807 i, NULL_RTX, 1, 1, 0, 1);
7809 break;
7811 case LSHIFTRT:
7812 /* If the sign bit is known to be zero, replace this with an
7813 arithmetic shift. */
7814 if (have_insn_for (ASHIFTRT, mode)
7815 && ! have_insn_for (LSHIFTRT, mode)
7816 && mode_width <= HOST_BITS_PER_WIDE_INT
7817 && (nonzero_bits (XEXP (x, 0), mode) & (1 << (mode_width - 1))) == 0)
7819 new_rtx = gen_rtx_ASHIFTRT (mode,
7820 make_compound_operation (XEXP (x, 0),
7821 next_code),
7822 XEXP (x, 1));
7823 break;
7826 /* ... fall through ... */
7828 case ASHIFTRT:
7829 lhs = XEXP (x, 0);
7830 rhs = XEXP (x, 1);
7832 /* If we have (ashiftrt (ashift foo C1) C2) with C2 >= C1,
7833 this is a SIGN_EXTRACT. */
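/* For example, in SImode (ashiftrt (ashift X (const_int 24)) (const_int 24))
   extracts the low 8 bits of X with sign extension.  */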
7834 if (CONST_INT_P (rhs)
7835 && GET_CODE (lhs) == ASHIFT
7836 && CONST_INT_P (XEXP (lhs, 1))
7837 && INTVAL (rhs) >= INTVAL (XEXP (lhs, 1))
7838 && INTVAL (XEXP (lhs, 1)) >= 0
7839 && INTVAL (rhs) < mode_width)
7841 new_rtx = make_compound_operation (XEXP (lhs, 0), next_code);
7842 new_rtx = make_extraction (mode, new_rtx,
7843 INTVAL (rhs) - INTVAL (XEXP (lhs, 1)),
7844 NULL_RTX, mode_width - INTVAL (rhs),
7845 code == LSHIFTRT, 0, in_code == COMPARE);
7846 break;
7849 /* See if we have operations between an ASHIFTRT and an ASHIFT.
7850 If so, try to merge the shifts into a SIGN_EXTEND. We could
7851 also do this for some cases of SIGN_EXTRACT, but it doesn't
7852 seem worth the effort; the case checked for occurs on Alpha. */
7854 if (!OBJECT_P (lhs)
7855 && ! (GET_CODE (lhs) == SUBREG
7856 && (OBJECT_P (SUBREG_REG (lhs))))
7857 && CONST_INT_P (rhs)
7858 && INTVAL (rhs) < HOST_BITS_PER_WIDE_INT
7859 && INTVAL (rhs) < mode_width
7860 && (new_rtx = extract_left_shift (lhs, INTVAL (rhs))) != 0)
7861 new_rtx = make_extraction (mode, make_compound_operation (new_rtx, next_code),
7862 0, NULL_RTX, mode_width - INTVAL (rhs),
7863 code == LSHIFTRT, 0, in_code == COMPARE);
7865 break;
7867 case SUBREG:
7868 /* Call ourselves recursively on the inner expression. If we are
7869 narrowing the object and it has a different RTL code from
7870 what it originally did, do this SUBREG as a force_to_mode. */
7872 rtx inner = SUBREG_REG (x), simplified;
7874 tem = make_compound_operation (inner, in_code);
7876 simplified
7877 = simplify_subreg (mode, tem, GET_MODE (inner), SUBREG_BYTE (x));
7878 if (simplified)
7879 tem = simplified;
7881 if (GET_CODE (tem) != GET_CODE (inner)
7882 && GET_MODE_SIZE (mode) < GET_MODE_SIZE (GET_MODE (inner))
7883 && subreg_lowpart_p (x))
7885 rtx newer
7886 = force_to_mode (tem, mode, ~(unsigned HOST_WIDE_INT) 0, 0);
7888 /* If we have something other than a SUBREG, we might have
7889 done an expansion, so rerun ourselves. */
7890 if (GET_CODE (newer) != SUBREG)
7891 newer = make_compound_operation (newer, in_code);
7893 /* force_to_mode can expand compounds. If it just re-expanded the
7894 compound, use gen_lowpart to convert to the desired mode. */
7895 if (rtx_equal_p (newer, x)
7896 /* Likewise if it re-expanded the compound only partially.
7897 This happens for SUBREG of ZERO_EXTRACT if they extract
7898 the same number of bits. */
7899 || (GET_CODE (newer) == SUBREG
7900 && (GET_CODE (SUBREG_REG (newer)) == LSHIFTRT
7901 || GET_CODE (SUBREG_REG (newer)) == ASHIFTRT)
7902 && GET_CODE (inner) == AND
7903 && rtx_equal_p (SUBREG_REG (newer), XEXP (inner, 0))))
7904 return gen_lowpart (GET_MODE (x), tem);
7906 return newer;
7909 if (simplified)
7910 return tem;
7912 break;
7914 default:
7915 break;
7918 if (new_rtx)
7920 x = gen_lowpart (mode, new_rtx);
7921 code = GET_CODE (x);
7924 /* Now recursively process each operand of this operation. We need to
7925 handle ZERO_EXTEND specially so that we don't lose track of the
7926 inner mode. */
7927 if (GET_CODE (x) == ZERO_EXTEND)
7929 new_rtx = make_compound_operation (XEXP (x, 0), next_code);
7930 tem = simplify_const_unary_operation (ZERO_EXTEND, GET_MODE (x),
7931 new_rtx, GET_MODE (XEXP (x, 0)));
7932 if (tem)
7933 return tem;
7934 SUBST (XEXP (x, 0), new_rtx);
7935 return x;
7938 fmt = GET_RTX_FORMAT (code);
7939 for (i = 0; i < GET_RTX_LENGTH (code); i++)
7940 if (fmt[i] == 'e')
7942 new_rtx = make_compound_operation (XEXP (x, i), next_code);
7943 SUBST (XEXP (x, i), new_rtx);
7945 else if (fmt[i] == 'E')
7946 for (j = 0; j < XVECLEN (x, i); j++)
7948 new_rtx = make_compound_operation (XVECEXP (x, i, j), next_code);
7949 SUBST (XVECEXP (x, i, j), new_rtx);
7952 maybe_swap:
7953 /* If this is a commutative operation, the changes to the operands
7954 may have made it noncanonical. */
7955 if (COMMUTATIVE_ARITH_P (x)
7956 && swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
7958 tem = XEXP (x, 0);
7959 SUBST (XEXP (x, 0), XEXP (x, 1));
7960 SUBST (XEXP (x, 1), tem);
7963 return x;
7966 /* Given M, see if it is a value that would select a field of bits
7967 within an item, but not the entire word. Return -1 if not.
7968 Otherwise, return the starting position of the field, where 0 is the
7969 low-order bit.
7971 *PLEN is set to the length of the field. */
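/* For example, M == 0x78 selects a 4-bit field starting at bit 3, so we
   return 3 with *PLEN set to 4; M == 0x05 does not select a contiguous
   field, so we return -1.  */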
7973 static int
7974 get_pos_from_mask (unsigned HOST_WIDE_INT m, unsigned HOST_WIDE_INT *plen)
7976 /* Get the bit number of the first 1 bit from the right, -1 if none. */
7977 int pos = m ? ctz_hwi (m) : -1;
7978 int len = 0;
7980 if (pos >= 0)
7981 /* Now shift off the low-order zero bits and see if we have a
7982 power of two minus 1. */
7983 len = exact_log2 ((m >> pos) + 1);
7985 if (len <= 0)
7986 pos = -1;
7988 *plen = len;
7989 return pos;
7992 /* If X refers to a register that equals REG in value, replace these
7993 references with REG. */
7994 static rtx
7995 canon_reg_for_combine (rtx x, rtx reg)
7997 rtx op0, op1, op2;
7998 const char *fmt;
7999 int i;
8000 bool copied;
8002 enum rtx_code code = GET_CODE (x);
8003 switch (GET_RTX_CLASS (code))
8005 case RTX_UNARY:
8006 op0 = canon_reg_for_combine (XEXP (x, 0), reg);
8007 if (op0 != XEXP (x, 0))
8008 return simplify_gen_unary (GET_CODE (x), GET_MODE (x), op0,
8009 GET_MODE (reg));
8010 break;
8012 case RTX_BIN_ARITH:
8013 case RTX_COMM_ARITH:
8014 op0 = canon_reg_for_combine (XEXP (x, 0), reg);
8015 op1 = canon_reg_for_combine (XEXP (x, 1), reg);
8016 if (op0 != XEXP (x, 0) || op1 != XEXP (x, 1))
8017 return simplify_gen_binary (GET_CODE (x), GET_MODE (x), op0, op1);
8018 break;
8020 case RTX_COMPARE:
8021 case RTX_COMM_COMPARE:
8022 op0 = canon_reg_for_combine (XEXP (x, 0), reg);
8023 op1 = canon_reg_for_combine (XEXP (x, 1), reg);
8024 if (op0 != XEXP (x, 0) || op1 != XEXP (x, 1))
8025 return simplify_gen_relational (GET_CODE (x), GET_MODE (x),
8026 GET_MODE (op0), op0, op1);
8027 break;
8029 case RTX_TERNARY:
8030 case RTX_BITFIELD_OPS:
8031 op0 = canon_reg_for_combine (XEXP (x, 0), reg);
8032 op1 = canon_reg_for_combine (XEXP (x, 1), reg);
8033 op2 = canon_reg_for_combine (XEXP (x, 2), reg);
8034 if (op0 != XEXP (x, 0) || op1 != XEXP (x, 1) || op2 != XEXP (x, 2))
8035 return simplify_gen_ternary (GET_CODE (x), GET_MODE (x),
8036 GET_MODE (op0), op0, op1, op2);
8038 case RTX_OBJ:
8039 if (REG_P (x))
8041 if (rtx_equal_p (get_last_value (reg), x)
8042 || rtx_equal_p (reg, get_last_value (x)))
8043 return reg;
8044 else
8045 break;
8048 /* fall through */
8050 default:
8051 fmt = GET_RTX_FORMAT (code);
8052 copied = false;
8053 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
8054 if (fmt[i] == 'e')
8056 rtx op = canon_reg_for_combine (XEXP (x, i), reg);
8057 if (op != XEXP (x, i))
8059 if (!copied)
8061 copied = true;
8062 x = copy_rtx (x);
8064 XEXP (x, i) = op;
8067 else if (fmt[i] == 'E')
8069 int j;
8070 for (j = 0; j < XVECLEN (x, i); j++)
8072 rtx op = canon_reg_for_combine (XVECEXP (x, i, j), reg);
8073 if (op != XVECEXP (x, i, j))
8075 if (!copied)
8077 copied = true;
8078 x = copy_rtx (x);
8080 XVECEXP (x, i, j) = op;
8085 break;
8088 return x;
8091 /* Return X converted to MODE. If the value is already truncated to
8092 MODE we can just return a subreg even though in the general case we
8093 would need an explicit truncation. */
8095 static rtx
8096 gen_lowpart_or_truncate (enum machine_mode mode, rtx x)
8098 if (!CONST_INT_P (x)
8099 && GET_MODE_SIZE (mode) < GET_MODE_SIZE (GET_MODE (x))
8100 && !TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (x))
8101 && !(REG_P (x) && reg_truncated_to_mode (mode, x)))
8103 /* Bit-cast X into an integer mode. */
8104 if (!SCALAR_INT_MODE_P (GET_MODE (x)))
8105 x = gen_lowpart (int_mode_for_mode (GET_MODE (x)), x);
8106 x = simplify_gen_unary (TRUNCATE, int_mode_for_mode (mode),
8107 x, GET_MODE (x));
8110 return gen_lowpart (mode, x);
8113 /* See if X can be simplified knowing that we will only refer to it in
8114 MODE and will only refer to those bits that are nonzero in MASK.
8115 If other bits are being computed or if masking operations are done
8116 that select a superset of the bits in MASK, they can sometimes be
8117 ignored.
8119 Return a possibly simplified expression, but always convert X to
8120 MODE. If X is a CONST_INT, AND the CONST_INT with MASK.
8122 If JUST_SELECT is nonzero, don't optimize by noticing that bits in MASK
8123 are all off in X. This is used when X will be complemented, by either
8124 NOT, NEG, or XOR. */
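/* For example, if only the low 8 bits of (plus X (const_int 260)) are
   needed, the constant can be reduced to (const_int 4), since the higher
   bits of the constant cannot affect the bits selected by MASK.  */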
8126 static rtx
8127 force_to_mode (rtx x, enum machine_mode mode, unsigned HOST_WIDE_INT mask,
8128 int just_select)
8130 enum rtx_code code = GET_CODE (x);
8131 int next_select = just_select || code == XOR || code == NOT || code == NEG;
8132 enum machine_mode op_mode;
8133 unsigned HOST_WIDE_INT fuller_mask, nonzero;
8134 rtx op0, op1, temp;
8136 /* If this is a CALL or ASM_OPERANDS, don't do anything. Some of the
8137 code below will do the wrong thing since the mode of such an
8138 expression is VOIDmode.
8140 Also do nothing if X is a CLOBBER; this can happen if X was
8141 the return value from a call to gen_lowpart. */
8142 if (code == CALL || code == ASM_OPERANDS || code == CLOBBER)
8143 return x;
8145 /* We want to perform the operation in its present mode unless we know
8146 that the operation is valid in MODE, in which case we do the operation
8147 in MODE. */
8148 op_mode = ((GET_MODE_CLASS (mode) == GET_MODE_CLASS (GET_MODE (x))
8149 && have_insn_for (code, mode))
8150 ? mode : GET_MODE (x));
8152 /* It is not valid to do a right-shift in a narrower mode
8153 than the one it came in with. */
8154 if ((code == LSHIFTRT || code == ASHIFTRT)
8155 && GET_MODE_PRECISION (mode) < GET_MODE_PRECISION (GET_MODE (x)))
8156 op_mode = GET_MODE (x);
8158 /* Truncate MASK to fit OP_MODE. */
8159 if (op_mode)
8160 mask &= GET_MODE_MASK (op_mode);
8162 /* When we have an arithmetic operation, or a shift whose count we
8163 do not know, we need to assume that all bits up to the highest-order
8164 bit in MASK will be needed. This is how we form such a mask. */
8165 if (mask & ((unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)))
8166 fuller_mask = ~(unsigned HOST_WIDE_INT) 0;
8167 else
8168 fuller_mask = (((unsigned HOST_WIDE_INT) 1 << (floor_log2 (mask) + 1))
8169 - 1);
8171 /* Determine what bits of X are guaranteed to be (non)zero. */
8172 nonzero = nonzero_bits (x, mode);
8174 /* If none of the bits in X are needed, return a zero. */
8175 if (!just_select && (nonzero & mask) == 0 && !side_effects_p (x))
8176 x = const0_rtx;
8178 /* If X is a CONST_INT, return a new one. Do this here since the
8179 test below will fail. */
8180 if (CONST_INT_P (x))
8182 if (SCALAR_INT_MODE_P (mode))
8183 return gen_int_mode (INTVAL (x) & mask, mode);
8184 else
8186 x = GEN_INT (INTVAL (x) & mask);
8187 return gen_lowpart_common (mode, x);
8191 /* If X is narrower than MODE and we want all the bits in X's mode, just
8192 get X in the proper mode. */
8193 if (GET_MODE_SIZE (GET_MODE (x)) < GET_MODE_SIZE (mode)
8194 && (GET_MODE_MASK (GET_MODE (x)) & ~mask) == 0)
8195 return gen_lowpart (mode, x);
8197 /* We can ignore the effect of a SUBREG if it narrows the mode or
8198 if the constant masks to zero all the bits the mode doesn't have. */
8199 if (GET_CODE (x) == SUBREG
8200 && subreg_lowpart_p (x)
8201 && ((GET_MODE_SIZE (GET_MODE (x))
8202 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x))))
8203 || (0 == (mask
8204 & GET_MODE_MASK (GET_MODE (x))
8205 & ~GET_MODE_MASK (GET_MODE (SUBREG_REG (x)))))))
8206 return force_to_mode (SUBREG_REG (x), mode, mask, next_select);
8208 /* The arithmetic simplifications here only work for scalar integer modes. */
8209 if (!SCALAR_INT_MODE_P (mode) || !SCALAR_INT_MODE_P (GET_MODE (x)))
8210 return gen_lowpart_or_truncate (mode, x);
8212 switch (code)
8214 case CLOBBER:
8215 /* If X is a (clobber (const_int)), return it since we know we are
8216 generating something that won't match. */
8217 return x;
8219 case SIGN_EXTEND:
8220 case ZERO_EXTEND:
8221 case ZERO_EXTRACT:
8222 case SIGN_EXTRACT:
8223 x = expand_compound_operation (x);
8224 if (GET_CODE (x) != code)
8225 return force_to_mode (x, mode, mask, next_select);
8226 break;
8228 case TRUNCATE:
8229 /* Similarly for a truncate. */
8230 return force_to_mode (XEXP (x, 0), mode, mask, next_select);
8232 case AND:
8233 /* If this is an AND with a constant, convert it into an AND
8234 whose constant is the AND of that constant with MASK. If it
8235 remains an AND of MASK, delete it since it is redundant. */
8237 if (CONST_INT_P (XEXP (x, 1)))
8239 x = simplify_and_const_int (x, op_mode, XEXP (x, 0),
8240 mask & INTVAL (XEXP (x, 1)));
8242 /* If X is still an AND, see if it is an AND with a mask that
8243 is just some low-order bits. If so, and it is MASK, we don't
8244 need it. */
8246 if (GET_CODE (x) == AND && CONST_INT_P (XEXP (x, 1))
8247 && ((INTVAL (XEXP (x, 1)) & GET_MODE_MASK (GET_MODE (x)))
8248 == mask))
8249 x = XEXP (x, 0);
8251 /* If it remains an AND, try making another AND with the bits
8252 in the mode mask that aren't in MASK turned on. If the
8253 constant in the AND is wide enough, this might make a
8254 cheaper constant. */
8256 if (GET_CODE (x) == AND && CONST_INT_P (XEXP (x, 1))
8257 && GET_MODE_MASK (GET_MODE (x)) != mask
8258 && HWI_COMPUTABLE_MODE_P (GET_MODE (x)))
8260 unsigned HOST_WIDE_INT cval
8261 = UINTVAL (XEXP (x, 1))
8262 | (GET_MODE_MASK (GET_MODE (x)) & ~mask);
8263 int width = GET_MODE_PRECISION (GET_MODE (x));
8264 rtx y;
8266 /* If MODE is narrower than HOST_WIDE_INT and CVAL is a negative
8267 number, sign extend it. */
8268 if (width > 0 && width < HOST_BITS_PER_WIDE_INT
8269 && (cval & ((unsigned HOST_WIDE_INT) 1 << (width - 1))) != 0)
8270 cval |= (unsigned HOST_WIDE_INT) -1 << width;
8272 y = simplify_gen_binary (AND, GET_MODE (x),
8273 XEXP (x, 0), GEN_INT (cval));
8274 if (set_src_cost (y, optimize_this_for_speed_p)
8275 < set_src_cost (x, optimize_this_for_speed_p))
8276 x = y;
8279 break;
8282 goto binop;
8284 case PLUS:
8285 /* In (and (plus FOO C1) M), if M is a mask that just turns off
8286 low-order bits (as in an alignment operation) and FOO is already
8287 aligned to that boundary, mask C1 to that boundary as well.
8288 This may eliminate that PLUS and, later, the AND. */
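/* For instance, if M is ~7 (an 8-byte alignment mask) and FOO is known
   to be a multiple of 8, a C1 of 9 can be masked down to 8.  */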
8291 unsigned int width = GET_MODE_PRECISION (mode);
8292 unsigned HOST_WIDE_INT smask = mask;
8294 /* If MODE is narrower than HOST_WIDE_INT and mask is a negative
8295 number, sign extend it. */
8297 if (width < HOST_BITS_PER_WIDE_INT
8298 && (smask & ((unsigned HOST_WIDE_INT) 1 << (width - 1))) != 0)
8299 smask |= (unsigned HOST_WIDE_INT) (-1) << width;
8301 if (CONST_INT_P (XEXP (x, 1))
8302 && exact_log2 (- smask) >= 0
8303 && (nonzero_bits (XEXP (x, 0), mode) & ~smask) == 0
8304 && (INTVAL (XEXP (x, 1)) & ~smask) != 0)
8305 return force_to_mode (plus_constant (GET_MODE (x), XEXP (x, 0),
8306 (INTVAL (XEXP (x, 1)) & smask)),
8307 mode, smask, next_select);
8310 /* ... fall through ... */
8312 case MULT:
8313 /* For PLUS, MINUS and MULT, we need any bits less significant than the
8314 most significant bit in MASK since carries from those bits will
8315 affect the bits we are interested in. */
8316 mask = fuller_mask;
8317 goto binop;
8319 case MINUS:
8320 /* If X is (minus C Y) where C's least set bit is larger than any bit
8321 in the mask, then we may replace with (neg Y). */
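/* For example, under a MASK of 15, (minus (const_int 16) Y) behaves
   like (neg Y), since bit 4 of the constant cannot affect the low four
   bits of the result.  */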
8322 if (CONST_INT_P (XEXP (x, 0))
8323 && (((unsigned HOST_WIDE_INT) (INTVAL (XEXP (x, 0))
8324 & -INTVAL (XEXP (x, 0))))
8325 > mask))
8327 x = simplify_gen_unary (NEG, GET_MODE (x), XEXP (x, 1),
8328 GET_MODE (x));
8329 return force_to_mode (x, mode, mask, next_select);
8332 /* Similarly, if C contains every bit in the fuller_mask, then we may
8333 replace with (not Y). */
8334 if (CONST_INT_P (XEXP (x, 0))
8335 && ((UINTVAL (XEXP (x, 0)) | fuller_mask) == UINTVAL (XEXP (x, 0))))
8337 x = simplify_gen_unary (NOT, GET_MODE (x),
8338 XEXP (x, 1), GET_MODE (x));
8339 return force_to_mode (x, mode, mask, next_select);
8342 mask = fuller_mask;
8343 goto binop;
8345 case IOR:
8346 case XOR:
8347 /* If X is (ior (lshiftrt FOO C1) C2), try to commute the IOR and
8348 LSHIFTRT so we end up with an (and (lshiftrt (ior ...) ...) ...)
8349 operation which may be a bitfield extraction. Ensure that the
8350 constant we form is not wider than the mode of X. */
8352 if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
8353 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
8354 && INTVAL (XEXP (XEXP (x, 0), 1)) >= 0
8355 && INTVAL (XEXP (XEXP (x, 0), 1)) < HOST_BITS_PER_WIDE_INT
8356 && CONST_INT_P (XEXP (x, 1))
8357 && ((INTVAL (XEXP (XEXP (x, 0), 1))
8358 + floor_log2 (INTVAL (XEXP (x, 1))))
8359 < GET_MODE_PRECISION (GET_MODE (x)))
8360 && (UINTVAL (XEXP (x, 1))
8361 & ~nonzero_bits (XEXP (x, 0), GET_MODE (x))) == 0)
8363 temp = GEN_INT ((INTVAL (XEXP (x, 1)) & mask)
8364 << INTVAL (XEXP (XEXP (x, 0), 1)));
8365 temp = simplify_gen_binary (GET_CODE (x), GET_MODE (x),
8366 XEXP (XEXP (x, 0), 0), temp);
8367 x = simplify_gen_binary (LSHIFTRT, GET_MODE (x), temp,
8368 XEXP (XEXP (x, 0), 1));
8369 return force_to_mode (x, mode, mask, next_select);
8372 binop:
8373 /* For most binary operations, just propagate into the operation and
8374 change the mode if we have an operation of that mode. */
8376 op0 = force_to_mode (XEXP (x, 0), mode, mask, next_select);
8377 op1 = force_to_mode (XEXP (x, 1), mode, mask, next_select);
8379 /* If we ended up truncating both operands, truncate the result of the
8380 operation instead. */
8381 if (GET_CODE (op0) == TRUNCATE
8382 && GET_CODE (op1) == TRUNCATE)
8384 op0 = XEXP (op0, 0);
8385 op1 = XEXP (op1, 0);
8388 op0 = gen_lowpart_or_truncate (op_mode, op0);
8389 op1 = gen_lowpart_or_truncate (op_mode, op1);
8391 if (op_mode != GET_MODE (x) || op0 != XEXP (x, 0) || op1 != XEXP (x, 1))
8392 x = simplify_gen_binary (code, op_mode, op0, op1);
8393 break;
8395 case ASHIFT:
8396 /* For left shifts, do the same, but just for the first operand.
8397 However, we cannot do anything with shifts where we cannot
8398 guarantee that the counts are smaller than the size of the mode
8399 because such a count will have a different meaning in a
8400 wider mode. */
8402 if (! (CONST_INT_P (XEXP (x, 1))
8403 && INTVAL (XEXP (x, 1)) >= 0
8404 && INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (mode))
8405 && ! (GET_MODE (XEXP (x, 1)) != VOIDmode
8406 && (nonzero_bits (XEXP (x, 1), GET_MODE (XEXP (x, 1)))
8407 < (unsigned HOST_WIDE_INT) GET_MODE_PRECISION (mode))))
8408 break;
8410 /* If the shift count is a constant and we can do arithmetic in
8411 the mode of the shift, refine which bits we need. Otherwise, use the
8412 conservative form of the mask. */
8413 if (CONST_INT_P (XEXP (x, 1))
8414 && INTVAL (XEXP (x, 1)) >= 0
8415 && INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (op_mode)
8416 && HWI_COMPUTABLE_MODE_P (op_mode))
8417 mask >>= INTVAL (XEXP (x, 1));
8418 else
8419 mask = fuller_mask;
8421 op0 = gen_lowpart_or_truncate (op_mode,
8422 force_to_mode (XEXP (x, 0), op_mode,
8423 mask, next_select));
8425 if (op_mode != GET_MODE (x) || op0 != XEXP (x, 0))
8426 x = simplify_gen_binary (code, op_mode, op0, XEXP (x, 1));
8427 break;
8429 case LSHIFTRT:
8430 /* Here we can only do something if the shift count is a constant,
8431 this shift constant is valid for the host, and we can do arithmetic
8432 in OP_MODE. */
8434 if (CONST_INT_P (XEXP (x, 1))
8435 && INTVAL (XEXP (x, 1)) >= 0
8436 && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT
8437 && HWI_COMPUTABLE_MODE_P (op_mode))
8439 rtx inner = XEXP (x, 0);
8440 unsigned HOST_WIDE_INT inner_mask;
8442 /* Select the mask of the bits we need for the shift operand. */
8443 inner_mask = mask << INTVAL (XEXP (x, 1));
8445 /* We can only change the mode of the shift if we can do arithmetic
8446 in the mode of the shift and INNER_MASK is no wider than the
8447 width of X's mode. */
8448 if ((inner_mask & ~GET_MODE_MASK (GET_MODE (x))) != 0)
8449 op_mode = GET_MODE (x);
8451 inner = force_to_mode (inner, op_mode, inner_mask, next_select);
8453 if (GET_MODE (x) != op_mode || inner != XEXP (x, 0))
8454 x = simplify_gen_binary (LSHIFTRT, op_mode, inner, XEXP (x, 1));
8457 /* If we have (and (lshiftrt FOO C1) C2) where the combination of the
8458 shift and AND produces only copies of the sign bit (C2 is one less
8459 than a power of two), we can do this with just a shift. */
8461 if (GET_CODE (x) == LSHIFTRT
8462 && CONST_INT_P (XEXP (x, 1))
8463 /* The shift puts one of the sign bit copies in the least significant
8464 bit. */
8465 && ((INTVAL (XEXP (x, 1))
8466 + num_sign_bit_copies (XEXP (x, 0), GET_MODE (XEXP (x, 0))))
8467 >= GET_MODE_PRECISION (GET_MODE (x)))
8468 && exact_log2 (mask + 1) >= 0
8469 /* Number of bits left after the shift must be more than the mask
8470 needs. */
8471 && ((INTVAL (XEXP (x, 1)) + exact_log2 (mask + 1))
8472 <= GET_MODE_PRECISION (GET_MODE (x)))
8473 /* Must be more sign bit copies than the mask needs. */
8474 && ((int) num_sign_bit_copies (XEXP (x, 0), GET_MODE (XEXP (x, 0)))
8475 >= exact_log2 (mask + 1)))
8476 x = simplify_gen_binary (LSHIFTRT, GET_MODE (x), XEXP (x, 0),
8477 GEN_INT (GET_MODE_PRECISION (GET_MODE (x))
8478 - exact_log2 (mask + 1)));
8480 goto shiftrt;
8482 case ASHIFTRT:
8483 /* If we are just looking for the sign bit, we don't need this shift at
8484 all, even if it has a variable count. */
8485 if (val_signbit_p (GET_MODE (x), mask))
8486 return force_to_mode (XEXP (x, 0), mode, mask, next_select);
8488 /* If this is a shift by a constant, get a mask that contains those bits
8489 that are not copies of the sign bit. We then have two cases: If
8490 MASK only includes those bits, this can be a logical shift, which may
8491 allow simplifications. If MASK is a single-bit field not within
8492 those bits, we are requesting a copy of the sign bit and hence can
8493 shift the sign bit to the appropriate location. */
8495 if (CONST_INT_P (XEXP (x, 1)) && INTVAL (XEXP (x, 1)) >= 0
8496 && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT)
8498 int i;
8500 /* If the considered data is wider than HOST_WIDE_INT, we can't
8501 represent a mask for all its bits in a single scalar.
8502 But we only care about the lower bits, so calculate these. */
8504 if (GET_MODE_PRECISION (GET_MODE (x)) > HOST_BITS_PER_WIDE_INT)
8506 nonzero = ~(unsigned HOST_WIDE_INT) 0;
8508 /* GET_MODE_PRECISION (GET_MODE (x)) - INTVAL (XEXP (x, 1))
8509 is the number of bits a full-width mask would have set.
8510 We need only shift if these are fewer than nonzero can
8511 hold. If not, we must keep all bits set in nonzero. */
8513 if (GET_MODE_PRECISION (GET_MODE (x)) - INTVAL (XEXP (x, 1))
8514 < HOST_BITS_PER_WIDE_INT)
8515 nonzero >>= INTVAL (XEXP (x, 1))
8516 + HOST_BITS_PER_WIDE_INT
8517 - GET_MODE_PRECISION (GET_MODE (x)) ;
8519 else
8521 nonzero = GET_MODE_MASK (GET_MODE (x));
8522 nonzero >>= INTVAL (XEXP (x, 1));
8525 if ((mask & ~nonzero) == 0)
8527 x = simplify_shift_const (NULL_RTX, LSHIFTRT, GET_MODE (x),
8528 XEXP (x, 0), INTVAL (XEXP (x, 1)));
8529 if (GET_CODE (x) != ASHIFTRT)
8530 return force_to_mode (x, mode, mask, next_select);
8533 else if ((i = exact_log2 (mask)) >= 0)
8535 x = simplify_shift_const
8536 (NULL_RTX, LSHIFTRT, GET_MODE (x), XEXP (x, 0),
8537 GET_MODE_PRECISION (GET_MODE (x)) - 1 - i);
8539 if (GET_CODE (x) != ASHIFTRT)
8540 return force_to_mode (x, mode, mask, next_select);
8544 /* If MASK is 1, convert this to an LSHIFTRT. This can be done
8545 even if the shift count isn't a constant. */
8546 if (mask == 1)
8547 x = simplify_gen_binary (LSHIFTRT, GET_MODE (x),
8548 XEXP (x, 0), XEXP (x, 1));
8550 shiftrt:
8552 /* If this is a zero- or sign-extension operation that just affects bits
8553 we don't care about, remove it. Be sure the call above returned
8554 something that is still a shift. */
8556 if ((GET_CODE (x) == LSHIFTRT || GET_CODE (x) == ASHIFTRT)
8557 && CONST_INT_P (XEXP (x, 1))
8558 && INTVAL (XEXP (x, 1)) >= 0
8559 && (INTVAL (XEXP (x, 1))
8560 <= GET_MODE_PRECISION (GET_MODE (x)) - (floor_log2 (mask) + 1))
8561 && GET_CODE (XEXP (x, 0)) == ASHIFT
8562 && XEXP (XEXP (x, 0), 1) == XEXP (x, 1))
8563 return force_to_mode (XEXP (XEXP (x, 0), 0), mode, mask,
8564 next_select);
8566 break;
8568 case ROTATE:
8569 case ROTATERT:
8570 /* If the shift count is constant and we can do computations
8571 in the mode of X, compute where the bits we care about are.
8572 Otherwise, we can't do anything. Don't change the mode of
8573 the shift or propagate MODE into the shift, though. */
8574 if (CONST_INT_P (XEXP (x, 1))
8575 && INTVAL (XEXP (x, 1)) >= 0)
8577 temp = simplify_binary_operation (code == ROTATE ? ROTATERT : ROTATE,
8578 GET_MODE (x), GEN_INT (mask),
8579 XEXP (x, 1));
8580 if (temp && CONST_INT_P (temp))
8581 SUBST (XEXP (x, 0),
8582 force_to_mode (XEXP (x, 0), GET_MODE (x),
8583 INTVAL (temp), next_select));
8585 break;
8587 case NEG:
8588 /* If we just want the low-order bit, the NEG isn't needed since it
8589 won't change the low-order bit. */
8590 if (mask == 1)
8591 return force_to_mode (XEXP (x, 0), mode, mask, just_select);
8593 /* We need any bits less significant than the most significant bit in
8594 MASK since carries from those bits will affect the bits we are
8595 interested in. */
8596 mask = fuller_mask;
8597 goto unop;
8599 case NOT:
8600 /* (not FOO) is (xor FOO CONST), so if FOO is an LSHIFTRT, we can do the
8601 same as the XOR case above. Ensure that the constant we form is not
8602 wider than the mode of X. */
8604 if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
8605 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
8606 && INTVAL (XEXP (XEXP (x, 0), 1)) >= 0
8607 && (INTVAL (XEXP (XEXP (x, 0), 1)) + floor_log2 (mask)
8608 < GET_MODE_PRECISION (GET_MODE (x)))
8609 && INTVAL (XEXP (XEXP (x, 0), 1)) < HOST_BITS_PER_WIDE_INT)
8611 temp = gen_int_mode (mask << INTVAL (XEXP (XEXP (x, 0), 1)),
8612 GET_MODE (x));
8613 temp = simplify_gen_binary (XOR, GET_MODE (x),
8614 XEXP (XEXP (x, 0), 0), temp);
8615 x = simplify_gen_binary (LSHIFTRT, GET_MODE (x),
8616 temp, XEXP (XEXP (x, 0), 1));
8618 return force_to_mode (x, mode, mask, next_select);
8621 /* (and (not FOO) CONST) is (not (or FOO (not CONST))), so we must
8622 use the full mask inside the NOT. */
8623 mask = fuller_mask;
8625 unop:
8626 op0 = gen_lowpart_or_truncate (op_mode,
8627 force_to_mode (XEXP (x, 0), mode, mask,
8628 next_select));
8629 if (op_mode != GET_MODE (x) || op0 != XEXP (x, 0))
8630 x = simplify_gen_unary (code, op_mode, op0, op_mode);
8631 break;
8633 case NE:
8634 /* (and (ne FOO 0) CONST) can be (and FOO CONST) if all bits of CONST are
8635 within STORE_FLAG_VALUE, FOO has a single bit that might be nonzero,
8636 and that bit's value equals STORE_FLAG_VALUE. */
8637 if ((mask & ~STORE_FLAG_VALUE) == 0
8638 && XEXP (x, 1) == const0_rtx
8639 && GET_MODE (XEXP (x, 0)) == mode
8640 && exact_log2 (nonzero_bits (XEXP (x, 0), mode)) >= 0
8641 && (nonzero_bits (XEXP (x, 0), mode)
8642 == (unsigned HOST_WIDE_INT) STORE_FLAG_VALUE))
8643 return force_to_mode (XEXP (x, 0), mode, mask, next_select);
8645 break;
8647 case IF_THEN_ELSE:
8648 /* We have no way of knowing if the IF_THEN_ELSE can itself be
8649 written in a narrower mode. We play it safe and do not do so. */
8651 SUBST (XEXP (x, 1),
8652 gen_lowpart_or_truncate (GET_MODE (x),
8653 force_to_mode (XEXP (x, 1), mode,
8654 mask, next_select)));
8655 SUBST (XEXP (x, 2),
8656 gen_lowpart_or_truncate (GET_MODE (x),
8657 force_to_mode (XEXP (x, 2), mode,
8658 mask, next_select)));
8659 break;
8661 default:
8662 break;
8665 /* Ensure we return a value of the proper mode. */
8666 return gen_lowpart_or_truncate (mode, x);
8669 /* Return nonzero if X is an expression that has one of two values depending on
8670 whether some other value is zero or nonzero. In that case, we return the
8671 value that is being tested, *PTRUE is set to the value of X when the rtx
8672 being returned is nonzero, and *PFALSE is set to the other alternative.
8674 If we return zero, we set *PTRUE and *PFALSE to X. */
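/* As a small illustration (register number chosen arbitrarily): given
   (ne (reg 100) (const_int 0)), the code below returns (reg 100) with
   *PTRUE set to const_true_rtx and *PFALSE set to const0_rtx, per the
   comparison-against-zero case handled first.  */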
8676 static rtx
8677 if_then_else_cond (rtx x, rtx *ptrue, rtx *pfalse)
8679 enum machine_mode mode = GET_MODE (x);
8680 enum rtx_code code = GET_CODE (x);
8681 rtx cond0, cond1, true0, true1, false0, false1;
8682 unsigned HOST_WIDE_INT nz;
8684 /* If we are comparing a value against zero, we are done. */
8685 if ((code == NE || code == EQ)
8686 && XEXP (x, 1) == const0_rtx)
8688 *ptrue = (code == NE) ? const_true_rtx : const0_rtx;
8689 *pfalse = (code == NE) ? const0_rtx : const_true_rtx;
8690 return XEXP (x, 0);
8693 /* If this is a unary operation whose operand has one of two values, apply
8694 our opcode to compute those values. */
8695 else if (UNARY_P (x)
8696 && (cond0 = if_then_else_cond (XEXP (x, 0), &true0, &false0)) != 0)
8698 *ptrue = simplify_gen_unary (code, mode, true0, GET_MODE (XEXP (x, 0)));
8699 *pfalse = simplify_gen_unary (code, mode, false0,
8700 GET_MODE (XEXP (x, 0)));
8701 return cond0;
8704 /* If this is a COMPARE, do nothing, since the IF_THEN_ELSE we would
8705 make can't possibly match and would suppress other optimizations. */
8706 else if (code == COMPARE)
8709 /* If this is a binary operation, see if either side has only one of two
8710 values. If either one does or if both do and they are conditional on
8711 the same value, compute the new true and false values. */
8712 else if (BINARY_P (x))
8714 cond0 = if_then_else_cond (XEXP (x, 0), &true0, &false0);
8715 cond1 = if_then_else_cond (XEXP (x, 1), &true1, &false1);
8717 if ((cond0 != 0 || cond1 != 0)
8718 && ! (cond0 != 0 && cond1 != 0 && ! rtx_equal_p (cond0, cond1)))
8720 /* If if_then_else_cond returned zero, then true/false are the
8721 same rtl. We must copy one of them to prevent invalid rtl
8722 sharing. */
8723 if (cond0 == 0)
8724 true0 = copy_rtx (true0);
8725 else if (cond1 == 0)
8726 true1 = copy_rtx (true1);
8728 if (COMPARISON_P (x))
8730 *ptrue = simplify_gen_relational (code, mode, VOIDmode,
8731 true0, true1);
8732 *pfalse = simplify_gen_relational (code, mode, VOIDmode,
8733 false0, false1);
8735 else
8737 *ptrue = simplify_gen_binary (code, mode, true0, true1);
8738 *pfalse = simplify_gen_binary (code, mode, false0, false1);
8741 return cond0 ? cond0 : cond1;
8744 /* See if we have PLUS, IOR, XOR, MINUS or UMAX, where one of the
8745 operands is zero when the other is nonzero, and vice-versa,
8746 and STORE_FLAG_VALUE is 1 or -1. */
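/* A typical source of this shape (purely illustrative) is an expanded
   conditional such as (plus (mult (ne A 0) X) (mult (eq A 0) Y)) with
   STORE_FLAG_VALUE == 1: exactly one of the two products is nonzero at a
   time, so the sum is X when (ne A 0) holds and Y otherwise.  */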
8748 if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
8749 && (code == PLUS || code == IOR || code == XOR || code == MINUS
8750 || code == UMAX)
8751 && GET_CODE (XEXP (x, 0)) == MULT && GET_CODE (XEXP (x, 1)) == MULT)
8753 rtx op0 = XEXP (XEXP (x, 0), 1);
8754 rtx op1 = XEXP (XEXP (x, 1), 1);
8756 cond0 = XEXP (XEXP (x, 0), 0);
8757 cond1 = XEXP (XEXP (x, 1), 0);
8759 if (COMPARISON_P (cond0)
8760 && COMPARISON_P (cond1)
8761 && ((GET_CODE (cond0) == reversed_comparison_code (cond1, NULL)
8762 && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 0))
8763 && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 1)))
8764 || ((swap_condition (GET_CODE (cond0))
8765 == reversed_comparison_code (cond1, NULL))
8766 && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 1))
8767 && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 0))))
8768 && ! side_effects_p (x))
8770 *ptrue = simplify_gen_binary (MULT, mode, op0, const_true_rtx);
8771 *pfalse = simplify_gen_binary (MULT, mode,
8772 (code == MINUS
8773 ? simplify_gen_unary (NEG, mode,
8774 op1, mode)
8775 : op1),
8776 const_true_rtx);
8777 return cond0;
8781 /* Similarly for MULT, AND and UMIN, except that for these the result
8782 is always zero. */
8783 if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
8784 && (code == MULT || code == AND || code == UMIN)
8785 && GET_CODE (XEXP (x, 0)) == MULT && GET_CODE (XEXP (x, 1)) == MULT)
8787 cond0 = XEXP (XEXP (x, 0), 0);
8788 cond1 = XEXP (XEXP (x, 1), 0);
8790 if (COMPARISON_P (cond0)
8791 && COMPARISON_P (cond1)
8792 && ((GET_CODE (cond0) == reversed_comparison_code (cond1, NULL)
8793 && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 0))
8794 && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 1)))
8795 || ((swap_condition (GET_CODE (cond0))
8796 == reversed_comparison_code (cond1, NULL))
8797 && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 1))
8798 && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 0))))
8799 && ! side_effects_p (x))
8801 *ptrue = *pfalse = const0_rtx;
8802 return cond0;
8807 else if (code == IF_THEN_ELSE)
8809 /* If we have IF_THEN_ELSE already, extract the condition and
8810 canonicalize it if it is NE or EQ. */
8811 cond0 = XEXP (x, 0);
8812 *ptrue = XEXP (x, 1), *pfalse = XEXP (x, 2);
8813 if (GET_CODE (cond0) == NE && XEXP (cond0, 1) == const0_rtx)
8814 return XEXP (cond0, 0);
8815 else if (GET_CODE (cond0) == EQ && XEXP (cond0, 1) == const0_rtx)
8817 *ptrue = XEXP (x, 2), *pfalse = XEXP (x, 1);
8818 return XEXP (cond0, 0);
8820 else
8821 return cond0;
8824 /* If X is a SUBREG, we can narrow both the true and false values
8825 of the inner expression, if there is a condition. */
8826 else if (code == SUBREG
8827 && 0 != (cond0 = if_then_else_cond (SUBREG_REG (x),
8828 &true0, &false0)))
8830 true0 = simplify_gen_subreg (mode, true0,
8831 GET_MODE (SUBREG_REG (x)), SUBREG_BYTE (x));
8832 false0 = simplify_gen_subreg (mode, false0,
8833 GET_MODE (SUBREG_REG (x)), SUBREG_BYTE (x));
8834 if (true0 && false0)
8836 *ptrue = true0;
8837 *pfalse = false0;
8838 return cond0;
8842 /* If X is a constant, this isn't special and will cause confusion
8843 if we treat it as such. Likewise if it is equivalent to a constant. */
8844 else if (CONSTANT_P (x)
8845 || ((cond0 = get_last_value (x)) != 0 && CONSTANT_P (cond0)))
8848 /* If we're in BImode, canonicalize on 0 and STORE_FLAG_VALUE, as that
8849 will be least confusing to the rest of the compiler. */
8850 else if (mode == BImode)
8852 *ptrue = GEN_INT (STORE_FLAG_VALUE), *pfalse = const0_rtx;
8853 return x;
8856 /* If X is known to be either 0 or -1, those are the true and
8857 false values when testing X. */
8858 else if (x == constm1_rtx || x == const0_rtx
8859 || (mode != VOIDmode
8860 && num_sign_bit_copies (x, mode) == GET_MODE_PRECISION (mode)))
8862 *ptrue = constm1_rtx, *pfalse = const0_rtx;
8863 return x;
8866 /* Likewise for 0 or a single bit. */
8867 else if (HWI_COMPUTABLE_MODE_P (mode)
8868 && exact_log2 (nz = nonzero_bits (x, mode)) >= 0)
8870 *ptrue = gen_int_mode (nz, mode), *pfalse = const0_rtx;
8871 return x;
8874 /* Otherwise fail; show no condition with true and false values the same. */
8875 *ptrue = *pfalse = x;
8876 return 0;
8879 /* Return the value of expression X given the fact that condition COND
8880 is known to be true when applied to REG as its first operand and VAL
8881 as its second. X is known to not be shared and so can be modified in
8882 place.
8884 We only handle the simplest cases, and specifically those cases that
8885 arise with IF_THEN_ELSE expressions. */
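/* For instance (an illustrative case, not an exhaustive list): if COND is
   GE, REG is (reg 100) and VAL is const0_rtx, i.e. we know (reg 100) >= 0,
   then X = (abs (reg 100)) can be replaced by (reg 100), as the ABS case
   below does.  */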
8887 static rtx
8888 known_cond (rtx x, enum rtx_code cond, rtx reg, rtx val)
8890 enum rtx_code code = GET_CODE (x);
8891 rtx temp;
8892 const char *fmt;
8893 int i, j;
8895 if (side_effects_p (x))
8896 return x;
8898 /* If either operand of the condition is a floating point value,
8899 then we have to avoid collapsing an EQ comparison. */
8900 if (cond == EQ
8901 && rtx_equal_p (x, reg)
8902 && ! FLOAT_MODE_P (GET_MODE (x))
8903 && ! FLOAT_MODE_P (GET_MODE (val)))
8904 return val;
8906 if (cond == UNEQ && rtx_equal_p (x, reg))
8907 return val;
8909 /* If X is (abs REG) and we know something about REG's relationship
8910 with zero, we may be able to simplify this. */
8912 if (code == ABS && rtx_equal_p (XEXP (x, 0), reg) && val == const0_rtx)
8913 switch (cond)
8915 case GE: case GT: case EQ:
8916 return XEXP (x, 0);
8917 case LT: case LE:
8918 return simplify_gen_unary (NEG, GET_MODE (XEXP (x, 0)),
8919 XEXP (x, 0),
8920 GET_MODE (XEXP (x, 0)));
8921 default:
8922 break;
8925 /* The only other cases we handle are MIN, MAX, and comparisons if the
8926 operands are the same as REG and VAL. */
8928 else if (COMPARISON_P (x) || COMMUTATIVE_ARITH_P (x))
8930 if (rtx_equal_p (XEXP (x, 0), val))
8931 cond = swap_condition (cond), temp = val, val = reg, reg = temp;
8933 if (rtx_equal_p (XEXP (x, 0), reg) && rtx_equal_p (XEXP (x, 1), val))
8935 if (COMPARISON_P (x))
8937 if (comparison_dominates_p (cond, code))
8938 return const_true_rtx;
8940 code = reversed_comparison_code (x, NULL);
8941 if (code != UNKNOWN
8942 && comparison_dominates_p (cond, code))
8943 return const0_rtx;
8944 else
8945 return x;
8947 else if (code == SMAX || code == SMIN
8948 || code == UMIN || code == UMAX)
8950 int unsignedp = (code == UMIN || code == UMAX);
8952 /* Do not reverse the condition when it is NE or EQ.
8953 This is because we cannot conclude anything about
8954 the value of 'SMAX (x, y)' when x is not equal to y,
8955 but we can when x equals y. */
8956 if ((code == SMAX || code == UMAX)
8957 && ! (cond == EQ || cond == NE))
8958 cond = reverse_condition (cond);
8960 switch (cond)
8962 case GE: case GT:
8963 return unsignedp ? x : XEXP (x, 1);
8964 case LE: case LT:
8965 return unsignedp ? x : XEXP (x, 0);
8966 case GEU: case GTU:
8967 return unsignedp ? XEXP (x, 1) : x;
8968 case LEU: case LTU:
8969 return unsignedp ? XEXP (x, 0) : x;
8970 default:
8971 break;
8976 else if (code == SUBREG)
8978 enum machine_mode inner_mode = GET_MODE (SUBREG_REG (x));
8979 rtx new_rtx, r = known_cond (SUBREG_REG (x), cond, reg, val);
8981 if (SUBREG_REG (x) != r)
8983 /* We must simplify subreg here, before we lose track of the
8984 original inner_mode. */
8985 new_rtx = simplify_subreg (GET_MODE (x), r,
8986 inner_mode, SUBREG_BYTE (x));
8987 if (new_rtx)
8988 return new_rtx;
8989 else
8990 SUBST (SUBREG_REG (x), r);
8993 return x;
8995 /* We don't have to handle SIGN_EXTEND here, because even in the
8996 case of replacing something with a modeless CONST_INT, a
8997 CONST_INT is already (supposed to be) a valid sign extension for
8998 its narrower mode, which implies it's already properly
8999 sign-extended for the wider mode. Now, for ZERO_EXTEND, the
9000 story is different. */
9001 else if (code == ZERO_EXTEND)
9003 enum machine_mode inner_mode = GET_MODE (XEXP (x, 0));
9004 rtx new_rtx, r = known_cond (XEXP (x, 0), cond, reg, val);
9006 if (XEXP (x, 0) != r)
9008 /* We must simplify the zero_extend here, before we lose
9009 track of the original inner_mode. */
9010 new_rtx = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
9011 r, inner_mode);
9012 if (new_rtx)
9013 return new_rtx;
9014 else
9015 SUBST (XEXP (x, 0), r);
9018 return x;
9021 fmt = GET_RTX_FORMAT (code);
9022 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
9024 if (fmt[i] == 'e')
9025 SUBST (XEXP (x, i), known_cond (XEXP (x, i), cond, reg, val));
9026 else if (fmt[i] == 'E')
9027 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
9028 SUBST (XVECEXP (x, i, j), known_cond (XVECEXP (x, i, j),
9029 cond, reg, val));
9032 return x;
9035 /* See if X and Y are equal for the purposes of seeing if we can rewrite an
9036 assignment as a field assignment. */
9038 static int
9039 rtx_equal_for_field_assignment_p (rtx x, rtx y)
9041 if (x == y || rtx_equal_p (x, y))
9042 return 1;
9044 if (x == 0 || y == 0 || GET_MODE (x) != GET_MODE (y))
9045 return 0;
9047 /* Check for a paradoxical SUBREG of a MEM compared with the MEM.
9048 Note that all SUBREGs of MEM are paradoxical; otherwise they
9049 would have been rewritten. */
9050 if (MEM_P (x) && GET_CODE (y) == SUBREG
9051 && MEM_P (SUBREG_REG (y))
9052 && rtx_equal_p (SUBREG_REG (y),
9053 gen_lowpart (GET_MODE (SUBREG_REG (y)), x)))
9054 return 1;
9056 if (MEM_P (y) && GET_CODE (x) == SUBREG
9057 && MEM_P (SUBREG_REG (x))
9058 && rtx_equal_p (SUBREG_REG (x),
9059 gen_lowpart (GET_MODE (SUBREG_REG (x)), y)))
9060 return 1;
9062 /* We used to see if get_last_value of X and Y were the same but that's
9063 not correct. In one direction, we'll cause the assignment to have
9064 the wrong destination and in the other case, we'll import a register into this
9065 insn that might already have been dead. So fail if none of the
9066 above cases are true. */
9067 return 0;
9070 /* See if X, a SET operation, can be rewritten as a bit-field assignment.
9071 Return that assignment if so.
9073 We only handle the most common cases. */
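/* One common case handled below, shown only as an illustration:
   (set A (ior (ashift (const_int 1) POS) A)) sets a single bit of A and
   typically becomes (set (zero_extract A (const_int 1) POS) (const_int 1)).  */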
9075 static rtx
9076 make_field_assignment (rtx x)
9078 rtx dest = SET_DEST (x);
9079 rtx src = SET_SRC (x);
9080 rtx assign;
9081 rtx rhs, lhs;
9082 HOST_WIDE_INT c1;
9083 HOST_WIDE_INT pos;
9084 unsigned HOST_WIDE_INT len;
9085 rtx other;
9086 enum machine_mode mode;
9088 /* If SRC was (and (not (ashift (const_int 1) POS)) DEST), this is
9089 a clear of a one-bit field. We will have changed it to
9090 (and (rotate (const_int -2) POS) DEST), so check for that. Also check
9091 for a SUBREG. */
9093 if (GET_CODE (src) == AND && GET_CODE (XEXP (src, 0)) == ROTATE
9094 && CONST_INT_P (XEXP (XEXP (src, 0), 0))
9095 && INTVAL (XEXP (XEXP (src, 0), 0)) == -2
9096 && rtx_equal_for_field_assignment_p (dest, XEXP (src, 1)))
9098 assign = make_extraction (VOIDmode, dest, 0, XEXP (XEXP (src, 0), 1),
9099 1, 1, 1, 0);
9100 if (assign != 0)
9101 return gen_rtx_SET (VOIDmode, assign, const0_rtx);
9102 return x;
9105 if (GET_CODE (src) == AND && GET_CODE (XEXP (src, 0)) == SUBREG
9106 && subreg_lowpart_p (XEXP (src, 0))
9107 && (GET_MODE_SIZE (GET_MODE (XEXP (src, 0)))
9108 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (XEXP (src, 0)))))
9109 && GET_CODE (SUBREG_REG (XEXP (src, 0))) == ROTATE
9110 && CONST_INT_P (XEXP (SUBREG_REG (XEXP (src, 0)), 0))
9111 && INTVAL (XEXP (SUBREG_REG (XEXP (src, 0)), 0)) == -2
9112 && rtx_equal_for_field_assignment_p (dest, XEXP (src, 1)))
9114 assign = make_extraction (VOIDmode, dest, 0,
9115 XEXP (SUBREG_REG (XEXP (src, 0)), 1),
9116 1, 1, 1, 0);
9117 if (assign != 0)
9118 return gen_rtx_SET (VOIDmode, assign, const0_rtx);
9119 return x;
9122 /* If SRC is (ior (ashift (const_int 1) POS) DEST), this is a set of a
9123 one-bit field. */
9124 if (GET_CODE (src) == IOR && GET_CODE (XEXP (src, 0)) == ASHIFT
9125 && XEXP (XEXP (src, 0), 0) == const1_rtx
9126 && rtx_equal_for_field_assignment_p (dest, XEXP (src, 1)))
9128 assign = make_extraction (VOIDmode, dest, 0, XEXP (XEXP (src, 0), 1),
9129 1, 1, 1, 0);
9130 if (assign != 0)
9131 return gen_rtx_SET (VOIDmode, assign, const1_rtx);
9132 return x;
9135 /* If DEST is already a field assignment, i.e. ZERO_EXTRACT, and the
9136 SRC is an AND with all bits of that field set, then we can discard
9137 the AND. */
9138 if (GET_CODE (dest) == ZERO_EXTRACT
9139 && CONST_INT_P (XEXP (dest, 1))
9140 && GET_CODE (src) == AND
9141 && CONST_INT_P (XEXP (src, 1)))
9143 HOST_WIDE_INT width = INTVAL (XEXP (dest, 1));
9144 unsigned HOST_WIDE_INT and_mask = INTVAL (XEXP (src, 1));
9145 unsigned HOST_WIDE_INT ze_mask;
9147 if (width >= HOST_BITS_PER_WIDE_INT)
9148 ze_mask = -1;
9149 else
9150 ze_mask = ((unsigned HOST_WIDE_INT)1 << width) - 1;
9152 /* Complete overlap. We can remove the source AND. */
9153 if ((and_mask & ze_mask) == ze_mask)
9154 return gen_rtx_SET (VOIDmode, dest, XEXP (src, 0));
9156 /* Partial overlap. We can reduce the source AND. */
9157 if ((and_mask & ze_mask) != and_mask)
9159 mode = GET_MODE (src);
9160 src = gen_rtx_AND (mode, XEXP (src, 0),
9161 gen_int_mode (and_mask & ze_mask, mode));
9162 return gen_rtx_SET (VOIDmode, dest, src);
9166 /* The other case we handle is assignments into a constant-position
9167 field. They look like (ior/xor (and DEST C1) OTHER). If C1 represents
9168 a mask that has all one bits except for a group of zero bits and
9169 OTHER is known to have zeros where C1 has ones, this is such an
9170 assignment. Compute the position and length from C1. Shift OTHER
9171 to the appropriate position, force it to the required mode, and
9172 make the extraction. Check for the AND in both operands. */
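/* Concretely (an illustrative SImode example): with C1 == 0xffffff0f, the
   zero bits of C1 describe a 4-bit field at position 4; if OTHER is known
   to be zero outside bits 4..7, then (ior (and DEST 0xffffff0f) OTHER)
   stores (lshiftrt OTHER 4) into that field of DEST.  */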
9174 if (GET_CODE (src) != IOR && GET_CODE (src) != XOR)
9175 return x;
9177 rhs = expand_compound_operation (XEXP (src, 0));
9178 lhs = expand_compound_operation (XEXP (src, 1));
9180 if (GET_CODE (rhs) == AND
9181 && CONST_INT_P (XEXP (rhs, 1))
9182 && rtx_equal_for_field_assignment_p (XEXP (rhs, 0), dest))
9183 c1 = INTVAL (XEXP (rhs, 1)), other = lhs;
9184 else if (GET_CODE (lhs) == AND
9185 && CONST_INT_P (XEXP (lhs, 1))
9186 && rtx_equal_for_field_assignment_p (XEXP (lhs, 0), dest))
9187 c1 = INTVAL (XEXP (lhs, 1)), other = rhs;
9188 else
9189 return x;
9191 pos = get_pos_from_mask ((~c1) & GET_MODE_MASK (GET_MODE (dest)), &len);
9192 if (pos < 0 || pos + len > GET_MODE_PRECISION (GET_MODE (dest))
9193 || GET_MODE_PRECISION (GET_MODE (dest)) > HOST_BITS_PER_WIDE_INT
9194 || (c1 & nonzero_bits (other, GET_MODE (dest))) != 0)
9195 return x;
9197 assign = make_extraction (VOIDmode, dest, pos, NULL_RTX, len, 1, 1, 0);
9198 if (assign == 0)
9199 return x;
9201 /* The mode to use for the source is the mode of the assignment, or of
9202 what is inside a possible STRICT_LOW_PART. */
9203 mode = (GET_CODE (assign) == STRICT_LOW_PART
9204 ? GET_MODE (XEXP (assign, 0)) : GET_MODE (assign));
9206 /* Shift OTHER right POS places and make it the source, restricting it
9207 to the proper length and mode. */
9209 src = canon_reg_for_combine (simplify_shift_const (NULL_RTX, LSHIFTRT,
9210 GET_MODE (src),
9211 other, pos),
9212 dest);
9213 src = force_to_mode (src, mode,
9214 GET_MODE_PRECISION (mode) >= HOST_BITS_PER_WIDE_INT
9215 ? ~(unsigned HOST_WIDE_INT) 0
9216 : ((unsigned HOST_WIDE_INT) 1 << len) - 1,
9219 /* If SRC is masked by an AND that does not make a difference in
9220 the value being stored, strip it. */
9221 if (GET_CODE (assign) == ZERO_EXTRACT
9222 && CONST_INT_P (XEXP (assign, 1))
9223 && INTVAL (XEXP (assign, 1)) < HOST_BITS_PER_WIDE_INT
9224 && GET_CODE (src) == AND
9225 && CONST_INT_P (XEXP (src, 1))
9226 && UINTVAL (XEXP (src, 1))
9227 == ((unsigned HOST_WIDE_INT) 1 << INTVAL (XEXP (assign, 1))) - 1)
9228 src = XEXP (src, 0);
9230 return gen_rtx_SET (VOIDmode, assign, src);
9233 /* See if X is of the form (+ (* a c) (* b c)) and convert to (* (+ a b) c)
9234 if so. */
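/* For example (purely illustrative): (ior (and A C) (and B C)) becomes
   (and (ior A B) C); if A and B happen to be complementary, the inner IOR
   may then collapse to a constant when simplified.  */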
9236 static rtx
9237 apply_distributive_law (rtx x)
9239 enum rtx_code code = GET_CODE (x);
9240 enum rtx_code inner_code;
9241 rtx lhs, rhs, other;
9242 rtx tem;
9244 /* Distributivity is not true for floating point as it can change the
9245 value. So we don't do it unless -funsafe-math-optimizations. */
9246 if (FLOAT_MODE_P (GET_MODE (x))
9247 && ! flag_unsafe_math_optimizations)
9248 return x;
9250 /* The outer operation can only be one of the following: */
9251 if (code != IOR && code != AND && code != XOR
9252 && code != PLUS && code != MINUS)
9253 return x;
9255 lhs = XEXP (x, 0);
9256 rhs = XEXP (x, 1);
9258 /* If either operand is a primitive we can't do anything, so get out
9259 fast. */
9260 if (OBJECT_P (lhs) || OBJECT_P (rhs))
9261 return x;
9263 lhs = expand_compound_operation (lhs);
9264 rhs = expand_compound_operation (rhs);
9265 inner_code = GET_CODE (lhs);
9266 if (inner_code != GET_CODE (rhs))
9267 return x;
9269 /* See if the inner and outer operations distribute. */
9270 switch (inner_code)
9272 case LSHIFTRT:
9273 case ASHIFTRT:
9274 case AND:
9275 case IOR:
9276 /* These all distribute except over PLUS. */
9277 if (code == PLUS || code == MINUS)
9278 return x;
9279 break;
9281 case MULT:
9282 if (code != PLUS && code != MINUS)
9283 return x;
9284 break;
9286 case ASHIFT:
9287 /* This is also a multiply, so it distributes over everything. */
9288 break;
9290 /* This used to handle SUBREG, but this turned out to be counter-
9291 productive, since (subreg (op ...)) usually is not handled by
9292 insn patterns, and this "optimization" therefore transformed
9293 recognizable patterns into unrecognizable ones. Therefore the
9294 SUBREG case was removed from here.
9296 It is possible that distributing SUBREG over arithmetic operations
9297 leads to an intermediate result that can then be optimized further,
9298 e.g. by moving the outer SUBREG to the other side of a SET as done
9299 in simplify_set. This seems to have been the original intent of
9300 handling SUBREGs here.
9302 However, with current GCC this does not appear to actually happen,
9303 at least on major platforms. If some case is found where removing
9304 the SUBREG case here prevents follow-on optimizations, distributing
9305 SUBREGs ought to be re-added at that place, e.g. in simplify_set. */
9307 default:
9308 return x;
9311 /* Set LHS and RHS to the inner operands (A and B in the example
9312 above) and set OTHER to the common operand (C in the example).
9313 There is only one way to do this unless the inner operation is
9314 commutative. */
9315 if (COMMUTATIVE_ARITH_P (lhs)
9316 && rtx_equal_p (XEXP (lhs, 0), XEXP (rhs, 0)))
9317 other = XEXP (lhs, 0), lhs = XEXP (lhs, 1), rhs = XEXP (rhs, 1);
9318 else if (COMMUTATIVE_ARITH_P (lhs)
9319 && rtx_equal_p (XEXP (lhs, 0), XEXP (rhs, 1)))
9320 other = XEXP (lhs, 0), lhs = XEXP (lhs, 1), rhs = XEXP (rhs, 0);
9321 else if (COMMUTATIVE_ARITH_P (lhs)
9322 && rtx_equal_p (XEXP (lhs, 1), XEXP (rhs, 0)))
9323 other = XEXP (lhs, 1), lhs = XEXP (lhs, 0), rhs = XEXP (rhs, 1);
9324 else if (rtx_equal_p (XEXP (lhs, 1), XEXP (rhs, 1)))
9325 other = XEXP (lhs, 1), lhs = XEXP (lhs, 0), rhs = XEXP (rhs, 0);
9326 else
9327 return x;
9329 /* Form the new inner operation, seeing if it simplifies first. */
9330 tem = simplify_gen_binary (code, GET_MODE (x), lhs, rhs);
9332 /* There is one exception to the general way of distributing:
9333 (a | c) ^ (b | c) -> (a ^ b) & ~c */
9334 if (code == XOR && inner_code == IOR)
9336 inner_code = AND;
9337 other = simplify_gen_unary (NOT, GET_MODE (x), other, GET_MODE (x));
9340 /* We may be able to continue distributing the result, so call
9341 ourselves recursively on the inner operation before forming the
9342 outer operation, which we return. */
9343 return simplify_gen_binary (inner_code, GET_MODE (x),
9344 apply_distributive_law (tem), other);
9347 /* See if X is of the form (* (+ A B) C), and if so convert to
9348 (+ (* A C) (* B C)) and try to simplify.
9350 Most of the time, this results in no change. However, if some of
9351 the operands are the same or inverses of each other, simplifications
9352 will result.
9354 For example, (and (ior A B) (not B)) can occur as the result of
9355 expanding a bit field assignment. When we apply the distributive
9356 law to this, we get (ior (and A (not B)) (and B (not B))),
9357 which then simplifies to (and A (not B)).
9359 Note that no checks happen on the validity of applying the inverse
9360 distributive law. This is pointless since we can do it in the
9361 few places where this routine is called.
9363 N is the index of the term that is decomposed (the arithmetic operation,
9364 i.e. (+ A B) in the first example above). !N is the index of the term that
9365 is distributed, i.e. of C in the first example above. */
9366 static rtx
9367 distribute_and_simplify_rtx (rtx x, int n)
9369 enum machine_mode mode;
9370 enum rtx_code outer_code, inner_code;
9371 rtx decomposed, distributed, inner_op0, inner_op1, new_op0, new_op1, tmp;
9373 /* Distributivity is not true for floating point as it can change the
9374 value. So we don't do it unless -funsafe-math-optimizations. */
9375 if (FLOAT_MODE_P (GET_MODE (x))
9376 && ! flag_unsafe_math_optimizations)
9377 return NULL_RTX;
9379 decomposed = XEXP (x, n);
9380 if (!ARITHMETIC_P (decomposed))
9381 return NULL_RTX;
9383 mode = GET_MODE (x);
9384 outer_code = GET_CODE (x);
9385 distributed = XEXP (x, !n);
9387 inner_code = GET_CODE (decomposed);
9388 inner_op0 = XEXP (decomposed, 0);
9389 inner_op1 = XEXP (decomposed, 1);
9391 /* Special case (and (xor B C) (not A)), which is equivalent to
9392 (xor (ior A B) (ior A C)) */
9393 if (outer_code == AND && inner_code == XOR && GET_CODE (distributed) == NOT)
9395 distributed = XEXP (distributed, 0);
9396 outer_code = IOR;
9399 if (n == 0)
9401 /* Distribute the second term. */
9402 new_op0 = simplify_gen_binary (outer_code, mode, inner_op0, distributed);
9403 new_op1 = simplify_gen_binary (outer_code, mode, inner_op1, distributed);
9405 else
9407 /* Distribute the first term. */
9408 new_op0 = simplify_gen_binary (outer_code, mode, distributed, inner_op0);
9409 new_op1 = simplify_gen_binary (outer_code, mode, distributed, inner_op1);
9412 tmp = apply_distributive_law (simplify_gen_binary (inner_code, mode,
9413 new_op0, new_op1));
9414 if (GET_CODE (tmp) != outer_code
9415 && (set_src_cost (tmp, optimize_this_for_speed_p)
9416 < set_src_cost (x, optimize_this_for_speed_p)))
9417 return tmp;
9419 return NULL_RTX;
9422 /* Simplify a logical `and' of VAROP with the constant CONSTOP, to be done
9423 in MODE. Return an equivalent form, if different from (and VAROP
9424 (const_int CONSTOP)). Otherwise, return NULL_RTX. */
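/* As an illustration of the cases below: if VAROP is (ior X Y), the AND is
   pushed into both arms, giving (ior (and X CONSTOP) (and Y CONSTOP)) before
   the distributive law is re-applied; if VAROP is a CONST_INT, the result is
   simply the masked constant.  */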
9426 static rtx
9427 simplify_and_const_int_1 (enum machine_mode mode, rtx varop,
9428 unsigned HOST_WIDE_INT constop)
9430 unsigned HOST_WIDE_INT nonzero;
9431 unsigned HOST_WIDE_INT orig_constop;
9432 rtx orig_varop;
9433 int i;
9435 orig_varop = varop;
9436 orig_constop = constop;
9437 if (GET_CODE (varop) == CLOBBER)
9438 return NULL_RTX;
9440 /* Simplify VAROP knowing that we will be only looking at some of the
9441 bits in it.
9443 Note by passing in CONSTOP, we guarantee that the bits not set in
9444 CONSTOP are not significant and will never be examined. We must
9445 ensure that is the case by explicitly masking out those bits
9446 before returning. */
9447 varop = force_to_mode (varop, mode, constop, 0);
9449 /* If VAROP is a CLOBBER, we will fail so return it. */
9450 if (GET_CODE (varop) == CLOBBER)
9451 return varop;
9453 /* If VAROP is a CONST_INT, then we need to apply the mask in CONSTOP
9454 to VAROP and return the new constant. */
9455 if (CONST_INT_P (varop))
9456 return gen_int_mode (INTVAL (varop) & constop, mode);
9458 /* See what bits may be nonzero in VAROP. Unlike the general case of
9459 a call to nonzero_bits, here we don't care about bits outside
9460 MODE. */
9462 nonzero = nonzero_bits (varop, mode) & GET_MODE_MASK (mode);
9464 /* Turn off all bits in the constant that are known to already be zero.
9465 Thus, if the AND isn't needed at all, we will have CONSTOP == NONZERO_BITS
9466 which is tested below. */
9468 constop &= nonzero;
9470 /* If we don't have any bits left, return zero. */
9471 if (constop == 0)
9472 return const0_rtx;
9474 /* If VAROP is a NEG of something known to be zero or 1 and CONSTOP is
9475 a power of two, we can replace this with an ASHIFT. */
9476 if (GET_CODE (varop) == NEG && nonzero_bits (XEXP (varop, 0), mode) == 1
9477 && (i = exact_log2 (constop)) >= 0)
9478 return simplify_shift_const (NULL_RTX, ASHIFT, mode, XEXP (varop, 0), i);
9480 /* If VAROP is an IOR or XOR, apply the AND to both branches of the IOR
9481 or XOR, then try to apply the distributive law. This may eliminate
9482 operations if either branch can be simplified because of the AND.
9483 It may also make some cases more complex, but those cases probably
9484 won't match a pattern either with or without this. */
9486 if (GET_CODE (varop) == IOR || GET_CODE (varop) == XOR)
9487 return
9488 gen_lowpart
9489 (mode,
9490 apply_distributive_law
9491 (simplify_gen_binary (GET_CODE (varop), GET_MODE (varop),
9492 simplify_and_const_int (NULL_RTX,
9493 GET_MODE (varop),
9494 XEXP (varop, 0),
9495 constop),
9496 simplify_and_const_int (NULL_RTX,
9497 GET_MODE (varop),
9498 XEXP (varop, 1),
9499 constop))));
9501 /* If VAROP is PLUS, and the constant is a mask of low bits, distribute
9502 the AND and see if one of the operands simplifies to zero. If so, we
9503 may eliminate it. */
9505 if (GET_CODE (varop) == PLUS
9506 && exact_log2 (constop + 1) >= 0)
9508 rtx o0, o1;
9510 o0 = simplify_and_const_int (NULL_RTX, mode, XEXP (varop, 0), constop);
9511 o1 = simplify_and_const_int (NULL_RTX, mode, XEXP (varop, 1), constop);
9512 if (o0 == const0_rtx)
9513 return o1;
9514 if (o1 == const0_rtx)
9515 return o0;
9518 /* Make a SUBREG if necessary. If we can't make it, fail. */
9519 varop = gen_lowpart (mode, varop);
9520 if (varop == NULL_RTX || GET_CODE (varop) == CLOBBER)
9521 return NULL_RTX;
9523 /* If we are only masking insignificant bits, return VAROP. */
9524 if (constop == nonzero)
9525 return varop;
9527 if (varop == orig_varop && constop == orig_constop)
9528 return NULL_RTX;
9530 /* Otherwise, return an AND. */
9531 return simplify_gen_binary (AND, mode, varop, gen_int_mode (constop, mode));
9535 /* We have X, a logical `and' of VAROP with the constant CONSTOP, to be done
9536 in MODE.
9538 Return an equivalent form, if different from X. Otherwise, return X. If
9539 X is zero, we are to always construct the equivalent form. */
9541 static rtx
9542 simplify_and_const_int (rtx x, enum machine_mode mode, rtx varop,
9543 unsigned HOST_WIDE_INT constop)
9545 rtx tem = simplify_and_const_int_1 (mode, varop, constop);
9546 if (tem)
9547 return tem;
9549 if (!x)
9550 x = simplify_gen_binary (AND, GET_MODE (varop), varop,
9551 gen_int_mode (constop, mode));
9552 if (GET_MODE (x) != mode)
9553 x = gen_lowpart (mode, x);
9554 return x;
9557 /* Given a REG, X, compute which bits in X can be nonzero.
9558 We don't care about bits outside of those defined in MODE.
9560 For most X this is simply GET_MODE_MASK (GET_MODE (X)), but if X is
9561 a shift, AND, or zero_extract, we can do better. */
9563 static rtx
9564 reg_nonzero_bits_for_combine (const_rtx x, enum machine_mode mode,
9565 const_rtx known_x ATTRIBUTE_UNUSED,
9566 enum machine_mode known_mode ATTRIBUTE_UNUSED,
9567 unsigned HOST_WIDE_INT known_ret ATTRIBUTE_UNUSED,
9568 unsigned HOST_WIDE_INT *nonzero)
9570 rtx tem;
9571 reg_stat_type *rsp;
9573 /* If X is a register whose nonzero bits value is current, use it.
9574 Otherwise, if X is a register whose value we can find, use that
9575 value. Otherwise, use the previously-computed global nonzero bits
9576 for this register. */
9578 rsp = VEC_index (reg_stat_type, reg_stat, REGNO (x));
9579 if (rsp->last_set_value != 0
9580 && (rsp->last_set_mode == mode
9581 || (GET_MODE_CLASS (rsp->last_set_mode) == MODE_INT
9582 && GET_MODE_CLASS (mode) == MODE_INT))
9583 && ((rsp->last_set_label >= label_tick_ebb_start
9584 && rsp->last_set_label < label_tick)
9585 || (rsp->last_set_label == label_tick
9586 && DF_INSN_LUID (rsp->last_set) < subst_low_luid)
9587 || (REGNO (x) >= FIRST_PSEUDO_REGISTER
9588 && REG_N_SETS (REGNO (x)) == 1
9589 && !REGNO_REG_SET_P
9590 (DF_LR_IN (ENTRY_BLOCK_PTR->next_bb), REGNO (x)))))
9592 *nonzero &= rsp->last_set_nonzero_bits;
9593 return NULL;
9596 tem = get_last_value (x);
9598 if (tem)
9600 #ifdef SHORT_IMMEDIATES_SIGN_EXTEND
9601 /* If X is narrower than MODE and TEM is a non-negative
9602 constant that would appear negative in the mode of X,
9603 sign-extend it for use in reg_nonzero_bits because some
9604 machines (maybe most) will actually do the sign-extension
9605 and this is the conservative approach.
9607 ??? For 2.5, try to tighten up the MD files in this regard
9608 instead of this kludge. */
9610 if (GET_MODE_PRECISION (GET_MODE (x)) < GET_MODE_PRECISION (mode)
9611 && CONST_INT_P (tem)
9612 && INTVAL (tem) > 0
9613 && val_signbit_known_set_p (GET_MODE (x), INTVAL (tem)))
9614 tem = GEN_INT (INTVAL (tem) | ~GET_MODE_MASK (GET_MODE (x)));
9615 #endif
9616 return tem;
9618 else if (nonzero_sign_valid && rsp->nonzero_bits)
9620 unsigned HOST_WIDE_INT mask = rsp->nonzero_bits;
9622 if (GET_MODE_PRECISION (GET_MODE (x)) < GET_MODE_PRECISION (mode))
9623 /* We don't know anything about the upper bits. */
9624 mask |= GET_MODE_MASK (mode) ^ GET_MODE_MASK (GET_MODE (x));
9625 *nonzero &= mask;
9628 return NULL;
9631 /* Return the number of bits at the high-order end of X that are known to
9632 be equal to the sign bit. X will be used in mode MODE; if MODE is
9633 VOIDmode, X will be used in its own mode. The returned value will always
9634 be between 1 and the number of bits in MODE. */
9636 static rtx
9637 reg_num_sign_bit_copies_for_combine (const_rtx x, enum machine_mode mode,
9638 const_rtx known_x ATTRIBUTE_UNUSED,
9639 enum machine_mode known_mode
9640 ATTRIBUTE_UNUSED,
9641 unsigned int known_ret ATTRIBUTE_UNUSED,
9642 unsigned int *result)
9644 rtx tem;
9645 reg_stat_type *rsp;
9647 rsp = VEC_index (reg_stat_type, reg_stat, REGNO (x));
9648 if (rsp->last_set_value != 0
9649 && rsp->last_set_mode == mode
9650 && ((rsp->last_set_label >= label_tick_ebb_start
9651 && rsp->last_set_label < label_tick)
9652 || (rsp->last_set_label == label_tick
9653 && DF_INSN_LUID (rsp->last_set) < subst_low_luid)
9654 || (REGNO (x) >= FIRST_PSEUDO_REGISTER
9655 && REG_N_SETS (REGNO (x)) == 1
9656 && !REGNO_REG_SET_P
9657 (DF_LR_IN (ENTRY_BLOCK_PTR->next_bb), REGNO (x)))))
9659 *result = rsp->last_set_sign_bit_copies;
9660 return NULL;
9663 tem = get_last_value (x);
9664 if (tem != 0)
9665 return tem;
9667 if (nonzero_sign_valid && rsp->sign_bit_copies != 0
9668 && GET_MODE_PRECISION (GET_MODE (x)) == GET_MODE_PRECISION (mode))
9669 *result = rsp->sign_bit_copies;
9671 return NULL;
9674 /* Return the number of "extended" bits there are in X, when interpreted
9675 as a quantity in MODE whose signedness is indicated by UNSIGNEDP. For
9676 unsigned quantities, this is the number of high-order zero bits.
9677 For signed quantities, this is the number of copies of the sign bit
9678 minus 1. In both cases, this function returns the number of "spare"
9679 bits. For example, if two quantities for which this function returns
9680 at least 1 are added, the addition is known not to overflow.
9682 This function will always return 0 unless called during combine, which
9683 implies that it must be called from a define_split. */
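/* A worked instance (numbers chosen for illustration): in a 32-bit mode,
   if nonzero_bits (X, MODE) == 0xff, the unsigned case below returns
   32 - 1 - floor_log2 (0xff) == 24, i.e. 24 spare high-order zero bits.  */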
9685 unsigned int
9686 extended_count (const_rtx x, enum machine_mode mode, int unsignedp)
9688 if (nonzero_sign_valid == 0)
9689 return 0;
9691 return (unsignedp
9692 ? (HWI_COMPUTABLE_MODE_P (mode)
9693 ? (unsigned int) (GET_MODE_PRECISION (mode) - 1
9694 - floor_log2 (nonzero_bits (x, mode)))
9695 : 0)
9696 : num_sign_bit_copies (x, mode) - 1);
9699 /* This function is called from `simplify_shift_const' to merge two
9700 outer operations. Specifically, we have already found that we need
9701 to perform operation *POP0 with constant *PCONST0 at the outermost
9702 position. We would now like to also perform OP1 with constant CONST1
9703 (with *POP0 being done last).
9705 Return 1 if we can do the operation and update *POP0 and *PCONST0 with
9706 the resulting operation. *PCOMP_P is set to 1 if we would need to
9707 complement the innermost operand, otherwise it is unchanged.
9709 MODE is the mode in which the operation will be done. No bits outside
9710 the width of this mode matter. It is assumed that the width of this mode
9711 is smaller than or equal to HOST_BITS_PER_WIDE_INT.
9713 If *POP0 or OP1 is UNKNOWN, it means no operation is required. Only NEG, PLUS,
9714 IOR, XOR, and AND are supported. We may set *POP0 to SET if the proper
9715 result is simply *PCONST0.
9717 If the resulting operation cannot be expressed as one operation, we
9718 return 0 and do not change *POP0, *PCONST0, and *PCOMP_P. */
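/* For example (the simplest merge below): if *POP0 is IOR with
   *PCONST0 == 0xf0 and OP1 is also IOR with CONST1 == 0x0f, the two
   operations merge into a single IOR with constant 0xff.  */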
9720 static int
9721 merge_outer_ops (enum rtx_code *pop0, HOST_WIDE_INT *pconst0, enum rtx_code op1, HOST_WIDE_INT const1, enum machine_mode mode, int *pcomp_p)
9723 enum rtx_code op0 = *pop0;
9724 HOST_WIDE_INT const0 = *pconst0;
9726 const0 &= GET_MODE_MASK (mode);
9727 const1 &= GET_MODE_MASK (mode);
9729 /* If OP0 is an AND, clear unimportant bits in CONST1. */
9730 if (op0 == AND)
9731 const1 &= const0;
9733 /* If OP0 or OP1 is UNKNOWN, this is easy. Similarly if they are the same or
9734 if OP0 is SET. */
9736 if (op1 == UNKNOWN || op0 == SET)
9737 return 1;
9739 else if (op0 == UNKNOWN)
9740 op0 = op1, const0 = const1;
9742 else if (op0 == op1)
9744 switch (op0)
9746 case AND:
9747 const0 &= const1;
9748 break;
9749 case IOR:
9750 const0 |= const1;
9751 break;
9752 case XOR:
9753 const0 ^= const1;
9754 break;
9755 case PLUS:
9756 const0 += const1;
9757 break;
9758 case NEG:
9759 op0 = UNKNOWN;
9760 break;
9761 default:
9762 break;
9766 /* Otherwise, if either is a PLUS or NEG, we can't do anything. */
9767 else if (op0 == PLUS || op1 == PLUS || op0 == NEG || op1 == NEG)
9768 return 0;
9770 /* If the two constants aren't the same, we can't do anything. The
9771 remaining six cases can all be done. */
9772 else if (const0 != const1)
9773 return 0;
9775 else
9776 switch (op0)
9778 case IOR:
9779 if (op1 == AND)
9780 /* (a & b) | b == b */
9781 op0 = SET;
9782 else /* op1 == XOR */
9783 /* (a ^ b) | b == a | b */
9785 break;
9787 case XOR:
9788 if (op1 == AND)
9789 /* (a & b) ^ b == (~a) & b */
9790 op0 = AND, *pcomp_p = 1;
9791 else /* op1 == IOR */
9792 /* (a | b) ^ b == a & ~b */
9793 op0 = AND, const0 = ~const0;
9794 break;
9796 case AND:
9797 if (op1 == IOR)
9798 /* (a | b) & b == b */
9799 op0 = SET;
9800 else /* op1 == XOR */
9801 /* (a ^ b) & b == (~a) & b */
9802 *pcomp_p = 1;
9803 break;
9804 default:
9805 break;
9808 /* Check for NO-OP cases. */
9809 const0 &= GET_MODE_MASK (mode);
9810 if (const0 == 0
9811 && (op0 == IOR || op0 == XOR || op0 == PLUS))
9812 op0 = UNKNOWN;
9813 else if (const0 == 0 && op0 == AND)
9814 op0 = SET;
9815 else if ((unsigned HOST_WIDE_INT) const0 == GET_MODE_MASK (mode)
9816 && op0 == AND)
9817 op0 = UNKNOWN;
9819 *pop0 = op0;
9821 /* ??? Slightly redundant with the above mask, but not entirely.
9822 Moving this above means we'd have to sign-extend the mode mask
9823 for the final test. */
9824 if (op0 != UNKNOWN && op0 != NEG)
9825 *pconst0 = trunc_int_for_mode (const0, mode);
9827 return 1;
9830 /* A helper to simplify_shift_const_1 to determine the mode we can perform
9831 the shift in. The original shift operation CODE is performed on OP in
9832 ORIG_MODE. Return the wider mode MODE if we can perform the operation
9833 in that mode. Return ORIG_MODE otherwise. We can also assume that the
9834 result of the shift is subject to operation OUTER_CODE with operand
9835 OUTER_CONST. */
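/* For instance (modes chosen for illustration): an LSHIFTRT performed in
   QImode can be widened to SImode when nonzero_bits shows OP has no bits
   set outside the QImode mask, since the bits shifted in from above are
   then known to be zero either way.  */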
9837 static enum machine_mode
9838 try_widen_shift_mode (enum rtx_code code, rtx op, int count,
9839 enum machine_mode orig_mode, enum machine_mode mode,
9840 enum rtx_code outer_code, HOST_WIDE_INT outer_const)
9842 if (orig_mode == mode)
9843 return mode;
9844 gcc_assert (GET_MODE_PRECISION (mode) > GET_MODE_PRECISION (orig_mode));
9846 /* In general we can't perform the shift in a wider mode for right shifts and rotates. */
9847 switch (code)
9849 case ASHIFTRT:
9850 /* We can still widen if the bits brought in from the left are identical
9851 to the sign bit of ORIG_MODE. */
9852 if (num_sign_bit_copies (op, mode)
9853 > (unsigned) (GET_MODE_PRECISION (mode)
9854 - GET_MODE_PRECISION (orig_mode)))
9855 return mode;
9856 return orig_mode;
9858 case LSHIFTRT:
9859 /* Similarly here but with zero bits. */
9860 if (HWI_COMPUTABLE_MODE_P (mode)
9861 && (nonzero_bits (op, mode) & ~GET_MODE_MASK (orig_mode)) == 0)
9862 return mode;
9864 /* We can also widen if the bits brought in will be masked off. This
9865 operation is performed in ORIG_MODE. */
9866 if (outer_code == AND)
9868 int care_bits = low_bitmask_len (orig_mode, outer_const);
9870 if (care_bits >= 0
9871 && GET_MODE_PRECISION (orig_mode) - care_bits >= count)
9872 return mode;
9874 /* fall through */
9876 case ROTATE:
9877 return orig_mode;
9879 case ROTATERT:
9880 gcc_unreachable ();
9882 default:
9883 return mode;
9887 /* Simplify a shift of VAROP by ORIG_COUNT bits. CODE says what kind
9888 of shift. The result of the shift is RESULT_MODE. Return NULL_RTX
9889 if we cannot simplify it. Otherwise, return a simplified value.
9891 The shift is normally computed in the widest mode we find in VAROP, as
9892 long as it isn't a different number of words than RESULT_MODE. Exceptions
9893 are ASHIFTRT and ROTATE, which are always done in their original mode. */
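/* A small worked example (illustrative only): in a 32-bit mode,
   (lshiftrt (ashift X 3) 3) is handled by the nested-shift case below and
   comes out as X with an outer AND of 0x1fffffff, i.e. the top three bits
   cleared.  */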
9895 static rtx
9896 simplify_shift_const_1 (enum rtx_code code, enum machine_mode result_mode,
9897 rtx varop, int orig_count)
9899 enum rtx_code orig_code = code;
9900 rtx orig_varop = varop;
9901 int count;
9902 enum machine_mode mode = result_mode;
9903 enum machine_mode shift_mode, tmode;
9904 unsigned int mode_words
9905 = (GET_MODE_SIZE (mode) + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD;
9906 /* We form (outer_op (code varop count) (outer_const)). */
9907 enum rtx_code outer_op = UNKNOWN;
9908 HOST_WIDE_INT outer_const = 0;
9909 int complement_p = 0;
9910 rtx new_rtx, x;
9912 /* Make sure to truncate the "natural" shift on the way in. We don't
9913 want to do this inside the loop as it makes it more difficult to
9914 combine shifts. */
9915 if (SHIFT_COUNT_TRUNCATED)
9916 orig_count &= GET_MODE_BITSIZE (mode) - 1;
9918 /* If we were given an invalid count, don't do anything except exactly
9919 what was requested. */
9921 if (orig_count < 0 || orig_count >= (int) GET_MODE_PRECISION (mode))
9922 return NULL_RTX;
9924 count = orig_count;
9926 /* Unless one of the branches of the `if' in this loop does a `continue',
9927 we will `break' the loop after the `if'. */
9929 while (count != 0)
9931 /* If we have an operand of (clobber (const_int 0)), fail. */
9932 if (GET_CODE (varop) == CLOBBER)
9933 return NULL_RTX;
9935 /* Convert ROTATERT to ROTATE. */
9936 if (code == ROTATERT)
9938 unsigned int bitsize = GET_MODE_PRECISION (result_mode);
9939 code = ROTATE;
9940 if (VECTOR_MODE_P (result_mode))
9941 count = bitsize / GET_MODE_NUNITS (result_mode) - count;
9942 else
9943 count = bitsize - count;
9946 shift_mode = try_widen_shift_mode (code, varop, count, result_mode,
9947 mode, outer_op, outer_const);
9949 /* Handle cases where the count is greater than the size of the mode
9950 minus 1. For ASHIFT, use the size minus one as the count (this can
9951 occur when simplifying (lshiftrt (ashiftrt ..))). For rotates,
9952 take the count modulo the size. For other shifts, the result is
9953 zero.
9955 Since these shifts are being produced by the compiler by combining
9956 multiple operations, each of which are defined, we know what the
9957 result is supposed to be. */
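/* E.g. (illustrative, 32-bit mode): an out-of-range (ashiftrt X 40) is
   treated as (ashiftrt X 31), (rotate X 40) becomes (rotate X 8), and an
   out-of-range logical shift leaves only the outer operation applied to
   zero.  */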
9959 if (count > (GET_MODE_PRECISION (shift_mode) - 1))
9961 if (code == ASHIFTRT)
9962 count = GET_MODE_PRECISION (shift_mode) - 1;
9963 else if (code == ROTATE || code == ROTATERT)
9964 count %= GET_MODE_PRECISION (shift_mode);
9965 else
9967 /* We can't simply return zero because there may be an
9968 outer op. */
9969 varop = const0_rtx;
9970 count = 0;
9971 break;
9975 /* If we discovered we had to complement VAROP, leave. Making a NOT
9976 here would cause an infinite loop. */
9977 if (complement_p)
9978 break;
9980 /* An arithmetic right shift of a quantity known to be -1 or 0
9981 is a no-op. */
9982 if (code == ASHIFTRT
9983 && (num_sign_bit_copies (varop, shift_mode)
9984 == GET_MODE_PRECISION (shift_mode)))
9986 count = 0;
9987 break;
9990 /* If we are doing an arithmetic right shift and discarding all but
9991 the sign bit copies, this is equivalent to doing a shift by the
9992 bitsize minus one. Convert it into that shift because it will often
9993 allow other simplifications. */
9995 if (code == ASHIFTRT
9996 && (count + num_sign_bit_copies (varop, shift_mode)
9997 >= GET_MODE_PRECISION (shift_mode)))
9998 count = GET_MODE_PRECISION (shift_mode) - 1;
10000 /* We simplify the tests below and elsewhere by converting
10001 ASHIFTRT to LSHIFTRT if we know the sign bit is clear.
10002 `make_compound_operation' will convert it to an ASHIFTRT for
10003 those machines (such as VAX) that don't have an LSHIFTRT. */
10004 if (code == ASHIFTRT
10005 && val_signbit_known_clear_p (shift_mode,
10006 nonzero_bits (varop, shift_mode)))
10007 code = LSHIFTRT;
10009 if (((code == LSHIFTRT
10010 && HWI_COMPUTABLE_MODE_P (shift_mode)
10011 && !(nonzero_bits (varop, shift_mode) >> count))
10012 || (code == ASHIFT
10013 && HWI_COMPUTABLE_MODE_P (shift_mode)
10014 && !((nonzero_bits (varop, shift_mode) << count)
10015 & GET_MODE_MASK (shift_mode))))
10016 && !side_effects_p (varop))
10017 varop = const0_rtx;
10019 switch (GET_CODE (varop))
10021 case SIGN_EXTEND:
10022 case ZERO_EXTEND:
10023 case SIGN_EXTRACT:
10024 case ZERO_EXTRACT:
10025 new_rtx = expand_compound_operation (varop);
10026 if (new_rtx != varop)
10028 varop = new_rtx;
10029 continue;
10031 break;
10033 case MEM:
10034 /* If we have (xshiftrt (mem ...) C) and C is MODE_WIDTH
10035 minus the width of a smaller mode, we can do this with a
10036 SIGN_EXTEND or ZERO_EXTEND from the narrower memory location. */
10037 if ((code == ASHIFTRT || code == LSHIFTRT)
10038 && ! mode_dependent_address_p (XEXP (varop, 0))
10039 && ! MEM_VOLATILE_P (varop)
10040 && (tmode = mode_for_size (GET_MODE_BITSIZE (mode) - count,
10041 MODE_INT, 1)) != BLKmode)
10043 new_rtx = adjust_address_nv (varop, tmode,
10044 BYTES_BIG_ENDIAN ? 0
10045 : count / BITS_PER_UNIT);
10047 varop = gen_rtx_fmt_e (code == ASHIFTRT ? SIGN_EXTEND
10048 : ZERO_EXTEND, mode, new_rtx);
10049 count = 0;
10050 continue;
10052 break;
10054 case SUBREG:
10055 /* If VAROP is a SUBREG, strip it as long as the inner operand has
10056 the same number of words as what we've seen so far. Then store
10057 the widest mode in MODE. */
10058 if (subreg_lowpart_p (varop)
10059 && (GET_MODE_SIZE (GET_MODE (SUBREG_REG (varop)))
10060 > GET_MODE_SIZE (GET_MODE (varop)))
10061 && (unsigned int) ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (varop)))
10062 + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)
10063 == mode_words
10064 && GET_MODE_CLASS (GET_MODE (varop)) == MODE_INT
10065 && GET_MODE_CLASS (GET_MODE (SUBREG_REG (varop))) == MODE_INT)
10067 varop = SUBREG_REG (varop);
10068 if (GET_MODE_SIZE (GET_MODE (varop)) > GET_MODE_SIZE (mode))
10069 mode = GET_MODE (varop);
10070 continue;
10072 break;
10074 case MULT:
10075 /* Some machines use MULT instead of ASHIFT because MULT
10076 is cheaper. But it is still better on those machines to
10077 merge two shifts into one. */
10078 if (CONST_INT_P (XEXP (varop, 1))
10079 && exact_log2 (UINTVAL (XEXP (varop, 1))) >= 0)
10081 varop
10082 = simplify_gen_binary (ASHIFT, GET_MODE (varop),
10083 XEXP (varop, 0),
10084 GEN_INT (exact_log2 (
10085 UINTVAL (XEXP (varop, 1)))));
10086 continue;
10088 break;
10090 case UDIV:
10091 /* Similar, for when divides are cheaper. */
10092 if (CONST_INT_P (XEXP (varop, 1))
10093 && exact_log2 (UINTVAL (XEXP (varop, 1))) >= 0)
10095 varop
10096 = simplify_gen_binary (LSHIFTRT, GET_MODE (varop),
10097 XEXP (varop, 0),
10098 GEN_INT (exact_log2 (
10099 UINTVAL (XEXP (varop, 1)))));
10100 continue;
10102 break;
10104 case ASHIFTRT:
10105 /* If we are extracting just the sign bit of an arithmetic
10106 right shift, that shift is not needed. However, the sign
10107 bit of a wider mode may be different from what would be
10108 interpreted as the sign bit in a narrower mode, so, if
10109 the result is narrower, don't discard the shift. */
10110 if (code == LSHIFTRT
10111 && count == (GET_MODE_BITSIZE (result_mode) - 1)
10112 && (GET_MODE_BITSIZE (result_mode)
10113 >= GET_MODE_BITSIZE (GET_MODE (varop))))
10115 varop = XEXP (varop, 0);
10116 continue;
10119 /* ... fall through ... */
10121 case LSHIFTRT:
10122 case ASHIFT:
10123 case ROTATE:
10124 /* Here we have two nested shifts. The result is usually the
10125 AND of a new shift with a mask. We compute the result below. */
10126 if (CONST_INT_P (XEXP (varop, 1))
10127 && INTVAL (XEXP (varop, 1)) >= 0
10128 && INTVAL (XEXP (varop, 1)) < GET_MODE_PRECISION (GET_MODE (varop))
10129 && HWI_COMPUTABLE_MODE_P (result_mode)
10130 && HWI_COMPUTABLE_MODE_P (mode)
10131 && !VECTOR_MODE_P (result_mode))
10133 enum rtx_code first_code = GET_CODE (varop);
10134 unsigned int first_count = INTVAL (XEXP (varop, 1));
10135 unsigned HOST_WIDE_INT mask;
10136 rtx mask_rtx;
10138 /* We have one common special case. We can't do any merging if
10139 the inner code is an ASHIFTRT of a smaller mode. However, if
10140 we have (ashift:M1 (subreg:M1 (ashiftrt:M2 FOO C1) 0) C2)
10141 with C2 == GET_MODE_BITSIZE (M1) - GET_MODE_BITSIZE (M2),
10142 we can convert it to
10143 (ashiftrt:M1 (ashift:M1 (and:M1 (subreg:M1 FOO 0) C3) C2) C1).
10144 This simplifies certain SIGN_EXTEND operations. */
10145 if (code == ASHIFT && first_code == ASHIFTRT
10146 && count == (GET_MODE_PRECISION (result_mode)
10147 - GET_MODE_PRECISION (GET_MODE (varop))))
10149 /* C3 has the low-order C1 bits zero. */
10151 mask = GET_MODE_MASK (mode)
10152 & ~(((unsigned HOST_WIDE_INT) 1 << first_count) - 1);
10154 varop = simplify_and_const_int (NULL_RTX, result_mode,
10155 XEXP (varop, 0), mask);
10156 varop = simplify_shift_const (NULL_RTX, ASHIFT, result_mode,
10157 varop, count);
10158 count = first_count;
10159 code = ASHIFTRT;
10160 continue;
10163 /* If this was (ashiftrt (ashift foo C1) C2) and FOO has more
10164 than C1 high-order bits equal to the sign bit, we can convert
10165 this to either an ASHIFT or an ASHIFTRT depending on the
10166 two counts.
10168 We cannot do this if VAROP's mode is not SHIFT_MODE. */
10170 if (code == ASHIFTRT && first_code == ASHIFT
10171 && GET_MODE (varop) == shift_mode
10172 && (num_sign_bit_copies (XEXP (varop, 0), shift_mode)
10173 > first_count))
10175 varop = XEXP (varop, 0);
10176 count -= first_count;
10177 if (count < 0)
10179 count = -count;
10180 code = ASHIFT;
10183 continue;
10186 /* There are some cases we can't do. If CODE is ASHIFTRT,
10187 we can only do this if FIRST_CODE is also ASHIFTRT.
10189 We can't do the case when CODE is ROTATE and FIRST_CODE is
10190 ASHIFTRT.
10192 If the mode of this shift is not the mode of the outer shift,
10193 we can't do this if either shift is a right shift or ROTATE.
10195 Finally, we can't do any of these if the mode is too wide
10196 unless the codes are the same.
10198 Handle the case where the shift codes are the same
10199 first. */
10201 if (code == first_code)
10203 if (GET_MODE (varop) != result_mode
10204 && (code == ASHIFTRT || code == LSHIFTRT
10205 || code == ROTATE))
10206 break;
10208 count += first_count;
10209 varop = XEXP (varop, 0);
10210 continue;
10213 if (code == ASHIFTRT
10214 || (code == ROTATE && first_code == ASHIFTRT)
10215 || GET_MODE_PRECISION (mode) > HOST_BITS_PER_WIDE_INT
10216 || (GET_MODE (varop) != result_mode
10217 && (first_code == ASHIFTRT || first_code == LSHIFTRT
10218 || first_code == ROTATE
10219 || code == ROTATE)))
10220 break;
10222 /* To compute the mask to apply after the shift, shift the
10223 nonzero bits of the inner shift the same way the
10224 outer shift will. */
10226 mask_rtx = GEN_INT (nonzero_bits (varop, GET_MODE (varop)));
10228 mask_rtx
10229 = simplify_const_binary_operation (code, result_mode, mask_rtx,
10230 GEN_INT (count));
10232 /* Give up if we can't compute an outer operation to use. */
10233 if (mask_rtx == 0
10234 || !CONST_INT_P (mask_rtx)
10235 || ! merge_outer_ops (&outer_op, &outer_const, AND,
10236 INTVAL (mask_rtx),
10237 result_mode, &complement_p))
10238 break;
10240 /* If the shifts are in the same direction, we add the
10241 counts. Otherwise, we subtract them. */
10242 if ((code == ASHIFTRT || code == LSHIFTRT)
10243 == (first_code == ASHIFTRT || first_code == LSHIFTRT))
10244 count += first_count;
10245 else
10246 count -= first_count;
10248 /* If COUNT is positive, the new shift is usually CODE,
10249 except for the two exceptions below, in which case it is
10250 FIRST_CODE. If the count is negative, FIRST_CODE should
10251 always be used. */
10252 if (count > 0
10253 && ((first_code == ROTATE && code == ASHIFT)
10254 || (first_code == ASHIFTRT && code == LSHIFTRT)))
10255 code = first_code;
10256 else if (count < 0)
10257 code = first_code, count = -count;
10259 varop = XEXP (varop, 0);
10260 continue;
10263 /* If we have (A << B << C) for any shift, we can convert this to
10264 (A << C << B). This wins if A is a constant. Only try this if
10265 B is not a constant. */
10267 else if (GET_CODE (varop) == code
10268 && CONST_INT_P (XEXP (varop, 0))
10269 && !CONST_INT_P (XEXP (varop, 1)))
10271 rtx new_rtx = simplify_const_binary_operation (code, mode,
10272 XEXP (varop, 0),
10273 GEN_INT (count));
10274 varop = gen_rtx_fmt_ee (code, mode, new_rtx, XEXP (varop, 1));
10275 count = 0;
10276 continue;
10278 break;
10280 case NOT:
10281 if (VECTOR_MODE_P (mode))
10282 break;
10284 /* Make this fit the case below. */
10285 varop = gen_rtx_XOR (mode, XEXP (varop, 0), constm1_rtx);
10286 continue;
10288 case IOR:
10289 case AND:
10290 case XOR:
10291 /* If we have (xshiftrt (ior (plus X (const_int -1)) X) C)
10292 with C the size of VAROP - 1 and the shift is logical if
10293 STORE_FLAG_VALUE is 1 and arithmetic if STORE_FLAG_VALUE is -1,
10294 we have an (le X 0) operation. If we have an arithmetic shift
10295 and STORE_FLAG_VALUE is 1 or we have a logical shift with
10296 STORE_FLAG_VALUE of -1, we have a (neg (le X 0)) operation. */
10298 if (GET_CODE (varop) == IOR && GET_CODE (XEXP (varop, 0)) == PLUS
10299 && XEXP (XEXP (varop, 0), 1) == constm1_rtx
10300 && (STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
10301 && (code == LSHIFTRT || code == ASHIFTRT)
10302 && count == (GET_MODE_PRECISION (GET_MODE (varop)) - 1)
10303 && rtx_equal_p (XEXP (XEXP (varop, 0), 0), XEXP (varop, 1)))
10305 count = 0;
10306 varop = gen_rtx_LE (GET_MODE (varop), XEXP (varop, 1),
10307 const0_rtx);
10309 if (STORE_FLAG_VALUE == 1 ? code == ASHIFTRT : code == LSHIFTRT)
10310 varop = gen_rtx_NEG (GET_MODE (varop), varop);
10312 continue;
10315 /* If we have (shift (logical)), move the logical to the outside
10316 to allow it to possibly combine with another logical and the
10317 shift to combine with another shift. This also canonicalizes to
10318 what a ZERO_EXTRACT looks like. Also, some machines have
10319 (and (shift)) insns. */
10321 if (CONST_INT_P (XEXP (varop, 1))
10322 /* We can't do this if we have (ashiftrt (xor)) and the
10323 constant has its sign bit set in shift_mode. */
10324 && !(code == ASHIFTRT && GET_CODE (varop) == XOR
10325 && 0 > trunc_int_for_mode (INTVAL (XEXP (varop, 1)),
10326 shift_mode))
10327 && (new_rtx = simplify_const_binary_operation (code, result_mode,
10328 XEXP (varop, 1),
10329 GEN_INT (count))) != 0
10330 && CONST_INT_P (new_rtx)
10331 && merge_outer_ops (&outer_op, &outer_const, GET_CODE (varop),
10332 INTVAL (new_rtx), result_mode, &complement_p))
10334 varop = XEXP (varop, 0);
10335 continue;
10338 /* If we can't do that, try to simplify the shift in each arm of the
10339 logical expression, make a new logical expression, and apply
10340 the inverse distributive law. This also can't be done
10341 for some (ashiftrt (xor)). */
10342 if (CONST_INT_P (XEXP (varop, 1))
10343 && !(code == ASHIFTRT && GET_CODE (varop) == XOR
10344 && 0 > trunc_int_for_mode (INTVAL (XEXP (varop, 1)),
10345 shift_mode)))
10347 rtx lhs = simplify_shift_const (NULL_RTX, code, shift_mode,
10348 XEXP (varop, 0), count);
10349 rtx rhs = simplify_shift_const (NULL_RTX, code, shift_mode,
10350 XEXP (varop, 1), count);
10352 varop = simplify_gen_binary (GET_CODE (varop), shift_mode,
10353 lhs, rhs);
10354 varop = apply_distributive_law (varop);
10356 count = 0;
10357 continue;
10359 break;
10361 case EQ:
10362 /* Convert (lshiftrt (eq FOO 0) C) to (xor FOO 1) if STORE_FLAG_VALUE
10363 says that the sign bit can be tested, FOO has mode MODE, C is
10364 GET_MODE_PRECISION (MODE) - 1, and FOO has only its low-order bit
10365 that may be nonzero. */
10366 if (code == LSHIFTRT
10367 && XEXP (varop, 1) == const0_rtx
10368 && GET_MODE (XEXP (varop, 0)) == result_mode
10369 && count == (GET_MODE_PRECISION (result_mode) - 1)
10370 && HWI_COMPUTABLE_MODE_P (result_mode)
10371 && STORE_FLAG_VALUE == -1
10372 && nonzero_bits (XEXP (varop, 0), result_mode) == 1
10373 && merge_outer_ops (&outer_op, &outer_const, XOR, 1, result_mode,
10374 &complement_p))
10376 varop = XEXP (varop, 0);
10377 count = 0;
10378 continue;
10380 break;
10382 case NEG:
10383 /* (lshiftrt (neg A) C) where A is either 0 or 1 and C is one less
10384 than the number of bits in the mode is equivalent to A. */
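/* For example, in an 8-bit mode with A known to be 0 or 1:
   (neg 1) is 0xff and (lshiftrt 0xff 7) is 1, while (neg 0) is 0
   and stays 0 after the shift, so the whole expression is just A.  */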
10385 if (code == LSHIFTRT
10386 && count == (GET_MODE_PRECISION (result_mode) - 1)
10387 && nonzero_bits (XEXP (varop, 0), result_mode) == 1)
10389 varop = XEXP (varop, 0);
10390 count = 0;
10391 continue;
10394 /* NEG commutes with ASHIFT since it is multiplication. Move the
10395 NEG outside to allow shifts to combine. */
10396 if (code == ASHIFT
10397 && merge_outer_ops (&outer_op, &outer_const, NEG, 0, result_mode,
10398 &complement_p))
10400 varop = XEXP (varop, 0);
10401 continue;
10403 break;
10405 case PLUS:
10406 /* (lshiftrt (plus A -1) C) where A is either 0 or 1 and C
10407 is one less than the number of bits in the mode is
10408 equivalent to (xor A 1). */
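/* For example, in an 8-bit mode with A known to be 0 or 1:
   A == 0 gives (plus 0 -1) = 0xff and (lshiftrt 0xff 7) = 1, while
   A == 1 gives 0, which stays 0 after the shift.  Either way the
   result is (xor A 1), recorded below as an outer XOR with 1.  */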
10409 if (code == LSHIFTRT
10410 && count == (GET_MODE_PRECISION (result_mode) - 1)
10411 && XEXP (varop, 1) == constm1_rtx
10412 && nonzero_bits (XEXP (varop, 0), result_mode) == 1
10413 && merge_outer_ops (&outer_op, &outer_const, XOR, 1, result_mode,
10414 &complement_p))
10416 count = 0;
10417 varop = XEXP (varop, 0);
10418 continue;
10421 /* If we have (xshiftrt (plus FOO BAR) C), and the only bits
10422 that might be nonzero in BAR are those being shifted out and those
10423 bits are known zero in FOO, we can replace the PLUS with FOO.
10424 Similarly in the other operand order. This code occurs when
10425 we are computing the size of a variable-size array. */
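/* For example, with a shift count of 4, if BAR can be nonzero only in
   the four low bits being shifted out and FOO is known to be zero in
   those bits, the addition cannot carry into the surviving bits, so
   (lshiftrt (plus FOO BAR) 4) is the same as (lshiftrt FOO 4).  */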
10427 if ((code == ASHIFTRT || code == LSHIFTRT)
10428 && count < HOST_BITS_PER_WIDE_INT
10429 && nonzero_bits (XEXP (varop, 1), result_mode) >> count == 0
10430 && (nonzero_bits (XEXP (varop, 1), result_mode)
10431 & nonzero_bits (XEXP (varop, 0), result_mode)) == 0)
10433 varop = XEXP (varop, 0);
10434 continue;
10436 else if ((code == ASHIFTRT || code == LSHIFTRT)
10437 && count < HOST_BITS_PER_WIDE_INT
10438 && HWI_COMPUTABLE_MODE_P (result_mode)
10439 && 0 == (nonzero_bits (XEXP (varop, 0), result_mode)
10440 >> count)
10441 && 0 == (nonzero_bits (XEXP (varop, 0), result_mode)
10442 & nonzero_bits (XEXP (varop, 1),
10443 result_mode)))
10445 varop = XEXP (varop, 1);
10446 continue;
10449 /* (ashift (plus foo C) N) is (plus (ashift foo N) C'). */
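/* Here C' is C shifted left by N, e.g.
   (ashift (plus foo (const_int 3)) 2) becomes
   (plus (ashift foo 2) (const_int 12)).  */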
10450 if (code == ASHIFT
10451 && CONST_INT_P (XEXP (varop, 1))
10452 && (new_rtx = simplify_const_binary_operation (ASHIFT, result_mode,
10453 XEXP (varop, 1),
10454 GEN_INT (count))) != 0
10455 && CONST_INT_P (new_rtx)
10456 && merge_outer_ops (&outer_op, &outer_const, PLUS,
10457 INTVAL (new_rtx), result_mode, &complement_p))
10459 varop = XEXP (varop, 0);
10460 continue;
10463 /* Check for 'PLUS signbit', which is the canonical form of 'XOR
10464 signbit', and attempt to change the PLUS to an XOR and move it to
10465 the outer operation as is done above in the AND/IOR/XOR case
10466 leg for shift(logical). See details in logical handling above
10467 for the reasoning behind doing so.  */
10468 if (code == LSHIFTRT
10469 && CONST_INT_P (XEXP (varop, 1))
10470 && mode_signbit_p (result_mode, XEXP (varop, 1))
10471 && (new_rtx = simplify_const_binary_operation (code, result_mode,
10472 XEXP (varop, 1),
10473 GEN_INT (count))) != 0
10474 && CONST_INT_P (new_rtx)
10475 && merge_outer_ops (&outer_op, &outer_const, XOR,
10476 INTVAL (new_rtx), result_mode, &complement_p))
10478 varop = XEXP (varop, 0);
10479 continue;
10482 break;
10484 case MINUS:
10485 /* If we have (xshiftrt (minus (ashiftrt X C) X) C)
10486 with C the size of VAROP - 1 and the shift is logical if
10487 STORE_FLAG_VALUE is 1 and arithmetic if STORE_FLAG_VALUE is -1,
10488 we have a (gt X 0) operation. If the shift is arithmetic with
10489 STORE_FLAG_VALUE of 1 or logical with STORE_FLAG_VALUE == -1,
10490 we have a (neg (gt X 0)) operation. */
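/* To see why, note that (ashiftrt X C) with C the sign-bit position
   is 0 when X >= 0 and -1 when X < 0.  Subtracting X gives -X in the
   first case and -(X + 1) in the second, so the sign bit of the
   difference is set exactly when X > 0; shifting that bit down
   produces the (gt X 0) flag.  */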
10492 if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
10493 && GET_CODE (XEXP (varop, 0)) == ASHIFTRT
10494 && count == (GET_MODE_PRECISION (GET_MODE (varop)) - 1)
10495 && (code == LSHIFTRT || code == ASHIFTRT)
10496 && CONST_INT_P (XEXP (XEXP (varop, 0), 1))
10497 && INTVAL (XEXP (XEXP (varop, 0), 1)) == count
10498 && rtx_equal_p (XEXP (XEXP (varop, 0), 0), XEXP (varop, 1)))
10500 count = 0;
10501 varop = gen_rtx_GT (GET_MODE (varop), XEXP (varop, 1),
10502 const0_rtx);
10504 if (STORE_FLAG_VALUE == 1 ? code == ASHIFTRT : code == LSHIFTRT)
10505 varop = gen_rtx_NEG (GET_MODE (varop), varop);
10507 continue;
10509 break;
10511 case TRUNCATE:
10512 /* Change (lshiftrt (truncate (lshiftrt))) to (truncate (lshiftrt))
10513 if the truncate does not affect the value. */
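/* For example, (lshiftrt:SI (truncate:SI (lshiftrt:DI X 32)) 3)
   becomes (truncate:SI (lshiftrt:DI X 35)): the inner shift has
   already moved everything the truncation discards out of the way,
   so the two logical shifts can be merged inside the TRUNCATE.  */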
10514 if (code == LSHIFTRT
10515 && GET_CODE (XEXP (varop, 0)) == LSHIFTRT
10516 && CONST_INT_P (XEXP (XEXP (varop, 0), 1))
10517 && (INTVAL (XEXP (XEXP (varop, 0), 1))
10518 >= (GET_MODE_PRECISION (GET_MODE (XEXP (varop, 0)))
10519 - GET_MODE_PRECISION (GET_MODE (varop)))))
10521 rtx varop_inner = XEXP (varop, 0);
10523 varop_inner
10524 = gen_rtx_LSHIFTRT (GET_MODE (varop_inner),
10525 XEXP (varop_inner, 0),
10526 GEN_INT
10527 (count + INTVAL (XEXP (varop_inner, 1))));
10528 varop = gen_rtx_TRUNCATE (GET_MODE (varop), varop_inner);
10529 count = 0;
10530 continue;
10532 break;
10534 default:
10535 break;
10538 break;
10541 shift_mode = try_widen_shift_mode (code, varop, count, result_mode, mode,
10542 outer_op, outer_const);
10544 /* We have now finished analyzing the shift. The result should be
10545 a shift of type CODE with SHIFT_MODE shifting VAROP COUNT places. If
10546 OUTER_OP is non-UNKNOWN, it is an operation that needs to be applied
10547 to the result of the shift. OUTER_CONST is the relevant constant,
10548 but we must turn off all bits turned off in the shift. */
10550 if (outer_op == UNKNOWN
10551 && orig_code == code && orig_count == count
10552 && varop == orig_varop
10553 && shift_mode == GET_MODE (varop))
10554 return NULL_RTX;
10556 /* Make a SUBREG if necessary. If we can't make it, fail. */
10557 varop = gen_lowpart (shift_mode, varop);
10558 if (varop == NULL_RTX || GET_CODE (varop) == CLOBBER)
10559 return NULL_RTX;
10561 /* If we have an outer operation and we just made a shift, it is
10562 possible that we could have simplified the shift were it not
10563 for the outer operation. So try to do the simplification
10564 recursively. */
10566 if (outer_op != UNKNOWN)
10567 x = simplify_shift_const_1 (code, shift_mode, varop, count);
10568 else
10569 x = NULL_RTX;
10571 if (x == NULL_RTX)
10572 x = simplify_gen_binary (code, shift_mode, varop, GEN_INT (count));
10574 /* If we were doing an LSHIFTRT in a wider mode than it was originally,
10575 turn off all the bits that the shift would have turned off. */
10576 if (orig_code == LSHIFTRT && result_mode != shift_mode)
10577 x = simplify_and_const_int (NULL_RTX, shift_mode, x,
10578 GET_MODE_MASK (result_mode) >> orig_count);
10580 /* Do the remainder of the processing in RESULT_MODE. */
10581 x = gen_lowpart_or_truncate (result_mode, x);
10583 /* If COMPLEMENT_P is set, we have to complement X before doing the outer
10584 operation. */
10585 if (complement_p)
10586 x = simplify_gen_unary (NOT, result_mode, x, result_mode);
10588 if (outer_op != UNKNOWN)
10590 if (GET_RTX_CLASS (outer_op) != RTX_UNARY
10591 && GET_MODE_PRECISION (result_mode) < HOST_BITS_PER_WIDE_INT)
10592 outer_const = trunc_int_for_mode (outer_const, result_mode);
10594 if (outer_op == AND)
10595 x = simplify_and_const_int (NULL_RTX, result_mode, x, outer_const);
10596 else if (outer_op == SET)
10598 /* This means that we have determined that the result is
10599 equivalent to a constant. This should be rare. */
10600 if (!side_effects_p (x))
10601 x = GEN_INT (outer_const);
10603 else if (GET_RTX_CLASS (outer_op) == RTX_UNARY)
10604 x = simplify_gen_unary (outer_op, result_mode, x, result_mode);
10605 else
10606 x = simplify_gen_binary (outer_op, result_mode, x,
10607 GEN_INT (outer_const));
10610 return x;
10613 /* Simplify a shift of VAROP by COUNT bits. CODE says what kind of shift.
10614 The result of the shift is RESULT_MODE. If we cannot simplify it,
10615 return X or, if it is NULL, synthesize the expression with
10616 simplify_gen_binary. Otherwise, return a simplified value.
10618 The shift is normally computed in the widest mode we find in VAROP, as
10619 long as it isn't a different number of words than RESULT_MODE. Exceptions
10620 are ASHIFTRT and ROTATE, which are always done in their original mode. */
10622 static rtx
10623 simplify_shift_const (rtx x, enum rtx_code code, enum machine_mode result_mode,
10624 rtx varop, int count)
10626 rtx tem = simplify_shift_const_1 (code, result_mode, varop, count);
10627 if (tem)
10628 return tem;
10630 if (!x)
10631 x = simplify_gen_binary (code, GET_MODE (varop), varop, GEN_INT (count));
10632 if (GET_MODE (x) != result_mode)
10633 x = gen_lowpart (result_mode, x);
10634 return x;
10638 /* Like recog, but we receive the address of a pointer to a new pattern.
10639 We try to match the rtx that the pointer points to.
10640 If that fails, we may try to modify or replace the pattern,
10641 storing the replacement into the same pointer object.
10643 Modifications include deletion or addition of CLOBBERs.
10645 PNOTES is a pointer to a location where any REG_UNUSED notes added for
10646 the CLOBBERs are placed.
10648 The value is the final insn code from the pattern ultimately matched,
10649 or -1. */
10651 static int
10652 recog_for_combine (rtx *pnewpat, rtx insn, rtx *pnotes)
10654 rtx pat = *pnewpat;
10655 int insn_code_number;
10656 int num_clobbers_to_add = 0;
10657 int i;
10658 rtx notes = 0;
10659 rtx old_notes, old_pat;
10661 /* If PAT is a PARALLEL, check to see if it contains the CLOBBER
10662 we use to indicate that something didn't match. If we find such a
10663 thing, force rejection. */
10664 if (GET_CODE (pat) == PARALLEL)
10665 for (i = XVECLEN (pat, 0) - 1; i >= 0; i--)
10666 if (GET_CODE (XVECEXP (pat, 0, i)) == CLOBBER
10667 && XEXP (XVECEXP (pat, 0, i), 0) == const0_rtx)
10668 return -1;
10670 old_pat = PATTERN (insn);
10671 old_notes = REG_NOTES (insn);
10672 PATTERN (insn) = pat;
10673 REG_NOTES (insn) = 0;
10675 insn_code_number = recog (pat, insn, &num_clobbers_to_add);
10676 if (dump_file && (dump_flags & TDF_DETAILS))
10678 if (insn_code_number < 0)
10679 fputs ("Failed to match this instruction:\n", dump_file);
10680 else
10681 fputs ("Successfully matched this instruction:\n", dump_file);
10682 print_rtl_single (dump_file, pat);
10685 /* If the pattern isn't recognized, there is the possibility that we previously had an insn
10686 that clobbered some register as a side effect, but the combined
10687 insn doesn't need to do that. So try once more without the clobbers
10688 unless this represents an ASM insn. */
10690 if (insn_code_number < 0 && ! check_asm_operands (pat)
10691 && GET_CODE (pat) == PARALLEL)
10693 int pos;
10695 for (pos = 0, i = 0; i < XVECLEN (pat, 0); i++)
10696 if (GET_CODE (XVECEXP (pat, 0, i)) != CLOBBER)
10698 if (i != pos)
10699 SUBST (XVECEXP (pat, 0, pos), XVECEXP (pat, 0, i));
10700 pos++;
10703 SUBST_INT (XVECLEN (pat, 0), pos);
10705 if (pos == 1)
10706 pat = XVECEXP (pat, 0, 0);
10708 PATTERN (insn) = pat;
10709 insn_code_number = recog (pat, insn, &num_clobbers_to_add);
10710 if (dump_file && (dump_flags & TDF_DETAILS))
10712 if (insn_code_number < 0)
10713 fputs ("Failed to match this instruction:\n", dump_file);
10714 else
10715 fputs ("Successfully matched this instruction:\n", dump_file);
10716 print_rtl_single (dump_file, pat);
10719 PATTERN (insn) = old_pat;
10720 REG_NOTES (insn) = old_notes;
10722 /* Recognize all noop sets; these will be killed by a followup pass.  */
10723 if (insn_code_number < 0 && GET_CODE (pat) == SET && set_noop_p (pat))
10724 insn_code_number = NOOP_MOVE_INSN_CODE, num_clobbers_to_add = 0;
10726 /* If we had any clobbers to add, make a new pattern that contains
10727 them. Then check to make sure that all of them are dead. */
10728 if (num_clobbers_to_add)
10730 rtx newpat = gen_rtx_PARALLEL (VOIDmode,
10731 rtvec_alloc (GET_CODE (pat) == PARALLEL
10732 ? (XVECLEN (pat, 0)
10733 + num_clobbers_to_add)
10734 : num_clobbers_to_add + 1));
10736 if (GET_CODE (pat) == PARALLEL)
10737 for (i = 0; i < XVECLEN (pat, 0); i++)
10738 XVECEXP (newpat, 0, i) = XVECEXP (pat, 0, i);
10739 else
10740 XVECEXP (newpat, 0, 0) = pat;
10742 add_clobbers (newpat, insn_code_number);
10744 for (i = XVECLEN (newpat, 0) - num_clobbers_to_add;
10745 i < XVECLEN (newpat, 0); i++)
10747 if (REG_P (XEXP (XVECEXP (newpat, 0, i), 0))
10748 && ! reg_dead_at_p (XEXP (XVECEXP (newpat, 0, i), 0), insn))
10749 return -1;
10750 if (GET_CODE (XEXP (XVECEXP (newpat, 0, i), 0)) != SCRATCH)
10752 gcc_assert (REG_P (XEXP (XVECEXP (newpat, 0, i), 0)));
10753 notes = alloc_reg_note (REG_UNUSED,
10754 XEXP (XVECEXP (newpat, 0, i), 0), notes);
10757 pat = newpat;
10760 *pnewpat = pat;
10761 *pnotes = notes;
10763 return insn_code_number;
10766 /* Like gen_lowpart_general but for use by combine. In combine it
10767 is not possible to create any new pseudoregs. However, it is
10768 safe to create invalid memory addresses, because combine will
10769 try to recognize them and all they will do is make the combine
10770 attempt fail.
10772 If for some reason this cannot do its job, an rtx
10773 (clobber (const_int 0)) is returned.
10774 An insn containing that will not be recognized. */
10776 static rtx
10777 gen_lowpart_for_combine (enum machine_mode omode, rtx x)
10779 enum machine_mode imode = GET_MODE (x);
10780 unsigned int osize = GET_MODE_SIZE (omode);
10781 unsigned int isize = GET_MODE_SIZE (imode);
10782 rtx result;
10784 if (omode == imode)
10785 return x;
10787 /* Return identity if this is a CONST or symbolic reference. */
10788 if (omode == Pmode
10789 && (GET_CODE (x) == CONST
10790 || GET_CODE (x) == SYMBOL_REF
10791 || GET_CODE (x) == LABEL_REF))
10792 return x;
10794 /* We can only support MODE being wider than a word if X is a
10795 constant integer or has a mode the same size. */
10796 if (GET_MODE_SIZE (omode) > UNITS_PER_WORD
10797 && ! ((imode == VOIDmode
10798 && (CONST_INT_P (x)
10799 || GET_CODE (x) == CONST_DOUBLE))
10800 || isize == osize))
10801 goto fail;
10803 /* X might be a paradoxical (subreg (mem)). In that case, gen_lowpart
10804 won't know what to do. So we will strip off the SUBREG here and
10805 process normally. */
10806 if (GET_CODE (x) == SUBREG && MEM_P (SUBREG_REG (x)))
10808 x = SUBREG_REG (x);
10810 /* In case we fall through to the address adjustments
10811 further below, update the known mode and size of X
10812 (imode and isize), since we just changed X.  */
10813 imode = GET_MODE (x);
10815 if (imode == omode)
10816 return x;
10818 isize = GET_MODE_SIZE (imode);
10821 result = gen_lowpart_common (omode, x);
10823 if (result)
10824 return result;
10826 if (MEM_P (x))
10828 int offset = 0;
10830 /* Refuse to work on a volatile memory ref or one with a mode-dependent
10831 address. */
10832 if (MEM_VOLATILE_P (x) || mode_dependent_address_p (XEXP (x, 0)))
10833 goto fail;
10835 /* If we want to refer to something bigger than the original memref,
10836 generate a paradoxical subreg instead. That will force a reload
10837 of the original memref X. */
10838 if (isize < osize)
10839 return gen_rtx_SUBREG (omode, x, 0);
10841 if (WORDS_BIG_ENDIAN)
10842 offset = MAX (isize, UNITS_PER_WORD) - MAX (osize, UNITS_PER_WORD);
10844 /* Adjust the address so that the address-after-the-data is
10845 unchanged. */
10846 if (BYTES_BIG_ENDIAN)
10847 offset -= MIN (UNITS_PER_WORD, osize) - MIN (UNITS_PER_WORD, isize);
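/* As a worked example, assume UNITS_PER_WORD is 4 and we take the
   QImode low part of a DImode memory reference on a target that is
   big-endian for both words and bytes: the word adjustment gives
   MAX (8, 4) - MAX (1, 4) = 4 and the byte adjustment subtracts
   MIN (4, 1) - MIN (4, 8) = -3, for a final offset of 7, the byte
   holding the least significant part of the value.  */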
10849 return adjust_address_nv (x, omode, offset);
10852 /* If X is a comparison operator, rewrite it in a new mode. This
10853 probably won't match, but may allow further simplifications. */
10854 else if (COMPARISON_P (x))
10855 return gen_rtx_fmt_ee (GET_CODE (x), omode, XEXP (x, 0), XEXP (x, 1));
10857 /* If we couldn't simplify X any other way, just enclose it in a
10858 SUBREG. Normally, this SUBREG won't match, but some patterns may
10859 include an explicit SUBREG or we may simplify it further in combine. */
10860 else
10862 int offset = 0;
10863 rtx res;
10865 offset = subreg_lowpart_offset (omode, imode);
10866 if (imode == VOIDmode)
10868 imode = int_mode_for_mode (omode);
10869 x = gen_lowpart_common (imode, x);
10870 if (x == NULL)
10871 goto fail;
10873 res = simplify_gen_subreg (omode, x, imode, offset);
10874 if (res)
10875 return res;
10878 fail:
10879 return gen_rtx_CLOBBER (omode, const0_rtx);
10882 /* Try to simplify a comparison between OP0 and a constant OP1,
10883 where CODE is the comparison code that will be tested, into a
10884 (CODE OP0 const0_rtx) form.
10886 The result is a possibly different comparison code to use.
10887 *POP1 may be updated. */
10889 static enum rtx_code
10890 simplify_compare_const (enum rtx_code code, rtx op0, rtx *pop1)
10892 enum machine_mode mode = GET_MODE (op0);
10893 unsigned int mode_width = GET_MODE_PRECISION (mode);
10894 HOST_WIDE_INT const_op = INTVAL (*pop1);
10896 /* Get the constant we are comparing against and turn off all bits
10897 not on in our mode. */
10898 if (mode != VOIDmode)
10899 const_op = trunc_int_for_mode (const_op, mode);
10901 /* If we are comparing against a constant power of two and the value
10902 being compared can only have that single bit nonzero (e.g., it was
10903 `and'ed with that bit), we can replace this with a comparison
10904 with zero. */
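/* For example, if OP0 is (and X (const_int 8)), so that only bit 3
   can be nonzero, then (eq OP0 (const_int 8)) is the same test as
   (ne OP0 (const_int 0)), and (ne OP0 (const_int 8)) likewise
   becomes (eq OP0 (const_int 0)).  */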
10905 if (const_op
10906 && (code == EQ || code == NE || code == GE || code == GEU
10907 || code == LT || code == LTU)
10908 && mode_width <= HOST_BITS_PER_WIDE_INT
10909 && exact_log2 (const_op) >= 0
10910 && nonzero_bits (op0, mode) == (unsigned HOST_WIDE_INT) const_op)
10912 code = (code == EQ || code == GE || code == GEU ? NE : EQ);
10913 const_op = 0;
10916 /* Similarly, if we are comparing a value known to be either -1 or
10917 0 with -1, change it to the opposite comparison against zero. */
10918 if (const_op == -1
10919 && (code == EQ || code == NE || code == GT || code == LE
10920 || code == GEU || code == LTU)
10921 && num_sign_bit_copies (op0, mode) == mode_width)
10923 code = (code == EQ || code == LE || code == GEU ? NE : EQ);
10924 const_op = 0;
10927 /* Do some canonicalizations based on the comparison code. We prefer
10928 comparisons against zero and then prefer equality comparisons.
10929 If we can reduce the size of a constant, we will do that too. */
10930 switch (code)
10932 case LT:
10933 /* < C is equivalent to <= (C - 1) */
10934 if (const_op > 0)
10936 const_op -= 1;
10937 code = LE;
10938 /* ... fall through to LE case below. */
10940 else
10941 break;
10943 case LE:
10944 /* <= C is equivalent to < (C + 1); we do this for C < 0 */
10945 if (const_op < 0)
10947 const_op += 1;
10948 code = LT;
10951 /* If we are doing a <= 0 comparison on a value known to have
10952 a zero sign bit, we can replace this with == 0. */
10953 else if (const_op == 0
10954 && mode_width <= HOST_BITS_PER_WIDE_INT
10955 && (nonzero_bits (op0, mode)
10956 & ((unsigned HOST_WIDE_INT) 1 << (mode_width - 1)))
10957 == 0)
10958 code = EQ;
10959 break;
10961 case GE:
10962 /* >= C is equivalent to > (C - 1). */
10963 if (const_op > 0)
10965 const_op -= 1;
10966 code = GT;
10967 /* ... fall through to GT below. */
10969 else
10970 break;
10972 case GT:
10973 /* > C is equivalent to >= (C + 1); we do this for C < 0. */
10974 if (const_op < 0)
10976 const_op += 1;
10977 code = GE;
10980 /* If we are doing a > 0 comparison on a value known to have
10981 a zero sign bit, we can replace this with != 0. */
10982 else if (const_op == 0
10983 && mode_width <= HOST_BITS_PER_WIDE_INT
10984 && (nonzero_bits (op0, mode)
10985 & ((unsigned HOST_WIDE_INT) 1 << (mode_width - 1)))
10986 == 0)
10987 code = NE;
10988 break;
10990 case LTU:
10991 /* < C is equivalent to <= (C - 1). */
10992 if (const_op > 0)
10994 const_op -= 1;
10995 code = LEU;
10996 /* ... fall through ... */
10998 /* (unsigned) < 0x80000000 is equivalent to >= 0. */
10999 else if (mode_width <= HOST_BITS_PER_WIDE_INT
11000 && (unsigned HOST_WIDE_INT) const_op
11001 == (unsigned HOST_WIDE_INT) 1 << (mode_width - 1))
11003 const_op = 0;
11004 code = GE;
11005 break;
11007 else
11008 break;
11010 case LEU:
11011 /* unsigned <= 0 is equivalent to == 0 */
11012 if (const_op == 0)
11013 code = EQ;
11014 /* (unsigned) <= 0x7fffffff is equivalent to >= 0. */
11015 else if (mode_width <= HOST_BITS_PER_WIDE_INT
11016 && (unsigned HOST_WIDE_INT) const_op
11017 == ((unsigned HOST_WIDE_INT) 1 << (mode_width - 1)) - 1)
11019 const_op = 0;
11020 code = GE;
11022 break;
11024 case GEU:
11025 /* >= C is equivalent to > (C - 1). */
11026 if (const_op > 1)
11028 const_op -= 1;
11029 code = GTU;
11030 /* ... fall through ... */
11033 /* (unsigned) >= 0x80000000 is equivalent to < 0. */
11034 else if (mode_width <= HOST_BITS_PER_WIDE_INT
11035 && (unsigned HOST_WIDE_INT) const_op
11036 == (unsigned HOST_WIDE_INT) 1 << (mode_width - 1))
11038 const_op = 0;
11039 code = LT;
11040 break;
11042 else
11043 break;
11045 case GTU:
11046 /* unsigned > 0 is equivalent to != 0 */
11047 if (const_op == 0)
11048 code = NE;
11049 /* (unsigned) > 0x7fffffff is equivalent to < 0. */
11050 else if (mode_width <= HOST_BITS_PER_WIDE_INT
11051 && (unsigned HOST_WIDE_INT) const_op
11052 == ((unsigned HOST_WIDE_INT) 1 << (mode_width - 1)) - 1)
11054 const_op = 0;
11055 code = LT;
11057 break;
11059 default:
11060 break;
11063 *pop1 = GEN_INT (const_op);
11064 return code;
11067 /* Simplify a comparison between *POP0 and *POP1 where CODE is the
11068 comparison code that will be tested.
11070 The result is a possibly different comparison code to use. *POP0 and
11071 *POP1 may be updated.
11073 It is possible that we might detect that a comparison is either always
11074 true or always false. However, we do not perform general constant
11075 folding in combine, so this knowledge isn't useful. Such tautologies
11076 should have been detected earlier. Hence we ignore all such cases. */
11078 static enum rtx_code
11079 simplify_comparison (enum rtx_code code, rtx *pop0, rtx *pop1)
11081 rtx op0 = *pop0;
11082 rtx op1 = *pop1;
11083 rtx tem, tem1;
11084 int i;
11085 enum machine_mode mode, tmode;
11087 /* Try a few ways of applying the same transformation to both operands. */
11088 while (1)
11090 #ifndef WORD_REGISTER_OPERATIONS
11091 /* The test below this one won't handle SIGN_EXTENDs on these machines,
11092 so check specially. */
11093 if (code != GTU && code != GEU && code != LTU && code != LEU
11094 && GET_CODE (op0) == ASHIFTRT && GET_CODE (op1) == ASHIFTRT
11095 && GET_CODE (XEXP (op0, 0)) == ASHIFT
11096 && GET_CODE (XEXP (op1, 0)) == ASHIFT
11097 && GET_CODE (XEXP (XEXP (op0, 0), 0)) == SUBREG
11098 && GET_CODE (XEXP (XEXP (op1, 0), 0)) == SUBREG
11099 && (GET_MODE (SUBREG_REG (XEXP (XEXP (op0, 0), 0)))
11100 == GET_MODE (SUBREG_REG (XEXP (XEXP (op1, 0), 0))))
11101 && CONST_INT_P (XEXP (op0, 1))
11102 && XEXP (op0, 1) == XEXP (op1, 1)
11103 && XEXP (op0, 1) == XEXP (XEXP (op0, 0), 1)
11104 && XEXP (op0, 1) == XEXP (XEXP (op1, 0), 1)
11105 && (INTVAL (XEXP (op0, 1))
11106 == (GET_MODE_PRECISION (GET_MODE (op0))
11107 - (GET_MODE_PRECISION
11108 (GET_MODE (SUBREG_REG (XEXP (XEXP (op0, 0), 0))))))))
11110 op0 = SUBREG_REG (XEXP (XEXP (op0, 0), 0));
11111 op1 = SUBREG_REG (XEXP (XEXP (op1, 0), 0));
11113 #endif
11115 /* If both operands are the same constant shift, see if we can ignore the
11116 shift. We can if the shift is a rotate or if the bits shifted out of
11117 this shift are known to be zero for both inputs and if the type of
11118 comparison is compatible with the shift. */
11119 if (GET_CODE (op0) == GET_CODE (op1)
11120 && HWI_COMPUTABLE_MODE_P (GET_MODE(op0))
11121 && ((GET_CODE (op0) == ROTATE && (code == NE || code == EQ))
11122 || ((GET_CODE (op0) == LSHIFTRT || GET_CODE (op0) == ASHIFT)
11123 && (code != GT && code != LT && code != GE && code != LE))
11124 || (GET_CODE (op0) == ASHIFTRT
11125 && (code != GTU && code != LTU
11126 && code != GEU && code != LEU)))
11127 && CONST_INT_P (XEXP (op0, 1))
11128 && INTVAL (XEXP (op0, 1)) >= 0
11129 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT
11130 && XEXP (op0, 1) == XEXP (op1, 1))
11132 enum machine_mode mode = GET_MODE (op0);
11133 unsigned HOST_WIDE_INT mask = GET_MODE_MASK (mode);
11134 int shift_count = INTVAL (XEXP (op0, 1));
11136 if (GET_CODE (op0) == LSHIFTRT || GET_CODE (op0) == ASHIFTRT)
11137 mask &= (mask >> shift_count) << shift_count;
11138 else if (GET_CODE (op0) == ASHIFT)
11139 mask = (mask & (mask << shift_count)) >> shift_count;
11141 if ((nonzero_bits (XEXP (op0, 0), mode) & ~mask) == 0
11142 && (nonzero_bits (XEXP (op1, 0), mode) & ~mask) == 0)
11143 op0 = XEXP (op0, 0), op1 = XEXP (op1, 0);
11144 else
11145 break;
11148 /* If both operands are AND's of a paradoxical SUBREG by constant, the
11149 SUBREGs are of the same mode, and, in both cases, the AND would
11150 be redundant if the comparison was done in the narrower mode,
11151 do the comparison in the narrower mode (e.g., we are AND'ing with 1
11152 and the operand's possibly nonzero bits are 0xffffff01; in that case
11153 if we only care about QImode, we don't need the AND). This case
11154 occurs if the output mode of an scc insn is not SImode and
11155 STORE_FLAG_VALUE == 1 (e.g., the 386).
11157 Similarly, check for a case where the AND's are ZERO_EXTEND
11158 operations from some narrower mode even though a SUBREG is not
11159 present. */
11161 else if (GET_CODE (op0) == AND && GET_CODE (op1) == AND
11162 && CONST_INT_P (XEXP (op0, 1))
11163 && CONST_INT_P (XEXP (op1, 1)))
11165 rtx inner_op0 = XEXP (op0, 0);
11166 rtx inner_op1 = XEXP (op1, 0);
11167 HOST_WIDE_INT c0 = INTVAL (XEXP (op0, 1));
11168 HOST_WIDE_INT c1 = INTVAL (XEXP (op1, 1));
11169 int changed = 0;
11171 if (paradoxical_subreg_p (inner_op0)
11172 && GET_CODE (inner_op1) == SUBREG
11173 && (GET_MODE (SUBREG_REG (inner_op0))
11174 == GET_MODE (SUBREG_REG (inner_op1)))
11175 && (GET_MODE_PRECISION (GET_MODE (SUBREG_REG (inner_op0)))
11176 <= HOST_BITS_PER_WIDE_INT)
11177 && (0 == ((~c0) & nonzero_bits (SUBREG_REG (inner_op0),
11178 GET_MODE (SUBREG_REG (inner_op0)))))
11179 && (0 == ((~c1) & nonzero_bits (SUBREG_REG (inner_op1),
11180 GET_MODE (SUBREG_REG (inner_op1))))))
11182 op0 = SUBREG_REG (inner_op0);
11183 op1 = SUBREG_REG (inner_op1);
11185 /* The resulting comparison is always unsigned since we masked
11186 off the original sign bit. */
11187 code = unsigned_condition (code);
11189 changed = 1;
11192 else if (c0 == c1)
11193 for (tmode = GET_CLASS_NARROWEST_MODE
11194 (GET_MODE_CLASS (GET_MODE (op0)));
11195 tmode != GET_MODE (op0); tmode = GET_MODE_WIDER_MODE (tmode))
11196 if ((unsigned HOST_WIDE_INT) c0 == GET_MODE_MASK (tmode))
11198 op0 = gen_lowpart (tmode, inner_op0);
11199 op1 = gen_lowpart (tmode, inner_op1);
11200 code = unsigned_condition (code);
11201 changed = 1;
11202 break;
11205 if (! changed)
11206 break;
11209 /* If both operands are NOT, we can strip off the outer operation
11210 and adjust the comparison code for swapped operands; similarly for
11211 NEG, except that this must be an equality comparison. */
11212 else if ((GET_CODE (op0) == NOT && GET_CODE (op1) == NOT)
11213 || (GET_CODE (op0) == NEG && GET_CODE (op1) == NEG
11214 && (code == EQ || code == NE)))
11215 op0 = XEXP (op0, 0), op1 = XEXP (op1, 0), code = swap_condition (code);
11217 else
11218 break;
11221 /* If the first operand is a constant, swap the operands and adjust the
11222 comparison code appropriately, but don't do this if the second operand
11223 is already a constant integer. */
11224 if (swap_commutative_operands_p (op0, op1))
11226 tem = op0, op0 = op1, op1 = tem;
11227 code = swap_condition (code);
11230 /* We now enter a loop during which we will try to simplify the comparison.
11231 For the most part, we are only concerned with comparisons with zero,
11232 but some things may really be comparisons with zero but not start
11233 out looking that way. */
11235 while (CONST_INT_P (op1))
11237 enum machine_mode mode = GET_MODE (op0);
11238 unsigned int mode_width = GET_MODE_PRECISION (mode);
11239 unsigned HOST_WIDE_INT mask = GET_MODE_MASK (mode);
11240 int equality_comparison_p;
11241 int sign_bit_comparison_p;
11242 int unsigned_comparison_p;
11243 HOST_WIDE_INT const_op;
11245 /* We only want to handle integral modes. This catches VOIDmode,
11246 CCmode, and the floating-point modes. An exception is that we
11247 can handle VOIDmode if OP0 is a COMPARE or a comparison
11248 operation. */
11250 if (GET_MODE_CLASS (mode) != MODE_INT
11251 && ! (mode == VOIDmode
11252 && (GET_CODE (op0) == COMPARE || COMPARISON_P (op0))))
11253 break;
11255 /* Try to simplify the compare to constant, possibly changing the
11256 comparison op, and/or changing op1 to zero. */
11257 code = simplify_compare_const (code, op0, &op1);
11258 const_op = INTVAL (op1);
11260 /* Compute some predicates to simplify code below. */
11262 equality_comparison_p = (code == EQ || code == NE);
11263 sign_bit_comparison_p = ((code == LT || code == GE) && const_op == 0);
11264 unsigned_comparison_p = (code == LTU || code == LEU || code == GTU
11265 || code == GEU);
11267 /* If this is a sign bit comparison and we can do arithmetic in
11268 MODE, say that we will only be needing the sign bit of OP0. */
11269 if (sign_bit_comparison_p && HWI_COMPUTABLE_MODE_P (mode))
11270 op0 = force_to_mode (op0, mode,
11271 (unsigned HOST_WIDE_INT) 1
11272 << (GET_MODE_PRECISION (mode) - 1),
11275 /* Now try cases based on the opcode of OP0. If none of the cases
11276 does a "continue", we exit this loop immediately after the
11277 switch. */
11279 switch (GET_CODE (op0))
11281 case ZERO_EXTRACT:
11282 /* If we are extracting a single bit from a variable position in
11283 a constant that has only a single bit set and are comparing it
11284 with zero, we can convert this into an equality comparison
11285 between the position and the location of the single bit. */
11286 /* Except we can't if SHIFT_COUNT_TRUNCATED is set, since we might
11287 have already reduced the shift count modulo the word size. */
11288 if (!SHIFT_COUNT_TRUNCATED
11289 && CONST_INT_P (XEXP (op0, 0))
11290 && XEXP (op0, 1) == const1_rtx
11291 && equality_comparison_p && const_op == 0
11292 && (i = exact_log2 (UINTVAL (XEXP (op0, 0)))) >= 0)
11294 if (BITS_BIG_ENDIAN)
11296 enum machine_mode new_mode
11297 = mode_for_extraction (EP_extzv, 1);
11298 if (new_mode == MAX_MACHINE_MODE)
11299 i = BITS_PER_WORD - 1 - i;
11300 else
11302 mode = new_mode;
11303 i = (GET_MODE_PRECISION (mode) - 1 - i);
11307 op0 = XEXP (op0, 2);
11308 op1 = GEN_INT (i);
11309 const_op = i;
11311 /* Result is nonzero iff shift count is equal to I. */
11312 code = reverse_condition (code);
11313 continue;
11316 /* ... fall through ... */
11318 case SIGN_EXTRACT:
11319 tem = expand_compound_operation (op0);
11320 if (tem != op0)
11322 op0 = tem;
11323 continue;
11325 break;
11327 case NOT:
11328 /* If testing for equality, we can take the NOT of the constant. */
11329 if (equality_comparison_p
11330 && (tem = simplify_unary_operation (NOT, mode, op1, mode)) != 0)
11332 op0 = XEXP (op0, 0);
11333 op1 = tem;
11334 continue;
11337 /* If just looking at the sign bit, reverse the sense of the
11338 comparison. */
11339 if (sign_bit_comparison_p)
11341 op0 = XEXP (op0, 0);
11342 code = (code == GE ? LT : GE);
11343 continue;
11345 break;
11347 case NEG:
11348 /* If testing for equality, we can take the NEG of the constant. */
11349 if (equality_comparison_p
11350 && (tem = simplify_unary_operation (NEG, mode, op1, mode)) != 0)
11352 op0 = XEXP (op0, 0);
11353 op1 = tem;
11354 continue;
11357 /* The remaining cases only apply to comparisons with zero. */
11358 if (const_op != 0)
11359 break;
11361 /* When X is ABS or is known positive,
11362 (neg X) is < 0 if and only if X != 0. */
11364 if (sign_bit_comparison_p
11365 && (GET_CODE (XEXP (op0, 0)) == ABS
11366 || (mode_width <= HOST_BITS_PER_WIDE_INT
11367 && (nonzero_bits (XEXP (op0, 0), mode)
11368 & ((unsigned HOST_WIDE_INT) 1 << (mode_width - 1)))
11369 == 0)))
11371 op0 = XEXP (op0, 0);
11372 code = (code == LT ? NE : EQ);
11373 continue;
11376 /* If we have NEG of something whose two high-order bits are the
11377 same, we know that "(-a) < 0" is equivalent to "a > 0". */
11378 if (num_sign_bit_copies (op0, mode) >= 2)
11380 op0 = XEXP (op0, 0);
11381 code = swap_condition (code);
11382 continue;
11384 break;
11386 case ROTATE:
11387 /* If we are testing equality and our count is a constant, we
11388 can perform the inverse operation on our RHS. */
11389 if (equality_comparison_p && CONST_INT_P (XEXP (op0, 1))
11390 && (tem = simplify_binary_operation (ROTATERT, mode,
11391 op1, XEXP (op0, 1))) != 0)
11393 op0 = XEXP (op0, 0);
11394 op1 = tem;
11395 continue;
11398 /* If we are doing a < 0 or >= 0 comparison, it means we are testing
11399 a particular bit. Convert it to an AND of a constant of that
11400 bit. This will be converted into a ZERO_EXTRACT. */
11401 if (const_op == 0 && sign_bit_comparison_p
11402 && CONST_INT_P (XEXP (op0, 1))
11403 && mode_width <= HOST_BITS_PER_WIDE_INT)
11405 op0 = simplify_and_const_int (NULL_RTX, mode, XEXP (op0, 0),
11406 ((unsigned HOST_WIDE_INT) 1
11407 << (mode_width - 1
11408 - INTVAL (XEXP (op0, 1)))));
11409 code = (code == LT ? NE : EQ);
11410 continue;
11413 /* Fall through. */
11415 case ABS:
11416 /* ABS is ignorable inside an equality comparison with zero. */
11417 if (const_op == 0 && equality_comparison_p)
11419 op0 = XEXP (op0, 0);
11420 continue;
11422 break;
11424 case SIGN_EXTEND:
11425 /* Can simplify (compare (zero/sign_extend FOO) CONST) to
11426 (compare FOO CONST) if CONST fits in FOO's mode and we
11427 are either testing inequality or have an unsigned
11428 comparison with ZERO_EXTEND or a signed comparison with
11429 SIGN_EXTEND. But don't do it if we don't have a compare
11430 insn of the given mode, since we'd have to revert it
11431 later on, and then we wouldn't know whether to sign- or
11432 zero-extend. */
11433 mode = GET_MODE (XEXP (op0, 0));
11434 if (GET_MODE_CLASS (mode) == MODE_INT
11435 && ! unsigned_comparison_p
11436 && HWI_COMPUTABLE_MODE_P (mode)
11437 && trunc_int_for_mode (const_op, mode) == const_op
11438 && have_insn_for (COMPARE, mode))
11440 op0 = XEXP (op0, 0);
11441 continue;
11443 break;
11445 case SUBREG:
11446 /* Check for the case where we are comparing A - C1 with C2, that is
11448 (subreg:MODE (plus (A) (-C1))) op (C2)
11450 with C1 a constant, and try to lift the SUBREG, i.e. to do the
11451 comparison in the wider mode. One of the following two conditions
11452 must be true in order for this to be valid:
11454 1. The mode extension results in the same bit pattern being added
11455 on both sides and the comparison is equality or unsigned. As
11456 C2 has been truncated to fit in MODE, the pattern can only be
11457 all 0s or all 1s.
11459 2. The mode extension results in the sign bit being copied on
11460 each side.
11462 The difficulty here is that we have predicates for A but not for
11463 (A - C1) so we need to check that C1 is within proper bounds so
11464 as to perturb A as little as possible.  */
11466 if (mode_width <= HOST_BITS_PER_WIDE_INT
11467 && subreg_lowpart_p (op0)
11468 && GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op0))) > mode_width
11469 && GET_CODE (SUBREG_REG (op0)) == PLUS
11470 && CONST_INT_P (XEXP (SUBREG_REG (op0), 1)))
11472 enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op0));
11473 rtx a = XEXP (SUBREG_REG (op0), 0);
11474 HOST_WIDE_INT c1 = -INTVAL (XEXP (SUBREG_REG (op0), 1));
11476 if ((c1 > 0
11477 && (unsigned HOST_WIDE_INT) c1
11478 < (unsigned HOST_WIDE_INT) 1 << (mode_width - 1)
11479 && (equality_comparison_p || unsigned_comparison_p)
11480 /* (A - C1) zero-extends if it is positive and sign-extends
11481 if it is negative, C2 both zero- and sign-extends. */
11482 && ((0 == (nonzero_bits (a, inner_mode)
11483 & ~GET_MODE_MASK (mode))
11484 && const_op >= 0)
11485 /* (A - C1) sign-extends if it is positive and 1-extends
11486 if it is negative, C2 both sign- and 1-extends. */
11487 || (num_sign_bit_copies (a, inner_mode)
11488 > (unsigned int) (GET_MODE_PRECISION (inner_mode)
11489 - mode_width)
11490 && const_op < 0)))
11491 || ((unsigned HOST_WIDE_INT) c1
11492 < (unsigned HOST_WIDE_INT) 1 << (mode_width - 2)
11493 /* (A - C1) always sign-extends, like C2. */
11494 && num_sign_bit_copies (a, inner_mode)
11495 > (unsigned int) (GET_MODE_PRECISION (inner_mode)
11496 - (mode_width - 1))))
11498 op0 = SUBREG_REG (op0);
11499 continue;
11503 /* If the inner mode is narrower and we are extracting the low part,
11504 we can treat the SUBREG as if it were a ZERO_EXTEND. */
11505 if (subreg_lowpart_p (op0)
11506 && GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op0))) < mode_width)
11507 /* Fall through */ ;
11508 else
11509 break;
11511 /* ... fall through ... */
11513 case ZERO_EXTEND:
11514 mode = GET_MODE (XEXP (op0, 0));
11515 if (GET_MODE_CLASS (mode) == MODE_INT
11516 && (unsigned_comparison_p || equality_comparison_p)
11517 && HWI_COMPUTABLE_MODE_P (mode)
11518 && (unsigned HOST_WIDE_INT) const_op <= GET_MODE_MASK (mode)
11519 && const_op >= 0
11520 && have_insn_for (COMPARE, mode))
11522 op0 = XEXP (op0, 0);
11523 continue;
11525 break;
11527 case PLUS:
11528 /* (eq (plus X A) B) -> (eq X (minus B A)). We can only do
11529 this for equality comparisons due to pathological cases involving
11530 overflows. */
11531 if (equality_comparison_p
11532 && 0 != (tem = simplify_binary_operation (MINUS, mode,
11533 op1, XEXP (op0, 1))))
11535 op0 = XEXP (op0, 0);
11536 op1 = tem;
11537 continue;
11540 /* (plus (abs X) (const_int -1)) is < 0 if and only if X == 0. */
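/* This holds because (abs X) is 0 only for X == 0, where the PLUS
   yields -1 and the sign bit is set.  For any nonzero X, (abs X) is
   either at least 1, or is the most negative value whose decrement
   wraps to the largest positive value; in both cases the sign bit of
   the PLUS is clear.  */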
11541 if (const_op == 0 && XEXP (op0, 1) == constm1_rtx
11542 && GET_CODE (XEXP (op0, 0)) == ABS && sign_bit_comparison_p)
11544 op0 = XEXP (XEXP (op0, 0), 0);
11545 code = (code == LT ? EQ : NE);
11546 continue;
11548 break;
11550 case MINUS:
11551 /* We used to optimize signed comparisons against zero, but that
11552 was incorrect. Unsigned comparisons against zero (GTU, LEU)
11553 arrive here as equality comparisons, or (GEU, LTU) are
11554 optimized away. No need to special-case them. */
11556 /* (eq (minus A B) C) -> (eq A (plus B C)) or
11557 (eq B (minus A C)), whichever simplifies. We can only do
11558 this for equality comparisons due to pathological cases involving
11559 overflows. */
11560 if (equality_comparison_p
11561 && 0 != (tem = simplify_binary_operation (PLUS, mode,
11562 XEXP (op0, 1), op1)))
11564 op0 = XEXP (op0, 0);
11565 op1 = tem;
11566 continue;
11569 if (equality_comparison_p
11570 && 0 != (tem = simplify_binary_operation (MINUS, mode,
11571 XEXP (op0, 0), op1)))
11573 op0 = XEXP (op0, 1);
11574 op1 = tem;
11575 continue;
11578 /* The sign bit of (minus (ashiftrt X C) X), where C is the number
11579 of bits in X minus 1, is one iff X > 0. */
11580 if (sign_bit_comparison_p && GET_CODE (XEXP (op0, 0)) == ASHIFTRT
11581 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
11582 && UINTVAL (XEXP (XEXP (op0, 0), 1)) == mode_width - 1
11583 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), XEXP (op0, 1)))
11585 op0 = XEXP (op0, 1);
11586 code = (code == GE ? LE : GT);
11587 continue;
11589 break;
11591 case XOR:
11592 /* (eq (xor A B) C) -> (eq A (xor B C)). This is a simplification
11593 if C is zero or B is a constant. */
11594 if (equality_comparison_p
11595 && 0 != (tem = simplify_binary_operation (XOR, mode,
11596 XEXP (op0, 1), op1)))
11598 op0 = XEXP (op0, 0);
11599 op1 = tem;
11600 continue;
11602 break;
11604 case EQ: case NE:
11605 case UNEQ: case LTGT:
11606 case LT: case LTU: case UNLT: case LE: case LEU: case UNLE:
11607 case GT: case GTU: case UNGT: case GE: case GEU: case UNGE:
11608 case UNORDERED: case ORDERED:
11609 /* We can't do anything if OP0 is a condition code value, rather
11610 than an actual data value. */
11611 if (const_op != 0
11612 || CC0_P (XEXP (op0, 0))
11613 || GET_MODE_CLASS (GET_MODE (XEXP (op0, 0))) == MODE_CC)
11614 break;
11616 /* Get the two operands being compared. */
11617 if (GET_CODE (XEXP (op0, 0)) == COMPARE)
11618 tem = XEXP (XEXP (op0, 0), 0), tem1 = XEXP (XEXP (op0, 0), 1);
11619 else
11620 tem = XEXP (op0, 0), tem1 = XEXP (op0, 1);
11622 /* Check for the cases where we simply want the result of the
11623 earlier test or the opposite of that result. */
11624 if (code == NE || code == EQ
11625 || (val_signbit_known_set_p (GET_MODE (op0), STORE_FLAG_VALUE)
11626 && (code == LT || code == GE)))
11628 enum rtx_code new_code;
11629 if (code == LT || code == NE)
11630 new_code = GET_CODE (op0);
11631 else
11632 new_code = reversed_comparison_code (op0, NULL);
11634 if (new_code != UNKNOWN)
11636 code = new_code;
11637 op0 = tem;
11638 op1 = tem1;
11639 continue;
11642 break;
11644 case IOR:
11645 /* The sign bit of (ior (plus X (const_int -1)) X) is nonzero
11646 iff X <= 0. */
11647 if (sign_bit_comparison_p && GET_CODE (XEXP (op0, 0)) == PLUS
11648 && XEXP (XEXP (op0, 0), 1) == constm1_rtx
11649 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), XEXP (op0, 1)))
11651 op0 = XEXP (op0, 1);
11652 code = (code == GE ? GT : LE);
11653 continue;
11655 break;
11657 case AND:
11658 /* Convert (and (xshift 1 X) Y) to (and (lshiftrt Y X) 1). This
11659 will be converted to a ZERO_EXTRACT later. */
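/* Both forms test bit X of Y: the first shifts the constant 1 up to
   the bit being tested, the second shifts that bit of Y down to
   position 0.  For an equality comparison with zero the two are
   interchangeable, and the second is the form that later becomes a
   ZERO_EXTRACT.  */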
11660 if (const_op == 0 && equality_comparison_p
11661 && GET_CODE (XEXP (op0, 0)) == ASHIFT
11662 && XEXP (XEXP (op0, 0), 0) == const1_rtx)
11664 op0 = gen_rtx_LSHIFTRT (mode, XEXP (op0, 1),
11665 XEXP (XEXP (op0, 0), 1));
11666 op0 = simplify_and_const_int (NULL_RTX, mode, op0, 1);
11667 continue;
11670 /* If we are comparing (and (lshiftrt X C1) C2) for equality with
11671 zero and X is a comparison and C1 and C2 describe only bits set
11672 in STORE_FLAG_VALUE, we can compare with X. */
11673 if (const_op == 0 && equality_comparison_p
11674 && mode_width <= HOST_BITS_PER_WIDE_INT
11675 && CONST_INT_P (XEXP (op0, 1))
11676 && GET_CODE (XEXP (op0, 0)) == LSHIFTRT
11677 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
11678 && INTVAL (XEXP (XEXP (op0, 0), 1)) >= 0
11679 && INTVAL (XEXP (XEXP (op0, 0), 1)) < HOST_BITS_PER_WIDE_INT)
11681 mask = ((INTVAL (XEXP (op0, 1)) & GET_MODE_MASK (mode))
11682 << INTVAL (XEXP (XEXP (op0, 0), 1)));
11683 if ((~STORE_FLAG_VALUE & mask) == 0
11684 && (COMPARISON_P (XEXP (XEXP (op0, 0), 0))
11685 || ((tem = get_last_value (XEXP (XEXP (op0, 0), 0))) != 0
11686 && COMPARISON_P (tem))))
11688 op0 = XEXP (XEXP (op0, 0), 0);
11689 continue;
11693 /* If we are doing an equality comparison of an AND of a bit equal
11694 to the sign bit, replace this with a LT or GE comparison of
11695 the underlying value. */
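/* For example, (eq (and X C) 0), where C has only the sign bit of
   the mode set, simply asks whether the sign bit of X is clear,
   which is (ge X 0); the NE form becomes (lt X 0).  */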
11696 if (equality_comparison_p
11697 && const_op == 0
11698 && CONST_INT_P (XEXP (op0, 1))
11699 && mode_width <= HOST_BITS_PER_WIDE_INT
11700 && ((INTVAL (XEXP (op0, 1)) & GET_MODE_MASK (mode))
11701 == (unsigned HOST_WIDE_INT) 1 << (mode_width - 1)))
11703 op0 = XEXP (op0, 0);
11704 code = (code == EQ ? GE : LT);
11705 continue;
11708 /* If this AND operation is really a ZERO_EXTEND from a narrower
11709 mode, the constant fits within that mode, and this is either an
11710 equality or unsigned comparison, try to do this comparison in
11711 the narrower mode.
11713 Note that a transformation such as
11715 (ne:DI (and:DI (reg:DI 4) (const_int 0xffffffff)) (const_int 0))
11716 -> (ne:DI (reg:SI 4) (const_int 0))
11718 is invalid unless TRULY_NOOP_TRUNCATION allows it or the
11719 register is known to hold a value of the required
11720 mode.  */
11721 if ((equality_comparison_p || unsigned_comparison_p)
11722 && CONST_INT_P (XEXP (op0, 1))
11723 && (i = exact_log2 ((UINTVAL (XEXP (op0, 1))
11724 & GET_MODE_MASK (mode))
11725 + 1)) >= 0
11726 && const_op >> i == 0
11727 && (tmode = mode_for_size (i, MODE_INT, 1)) != BLKmode
11728 && (TRULY_NOOP_TRUNCATION_MODES_P (tmode, GET_MODE (op0))
11729 || (REG_P (XEXP (op0, 0))
11730 && reg_truncated_to_mode (tmode, XEXP (op0, 0)))))
11732 op0 = gen_lowpart (tmode, XEXP (op0, 0));
11733 continue;
11736 /* If this is (and:M1 (subreg:M2 X 0) (const_int C1)) where C1
11737 fits in both M1 and M2 and the SUBREG is either paradoxical
11738 or represents the low part, permute the SUBREG and the AND
11739 and try again. */
11740 if (GET_CODE (XEXP (op0, 0)) == SUBREG)
11742 unsigned HOST_WIDE_INT c1;
11743 tmode = GET_MODE (SUBREG_REG (XEXP (op0, 0)));
11744 /* Require an integral mode, to avoid creating something like
11745 (AND:SF ...). */
11746 if (SCALAR_INT_MODE_P (tmode)
11747 /* It is unsafe to commute the AND into the SUBREG if the
11748 SUBREG is paradoxical and WORD_REGISTER_OPERATIONS is
11749 not defined. As originally written the upper bits
11750 have a defined value due to the AND operation.
11751 However, if we commute the AND inside the SUBREG then
11752 they no longer have defined values and the meaning of
11753 the code has been changed. */
11754 && (0
11755 #ifdef WORD_REGISTER_OPERATIONS
11756 || (mode_width > GET_MODE_PRECISION (tmode)
11757 && mode_width <= BITS_PER_WORD)
11758 #endif
11759 || (mode_width <= GET_MODE_PRECISION (tmode)
11760 && subreg_lowpart_p (XEXP (op0, 0))))
11761 && CONST_INT_P (XEXP (op0, 1))
11762 && mode_width <= HOST_BITS_PER_WIDE_INT
11763 && HWI_COMPUTABLE_MODE_P (tmode)
11764 && ((c1 = INTVAL (XEXP (op0, 1))) & ~mask) == 0
11765 && (c1 & ~GET_MODE_MASK (tmode)) == 0
11766 && c1 != mask
11767 && c1 != GET_MODE_MASK (tmode))
11769 op0 = simplify_gen_binary (AND, tmode,
11770 SUBREG_REG (XEXP (op0, 0)),
11771 gen_int_mode (c1, tmode));
11772 op0 = gen_lowpart (mode, op0);
11773 continue;
11777 /* Convert (ne (and (not X) 1) 0) to (eq (and X 1) 0). */
11778 if (const_op == 0 && equality_comparison_p
11779 && XEXP (op0, 1) == const1_rtx
11780 && GET_CODE (XEXP (op0, 0)) == NOT)
11782 op0 = simplify_and_const_int (NULL_RTX, mode,
11783 XEXP (XEXP (op0, 0), 0), 1);
11784 code = (code == NE ? EQ : NE);
11785 continue;
11788 /* Convert (ne (and (lshiftrt (not X)) 1) 0) to
11789 (eq (and (lshiftrt X) 1) 0).
11790 Also handle the case where (not X) is expressed using xor. */
11791 if (const_op == 0 && equality_comparison_p
11792 && XEXP (op0, 1) == const1_rtx
11793 && GET_CODE (XEXP (op0, 0)) == LSHIFTRT)
11795 rtx shift_op = XEXP (XEXP (op0, 0), 0);
11796 rtx shift_count = XEXP (XEXP (op0, 0), 1);
11798 if (GET_CODE (shift_op) == NOT
11799 || (GET_CODE (shift_op) == XOR
11800 && CONST_INT_P (XEXP (shift_op, 1))
11801 && CONST_INT_P (shift_count)
11802 && HWI_COMPUTABLE_MODE_P (mode)
11803 && (UINTVAL (XEXP (shift_op, 1))
11804 == (unsigned HOST_WIDE_INT) 1
11805 << INTVAL (shift_count))))
11808 = gen_rtx_LSHIFTRT (mode, XEXP (shift_op, 0), shift_count);
11809 op0 = simplify_and_const_int (NULL_RTX, mode, op0, 1);
11810 code = (code == NE ? EQ : NE);
11811 continue;
11814 break;
11816 case ASHIFT:
11817 /* If we have (compare (ashift FOO N) (const_int C)) and
11818 the high order N bits of FOO (N+1 if an inequality comparison)
11819 are known to be zero, we can do this by comparing FOO with C
11820 shifted right N bits so long as the low-order N bits of C are
11821 zero. */
11822 if (CONST_INT_P (XEXP (op0, 1))
11823 && INTVAL (XEXP (op0, 1)) >= 0
11824 && ((INTVAL (XEXP (op0, 1)) + ! equality_comparison_p)
11825 < HOST_BITS_PER_WIDE_INT)
11826 && (((unsigned HOST_WIDE_INT) const_op
11827 & (((unsigned HOST_WIDE_INT) 1 << INTVAL (XEXP (op0, 1)))
11828 - 1)) == 0)
11829 && mode_width <= HOST_BITS_PER_WIDE_INT
11830 && (nonzero_bits (XEXP (op0, 0), mode)
11831 & ~(mask >> (INTVAL (XEXP (op0, 1))
11832 + ! equality_comparison_p))) == 0)
11834 /* We must perform a logical shift, not an arithmetic one,
11835 as we want the top N bits of C to be zero. */
11836 unsigned HOST_WIDE_INT temp = const_op & GET_MODE_MASK (mode);
11838 temp >>= INTVAL (XEXP (op0, 1));
11839 op1 = gen_int_mode (temp, mode);
11840 op0 = XEXP (op0, 0);
11841 continue;
11844 /* If we are doing a sign bit comparison, it means we are testing
11845 a particular bit. Convert it to the appropriate AND. */
11846 if (sign_bit_comparison_p && CONST_INT_P (XEXP (op0, 1))
11847 && mode_width <= HOST_BITS_PER_WIDE_INT)
11849 op0 = simplify_and_const_int (NULL_RTX, mode, XEXP (op0, 0),
11850 ((unsigned HOST_WIDE_INT) 1
11851 << (mode_width - 1
11852 - INTVAL (XEXP (op0, 1)))));
11853 code = (code == LT ? NE : EQ);
11854 continue;
11857 /* If this is an equality comparison with zero and we are shifting
11858 the low bit to the sign bit, we can convert this to an AND of the
11859 low-order bit. */
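/* For example, in a 32-bit mode (eq (ashift X 31) 0) depends only on
   bit 0 of X, since the shift moves that bit into the sign position
   and discards everything else, so the test becomes a comparison of
   (and X 1) with zero.  */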
11860 if (const_op == 0 && equality_comparison_p
11861 && CONST_INT_P (XEXP (op0, 1))
11862 && UINTVAL (XEXP (op0, 1)) == mode_width - 1)
11864 op0 = simplify_and_const_int (NULL_RTX, mode, XEXP (op0, 0), 1);
11865 continue;
11867 break;
11869 case ASHIFTRT:
11870 /* If this is an equality comparison with zero, we can do this
11871 as a logical shift, which might be much simpler. */
11872 if (equality_comparison_p && const_op == 0
11873 && CONST_INT_P (XEXP (op0, 1)))
11875 op0 = simplify_shift_const (NULL_RTX, LSHIFTRT, mode,
11876 XEXP (op0, 0),
11877 INTVAL (XEXP (op0, 1)));
11878 continue;
11881 /* If OP0 is a sign extension and CODE is not an unsigned comparison,
11882 do the comparison in a narrower mode. */
11883 if (! unsigned_comparison_p
11884 && CONST_INT_P (XEXP (op0, 1))
11885 && GET_CODE (XEXP (op0, 0)) == ASHIFT
11886 && XEXP (op0, 1) == XEXP (XEXP (op0, 0), 1)
11887 && (tmode = mode_for_size (mode_width - INTVAL (XEXP (op0, 1)),
11888 MODE_INT, 1)) != BLKmode
11889 && (((unsigned HOST_WIDE_INT) const_op
11890 + (GET_MODE_MASK (tmode) >> 1) + 1)
11891 <= GET_MODE_MASK (tmode)))
11893 op0 = gen_lowpart (tmode, XEXP (XEXP (op0, 0), 0));
11894 continue;
11897 /* Likewise if OP0 is a PLUS of a sign extension with a
11898 constant, which is usually represented with the PLUS
11899 between the shifts. */
11900 if (! unsigned_comparison_p
11901 && CONST_INT_P (XEXP (op0, 1))
11902 && GET_CODE (XEXP (op0, 0)) == PLUS
11903 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
11904 && GET_CODE (XEXP (XEXP (op0, 0), 0)) == ASHIFT
11905 && XEXP (op0, 1) == XEXP (XEXP (XEXP (op0, 0), 0), 1)
11906 && (tmode = mode_for_size (mode_width - INTVAL (XEXP (op0, 1)),
11907 MODE_INT, 1)) != BLKmode
11908 && (((unsigned HOST_WIDE_INT) const_op
11909 + (GET_MODE_MASK (tmode) >> 1) + 1)
11910 <= GET_MODE_MASK (tmode)))
11912 rtx inner = XEXP (XEXP (XEXP (op0, 0), 0), 0);
11913 rtx add_const = XEXP (XEXP (op0, 0), 1);
11914 rtx new_const = simplify_gen_binary (ASHIFTRT, GET_MODE (op0),
11915 add_const, XEXP (op0, 1));
11917 op0 = simplify_gen_binary (PLUS, tmode,
11918 gen_lowpart (tmode, inner),
11919 new_const);
11920 continue;
11923 /* ... fall through ... */
11924 case LSHIFTRT:
11925 /* If we have (compare (xshiftrt FOO N) (const_int C)) and
11926 the low order N bits of FOO are known to be zero, we can do this
11927 by comparing FOO with C shifted left N bits so long as no
11928 overflow occurs. Even if the low order N bits of FOO aren't known
11929 to be zero, if the comparison is >= or < we can use the same
11930 optimization and for > or <= by setting all the low
11931 order N bits in the comparison constant. */
11932 if (CONST_INT_P (XEXP (op0, 1))
11933 && INTVAL (XEXP (op0, 1)) > 0
11934 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT
11935 && mode_width <= HOST_BITS_PER_WIDE_INT
11936 && (((unsigned HOST_WIDE_INT) const_op
11937 + (GET_CODE (op0) != LSHIFTRT
11938 ? ((GET_MODE_MASK (mode) >> INTVAL (XEXP (op0, 1)) >> 1)
11939 + 1)
11940 : 0))
11941 <= GET_MODE_MASK (mode) >> INTVAL (XEXP (op0, 1))))
11943 unsigned HOST_WIDE_INT low_bits
11944 = (nonzero_bits (XEXP (op0, 0), mode)
11945 & (((unsigned HOST_WIDE_INT) 1
11946 << INTVAL (XEXP (op0, 1))) - 1));
11947 if (low_bits == 0 || !equality_comparison_p)
11949 /* If the shift was logical, then we must make the condition
11950 unsigned. */
11951 if (GET_CODE (op0) == LSHIFTRT)
11952 code = unsigned_condition (code);
11954 const_op <<= INTVAL (XEXP (op0, 1));
11955 if (low_bits != 0
11956 && (code == GT || code == GTU
11957 || code == LE || code == LEU))
11958 const_op
11959 |= (((HOST_WIDE_INT) 1 << INTVAL (XEXP (op0, 1))) - 1);
11960 op1 = GEN_INT (const_op);
11961 op0 = XEXP (op0, 0);
11962 continue;
11966 /* If we are using this shift to extract just the sign bit, we
11967 can replace this with an LT or GE comparison. */
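/* For example, in a 32-bit mode (ne (lshiftrt X 31) 0) is just a
   test of the sign bit of X, i.e. (lt X 0), and the EQ form becomes
   (ge X 0).  The same holds for ASHIFTRT, where only copies of the
   sign bit survive the shift.  */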
11968 if (const_op == 0
11969 && (equality_comparison_p || sign_bit_comparison_p)
11970 && CONST_INT_P (XEXP (op0, 1))
11971 && UINTVAL (XEXP (op0, 1)) == mode_width - 1)
11973 op0 = XEXP (op0, 0);
11974 code = (code == NE || code == GT ? LT : GE);
11975 continue;
11977 break;
11979 default:
11980 break;
11983 break;
11986 /* Now make any compound operations involved in this comparison. Then,
11987 check for an outermost SUBREG on OP0 that is not doing anything or is
11988 paradoxical. The latter transformation must only be performed when
11989 it is known that the "extra" bits will be the same in op0 and op1 or
11990 that they don't matter. There are three cases to consider:
11992 1. SUBREG_REG (op0) is a register. In this case the bits are don't
11993 care bits and we can assume they have any convenient value. So
11994 making the transformation is safe.
11996 2. SUBREG_REG (op0) is a memory and LOAD_EXTEND_OP is not defined.
11997 In this case the upper bits of op0 are undefined. We should not make
11998 the simplification in that case as we do not know the contents of
11999 those bits.
12001 3. SUBREG_REG (op0) is a memory and LOAD_EXTEND_OP is defined and not
12002 UNKNOWN. In that case we know those bits are zeros or ones. We must
12003 also be sure that they are the same as the upper bits of op1.
12005 We can never remove a SUBREG for a non-equality comparison because
12006 the sign bit is in a different place in the underlying object. */
12008 op0 = make_compound_operation (op0, op1 == const0_rtx ? COMPARE : SET);
12009 op1 = make_compound_operation (op1, SET);
12011 if (GET_CODE (op0) == SUBREG && subreg_lowpart_p (op0)
12012 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT
12013 && GET_MODE_CLASS (GET_MODE (SUBREG_REG (op0))) == MODE_INT
12014 && (code == NE || code == EQ))
12016 if (paradoxical_subreg_p (op0))
12018 /* For paradoxical subregs, allow case 1 as above. Case 3 isn't
12019 implemented. */
12020 if (REG_P (SUBREG_REG (op0)))
12022 op0 = SUBREG_REG (op0);
12023 op1 = gen_lowpart (GET_MODE (op0), op1);
12026 else if ((GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op0)))
12027 <= HOST_BITS_PER_WIDE_INT)
12028 && (nonzero_bits (SUBREG_REG (op0),
12029 GET_MODE (SUBREG_REG (op0)))
12030 & ~GET_MODE_MASK (GET_MODE (op0))) == 0)
12032 tem = gen_lowpart (GET_MODE (SUBREG_REG (op0)), op1);
12034 if ((nonzero_bits (tem, GET_MODE (SUBREG_REG (op0)))
12035 & ~GET_MODE_MASK (GET_MODE (op0))) == 0)
12036 op0 = SUBREG_REG (op0), op1 = tem;
12040 /* We now do the opposite procedure: Some machines don't have compare
12041 insns in all modes. If OP0's mode is an integer mode smaller than a
12042 word and we can't do a compare in that mode, see if there is a larger
12043 mode for which we can do the compare. There are a number of cases in
12044 which we can use the wider mode. */
12046 mode = GET_MODE (op0);
12047 if (mode != VOIDmode && GET_MODE_CLASS (mode) == MODE_INT
12048 && GET_MODE_SIZE (mode) < UNITS_PER_WORD
12049 && ! have_insn_for (COMPARE, mode))
12050 for (tmode = GET_MODE_WIDER_MODE (mode);
12051 (tmode != VOIDmode && HWI_COMPUTABLE_MODE_P (tmode));
12052 tmode = GET_MODE_WIDER_MODE (tmode))
12053 if (have_insn_for (COMPARE, tmode))
12055 int zero_extended;
12057 /* If this is a test for negative, we can make an explicit
12058 test of the sign bit. Test this first so we can use
12059 a paradoxical subreg to extend OP0. */
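/* E.g. (illustrative RTL, assuming QImode has no compare pattern but
   SImode does):
       (lt (reg:QI x) (const_int 0))
   can become roughly
       (ne (and:SI (subreg:SI (reg:QI x) 0) (const_int 128)) (const_int 0))
   so that the sign-bit test is carried out in the wider mode.  */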
12061 if (op1 == const0_rtx && (code == LT || code == GE)
12062 && HWI_COMPUTABLE_MODE_P (mode))
12064 op0 = simplify_gen_binary (AND, tmode,
12065 gen_lowpart (tmode, op0),
12066 GEN_INT ((unsigned HOST_WIDE_INT) 1
12067 << (GET_MODE_BITSIZE (mode)
12068 - 1)));
12069 code = (code == LT) ? NE : EQ;
12070 break;
12073 /* If the only nonzero bits in OP0 and OP1 are those in the
12074 narrower mode and this is an equality or unsigned comparison,
12075 we can use the wider mode. Similarly for sign-extended
12076 values, in which case it is true for all comparisons. */
12077 zero_extended = ((code == EQ || code == NE
12078 || code == GEU || code == GTU
12079 || code == LEU || code == LTU)
12080 && (nonzero_bits (op0, tmode)
12081 & ~GET_MODE_MASK (mode)) == 0
12082 && ((CONST_INT_P (op1)
12083 || (nonzero_bits (op1, tmode)
12084 & ~GET_MODE_MASK (mode)) == 0)));
12086 if (zero_extended
12087 || ((num_sign_bit_copies (op0, tmode)
12088 > (unsigned int) (GET_MODE_PRECISION (tmode)
12089 - GET_MODE_PRECISION (mode)))
12090 && (num_sign_bit_copies (op1, tmode)
12091 > (unsigned int) (GET_MODE_PRECISION (tmode)
12092 - GET_MODE_PRECISION (mode)))))
12094 /* If OP0 is an AND and we don't have an AND in MODE either,
12095 make a new AND in the proper mode. */
12096 if (GET_CODE (op0) == AND
12097 && !have_insn_for (AND, mode))
12098 op0 = simplify_gen_binary (AND, tmode,
12099 gen_lowpart (tmode,
12100 XEXP (op0, 0)),
12101 gen_lowpart (tmode,
12102 XEXP (op0, 1)));
12103 else
12105 if (zero_extended)
12107 op0 = simplify_gen_unary (ZERO_EXTEND, tmode, op0, mode);
12108 op1 = simplify_gen_unary (ZERO_EXTEND, tmode, op1, mode);
12110 else
12112 op0 = simplify_gen_unary (SIGN_EXTEND, tmode, op0, mode);
12113 op1 = simplify_gen_unary (SIGN_EXTEND, tmode, op1, mode);
12115 break;
12120 #ifdef CANONICALIZE_COMPARISON
12121 /* If this machine only supports a subset of valid comparisons, see if we
12122 can convert an unsupported one into a supported one. */
12123 CANONICALIZE_COMPARISON (code, op0, op1);
12124 #endif
12126 *pop0 = op0;
12127 *pop1 = op1;
12129 return code;
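/* A minimal standalone sketch (illustrative only, guarded out of the build;
   the names are made up) of why the mode widening performed above is safe
   for zero-extended values: unsigned ordering and equality are unchanged
   by zero extension.  */
#if 0
static int
narrow_ltu (unsigned char a, unsigned char b)
{
  /* Comparison in the narrow "mode".  */
  return a < b;
}

static int
widened_ltu (unsigned char a, unsigned char b)
{
  /* The same comparison after zero-extending both operands; it returns
     the same result as narrow_ltu for every A and B.  */
  return (unsigned int) a < (unsigned int) b;
}
#endif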
12132 /* Utility function for record_value_for_reg. Count number of
12133 rtxs in X. */
12134 static int
12135 count_rtxs (rtx x)
12137 enum rtx_code code = GET_CODE (x);
12138 const char *fmt;
12139 int i, j, ret = 1;
12141 if (GET_RTX_CLASS (code) == '2'
12142 || GET_RTX_CLASS (code) == 'c')
12144 rtx x0 = XEXP (x, 0);
12145 rtx x1 = XEXP (x, 1);
12147 if (x0 == x1)
12148 return 1 + 2 * count_rtxs (x0);
12150 if ((GET_RTX_CLASS (GET_CODE (x1)) == '2'
12151 || GET_RTX_CLASS (GET_CODE (x1)) == 'c')
12152 && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
12153 return 2 + 2 * count_rtxs (x0)
12154 + count_rtxs (x0 == XEXP (x1, 0)
12155 ? XEXP (x1, 1) : XEXP (x1, 0));
12157 if ((GET_RTX_CLASS (GET_CODE (x0)) == '2'
12158 || GET_RTX_CLASS (GET_CODE (x0)) == 'c')
12159 && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
12160 return 2 + 2 * count_rtxs (x1)
12161 + count_rtxs (x1 == XEXP (x0, 0)
12162 ? XEXP (x0, 1) : XEXP (x0, 0));
12165 fmt = GET_RTX_FORMAT (code);
12166 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
12167 if (fmt[i] == 'e')
12168 ret += count_rtxs (XEXP (x, i));
12169 else if (fmt[i] == 'E')
12170 for (j = 0; j < XVECLEN (x, i); j++)
12171 ret += count_rtxs (XVECEXP (x, i, j));
12173 return ret;
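/* A toy, self-contained sketch (illustrative only, guarded out of the
   build; the types and names are made up) of the sharing rule used above:
   a node that appears as both operands of a binary node is counted twice
   but traversed only once.  */
#if 0
struct toy_node
{
  struct toy_node *op0, *op1;
};

static int
toy_count (const struct toy_node *n)
{
  if (n == NULL)
    return 0;
  if (n->op0 != NULL && n->op0 == n->op1)
    return 1 + 2 * toy_count (n->op0);
  return 1 + toy_count (n->op0) + toy_count (n->op1);
}
#endif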
12176 /* Utility function for following routine. Called when X is part of a value
12177 being stored into last_set_value. Sets last_set_table_tick
12178 for each register mentioned. Similar to mention_regs in cse.c */
12180 static void
12181 update_table_tick (rtx x)
12183 enum rtx_code code = GET_CODE (x);
12184 const char *fmt = GET_RTX_FORMAT (code);
12185 int i, j;
12187 if (code == REG)
12189 unsigned int regno = REGNO (x);
12190 unsigned int endregno = END_REGNO (x);
12191 unsigned int r;
12193 for (r = regno; r < endregno; r++)
12195 reg_stat_type *rsp = VEC_index (reg_stat_type, reg_stat, r);
12196 rsp->last_set_table_tick = label_tick;
12199 return;
12202 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
12203 if (fmt[i] == 'e')
12205 /* Check for identical subexpressions. If x contains
12206 identical subexpressions we only have to traverse one of
12207 them. */
12208 if (i == 0 && ARITHMETIC_P (x))
12210 /* Note that at this point x1 has already been
12211 processed. */
12212 rtx x0 = XEXP (x, 0);
12213 rtx x1 = XEXP (x, 1);
12215 /* If x0 and x1 are identical then there is no need to
12216 process x0. */
12217 if (x0 == x1)
12218 break;
12220 /* If x0 is identical to a subexpression of x1 then while
12221 processing x1, x0 has already been processed. Thus we
12222 are done with x. */
12223 if (ARITHMETIC_P (x1)
12224 && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
12225 break;
12227 /* If x1 is identical to a subexpression of x0 then we
12228 still have to process the rest of x0. */
12229 if (ARITHMETIC_P (x0)
12230 && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
12232 update_table_tick (XEXP (x0, x1 == XEXP (x0, 0) ? 1 : 0));
12233 break;
12237 update_table_tick (XEXP (x, i));
12239 else if (fmt[i] == 'E')
12240 for (j = 0; j < XVECLEN (x, i); j++)
12241 update_table_tick (XVECEXP (x, i, j));
12244 /* Record that REG is set to VALUE in insn INSN. If VALUE is zero, we
12245 are saying that the register is clobbered and we no longer know its
12246 value. If INSN is zero, don't update reg_stat[].last_set; this is
12247 only permitted with VALUE also zero and is used to invalidate the
12248 register. */
12250 static void
12251 record_value_for_reg (rtx reg, rtx insn, rtx value)
12253 unsigned int regno = REGNO (reg);
12254 unsigned int endregno = END_REGNO (reg);
12255 unsigned int i;
12256 reg_stat_type *rsp;
12258 /* If VALUE contains REG and we have a previous value for REG, substitute
12259 the previous value. */
12260 if (value && insn && reg_overlap_mentioned_p (reg, value))
12262 rtx tem;
12264 /* Set things up so get_last_value is allowed to see anything set up to
12265 our insn. */
12266 subst_low_luid = DF_INSN_LUID (insn);
12267 tem = get_last_value (reg);
12269 /* If TEM is simply a binary operation with two CLOBBERs as operands,
12270 it isn't going to be useful and will take a lot of time to process,
12271 so just use the CLOBBER. */
12273 if (tem)
12275 if (ARITHMETIC_P (tem)
12276 && GET_CODE (XEXP (tem, 0)) == CLOBBER
12277 && GET_CODE (XEXP (tem, 1)) == CLOBBER)
12278 tem = XEXP (tem, 0);
12279 else if (count_occurrences (value, reg, 1) >= 2)
12281 /* If there are two or more occurrences of REG in VALUE,
12282 prevent the value from growing too much. */
12283 if (count_rtxs (tem) > MAX_LAST_VALUE_RTL)
12284 tem = gen_rtx_CLOBBER (GET_MODE (tem), const0_rtx);
12287 value = replace_rtx (copy_rtx (value), reg, tem);
12291 /* For each register modified, show we don't know its value, that
12292 we don't know about its bitwise content, that its value has been
12293 updated, and that we don't know the location of the death of the
12294 register. */
12295 for (i = regno; i < endregno; i++)
12297 rsp = VEC_index (reg_stat_type, reg_stat, i);
12299 if (insn)
12300 rsp->last_set = insn;
12302 rsp->last_set_value = 0;
12303 rsp->last_set_mode = VOIDmode;
12304 rsp->last_set_nonzero_bits = 0;
12305 rsp->last_set_sign_bit_copies = 0;
12306 rsp->last_death = 0;
12307 rsp->truncated_to_mode = VOIDmode;
12310 /* Mark registers that are being referenced in this value. */
12311 if (value)
12312 update_table_tick (value);
12314 /* Now update the status of each register being set.
12315 If someone is using this register in this block, set this register
12316 to invalid since we will get confused between the two lives in this
12317 basic block. This makes using this register always invalid. In cse, we
12318 scan the table to invalidate all entries using this register, but this
12319 is too much work for us. */
12321 for (i = regno; i < endregno; i++)
12323 rsp = VEC_index (reg_stat_type, reg_stat, i);
12324 rsp->last_set_label = label_tick;
12325 if (!insn
12326 || (value && rsp->last_set_table_tick >= label_tick_ebb_start))
12327 rsp->last_set_invalid = 1;
12328 else
12329 rsp->last_set_invalid = 0;
12332 /* The value being assigned might refer to X (like in "x++;"). In that
12333 case, we must replace it with (clobber (const_int 0)) to prevent
12334 infinite loops. */
12335 rsp = VEC_index (reg_stat_type, reg_stat, regno);
12336 if (value && !get_last_value_validate (&value, insn, label_tick, 0))
12338 value = copy_rtx (value);
12339 if (!get_last_value_validate (&value, insn, label_tick, 1))
12340 value = 0;
12343 /* For the main register being modified, update the value, the mode, the
12344 nonzero bits, and the number of sign bit copies. */
12346 rsp->last_set_value = value;
12348 if (value)
12350 enum machine_mode mode = GET_MODE (reg);
12351 subst_low_luid = DF_INSN_LUID (insn);
12352 rsp->last_set_mode = mode;
12353 if (GET_MODE_CLASS (mode) == MODE_INT
12354 && HWI_COMPUTABLE_MODE_P (mode))
12355 mode = nonzero_bits_mode;
12356 rsp->last_set_nonzero_bits = nonzero_bits (value, mode);
12357 rsp->last_set_sign_bit_copies
12358 = num_sign_bit_copies (value, GET_MODE (reg));
12362 /* Called via note_stores from record_dead_and_set_regs to handle one
12363 SET or CLOBBER in an insn. DATA is the instruction in which the
12364 set is occurring. */
12366 static void
12367 record_dead_and_set_regs_1 (rtx dest, const_rtx setter, void *data)
12369 rtx record_dead_insn = (rtx) data;
12371 if (GET_CODE (dest) == SUBREG)
12372 dest = SUBREG_REG (dest);
12374 if (!record_dead_insn)
12376 if (REG_P (dest))
12377 record_value_for_reg (dest, NULL_RTX, NULL_RTX);
12378 return;
12381 if (REG_P (dest))
12383 /* If we are setting the whole register, we know its value. Otherwise
12384 show that we don't know the value. We can handle SUBREG in
12385 some cases. */
12386 if (GET_CODE (setter) == SET && dest == SET_DEST (setter))
12387 record_value_for_reg (dest, record_dead_insn, SET_SRC (setter));
12388 else if (GET_CODE (setter) == SET
12389 && GET_CODE (SET_DEST (setter)) == SUBREG
12390 && SUBREG_REG (SET_DEST (setter)) == dest
12391 && GET_MODE_PRECISION (GET_MODE (dest)) <= BITS_PER_WORD
12392 && subreg_lowpart_p (SET_DEST (setter)))
12393 record_value_for_reg (dest, record_dead_insn,
12394 gen_lowpart (GET_MODE (dest),
12395 SET_SRC (setter)));
12396 else
12397 record_value_for_reg (dest, record_dead_insn, NULL_RTX);
12399 else if (MEM_P (dest)
12400 /* Ignore pushes, they clobber nothing. */
12401 && ! push_operand (dest, GET_MODE (dest)))
12402 mem_last_set = DF_INSN_LUID (record_dead_insn);
12405 /* Update the records of when each REG was most recently set or killed
12406 for the things done by INSN. This is the last thing done in processing
12407 INSN in the combiner loop.
12409 We update reg_stat[], in particular fields last_set, last_set_value,
12410 last_set_mode, last_set_nonzero_bits, last_set_sign_bit_copies,
12411 last_death, and also the similar information mem_last_set (which insn
12412 most recently modified memory) and last_call_luid (which insn was the
12413 most recent subroutine call). */
12415 static void
12416 record_dead_and_set_regs (rtx insn)
12418 rtx link;
12419 unsigned int i;
12421 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
12423 if (REG_NOTE_KIND (link) == REG_DEAD
12424 && REG_P (XEXP (link, 0)))
12426 unsigned int regno = REGNO (XEXP (link, 0));
12427 unsigned int endregno = END_REGNO (XEXP (link, 0));
12429 for (i = regno; i < endregno; i++)
12431 reg_stat_type *rsp;
12433 rsp = VEC_index (reg_stat_type, reg_stat, i);
12434 rsp->last_death = insn;
12437 else if (REG_NOTE_KIND (link) == REG_INC)
12438 record_value_for_reg (XEXP (link, 0), insn, NULL_RTX);
12441 if (CALL_P (insn))
12443 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
12444 if (TEST_HARD_REG_BIT (regs_invalidated_by_call, i))
12446 reg_stat_type *rsp;
12448 rsp = VEC_index (reg_stat_type, reg_stat, i);
12449 rsp->last_set_invalid = 1;
12450 rsp->last_set = insn;
12451 rsp->last_set_value = 0;
12452 rsp->last_set_mode = VOIDmode;
12453 rsp->last_set_nonzero_bits = 0;
12454 rsp->last_set_sign_bit_copies = 0;
12455 rsp->last_death = 0;
12456 rsp->truncated_to_mode = VOIDmode;
12459 last_call_luid = mem_last_set = DF_INSN_LUID (insn);
12461 /* We can't combine into a call pattern. Remember, though, that
12462 the return value register is set at this LUID. We could
12463 still replace a register with the return value from the
12464 wrong subroutine call! */
12465 note_stores (PATTERN (insn), record_dead_and_set_regs_1, NULL_RTX);
12467 else
12468 note_stores (PATTERN (insn), record_dead_and_set_regs_1, insn);
12471 /* If a SUBREG has the promoted bit set, it is in fact a property of the
12472 register present in the SUBREG, so for each such SUBREG go back and
12473 adjust nonzero and sign bit information of the registers that are
12474 known to have some zero/sign bits set.
12476 This is needed because when combine blows the SUBREGs away, the
12477 information on zero/sign bits is lost and further combines can be
12478 missed because of that. */
12480 static void
12481 record_promoted_value (rtx insn, rtx subreg)
12483 struct insn_link *links;
12484 rtx set;
12485 unsigned int regno = REGNO (SUBREG_REG (subreg));
12486 enum machine_mode mode = GET_MODE (subreg);
12488 if (GET_MODE_PRECISION (mode) > HOST_BITS_PER_WIDE_INT)
12489 return;
12491 for (links = LOG_LINKS (insn); links;)
12493 reg_stat_type *rsp;
12495 insn = links->insn;
12496 set = single_set (insn);
12498 if (! set || !REG_P (SET_DEST (set))
12499 || REGNO (SET_DEST (set)) != regno
12500 || GET_MODE (SET_DEST (set)) != GET_MODE (SUBREG_REG (subreg)))
12502 links = links->next;
12503 continue;
12506 rsp = VEC_index (reg_stat_type, reg_stat, regno);
12507 if (rsp->last_set == insn)
12509 if (SUBREG_PROMOTED_UNSIGNED_P (subreg) > 0)
12510 rsp->last_set_nonzero_bits &= GET_MODE_MASK (mode);
12513 if (REG_P (SET_SRC (set)))
12515 regno = REGNO (SET_SRC (set));
12516 links = LOG_LINKS (insn);
12518 else
12519 break;
12523 /* Check if X, a register, is known to contain a value already
12524 truncated to MODE. In this case we can use a subreg to refer to
12525 the truncated value even though in the generic case we would need
12526 an explicit truncation. */
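/* For example (illustrative RTL): when this returns true for QImode,
       (truncate:QI (reg:SI x))
   can be expressed simply as
       (subreg:QI (reg:SI x) 0).  */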
12528 static bool
12529 reg_truncated_to_mode (enum machine_mode mode, const_rtx x)
12531 reg_stat_type *rsp = VEC_index (reg_stat_type, reg_stat, REGNO (x));
12532 enum machine_mode truncated = rsp->truncated_to_mode;
12534 if (truncated == 0
12535 || rsp->truncation_label < label_tick_ebb_start)
12536 return false;
12537 if (GET_MODE_SIZE (truncated) <= GET_MODE_SIZE (mode))
12538 return true;
12539 if (TRULY_NOOP_TRUNCATION_MODES_P (mode, truncated))
12540 return true;
12541 return false;
12544 /* Callback for for_each_rtx. If *P is a hard reg or a subreg record the mode
12545 that the register is accessed in. For non-TRULY_NOOP_TRUNCATION targets we
12546 might be able to turn a truncate into a subreg using this information.
12547 Return -1 if traversing *P is complete or 0 otherwise. */
12549 static int
12550 record_truncated_value (rtx *p, void *data ATTRIBUTE_UNUSED)
12552 rtx x = *p;
12553 enum machine_mode truncated_mode;
12554 reg_stat_type *rsp;
12556 if (GET_CODE (x) == SUBREG && REG_P (SUBREG_REG (x)))
12558 enum machine_mode original_mode = GET_MODE (SUBREG_REG (x));
12559 truncated_mode = GET_MODE (x);
12561 if (GET_MODE_SIZE (original_mode) <= GET_MODE_SIZE (truncated_mode))
12562 return -1;
12564 if (TRULY_NOOP_TRUNCATION_MODES_P (truncated_mode, original_mode))
12565 return -1;
12567 x = SUBREG_REG (x);
12569 /* ??? For hard-regs we now record everything. We might be able to
12570 optimize this using last_set_mode. */
12571 else if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER)
12572 truncated_mode = GET_MODE (x);
12573 else
12574 return 0;
12576 rsp = VEC_index (reg_stat_type, reg_stat, REGNO (x));
12577 if (rsp->truncated_to_mode == 0
12578 || rsp->truncation_label < label_tick_ebb_start
12579 || (GET_MODE_SIZE (truncated_mode)
12580 < GET_MODE_SIZE (rsp->truncated_to_mode)))
12582 rsp->truncated_to_mode = truncated_mode;
12583 rsp->truncation_label = label_tick;
12586 return -1;
12589 /* Callback for note_uses. Find hardregs and subregs of pseudos and
12590 the modes they are used in. This can help turn TRUNCATEs into
12591 SUBREGs. */
12593 static void
12594 record_truncated_values (rtx *x, void *data ATTRIBUTE_UNUSED)
12596 for_each_rtx (x, record_truncated_value, NULL);
12599 /* Scan X for promoted SUBREGs. For each one found,
12600 note what it implies to the registers used in it. */
12602 static void
12603 check_promoted_subreg (rtx insn, rtx x)
12605 if (GET_CODE (x) == SUBREG
12606 && SUBREG_PROMOTED_VAR_P (x)
12607 && REG_P (SUBREG_REG (x)))
12608 record_promoted_value (insn, x);
12609 else
12611 const char *format = GET_RTX_FORMAT (GET_CODE (x));
12612 int i, j;
12614 for (i = 0; i < GET_RTX_LENGTH (GET_CODE (x)); i++)
12615 switch (format[i])
12617 case 'e':
12618 check_promoted_subreg (insn, XEXP (x, i));
12619 break;
12620 case 'V':
12621 case 'E':
12622 if (XVEC (x, i) != 0)
12623 for (j = 0; j < XVECLEN (x, i); j++)
12624 check_promoted_subreg (insn, XVECEXP (x, i, j));
12625 break;
12630 /* Verify that all the registers and memory references mentioned in *LOC are
12631 still valid. *LOC was part of a value set in INSN when label_tick was
12632 equal to TICK. Return 0 if some are not. If REPLACE is nonzero, replace
12633 the invalid references with (clobber (const_int 0)) and return 1. This
12634 replacement is useful because we often can get useful information about
12635 the form of a value (e.g., if it was produced by a shift that always
12636 produces -1 or 0) even though we don't know exactly what registers it
12637 was produced from. */
12639 static int
12640 get_last_value_validate (rtx *loc, rtx insn, int tick, int replace)
12642 rtx x = *loc;
12643 const char *fmt = GET_RTX_FORMAT (GET_CODE (x));
12644 int len = GET_RTX_LENGTH (GET_CODE (x));
12645 int i, j;
12647 if (REG_P (x))
12649 unsigned int regno = REGNO (x);
12650 unsigned int endregno = END_REGNO (x);
12651 unsigned int j;
12653 for (j = regno; j < endregno; j++)
12655 reg_stat_type *rsp = VEC_index (reg_stat_type, reg_stat, j);
12656 if (rsp->last_set_invalid
12657 /* If this is a pseudo-register that was only set once and not
12658 live at the beginning of the function, it is always valid. */
12659 || (! (regno >= FIRST_PSEUDO_REGISTER
12660 && REG_N_SETS (regno) == 1
12661 && (!REGNO_REG_SET_P
12662 (DF_LR_IN (ENTRY_BLOCK_PTR->next_bb), regno)))
12663 && rsp->last_set_label > tick))
12665 if (replace)
12666 *loc = gen_rtx_CLOBBER (GET_MODE (x), const0_rtx);
12667 return replace;
12671 return 1;
12673 /* If this is a memory reference, make sure that there were no stores after
12674 it that might have clobbered the value. We don't have alias info, so we
12675 assume any store invalidates it. Moreover, we only have local UIDs, so
12676 we also assume that there were stores in the intervening basic blocks. */
12677 else if (MEM_P (x) && !MEM_READONLY_P (x)
12678 && (tick != label_tick || DF_INSN_LUID (insn) <= mem_last_set))
12680 if (replace)
12681 *loc = gen_rtx_CLOBBER (GET_MODE (x), const0_rtx);
12682 return replace;
12685 for (i = 0; i < len; i++)
12687 if (fmt[i] == 'e')
12689 /* Check for identical subexpressions. If x contains
12690 identical subexpressions we only have to traverse one of
12691 them. */
12692 if (i == 1 && ARITHMETIC_P (x))
12694 /* Note that at this point x0 has already been checked
12695 and found valid. */
12696 rtx x0 = XEXP (x, 0);
12697 rtx x1 = XEXP (x, 1);
12699 /* If x0 and x1 are identical then x is also valid. */
12700 if (x0 == x1)
12701 return 1;
12703 /* If x1 is identical to a subexpression of x0 then
12704 while checking x0, x1 has already been checked. Thus
12705 it is valid and so is x. */
12706 if (ARITHMETIC_P (x0)
12707 && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
12708 return 1;
12710 /* If x0 is identical to a subexpression of x1 then x is
12711 valid iff the rest of x1 is valid. */
12712 if (ARITHMETIC_P (x1)
12713 && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
12714 return
12715 get_last_value_validate (&XEXP (x1,
12716 x0 == XEXP (x1, 0) ? 1 : 0),
12717 insn, tick, replace);
12720 if (get_last_value_validate (&XEXP (x, i), insn, tick,
12721 replace) == 0)
12722 return 0;
12724 else if (fmt[i] == 'E')
12725 for (j = 0; j < XVECLEN (x, i); j++)
12726 if (get_last_value_validate (&XVECEXP (x, i, j),
12727 insn, tick, replace) == 0)
12728 return 0;
12731 /* If we haven't found a reason for it to be invalid, it is valid. */
12732 return 1;
12735 /* Get the last value assigned to X, if known. Some registers
12736 in the value may be replaced with (clobber (const_int 0)) if their value
12737 is no longer known reliably. */
12739 static rtx
12740 get_last_value (const_rtx x)
12742 unsigned int regno;
12743 rtx value;
12744 reg_stat_type *rsp;
12746 /* If this is a non-paradoxical SUBREG, get the value of its operand and
12747 then convert it to the desired mode. If this is a paradoxical SUBREG,
12748 we cannot predict what values the "extra" bits might have. */
12749 if (GET_CODE (x) == SUBREG
12750 && subreg_lowpart_p (x)
12751 && !paradoxical_subreg_p (x)
12752 && (value = get_last_value (SUBREG_REG (x))) != 0)
12753 return gen_lowpart (GET_MODE (x), value);
12755 if (!REG_P (x))
12756 return 0;
12758 regno = REGNO (x);
12759 rsp = VEC_index (reg_stat_type, reg_stat, regno);
12760 value = rsp->last_set_value;
12762 /* If we don't have a value, or if it isn't for this basic block and
12763 it's either a hard register, set more than once, or it's live
12764 at the beginning of the function, return 0.
12766 Because if it's not live at the beginning of the function then the reg
12767 is always set before being used (is never used without being set).
12768 And, if it's set only once, and it's always set before use, then all
12769 uses must have the same last value, even if it's not from this basic
12770 block. */
12772 if (value == 0
12773 || (rsp->last_set_label < label_tick_ebb_start
12774 && (regno < FIRST_PSEUDO_REGISTER
12775 || REG_N_SETS (regno) != 1
12776 || REGNO_REG_SET_P
12777 (DF_LR_IN (ENTRY_BLOCK_PTR->next_bb), regno))))
12778 return 0;
12780 /* If the value was set in a later insn than the ones we are processing,
12781 we can't use it even if the register was only set once. */
12782 if (rsp->last_set_label == label_tick
12783 && DF_INSN_LUID (rsp->last_set) >= subst_low_luid)
12784 return 0;
12786 /* If the value has all its registers valid, return it. */
12787 if (get_last_value_validate (&value, rsp->last_set, rsp->last_set_label, 0))
12788 return value;
12790 /* Otherwise, make a copy and replace any invalid register with
12791 (clobber (const_int 0)). If that fails for some reason, return 0. */
12793 value = copy_rtx (value);
12794 if (get_last_value_validate (&value, rsp->last_set, rsp->last_set_label, 1))
12795 return value;
12797 return 0;
12800 /* Return nonzero if expression X refers to a REG or to memory
12801 that is set in an instruction more recent than FROM_LUID. */
12803 static int
12804 use_crosses_set_p (const_rtx x, int from_luid)
12806 const char *fmt;
12807 int i;
12808 enum rtx_code code = GET_CODE (x);
12810 if (code == REG)
12812 unsigned int regno = REGNO (x);
12813 unsigned endreg = END_REGNO (x);
12815 #ifdef PUSH_ROUNDING
12816 /* Don't allow uses of the stack pointer to be moved,
12817 because we don't know whether the move crosses a push insn. */
12818 if (regno == STACK_POINTER_REGNUM && PUSH_ARGS)
12819 return 1;
12820 #endif
12821 for (; regno < endreg; regno++)
12823 reg_stat_type *rsp = VEC_index (reg_stat_type, reg_stat, regno);
12824 if (rsp->last_set
12825 && rsp->last_set_label == label_tick
12826 && DF_INSN_LUID (rsp->last_set) > from_luid)
12827 return 1;
12829 return 0;
12832 if (code == MEM && mem_last_set > from_luid)
12833 return 1;
12835 fmt = GET_RTX_FORMAT (code);
12837 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
12839 if (fmt[i] == 'E')
12841 int j;
12842 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
12843 if (use_crosses_set_p (XVECEXP (x, i, j), from_luid))
12844 return 1;
12846 else if (fmt[i] == 'e'
12847 && use_crosses_set_p (XEXP (x, i), from_luid))
12848 return 1;
12850 return 0;
12853 /* Define three variables used for communication between the following
12854 routines. */
12856 static unsigned int reg_dead_regno, reg_dead_endregno;
12857 static int reg_dead_flag;
12859 /* Function called via note_stores from reg_dead_at_p.
12861 If DEST is within [reg_dead_regno, reg_dead_endregno), set
12862 reg_dead_flag to 1 if X is a CLOBBER and to -1 if it is a SET. */
12864 static void
12865 reg_dead_at_p_1 (rtx dest, const_rtx x, void *data ATTRIBUTE_UNUSED)
12867 unsigned int regno, endregno;
12869 if (!REG_P (dest))
12870 return;
12872 regno = REGNO (dest);
12873 endregno = END_REGNO (dest);
12874 if (reg_dead_endregno > regno && reg_dead_regno < endregno)
12875 reg_dead_flag = (GET_CODE (x) == CLOBBER) ? 1 : -1;
12878 /* Return nonzero if REG is known to be dead at INSN.
12880 We scan backwards from INSN. If we hit a REG_DEAD note or a CLOBBER
12881 referencing REG, it is dead. If we hit a SET referencing REG, it is
12882 live. Otherwise, see if it is live or dead at the start of the basic
12883 block we are in. Hard regs marked as being live in NEWPAT_USED_REGS
12884 must be assumed to be always live. */
12886 static int
12887 reg_dead_at_p (rtx reg, rtx insn)
12889 basic_block block;
12890 unsigned int i;
12892 /* Set variables for reg_dead_at_p_1. */
12893 reg_dead_regno = REGNO (reg);
12894 reg_dead_endregno = END_REGNO (reg);
12896 reg_dead_flag = 0;
12898 /* Check that reg isn't mentioned in NEWPAT_USED_REGS. For fixed registers
12899 we allow the machine description to decide whether use-and-clobber
12900 patterns are OK. */
12901 if (reg_dead_regno < FIRST_PSEUDO_REGISTER)
12903 for (i = reg_dead_regno; i < reg_dead_endregno; i++)
12904 if (!fixed_regs[i] && TEST_HARD_REG_BIT (newpat_used_regs, i))
12905 return 0;
12908 /* Scan backwards until we find a REG_DEAD note, SET, CLOBBER, or
12909 beginning of basic block. */
12910 block = BLOCK_FOR_INSN (insn);
12911 for (;;)
12913 if (INSN_P (insn))
12915 note_stores (PATTERN (insn), reg_dead_at_p_1, NULL);
12916 if (reg_dead_flag)
12917 return reg_dead_flag == 1 ? 1 : 0;
12919 if (find_regno_note (insn, REG_DEAD, reg_dead_regno))
12920 return 1;
12923 if (insn == BB_HEAD (block))
12924 break;
12926 insn = PREV_INSN (insn);
12929 /* Look at live-in sets for the basic block that we were in. */
12930 for (i = reg_dead_regno; i < reg_dead_endregno; i++)
12931 if (REGNO_REG_SET_P (df_get_live_in (block), i))
12932 return 0;
12934 return 1;
12937 /* Note hard registers in X that are used. */
12939 static void
12940 mark_used_regs_combine (rtx x)
12942 RTX_CODE code = GET_CODE (x);
12943 unsigned int regno;
12944 int i;
12946 switch (code)
12948 case LABEL_REF:
12949 case SYMBOL_REF:
12950 case CONST_INT:
12951 case CONST:
12952 case CONST_DOUBLE:
12953 case CONST_VECTOR:
12954 case PC:
12955 case ADDR_VEC:
12956 case ADDR_DIFF_VEC:
12957 case ASM_INPUT:
12958 #ifdef HAVE_cc0
12959 /* CC0 must die in the insn after it is set, so we don't need to take
12960 special note of it here. */
12961 case CC0:
12962 #endif
12963 return;
12965 case CLOBBER:
12966 /* If we are clobbering a MEM, mark any hard registers inside the
12967 address as used. */
12968 if (MEM_P (XEXP (x, 0)))
12969 mark_used_regs_combine (XEXP (XEXP (x, 0), 0));
12970 return;
12972 case REG:
12973 regno = REGNO (x);
12974 /* A hard reg in a wide mode may really be multiple registers.
12975 If so, mark all of them just like the first. */
12976 if (regno < FIRST_PSEUDO_REGISTER)
12978 /* None of this applies to the stack, frame or arg pointers. */
12979 if (regno == STACK_POINTER_REGNUM
12980 #if !HARD_FRAME_POINTER_IS_FRAME_POINTER
12981 || regno == HARD_FRAME_POINTER_REGNUM
12982 #endif
12983 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
12984 || (regno == ARG_POINTER_REGNUM && fixed_regs[regno])
12985 #endif
12986 || regno == FRAME_POINTER_REGNUM)
12987 return;
12989 add_to_hard_reg_set (&newpat_used_regs, GET_MODE (x), regno);
12991 return;
12993 case SET:
12995 /* If setting a MEM, or a SUBREG of a MEM, then note any hard regs in
12996 the address. */
12997 rtx testreg = SET_DEST (x);
12999 while (GET_CODE (testreg) == SUBREG
13000 || GET_CODE (testreg) == ZERO_EXTRACT
13001 || GET_CODE (testreg) == STRICT_LOW_PART)
13002 testreg = XEXP (testreg, 0);
13004 if (MEM_P (testreg))
13005 mark_used_regs_combine (XEXP (testreg, 0));
13007 mark_used_regs_combine (SET_SRC (x));
13009 return;
13011 default:
13012 break;
13015 /* Recursively scan the operands of this expression. */
13018 const char *fmt = GET_RTX_FORMAT (code);
13020 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
13022 if (fmt[i] == 'e')
13023 mark_used_regs_combine (XEXP (x, i));
13024 else if (fmt[i] == 'E')
13026 int j;
13028 for (j = 0; j < XVECLEN (x, i); j++)
13029 mark_used_regs_combine (XVECEXP (x, i, j));
13035 /* Remove register number REGNO from the dead registers list of INSN.
13037 Return the note used to record the death, if there was one. */
13040 remove_death (unsigned int regno, rtx insn)
13042 rtx note = find_regno_note (insn, REG_DEAD, regno);
13044 if (note)
13045 remove_note (insn, note);
13047 return note;
13050 /* For each register (hardware or pseudo) used within expression X, if its
13051 death is in an instruction with luid between FROM_LUID (inclusive) and
13052 TO_INSN (exclusive), put a REG_DEAD note for that register in the
13053 list headed by PNOTES.
13055 That said, don't move registers killed by maybe_kill_insn.
13057 This is done when X is being merged by combination into TO_INSN. These
13058 notes will then be distributed as needed. */
13060 static void
13061 move_deaths (rtx x, rtx maybe_kill_insn, int from_luid, rtx to_insn,
13062 rtx *pnotes)
13064 const char *fmt;
13065 int len, i;
13066 enum rtx_code code = GET_CODE (x);
13068 if (code == REG)
13070 unsigned int regno = REGNO (x);
13071 rtx where_dead = VEC_index (reg_stat_type, reg_stat, regno)->last_death;
13073 /* Don't move the register if it gets killed in between from and to. */
13074 if (maybe_kill_insn && reg_set_p (x, maybe_kill_insn)
13075 && ! reg_referenced_p (x, maybe_kill_insn))
13076 return;
13078 if (where_dead
13079 && BLOCK_FOR_INSN (where_dead) == BLOCK_FOR_INSN (to_insn)
13080 && DF_INSN_LUID (where_dead) >= from_luid
13081 && DF_INSN_LUID (where_dead) < DF_INSN_LUID (to_insn))
13083 rtx note = remove_death (regno, where_dead);
13085 /* It is possible for the call above to return 0. This can occur
13086 when last_death points to I2 or I1 that we combined with.
13087 In that case make a new note.
13089 We must also check for the case where X is a hard register
13090 and NOTE is a death note for a range of hard registers
13091 including X. In that case, we must put REG_DEAD notes for
13092 the remaining registers in place of NOTE. */
13094 if (note != 0 && regno < FIRST_PSEUDO_REGISTER
13095 && (GET_MODE_SIZE (GET_MODE (XEXP (note, 0)))
13096 > GET_MODE_SIZE (GET_MODE (x))))
13098 unsigned int deadregno = REGNO (XEXP (note, 0));
13099 unsigned int deadend = END_HARD_REGNO (XEXP (note, 0));
13100 unsigned int ourend = END_HARD_REGNO (x);
13101 unsigned int i;
13103 for (i = deadregno; i < deadend; i++)
13104 if (i < regno || i >= ourend)
13105 add_reg_note (where_dead, REG_DEAD, regno_reg_rtx[i]);
13108 /* If we didn't find any note, or if we found a REG_DEAD note that
13109 covers only part of the given reg, and we have a multi-reg hard
13110 register, then to be safe we must check for REG_DEAD notes
13111 for each register other than the first. They could have
13112 their own REG_DEAD notes lying around. */
13113 else if ((note == 0
13114 || (note != 0
13115 && (GET_MODE_SIZE (GET_MODE (XEXP (note, 0)))
13116 < GET_MODE_SIZE (GET_MODE (x)))))
13117 && regno < FIRST_PSEUDO_REGISTER
13118 && hard_regno_nregs[regno][GET_MODE (x)] > 1)
13120 unsigned int ourend = END_HARD_REGNO (x);
13121 unsigned int i, offset;
13122 rtx oldnotes = 0;
13124 if (note)
13125 offset = hard_regno_nregs[regno][GET_MODE (XEXP (note, 0))];
13126 else
13127 offset = 1;
13129 for (i = regno + offset; i < ourend; i++)
13130 move_deaths (regno_reg_rtx[i],
13131 maybe_kill_insn, from_luid, to_insn, &oldnotes);
13134 if (note != 0 && GET_MODE (XEXP (note, 0)) == GET_MODE (x))
13136 XEXP (note, 1) = *pnotes;
13137 *pnotes = note;
13139 else
13140 *pnotes = alloc_reg_note (REG_DEAD, x, *pnotes);
13143 return;
13146 else if (GET_CODE (x) == SET)
13148 rtx dest = SET_DEST (x);
13150 move_deaths (SET_SRC (x), maybe_kill_insn, from_luid, to_insn, pnotes);
13152 /* In the case of a ZERO_EXTRACT, a STRICT_LOW_PART, or a SUBREG
13153 that accesses one word of a multi-word item, some
13154 piece of every register in the expression is used by
13155 this insn, so remove any old death. */
13156 /* ??? So why do we test for equality of the sizes? */
13158 if (GET_CODE (dest) == ZERO_EXTRACT
13159 || GET_CODE (dest) == STRICT_LOW_PART
13160 || (GET_CODE (dest) == SUBREG
13161 && (((GET_MODE_SIZE (GET_MODE (dest))
13162 + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
13163 == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest)))
13164 + UNITS_PER_WORD - 1) / UNITS_PER_WORD))))
13166 move_deaths (dest, maybe_kill_insn, from_luid, to_insn, pnotes);
13167 return;
13170 /* If this is some other SUBREG, we know it replaces the entire
13171 value, so use that as the destination. */
13172 if (GET_CODE (dest) == SUBREG)
13173 dest = SUBREG_REG (dest);
13175 /* If this is a MEM, adjust deaths of anything used in the address.
13176 For a REG (the only other possibility), the entire value is
13177 being replaced so the old value is not used in this insn. */
13179 if (MEM_P (dest))
13180 move_deaths (XEXP (dest, 0), maybe_kill_insn, from_luid,
13181 to_insn, pnotes);
13182 return;
13185 else if (GET_CODE (x) == CLOBBER)
13186 return;
13188 len = GET_RTX_LENGTH (code);
13189 fmt = GET_RTX_FORMAT (code);
13191 for (i = 0; i < len; i++)
13193 if (fmt[i] == 'E')
13195 int j;
13196 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
13197 move_deaths (XVECEXP (x, i, j), maybe_kill_insn, from_luid,
13198 to_insn, pnotes);
13200 else if (fmt[i] == 'e')
13201 move_deaths (XEXP (x, i), maybe_kill_insn, from_luid, to_insn, pnotes);
13205 /* Return 1 if X is the target of a bit-field assignment in BODY, the
13206 pattern of an insn. X must be a REG. */
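/* E.g. (illustrative RTL): X is such a target in
       (set (zero_extract:SI (reg:SI x) (const_int 8) (const_int 0))
            (reg:SI y))
   where X is (reg:SI x).  */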
13208 static int
13209 reg_bitfield_target_p (rtx x, rtx body)
13211 int i;
13213 if (GET_CODE (body) == SET)
13215 rtx dest = SET_DEST (body);
13216 rtx target;
13217 unsigned int regno, tregno, endregno, endtregno;
13219 if (GET_CODE (dest) == ZERO_EXTRACT)
13220 target = XEXP (dest, 0);
13221 else if (GET_CODE (dest) == STRICT_LOW_PART)
13222 target = SUBREG_REG (XEXP (dest, 0));
13223 else
13224 return 0;
13226 if (GET_CODE (target) == SUBREG)
13227 target = SUBREG_REG (target);
13229 if (!REG_P (target))
13230 return 0;
13232 tregno = REGNO (target), regno = REGNO (x);
13233 if (tregno >= FIRST_PSEUDO_REGISTER || regno >= FIRST_PSEUDO_REGISTER)
13234 return target == x;
13236 endtregno = end_hard_regno (GET_MODE (target), tregno);
13237 endregno = end_hard_regno (GET_MODE (x), regno);
13239 return endregno > tregno && regno < endtregno;
13242 else if (GET_CODE (body) == PARALLEL)
13243 for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
13244 if (reg_bitfield_target_p (x, XVECEXP (body, 0, i)))
13245 return 1;
13247 return 0;
13250 /* Given a chain of REG_NOTES originally from FROM_INSN, try to place them
13251 as appropriate. I3 and I2 are the insns resulting from the combination
13252 insns including FROM (I2 may be zero).
13254 ELIM_I2 and ELIM_I1 are either zero or registers that we know will
13255 not need REG_DEAD notes because they are being substituted for. This
13256 saves searching in the most common cases.
13258 Each note in the list is either ignored or placed on some insns, depending
13259 on the type of note. */
13261 static void
13262 distribute_notes (rtx notes, rtx from_insn, rtx i3, rtx i2, rtx elim_i2,
13263 rtx elim_i1, rtx elim_i0)
13265 rtx note, next_note;
13266 rtx tem;
13268 for (note = notes; note; note = next_note)
13270 rtx place = 0, place2 = 0;
13272 next_note = XEXP (note, 1);
13273 switch (REG_NOTE_KIND (note))
13275 case REG_BR_PROB:
13276 case REG_BR_PRED:
13277 /* Doesn't matter much where we put this, as long as it's somewhere.
13278 It is preferable to keep these notes on branches, which is most
13279 likely to be i3. */
13280 place = i3;
13281 break;
13283 case REG_NON_LOCAL_GOTO:
13284 if (JUMP_P (i3))
13285 place = i3;
13286 else
13288 gcc_assert (i2 && JUMP_P (i2));
13289 place = i2;
13291 break;
13293 case REG_EH_REGION:
13294 /* These notes must remain with the call or trapping instruction. */
13295 if (CALL_P (i3))
13296 place = i3;
13297 else if (i2 && CALL_P (i2))
13298 place = i2;
13299 else
13301 gcc_assert (cfun->can_throw_non_call_exceptions);
13302 if (may_trap_p (i3))
13303 place = i3;
13304 else if (i2 && may_trap_p (i2))
13305 place = i2;
13306 /* ??? Otherwise assume we've combined things such that we
13307 can now prove that the instructions can't trap. Drop the
13308 note in this case. */
13310 break;
13312 case REG_ARGS_SIZE:
13313 /* ??? How to distribute this between i3 and i1? Assume i3 contains the
13314 entire adjustment. Assert i3 contains at least some adjustment. */
13315 if (!noop_move_p (i3))
13317 int old_size, args_size = INTVAL (XEXP (note, 0));
13318 /* fixup_args_size_notes looks at REG_NORETURN note,
13319 so ensure the note is placed there first. */
13320 if (CALL_P (i3))
13322 rtx *np;
13323 for (np = &next_note; *np; np = &XEXP (*np, 1))
13324 if (REG_NOTE_KIND (*np) == REG_NORETURN)
13326 rtx n = *np;
13327 *np = XEXP (n, 1);
13328 XEXP (n, 1) = REG_NOTES (i3);
13329 REG_NOTES (i3) = n;
13330 break;
13333 old_size = fixup_args_size_notes (PREV_INSN (i3), i3, args_size);
13334 /* For !ACCUMULATE_OUTGOING_ARGS, emit_call_1 adds a REG_ARGS_SIZE
13335 note to all noreturn calls; allow that here. */
13336 gcc_assert (old_size != args_size
13337 || (CALL_P (i3)
13338 && !ACCUMULATE_OUTGOING_ARGS
13339 && find_reg_note (i3, REG_NORETURN, NULL_RTX)));
13341 break;
13343 case REG_NORETURN:
13344 case REG_SETJMP:
13345 case REG_TM:
13346 /* These notes must remain with the call. It should not be
13347 possible for both I2 and I3 to be a call. */
13348 if (CALL_P (i3))
13349 place = i3;
13350 else
13352 gcc_assert (i2 && CALL_P (i2));
13353 place = i2;
13355 break;
13357 case REG_UNUSED:
13358 /* Any clobbers for i3 may still exist, and so we must process
13359 REG_UNUSED notes from that insn.
13361 Any clobbers from i2 or i1 can only exist if they were added by
13362 recog_for_combine. In that case, recog_for_combine created the
13363 necessary REG_UNUSED notes. Trying to keep any original
13364 REG_UNUSED notes from these insns can cause incorrect output
13365 if it is for the same register as the original i3 dest.
13366 In that case, we will notice that the register is set in i3,
13367 and then add a REG_UNUSED note for the destination of i3, which
13368 is wrong. However, it is possible to have REG_UNUSED notes from
13369 i2 or i1 for registers which were both used and clobbered, so
13370 we keep notes from i2 or i1 if they will turn into REG_DEAD
13371 notes. */
13373 /* If this register is set or clobbered in I3, put the note there
13374 unless there is one already. */
13375 if (reg_set_p (XEXP (note, 0), PATTERN (i3)))
13377 if (from_insn != i3)
13378 break;
13380 if (! (REG_P (XEXP (note, 0))
13381 ? find_regno_note (i3, REG_UNUSED, REGNO (XEXP (note, 0)))
13382 : find_reg_note (i3, REG_UNUSED, XEXP (note, 0))))
13383 place = i3;
13385 /* Otherwise, if this register is used by I3, then this register
13386 now dies here, so we must put a REG_DEAD note here unless there
13387 is one already. */
13388 else if (reg_referenced_p (XEXP (note, 0), PATTERN (i3))
13389 && ! (REG_P (XEXP (note, 0))
13390 ? find_regno_note (i3, REG_DEAD,
13391 REGNO (XEXP (note, 0)))
13392 : find_reg_note (i3, REG_DEAD, XEXP (note, 0))))
13394 PUT_REG_NOTE_KIND (note, REG_DEAD);
13395 place = i3;
13397 break;
13399 case REG_EQUAL:
13400 case REG_EQUIV:
13401 case REG_NOALIAS:
13402 /* These notes say something about results of an insn. We can
13403 only support them if they used to be on I3 in which case they
13404 remain on I3. Otherwise they are ignored.
13406 If the note refers to an expression that is not a constant, we
13407 must also ignore the note since we cannot tell whether the
13408 equivalence is still true. It might be possible to do
13409 slightly better than this (we only have a problem if I2DEST
13410 or I1DEST is present in the expression), but it doesn't
13411 seem worth the trouble. */
13413 if (from_insn == i3
13414 && (XEXP (note, 0) == 0 || CONSTANT_P (XEXP (note, 0))))
13415 place = i3;
13416 break;
13418 case REG_INC:
13419 /* These notes say something about how a register is used. They must
13420 be present on any use of the register in I2 or I3. */
13421 if (reg_mentioned_p (XEXP (note, 0), PATTERN (i3)))
13422 place = i3;
13424 if (i2 && reg_mentioned_p (XEXP (note, 0), PATTERN (i2)))
13426 if (place)
13427 place2 = i2;
13428 else
13429 place = i2;
13431 break;
13433 case REG_LABEL_TARGET:
13434 case REG_LABEL_OPERAND:
13435 /* This can show up in several ways -- either directly in the
13436 pattern, or hidden off in the constant pool with (or without?)
13437 a REG_EQUAL note. */
13438 /* ??? Ignore the without-reg_equal-note problem for now. */
13439 if (reg_mentioned_p (XEXP (note, 0), PATTERN (i3))
13440 || ((tem = find_reg_note (i3, REG_EQUAL, NULL_RTX))
13441 && GET_CODE (XEXP (tem, 0)) == LABEL_REF
13442 && XEXP (XEXP (tem, 0), 0) == XEXP (note, 0)))
13443 place = i3;
13445 if (i2
13446 && (reg_mentioned_p (XEXP (note, 0), PATTERN (i2))
13447 || ((tem = find_reg_note (i2, REG_EQUAL, NULL_RTX))
13448 && GET_CODE (XEXP (tem, 0)) == LABEL_REF
13449 && XEXP (XEXP (tem, 0), 0) == XEXP (note, 0))))
13451 if (place)
13452 place2 = i2;
13453 else
13454 place = i2;
13457 /* For REG_LABEL_TARGET on a JUMP_P, we prefer to put the note
13458 as a JUMP_LABEL or decrement LABEL_NUSES if it's already
13459 there. */
13460 if (place && JUMP_P (place)
13461 && REG_NOTE_KIND (note) == REG_LABEL_TARGET
13462 && (JUMP_LABEL (place) == NULL
13463 || JUMP_LABEL (place) == XEXP (note, 0)))
13465 rtx label = JUMP_LABEL (place);
13467 if (!label)
13468 JUMP_LABEL (place) = XEXP (note, 0);
13469 else if (LABEL_P (label))
13470 LABEL_NUSES (label)--;
13473 if (place2 && JUMP_P (place2)
13474 && REG_NOTE_KIND (note) == REG_LABEL_TARGET
13475 && (JUMP_LABEL (place2) == NULL
13476 || JUMP_LABEL (place2) == XEXP (note, 0)))
13478 rtx label = JUMP_LABEL (place2);
13480 if (!label)
13481 JUMP_LABEL (place2) = XEXP (note, 0);
13482 else if (LABEL_P (label))
13483 LABEL_NUSES (label)--;
13484 place2 = 0;
13486 break;
13488 case REG_NONNEG:
13489 /* This note says something about the value of a register prior
13490 to the execution of an insn. It is too much trouble to see
13491 if the note is still correct in all situations. It is better
13492 to simply delete it. */
13493 break;
13495 case REG_DEAD:
13496 /* If we replaced the right hand side of FROM_INSN with a
13497 REG_EQUAL note, the original use of the dying register
13498 will not have been combined into I3 and I2. In such cases,
13499 FROM_INSN is guaranteed to be the first of the combined
13500 instructions, so we simply need to search back before
13501 FROM_INSN for the previous use or set of this register,
13502 then alter the notes there appropriately.
13504 If the register is used as an input in I3, it dies there.
13505 Similarly for I2, if it is nonzero and adjacent to I3.
13507 If the register is not used as an input in either I3 or I2
13508 and it is not one of the registers we were supposed to eliminate,
13509 there are two possibilities. We might have a non-adjacent I2
13510 or we might have somehow eliminated an additional register
13511 from a computation. For example, we might have had A & B where
13512 we discover that B will always be zero. In this case we will
13513 eliminate the reference to A.
13515 In both cases, we must search to see if we can find a previous
13516 use of A and put the death note there. */
13518 if (from_insn
13519 && from_insn == i2mod
13520 && !reg_overlap_mentioned_p (XEXP (note, 0), i2mod_new_rhs))
13521 tem = from_insn;
13522 else
13524 if (from_insn
13525 && CALL_P (from_insn)
13526 && find_reg_fusage (from_insn, USE, XEXP (note, 0)))
13527 place = from_insn;
13528 else if (reg_referenced_p (XEXP (note, 0), PATTERN (i3)))
13529 place = i3;
13530 else if (i2 != 0 && next_nonnote_nondebug_insn (i2) == i3
13531 && reg_referenced_p (XEXP (note, 0), PATTERN (i2)))
13532 place = i2;
13533 else if ((rtx_equal_p (XEXP (note, 0), elim_i2)
13534 && !(i2mod
13535 && reg_overlap_mentioned_p (XEXP (note, 0),
13536 i2mod_old_rhs)))
13537 || rtx_equal_p (XEXP (note, 0), elim_i1)
13538 || rtx_equal_p (XEXP (note, 0), elim_i0))
13539 break;
13540 tem = i3;
13543 if (place == 0)
13545 basic_block bb = this_basic_block;
13547 for (tem = PREV_INSN (tem); place == 0; tem = PREV_INSN (tem))
13549 if (!NONDEBUG_INSN_P (tem))
13551 if (tem == BB_HEAD (bb))
13552 break;
13553 continue;
13556 /* If the register is being set at TEM, see if that is all
13557 TEM is doing. If so, delete TEM. Otherwise, make this
13558 into a REG_UNUSED note instead. Don't delete sets to
13559 global register vars. */
13560 if ((REGNO (XEXP (note, 0)) >= FIRST_PSEUDO_REGISTER
13561 || !global_regs[REGNO (XEXP (note, 0))])
13562 && reg_set_p (XEXP (note, 0), PATTERN (tem)))
13564 rtx set = single_set (tem);
13565 rtx inner_dest = 0;
13566 #ifdef HAVE_cc0
13567 rtx cc0_setter = NULL_RTX;
13568 #endif
13570 if (set != 0)
13571 for (inner_dest = SET_DEST (set);
13572 (GET_CODE (inner_dest) == STRICT_LOW_PART
13573 || GET_CODE (inner_dest) == SUBREG
13574 || GET_CODE (inner_dest) == ZERO_EXTRACT);
13575 inner_dest = XEXP (inner_dest, 0))
13578 /* Verify that it was the set, and not a clobber that
13579 modified the register.
13581 CC0 targets must be careful to maintain setter/user
13582 pairs. If we cannot delete the setter due to side
13583 effects, mark the user with an UNUSED note instead
13584 of deleting it. */
13586 if (set != 0 && ! side_effects_p (SET_SRC (set))
13587 && rtx_equal_p (XEXP (note, 0), inner_dest)
13588 #ifdef HAVE_cc0
13589 && (! reg_mentioned_p (cc0_rtx, SET_SRC (set))
13590 || ((cc0_setter = prev_cc0_setter (tem)) != NULL
13591 && sets_cc0_p (PATTERN (cc0_setter)) > 0))
13592 #endif
13595 /* Move the notes and links of TEM elsewhere.
13596 This might delete other dead insns recursively.
13597 First set the pattern to something that won't use
13598 any register. */
13599 rtx old_notes = REG_NOTES (tem);
13601 PATTERN (tem) = pc_rtx;
13602 REG_NOTES (tem) = NULL;
13604 distribute_notes (old_notes, tem, tem, NULL_RTX,
13605 NULL_RTX, NULL_RTX, NULL_RTX);
13606 distribute_links (LOG_LINKS (tem));
13608 SET_INSN_DELETED (tem);
13609 if (tem == i2)
13610 i2 = NULL_RTX;
13612 #ifdef HAVE_cc0
13613 /* Delete the setter too. */
13614 if (cc0_setter)
13616 PATTERN (cc0_setter) = pc_rtx;
13617 old_notes = REG_NOTES (cc0_setter);
13618 REG_NOTES (cc0_setter) = NULL;
13620 distribute_notes (old_notes, cc0_setter,
13621 cc0_setter, NULL_RTX,
13622 NULL_RTX, NULL_RTX, NULL_RTX);
13623 distribute_links (LOG_LINKS (cc0_setter));
13625 SET_INSN_DELETED (cc0_setter);
13626 if (cc0_setter == i2)
13627 i2 = NULL_RTX;
13629 #endif
13631 else
13633 PUT_REG_NOTE_KIND (note, REG_UNUSED);
13635 /* If there isn't already a REG_UNUSED note, put one
13636 here. Do not place a REG_DEAD note, even if
13637 the register is also used here; that would not
13638 match the algorithm used in lifetime analysis
13639 and can cause the consistency check in the
13640 scheduler to fail. */
13641 if (! find_regno_note (tem, REG_UNUSED,
13642 REGNO (XEXP (note, 0))))
13643 place = tem;
13644 break;
13647 else if (reg_referenced_p (XEXP (note, 0), PATTERN (tem))
13648 || (CALL_P (tem)
13649 && find_reg_fusage (tem, USE, XEXP (note, 0))))
13651 place = tem;
13653 /* If we are doing a 3->2 combination, and we have a
13654 register which formerly died in i3 and was not used
13655 by i2, which now no longer dies in i3 and is used in
13656 i2 but does not die in i2, and place is between i2
13657 and i3, then we may need to move a link from place to
13658 i2. */
13659 if (i2 && DF_INSN_LUID (place) > DF_INSN_LUID (i2)
13660 && from_insn
13661 && DF_INSN_LUID (from_insn) > DF_INSN_LUID (i2)
13662 && reg_referenced_p (XEXP (note, 0), PATTERN (i2)))
13664 struct insn_link *links = LOG_LINKS (place);
13665 LOG_LINKS (place) = NULL;
13666 distribute_links (links);
13668 break;
13671 if (tem == BB_HEAD (bb))
13672 break;
13677 /* If the register is set or already dead at PLACE, we needn't do
13678 anything with this note if it is still a REG_DEAD note.
13679 We check here if it is set at all, not if it is totally replaced,
13680 which is what `dead_or_set_p' checks, so also check for it being
13681 set partially. */
13683 if (place && REG_NOTE_KIND (note) == REG_DEAD)
13685 unsigned int regno = REGNO (XEXP (note, 0));
13686 reg_stat_type *rsp = VEC_index (reg_stat_type, reg_stat, regno);
13688 if (dead_or_set_p (place, XEXP (note, 0))
13689 || reg_bitfield_target_p (XEXP (note, 0), PATTERN (place)))
13691 /* Unless the register previously died in PLACE, clear
13692 last_death. [I no longer understand why this is
13693 being done.] */
13694 if (rsp->last_death != place)
13695 rsp->last_death = 0;
13696 place = 0;
13698 else
13699 rsp->last_death = place;
13701 /* If this is a death note for a hard reg that is occupying
13702 multiple registers, ensure that we are still using all
13703 parts of the object. If we find a piece of the object
13704 that is unused, we must arrange for an appropriate REG_DEAD
13705 note to be added for it. However, we can't just emit a USE
13706 and tag the note to it, since the register might actually
13707 be dead; so we recurse, and the recursive call then finds
13708 the previous insn that used this register. */
13710 if (place && regno < FIRST_PSEUDO_REGISTER
13711 && hard_regno_nregs[regno][GET_MODE (XEXP (note, 0))] > 1)
13713 unsigned int endregno = END_HARD_REGNO (XEXP (note, 0));
13714 int all_used = 1;
13715 unsigned int i;
13717 for (i = regno; i < endregno; i++)
13718 if ((! refers_to_regno_p (i, i + 1, PATTERN (place), 0)
13719 && ! find_regno_fusage (place, USE, i))
13720 || dead_or_set_regno_p (place, i))
13721 all_used = 0;
13723 if (! all_used)
13725 /* Put only REG_DEAD notes for pieces that are
13726 not already dead or set. */
13728 for (i = regno; i < endregno;
13729 i += hard_regno_nregs[i][reg_raw_mode[i]])
13731 rtx piece = regno_reg_rtx[i];
13732 basic_block bb = this_basic_block;
13734 if (! dead_or_set_p (place, piece)
13735 && ! reg_bitfield_target_p (piece,
13736 PATTERN (place)))
13738 rtx new_note = alloc_reg_note (REG_DEAD, piece,
13739 NULL_RTX);
13741 distribute_notes (new_note, place, place,
13742 NULL_RTX, NULL_RTX, NULL_RTX,
13743 NULL_RTX);
13745 else if (! refers_to_regno_p (i, i + 1,
13746 PATTERN (place), 0)
13747 && ! find_regno_fusage (place, USE, i))
13748 for (tem = PREV_INSN (place); ;
13749 tem = PREV_INSN (tem))
13751 if (!NONDEBUG_INSN_P (tem))
13753 if (tem == BB_HEAD (bb))
13754 break;
13755 continue;
13757 if (dead_or_set_p (tem, piece)
13758 || reg_bitfield_target_p (piece,
13759 PATTERN (tem)))
13761 add_reg_note (tem, REG_UNUSED, piece);
13762 break;
13768 place = 0;
13772 break;
13774 default:
13775 /* Any other notes should not be present at this point in the
13776 compilation. */
13777 gcc_unreachable ();
13780 if (place)
13782 XEXP (note, 1) = REG_NOTES (place);
13783 REG_NOTES (place) = note;
13786 if (place2)
13787 add_reg_note (place2, REG_NOTE_KIND (note), XEXP (note, 0));
13791 /* Similarly to above, distribute the LOG_LINKS that used to be present on
13792 I3, I2, and I1 to new locations. This is also called to add a link
13793 pointing at I3 when I3's destination is changed. */
13795 static void
13796 distribute_links (struct insn_link *links)
13798 struct insn_link *link, *next_link;
13800 for (link = links; link; link = next_link)
13802 rtx place = 0;
13803 rtx insn;
13804 rtx set, reg;
13806 next_link = link->next;
13808 /* If the insn that this link points to is a NOTE or isn't a single
13809 set, ignore it. In the latter case, it isn't clear what we
13810 can do other than ignore the link, since we can't tell which
13811 register it was for. Such links wouldn't be used by combine
13812 anyway.
13814 It is not possible for the destination of the target of the link to
13815 have been changed by combine. The only way this could happen is if we
13816 replace I3, I2, and I1 by I3 and I2. But in that case the
13817 destination of I2 also remains unchanged. */
13819 if (NOTE_P (link->insn)
13820 || (set = single_set (link->insn)) == 0)
13821 continue;
13823 reg = SET_DEST (set);
13824 while (GET_CODE (reg) == SUBREG || GET_CODE (reg) == ZERO_EXTRACT
13825 || GET_CODE (reg) == STRICT_LOW_PART)
13826 reg = XEXP (reg, 0);
13828 /* A LOG_LINK is defined as being placed on the first insn that uses
13829 a register and points to the insn that sets the register. Start
13830 searching at the next insn after the target of the link and stop
13831 when we reach a set of the register or the end of the basic block.
13833 Note that this correctly handles the link that used to point from
13834 I3 to I2. Also note that not much searching is typically done here
13835 since most links don't point very far away. */
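/* For instance (hypothetical insn numbers): if insn 10 sets (reg 100) and
   insns 12 and 15 both use it, the link for (reg 100) lives on insn 12 and
   points back to insn 10; insn 15 carries no such link.  */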
13837 for (insn = NEXT_INSN (link->insn);
13838 (insn && (this_basic_block->next_bb == EXIT_BLOCK_PTR
13839 || BB_HEAD (this_basic_block->next_bb) != insn));
13840 insn = NEXT_INSN (insn))
13841 if (DEBUG_INSN_P (insn))
13842 continue;
13843 else if (INSN_P (insn) && reg_overlap_mentioned_p (reg, PATTERN (insn)))
13845 if (reg_referenced_p (reg, PATTERN (insn)))
13846 place = insn;
13847 break;
13849 else if (CALL_P (insn)
13850 && find_reg_fusage (insn, USE, reg))
13852 place = insn;
13853 break;
13855 else if (INSN_P (insn) && reg_set_p (reg, insn))
13856 break;
13858 /* If we found a place to put the link, place it there unless there
13859 is already a link to the same insn as LINK at that point. */
13861 if (place)
13863 struct insn_link *link2;
13865 FOR_EACH_LOG_LINK (link2, place)
13866 if (link2->insn == link->insn)
13867 break;
13869 if (link2 == NULL)
13871 link->next = LOG_LINKS (place);
13872 LOG_LINKS (place) = link;
13874 /* Set added_links_insn to the earliest insn we added a
13875 link to. */
13876 if (added_links_insn == 0
13877 || DF_INSN_LUID (added_links_insn) > DF_INSN_LUID (place))
13878 added_links_insn = place;
13884 /* Subroutine of unmentioned_reg_p and callback from for_each_rtx.
13885 Check whether the expression pointed to by LOC is a register or
13886 memory, and if so return 1 if it isn't mentioned in the rtx EXPR.
13887 Otherwise return zero. */
13889 static int
13890 unmentioned_reg_p_1 (rtx *loc, void *expr)
13892 rtx x = *loc;
13894 if (x != NULL_RTX
13895 && (REG_P (x) || MEM_P (x))
13896 && ! reg_mentioned_p (x, (rtx) expr))
13897 return 1;
13898 return 0;
13901 /* Check for any register or memory mentioned in EQUIV that is not
13902 mentioned in EXPR. This is used to restrict EQUIV to "specializations"
13903 of EXPR where some registers may have been replaced by constants. */
13905 static bool
13906 unmentioned_reg_p (rtx equiv, rtx expr)
13908 return for_each_rtx (&equiv, unmentioned_reg_p_1, expr);
13911 DEBUG_FUNCTION void
13912 dump_combine_stats (FILE *file)
13914 fprintf
13915 (file,
13916 ";; Combiner statistics: %d attempts, %d substitutions (%d requiring new space),\n;; %d successes.\n\n",
13917 combine_attempts, combine_merges, combine_extras, combine_successes);
13920 void
13921 dump_combine_total_stats (FILE *file)
13923 fprintf
13924 (file,
13925 "\n;; Combiner totals: %d attempts, %d substitutions (%d requiring new space),\n;; %d successes.\n",
13926 total_attempts, total_merges, total_extras, total_successes);
13929 static bool
13930 gate_handle_combine (void)
13932 return (optimize > 0);
13935 /* Try combining insns through substitution. */
13936 static unsigned int
13937 rest_of_handle_combine (void)
13939 int rebuild_jump_labels_after_combine;
13941 df_set_flags (DF_LR_RUN_DCE + DF_DEFER_INSN_RESCAN);
13942 df_note_add_problem ();
13943 df_analyze ();
13945 regstat_init_n_sets_and_refs ();
13947 rebuild_jump_labels_after_combine
13948 = combine_instructions (get_insns (), max_reg_num ());
13950 /* Combining insns may have turned an indirect jump into a
13951 direct jump. Rebuild the JUMP_LABEL fields of jumping
13952 instructions. */
13953 if (rebuild_jump_labels_after_combine)
13955 timevar_push (TV_JUMP);
13956 rebuild_jump_labels (get_insns ());
13957 cleanup_cfg (0);
13958 timevar_pop (TV_JUMP);
13961 regstat_free_n_sets_and_refs ();
13962 return 0;
13965 struct rtl_opt_pass pass_combine =
13968 RTL_PASS,
13969 "combine", /* name */
13970 gate_handle_combine, /* gate */
13971 rest_of_handle_combine, /* execute */
13972 NULL, /* sub */
13973 NULL, /* next */
13974 0, /* static_pass_number */
13975 TV_COMBINE, /* tv_id */
13976 PROP_cfglayout, /* properties_required */
13977 0, /* properties_provided */
13978 0, /* properties_destroyed */
13979 0, /* todo_flags_start */
13980 TODO_df_finish | TODO_verify_rtl_sharing |
13981 TODO_ggc_collect, /* todo_flags_finish */