1 /* Optimize by combining instructions for GNU compiler.
2 Copyright (C) 1987-2015 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
20 /* This module is essentially the "combiner" phase of the U. of Arizona
21 Portable Optimizer, but redone to work on our list-structured
22 representation for RTL instead of their string representation.
24 The LOG_LINKS of each insn identify the most recent assignment
25 to each REG used in the insn. It is a list of previous insns,
26 each of which contains a SET for a REG that is used in this insn
27 and not used or set in between. LOG_LINKs never cross basic blocks.
28 They were set up by the preceding pass (lifetime analysis).
30 We try to combine each pair of insns joined by a logical link.
31 We also try to combine triplets of insns A, B and C when C has
32 a link back to B and B has a link back to A. Likewise for a
33 small number of quadruplets of insns A, B, C and D for which
34 there's a high likelihood of success.
36 LOG_LINKS does not have links for uses of CC0. They don't
37 need to, because the insn that sets the CC0 is always immediately
38 before the insn that tests it. So we always regard a branch
39 insn as having a logical link to the preceding insn. The same is true
40 for an insn explicitly using CC0.
42 We check (with use_crosses_set_p) to avoid combining in such a way
43 as to move a computation to a place where its value would be different.
45 Combination is done by mathematically substituting the previous
46 insn(s) values for the regs they set into the expressions in
47 the later insns that refer to these regs. If the result is a valid insn
48 for our target machine, according to the machine description,
49 we install it, delete the earlier insns, and update the data flow
50 information (LOG_LINKS and REG_NOTES) for what we did.
52 There are a few exceptions where the dataflow information isn't
53 completely updated (however this is only a local issue since it is
54 regenerated before the next pass that uses it):
56 - reg_live_length is not updated
57 - reg_n_refs is not adjusted in the rare case when a register is
58 no longer required in a computation
59 - there are extremely rare cases (see distribute_notes) when a
60 REG_DEAD note is lost
61 - a LOG_LINKS entry that refers to an insn with multiple SETs may be
62 removed because there is no way to know which register it was
63 linking
65 To simplify substitution, we combine only when the earlier insn(s)
66 consist of only a single assignment. To simplify updating afterward,
67 we never combine when a subroutine call appears in the middle.
69 Since we do not represent assignments to CC0 explicitly except when that
70 is all an insn does, there is no LOG_LINKS entry in an insn that uses
71 the condition code for the insn that set the condition code.
72 Fortunately, these two insns must be consecutive.
73 Therefore, every JUMP_INSN is taken to have an implicit logical link
74 to the preceding insn. This is not quite right, since non-jumps can
75 also use the condition code; but in practice such insns would not
76 combine anyway. */
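/* As an illustrative sketch (the pseudo register numbers and patterns here
   are hypothetical, not taken from any particular target or dump), a
   typical two-insn combination substitutes the SET_SRC of I2 into I3:

       I2: (set (reg:SI 100) (plus:SI (reg:SI 98) (reg:SI 99)))
       I3: (set (reg:SI 101) (ashift:SI (reg:SI 100) (const_int 2)))

   becomes, provided the result matches some pattern in the machine
   description,

       I3: (set (reg:SI 101)
                (ashift:SI (plus:SI (reg:SI 98) (reg:SI 99))
                           (const_int 2)))

   with I2 deleted and LOG_LINKS/REG_NOTES updated accordingly.  */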
78 #include "config.h"
79 #include "system.h"
80 #include "coretypes.h"
81 #include "tm.h"
82 #include "rtl.h"
83 #include "hash-set.h"
84 #include "machmode.h"
85 #include "vec.h"
86 #include "double-int.h"
87 #include "input.h"
88 #include "alias.h"
89 #include "symtab.h"
90 #include "wide-int.h"
91 #include "inchash.h"
92 #include "tree.h"
93 #include "stor-layout.h"
94 #include "tm_p.h"
95 #include "flags.h"
96 #include "regs.h"
97 #include "hard-reg-set.h"
98 #include "predict.h"
99 #include "input.h"
100 #include "function.h"
101 #include "dominance.h"
102 #include "cfg.h"
103 #include "cfgrtl.h"
104 #include "cfgcleanup.h"
105 #include "basic-block.h"
106 #include "insn-config.h"
107 /* Include expr.h after insn-config.h so we get HAVE_conditional_move. */
108 #include "expr.h"
109 #include "insn-attr.h"
110 #include "recog.h"
111 #include "diagnostic-core.h"
112 #include "target.h"
113 #include "insn-codes.h"
114 #include "optabs.h"
115 #include "rtlhooks-def.h"
116 #include "params.h"
117 #include "tree-pass.h"
118 #include "df.h"
119 #include "valtrack.h"
120 #include "hash-map.h"
121 #include "is-a.h"
122 #include "plugin-api.h"
123 #include "ipa-ref.h"
124 #include "cgraph.h"
125 #include "obstack.h"
126 #include "statistics.h"
127 #include "params.h"
128 #include "rtl-iter.h"
130 /* Number of attempts to combine instructions in this function. */
132 static int combine_attempts;
134 /* Number of attempts that got as far as substitution in this function. */
136 static int combine_merges;
138 /* Number of instructions combined with added SETs in this function. */
140 static int combine_extras;
142 /* Number of instructions combined in this function. */
144 static int combine_successes;
146 /* Totals over entire compilation. */
148 static int total_attempts, total_merges, total_extras, total_successes;
150 /* combine_instructions may try to replace the right hand side of the
151 second instruction with the value of an associated REG_EQUAL note
152 before throwing it at try_combine. That is problematic when there
153 is a REG_DEAD note for a register used in the old right hand side
154 and can cause distribute_notes to do wrong things. This is the
155 second instruction if it has been so modified, null otherwise. */
157 static rtx_insn *i2mod;
159 /* When I2MOD is nonnull, this is a copy of the old right hand side. */
161 static rtx i2mod_old_rhs;
163 /* When I2MOD is nonnull, this is a copy of the new right hand side. */
165 static rtx i2mod_new_rhs;
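/* As a hypothetical illustration of the I2MOD mechanism above: if the linked
   insn is (set (reg:SI 100) (mult:SI (reg:SI 99) (const_int 4))) and it
   carries a REG_EQUAL note (ashift:SI (reg:SI 99) (const_int 2)), combine
   may try the combination against the shift form instead of the multiply.
   I2MOD then points at that insn, with the multiply saved in I2MOD_OLD_RHS
   and the shift in I2MOD_NEW_RHS, so that note distribution can take the
   temporary modification into account.  */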
167 typedef struct reg_stat_struct {
168 /* Record last point of death of (hard or pseudo) register n. */
169 rtx_insn *last_death;
171 /* Record last point of modification of (hard or pseudo) register n. */
172 rtx_insn *last_set;
174 /* The next group of fields allows the recording of the last value assigned
175 to (hard or pseudo) register n. We use this information to see if an
176 operation being processed is redundant given a prior operation performed
177 on the register. For example, an `and' with a constant is redundant if
178 all the zero bits are already known to be turned off.
180 We use an approach similar to that used by cse, but change it in the
181 following ways:
183 (1) We do not want to reinitialize at each label.
184 (2) It is useful, but not critical, to know the actual value assigned
185 to a register. Often just its form is helpful.
187 Therefore, we maintain the following fields:
189 last_set_value the last value assigned
190 last_set_label records the value of label_tick when the
191 register was assigned
192 last_set_table_tick records the value of label_tick when a
193 value using the register is assigned
194 last_set_invalid set to nonzero when it is not valid
195 to use the value of this register in some
196 register's value
198 To understand the usage of these tables, it is important to understand
199 the distinction between the value in last_set_value being valid and
200 the register being validly contained in some other expression in the
201 table.
203 (The next two parameters are out of date).
205 reg_stat[i].last_set_value is valid if it is nonzero, and either
206 reg_n_sets[i] is 1 or reg_stat[i].last_set_label == label_tick.
208 Register I may validly appear in any expression returned for the value
209 of another register if reg_n_sets[i] is 1. It may also appear in the
210 value for register J if reg_stat[j].last_set_invalid is zero, or
211 reg_stat[i].last_set_label < reg_stat[j].last_set_label.
213 If an expression is found in the table containing a register which may
214 not validly appear in an expression, the register is replaced by
215 something that won't match, (clobber (const_int 0)). */
217 /* Record last value assigned to (hard or pseudo) register n. */
219 rtx last_set_value;
221 /* Record the value of label_tick when an expression involving register n
222 is placed in last_set_value. */
224 int last_set_table_tick;
226 /* Record the value of label_tick when the value for register n is placed in
227 last_set_value. */
229 int last_set_label;
231 /* These fields are maintained in parallel with last_set_value and are
232 used to store the mode in which the register was last set, the bits
233 that were known to be zero when it was last set, and the number of
234 sign bit copies it was known to have when it was last set. */
236 unsigned HOST_WIDE_INT last_set_nonzero_bits;
237 char last_set_sign_bit_copies;
238 ENUM_BITFIELD(machine_mode) last_set_mode : 8;
240 /* Set nonzero if references to register n in expressions should not be
241 used. last_set_invalid is set nonzero when this register is being
242 assigned to and last_set_table_tick == label_tick. */
244 char last_set_invalid;
246 /* Some registers that are set more than once and used in more than one
247 basic block are nevertheless always set in similar ways. For example,
248 a QImode register may be loaded from memory in two places on a machine
249 where byte loads zero extend.
251 We record in the following fields if a register has some leading bits
252 that are always equal to the sign bit, and what we know about the
253 nonzero bits of a register, specifically which bits are known to be
254 zero.
256 If an entry is zero, it means that we don't know anything special. */
258 unsigned char sign_bit_copies;
260 unsigned HOST_WIDE_INT nonzero_bits;
262 /* Record the value of the label_tick when the last truncation
263 happened. The field truncated_to_mode is only valid if
264 truncation_label == label_tick. */
266 int truncation_label;
268 /* Record the last truncation seen for this register. If truncation
269 is not a nop to this mode we might be able to save an explicit
270 truncation if we know that value already contains a truncated
271 value. */
273 ENUM_BITFIELD(machine_mode) truncated_to_mode : 8;
274 } reg_stat_type;
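/* A small worked example of how the nonzero_bits field is used (the
   register number is hypothetical): if (reg:SI 100) was last set by a
   byte load on a machine where byte loads zero extend, its nonzero_bits
   is 0xff.  A later (and:SI (reg:SI 100) (const_int 255)) is then known
   to be redundant, because every bit the AND could clear is already known
   to be zero, so it can be simplified to just (reg:SI 100).  */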
277 static vec<reg_stat_type> reg_stat;
279 /* Record the luid of the last insn that invalidated memory
280 (anything that writes memory, and subroutine calls, but not pushes). */
282 static int mem_last_set;
284 /* Record the luid of the last CALL_INSN
285 so we can tell whether a potential combination crosses any calls. */
287 static int last_call_luid;
289 /* When `subst' is called, this is the insn that is being modified
290 (by combining in a previous insn). The PATTERN of this insn
291 is still the old pattern partially modified and it should not be
292 looked at, but this may be used to examine the successors of the insn
293 to judge whether a simplification is valid. */
295 static rtx_insn *subst_insn;
297 /* This is the lowest LUID that `subst' is currently dealing with.
298 get_last_value will not return a value if the register was set at or
299 after this LUID. If not for this mechanism, we could get confused if
300 I2 or I1 in try_combine were an insn that used the old value of a register
301 to obtain a new value. In that case, we might erroneously get the
302 new value of the register when we wanted the old one. */
304 static int subst_low_luid;
306 /* This contains any hard registers that are used in newpat; reg_dead_at_p
307 must consider all these registers to be always live. */
309 static HARD_REG_SET newpat_used_regs;
311 /* This is an insn to which a LOG_LINKS entry has been added. If this
312 insn is earlier than I2 or I3, combine should rescan starting at
313 that location. */
315 static rtx_insn *added_links_insn;
317 /* Basic block in which we are performing combines. */
318 static basic_block this_basic_block;
319 static bool optimize_this_for_speed_p;
322 /* Length of the currently allocated uid_insn_cost array. */
324 static int max_uid_known;
326 /* The following array records the insn_rtx_cost for every insn
327 in the instruction stream. */
329 static int *uid_insn_cost;
331 /* The following array records the LOG_LINKS for every insn in the
332 instruction stream as struct insn_link pointers. */
334 struct insn_link {
335 rtx_insn *insn;
336 unsigned int regno;
337 struct insn_link *next;
338 };
340 static struct insn_link **uid_log_links;
342 #define INSN_COST(INSN) (uid_insn_cost[INSN_UID (INSN)])
343 #define LOG_LINKS(INSN) (uid_log_links[INSN_UID (INSN)])
345 #define FOR_EACH_LOG_LINK(L, INSN) \
346 for ((L) = LOG_LINKS (INSN); (L); (L) = (L)->next)
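/* A sketch of how the accessors above are typically used (INSN and DEST
   are assumed to be in scope):

       struct insn_link *link;
       FOR_EACH_LOG_LINK (link, insn)
         if (link->regno == REGNO (dest))
           break;

   This walks the insns recorded as supplying values used by INSN.  */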
348 /* Links for LOG_LINKS are allocated from this obstack. */
350 static struct obstack insn_link_obstack;
352 /* Allocate a link. */
354 static inline struct insn_link *
355 alloc_insn_link (rtx_insn *insn, unsigned int regno, struct insn_link *next)
357 struct insn_link *l
358 = (struct insn_link *) obstack_alloc (&insn_link_obstack,
359 sizeof (struct insn_link));
360 l->insn = insn;
361 l->regno = regno;
362 l->next = next;
363 return l;
366 /* Incremented for each basic block. */
368 static int label_tick;
370 /* Reset to label_tick for each extended basic block in scanning order. */
372 static int label_tick_ebb_start;
374 /* Mode used to compute significance in reg_stat[].nonzero_bits. It is the
375 largest integer mode that can fit in HOST_BITS_PER_WIDE_INT. */
377 static machine_mode nonzero_bits_mode;
379 /* Nonzero when reg_stat[].nonzero_bits and reg_stat[].sign_bit_copies can
380 be safely used. It is zero while computing them and after combine has
381 completed. This former test prevents propagating values based on
382 previously set values, which can be incorrect if a variable is modified
383 in a loop. */
385 static int nonzero_sign_valid;
388 /* Record one modification to rtl structure
389 to be undone by storing old_contents into *where. */
391 enum undo_kind { UNDO_RTX, UNDO_INT, UNDO_MODE, UNDO_LINKS };
393 struct undo
394 {
395 struct undo *next;
396 enum undo_kind kind;
397 union { rtx r; int i; machine_mode m; struct insn_link *l; } old_contents;
398 union { rtx *r; int *i; struct insn_link **l; } where;
399 };
401 /* Record a bunch of changes to be undone, up to MAX_UNDO of them.
402 num_undo says how many are currently recorded.
404 other_insn is nonzero if we have modified some other insn in the process
405 of working on subst_insn. It must be verified too. */
407 struct undobuf
408 {
409 struct undo *undos;
410 struct undo *frees;
411 rtx_insn *other_insn;
412 };
414 static struct undobuf undobuf;
416 /* Number of times the pseudo being substituted for
417 was found and replaced. */
419 static int n_occurrences;
421 static rtx reg_nonzero_bits_for_combine (const_rtx, machine_mode, const_rtx,
422 machine_mode,
423 unsigned HOST_WIDE_INT,
424 unsigned HOST_WIDE_INT *);
425 static rtx reg_num_sign_bit_copies_for_combine (const_rtx, machine_mode, const_rtx,
426 machine_mode,
427 unsigned int, unsigned int *);
428 static void do_SUBST (rtx *, rtx);
429 static void do_SUBST_INT (int *, int);
430 static void init_reg_last (void);
431 static void setup_incoming_promotions (rtx_insn *);
432 static void set_nonzero_bits_and_sign_copies (rtx, const_rtx, void *);
433 static int cant_combine_insn_p (rtx_insn *);
434 static int can_combine_p (rtx_insn *, rtx_insn *, rtx_insn *, rtx_insn *,
435 rtx_insn *, rtx_insn *, rtx *, rtx *);
436 static int combinable_i3pat (rtx_insn *, rtx *, rtx, rtx, rtx, int, int, rtx *);
437 static int contains_muldiv (rtx);
438 static rtx_insn *try_combine (rtx_insn *, rtx_insn *, rtx_insn *, rtx_insn *,
439 int *, rtx_insn *);
440 static void undo_all (void);
441 static void undo_commit (void);
442 static rtx *find_split_point (rtx *, rtx_insn *, bool);
443 static rtx subst (rtx, rtx, rtx, int, int, int);
444 static rtx combine_simplify_rtx (rtx, machine_mode, int, int);
445 static rtx simplify_if_then_else (rtx);
446 static rtx simplify_set (rtx);
447 static rtx simplify_logical (rtx);
448 static rtx expand_compound_operation (rtx);
449 static const_rtx expand_field_assignment (const_rtx);
450 static rtx make_extraction (machine_mode, rtx, HOST_WIDE_INT,
451 rtx, unsigned HOST_WIDE_INT, int, int, int);
452 static rtx extract_left_shift (rtx, int);
453 static int get_pos_from_mask (unsigned HOST_WIDE_INT,
454 unsigned HOST_WIDE_INT *);
455 static rtx canon_reg_for_combine (rtx, rtx);
456 static rtx force_to_mode (rtx, machine_mode,
457 unsigned HOST_WIDE_INT, int);
458 static rtx if_then_else_cond (rtx, rtx *, rtx *);
459 static rtx known_cond (rtx, enum rtx_code, rtx, rtx);
460 static int rtx_equal_for_field_assignment_p (rtx, rtx);
461 static rtx make_field_assignment (rtx);
462 static rtx apply_distributive_law (rtx);
463 static rtx distribute_and_simplify_rtx (rtx, int);
464 static rtx simplify_and_const_int_1 (machine_mode, rtx,
465 unsigned HOST_WIDE_INT);
466 static rtx simplify_and_const_int (rtx, machine_mode, rtx,
467 unsigned HOST_WIDE_INT);
468 static int merge_outer_ops (enum rtx_code *, HOST_WIDE_INT *, enum rtx_code,
469 HOST_WIDE_INT, machine_mode, int *);
470 static rtx simplify_shift_const_1 (enum rtx_code, machine_mode, rtx, int);
471 static rtx simplify_shift_const (rtx, enum rtx_code, machine_mode, rtx,
472 int);
473 static int recog_for_combine (rtx *, rtx_insn *, rtx *);
474 static rtx gen_lowpart_for_combine (machine_mode, rtx);
475 static enum rtx_code simplify_compare_const (enum rtx_code, machine_mode,
476 rtx, rtx *);
477 static enum rtx_code simplify_comparison (enum rtx_code, rtx *, rtx *);
478 static void update_table_tick (rtx);
479 static void record_value_for_reg (rtx, rtx_insn *, rtx);
480 static void check_promoted_subreg (rtx_insn *, rtx);
481 static void record_dead_and_set_regs_1 (rtx, const_rtx, void *);
482 static void record_dead_and_set_regs (rtx_insn *);
483 static int get_last_value_validate (rtx *, rtx_insn *, int, int);
484 static rtx get_last_value (const_rtx);
485 static int use_crosses_set_p (const_rtx, int);
486 static void reg_dead_at_p_1 (rtx, const_rtx, void *);
487 static int reg_dead_at_p (rtx, rtx_insn *);
488 static void move_deaths (rtx, rtx, int, rtx_insn *, rtx *);
489 static int reg_bitfield_target_p (rtx, rtx);
490 static void distribute_notes (rtx, rtx_insn *, rtx_insn *, rtx_insn *, rtx, rtx, rtx);
491 static void distribute_links (struct insn_link *);
492 static void mark_used_regs_combine (rtx);
493 static void record_promoted_value (rtx_insn *, rtx);
494 static bool unmentioned_reg_p (rtx, rtx);
495 static void record_truncated_values (rtx *, void *);
496 static bool reg_truncated_to_mode (machine_mode, const_rtx);
497 static rtx gen_lowpart_or_truncate (machine_mode, rtx);
500 /* It is not safe to use ordinary gen_lowpart in combine.
501 See comments in gen_lowpart_for_combine. */
502 #undef RTL_HOOKS_GEN_LOWPART
503 #define RTL_HOOKS_GEN_LOWPART gen_lowpart_for_combine
505 /* Our implementation of gen_lowpart never emits a new pseudo. */
506 #undef RTL_HOOKS_GEN_LOWPART_NO_EMIT
507 #define RTL_HOOKS_GEN_LOWPART_NO_EMIT gen_lowpart_for_combine
509 #undef RTL_HOOKS_REG_NONZERO_REG_BITS
510 #define RTL_HOOKS_REG_NONZERO_REG_BITS reg_nonzero_bits_for_combine
512 #undef RTL_HOOKS_REG_NUM_SIGN_BIT_COPIES
513 #define RTL_HOOKS_REG_NUM_SIGN_BIT_COPIES reg_num_sign_bit_copies_for_combine
515 #undef RTL_HOOKS_REG_TRUNCATED_TO_MODE
516 #define RTL_HOOKS_REG_TRUNCATED_TO_MODE reg_truncated_to_mode
518 static const struct rtl_hooks combine_rtl_hooks = RTL_HOOKS_INITIALIZER;
521 /* Convenience wrapper for the canonicalize_comparison target hook.
522 Target hooks cannot use enum rtx_code. */
523 static inline void
524 target_canonicalize_comparison (enum rtx_code *code, rtx *op0, rtx *op1,
525 bool op0_preserve_value)
527 int code_int = (int)*code;
528 targetm.canonicalize_comparison (&code_int, op0, op1, op0_preserve_value);
529 *code = (enum rtx_code)code_int;
532 /* Try to split PATTERN found in INSN. This returns NULL_RTX if
533 PATTERN cannot be split. Otherwise, it returns an insn sequence.
534 This is a wrapper around split_insns which ensures that the
535 reg_stat vector is made larger if the splitter creates a new
536 register. */
538 static rtx_insn *
539 combine_split_insns (rtx pattern, rtx insn)
541 rtx_insn *ret;
542 unsigned int nregs;
544 ret = safe_as_a <rtx_insn *> (split_insns (pattern, insn));
545 nregs = max_reg_num ();
546 if (nregs > reg_stat.length ())
547 reg_stat.safe_grow_cleared (nregs);
548 return ret;
551 /* This is used by find_single_use to locate an rtx in LOC that
552 contains exactly one use of DEST, which is typically either a REG
553 or CC0. It returns a pointer to the innermost rtx expression
554 containing DEST. Appearances of DEST that are being used to
555 totally replace it are not counted. */
557 static rtx *
558 find_single_use_1 (rtx dest, rtx *loc)
560 rtx x = *loc;
561 enum rtx_code code = GET_CODE (x);
562 rtx *result = NULL;
563 rtx *this_result;
564 int i;
565 const char *fmt;
567 switch (code)
569 case CONST:
570 case LABEL_REF:
571 case SYMBOL_REF:
572 CASE_CONST_ANY:
573 case CLOBBER:
574 return 0;
576 case SET:
577 /* If the destination is anything other than CC0, PC, a REG or a SUBREG
578 of a REG that occupies all of the REG, the insn uses DEST if
579 it is mentioned in the destination or the source. Otherwise, we just
580 need to check the source. */
581 if (GET_CODE (SET_DEST (x)) != CC0
582 && GET_CODE (SET_DEST (x)) != PC
583 && !REG_P (SET_DEST (x))
584 && ! (GET_CODE (SET_DEST (x)) == SUBREG
585 && REG_P (SUBREG_REG (SET_DEST (x)))
586 && (((GET_MODE_SIZE (GET_MODE (SUBREG_REG (SET_DEST (x))))
587 + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)
588 == ((GET_MODE_SIZE (GET_MODE (SET_DEST (x)))
589 + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD))))
590 break;
592 return find_single_use_1 (dest, &SET_SRC (x));
594 case MEM:
595 case SUBREG:
596 return find_single_use_1 (dest, &XEXP (x, 0));
598 default:
599 break;
602 /* If it wasn't one of the common cases above, check each expression and
603 vector of this code. Look for a unique usage of DEST. */
605 fmt = GET_RTX_FORMAT (code);
606 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
608 if (fmt[i] == 'e')
610 if (dest == XEXP (x, i)
611 || (REG_P (dest) && REG_P (XEXP (x, i))
612 && REGNO (dest) == REGNO (XEXP (x, i))))
613 this_result = loc;
614 else
615 this_result = find_single_use_1 (dest, &XEXP (x, i));
617 if (result == NULL)
618 result = this_result;
619 else if (this_result)
620 /* Duplicate usage. */
621 return NULL;
623 else if (fmt[i] == 'E')
625 int j;
627 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
629 if (XVECEXP (x, i, j) == dest
630 || (REG_P (dest)
631 && REG_P (XVECEXP (x, i, j))
632 && REGNO (XVECEXP (x, i, j)) == REGNO (dest)))
633 this_result = loc;
634 else
635 this_result = find_single_use_1 (dest, &XVECEXP (x, i, j));
637 if (result == NULL)
638 result = this_result;
639 else if (this_result)
640 return NULL;
645 return result;
649 /* See if DEST, produced in INSN, is used only a single time in the
650 sequel. If so, return a pointer to the innermost rtx expression in which
651 it is used.
653 If PLOC is nonzero, *PLOC is set to the insn containing the single use.
655 If DEST is cc0_rtx, we look only at the next insn. In that case, we don't
656 care about REG_DEAD notes or LOG_LINKS.
658 Otherwise, we find the single use by finding an insn that has a
659 LOG_LINKS pointing at INSN and has a REG_DEAD note for DEST. If DEST is
660 only referenced once in that insn, we know that it must be the first
661 and last insn referencing DEST. */
663 static rtx *
664 find_single_use (rtx dest, rtx_insn *insn, rtx_insn **ploc)
666 basic_block bb;
667 rtx_insn *next;
668 rtx *result;
669 struct insn_link *link;
671 #ifdef HAVE_cc0
672 if (dest == cc0_rtx)
674 next = NEXT_INSN (insn);
675 if (next == 0
676 || (!NONJUMP_INSN_P (next) && !JUMP_P (next)))
677 return 0;
679 result = find_single_use_1 (dest, &PATTERN (next));
680 if (result && ploc)
681 *ploc = next;
682 return result;
684 #endif
686 if (!REG_P (dest))
687 return 0;
689 bb = BLOCK_FOR_INSN (insn);
690 for (next = NEXT_INSN (insn);
691 next && BLOCK_FOR_INSN (next) == bb;
692 next = NEXT_INSN (next))
693 if (INSN_P (next) && dead_or_set_p (next, dest))
695 FOR_EACH_LOG_LINK (link, next)
696 if (link->insn == insn && link->regno == REGNO (dest))
697 break;
699 if (link)
701 result = find_single_use_1 (dest, &PATTERN (next));
702 if (ploc)
703 *ploc = next;
704 return result;
708 return 0;
711 /* Substitute NEWVAL, an rtx expression, into INTO, a place in some
712 insn. The substitution can be undone by undo_all. If INTO is already
713 set to NEWVAL, do not record this change. Because computing NEWVAL might
714 also call SUBST, we have to compute it before we put anything into
715 the undo table. */
717 static void
718 do_SUBST (rtx *into, rtx newval)
720 struct undo *buf;
721 rtx oldval = *into;
723 if (oldval == newval)
724 return;
726 /* We'd like to catch as many invalid transformations here as
727 possible. Unfortunately, there are way too many mode changes
728 that are perfectly valid, so we'd waste too much effort for
729 little gain doing the checks here. Focus on catching invalid
730 transformations involving integer constants. */
731 if (GET_MODE_CLASS (GET_MODE (oldval)) == MODE_INT
732 && CONST_INT_P (newval))
734 /* Sanity check that we're replacing oldval with a CONST_INT
735 that is a valid sign-extension for the original mode. */
736 gcc_assert (INTVAL (newval)
737 == trunc_int_for_mode (INTVAL (newval), GET_MODE (oldval)));
739 /* Replacing the operand of a SUBREG or a ZERO_EXTEND with a
740 CONST_INT is not valid, because after the replacement, the
741 original mode would be gone. Unfortunately, we can't tell
742 when do_SUBST is called to replace the operand thereof, so we
743 perform this test on oldval instead, checking whether an
744 invalid replacement took place before we got here. */
745 gcc_assert (!(GET_CODE (oldval) == SUBREG
746 && CONST_INT_P (SUBREG_REG (oldval))));
747 gcc_assert (!(GET_CODE (oldval) == ZERO_EXTEND
748 && CONST_INT_P (XEXP (oldval, 0))));
751 if (undobuf.frees)
752 buf = undobuf.frees, undobuf.frees = buf->next;
753 else
754 buf = XNEW (struct undo);
756 buf->kind = UNDO_RTX;
757 buf->where.r = into;
758 buf->old_contents.r = oldval;
759 *into = newval;
761 buf->next = undobuf.undos, undobuf.undos = buf;
764 #define SUBST(INTO, NEWVAL) do_SUBST (&(INTO), (NEWVAL))
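/* A sketch of typical use of the SUBST macro (PAT and NEW_SRC are assumed
   to be rtx values already in scope):

       SUBST (SET_SRC (pat), new_src);

   This replaces the source of PAT in place while pushing an UNDO_RTX
   record onto undobuf.undos, so undo_all can restore the original value
   if the combination is later abandoned.  */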
766 /* Similar to SUBST, but NEWVAL is an int expression. Note that substitution
767 for the value of a HOST_WIDE_INT value (including CONST_INT) is
768 not safe. */
770 static void
771 do_SUBST_INT (int *into, int newval)
773 struct undo *buf;
774 int oldval = *into;
776 if (oldval == newval)
777 return;
779 if (undobuf.frees)
780 buf = undobuf.frees, undobuf.frees = buf->next;
781 else
782 buf = XNEW (struct undo);
784 buf->kind = UNDO_INT;
785 buf->where.i = into;
786 buf->old_contents.i = oldval;
787 *into = newval;
789 buf->next = undobuf.undos, undobuf.undos = buf;
792 #define SUBST_INT(INTO, NEWVAL) do_SUBST_INT (&(INTO), (NEWVAL))
794 /* Similar to SUBST, but just substitute the mode. This is used when
795 changing the mode of a pseudo-register, so that any other
796 references to the entry in the regno_reg_rtx array will change as
797 well. */
799 static void
800 do_SUBST_MODE (rtx *into, machine_mode newval)
802 struct undo *buf;
803 machine_mode oldval = GET_MODE (*into);
805 if (oldval == newval)
806 return;
808 if (undobuf.frees)
809 buf = undobuf.frees, undobuf.frees = buf->next;
810 else
811 buf = XNEW (struct undo);
813 buf->kind = UNDO_MODE;
814 buf->where.r = into;
815 buf->old_contents.m = oldval;
816 adjust_reg_mode (*into, newval);
818 buf->next = undobuf.undos, undobuf.undos = buf;
821 #define SUBST_MODE(INTO, NEWVAL) do_SUBST_MODE (&(INTO), (NEWVAL))
823 #ifndef HAVE_cc0
824 /* Similar to SUBST, but NEWVAL is a LOG_LINKS expression. */
826 static void
827 do_SUBST_LINK (struct insn_link **into, struct insn_link *newval)
829 struct undo *buf;
830 struct insn_link * oldval = *into;
832 if (oldval == newval)
833 return;
835 if (undobuf.frees)
836 buf = undobuf.frees, undobuf.frees = buf->next;
837 else
838 buf = XNEW (struct undo);
840 buf->kind = UNDO_LINKS;
841 buf->where.l = into;
842 buf->old_contents.l = oldval;
843 *into = newval;
845 buf->next = undobuf.undos, undobuf.undos = buf;
848 #define SUBST_LINK(oldval, newval) do_SUBST_LINK (&oldval, newval)
849 #endif
851 /* Subroutine of try_combine. Determine whether the replacement patterns
852 NEWPAT, NEWI2PAT and NEWOTHERPAT are cheaper according to insn_rtx_cost
853 than the original sequence I0, I1, I2, I3 and undobuf.other_insn. Note
854 that I0, I1 and/or NEWI2PAT may be NULL_RTX. Similarly, NEWOTHERPAT and
855 undobuf.other_insn may also both be NULL_RTX. Return false if the cost
856 of all the instructions can be estimated and the replacements are more
857 expensive than the original sequence. */
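/* For instance (the costs here are hypothetical): combining I2 of cost 4
   and I3 of cost 4 into a single replacement pattern of cost 12 gives
   old_cost = 8 and new_cost = 12, so the combination is rejected; a
   replacement pattern of cost 8 or less would be allowed.  */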
859 static bool
860 combine_validate_cost (rtx_insn *i0, rtx_insn *i1, rtx_insn *i2, rtx_insn *i3,
861 rtx newpat, rtx newi2pat, rtx newotherpat)
863 int i0_cost, i1_cost, i2_cost, i3_cost;
864 int new_i2_cost, new_i3_cost;
865 int old_cost, new_cost;
867 /* Lookup the original insn_rtx_costs. */
868 i2_cost = INSN_COST (i2);
869 i3_cost = INSN_COST (i3);
871 if (i1)
873 i1_cost = INSN_COST (i1);
874 if (i0)
876 i0_cost = INSN_COST (i0);
877 old_cost = (i0_cost > 0 && i1_cost > 0 && i2_cost > 0 && i3_cost > 0
878 ? i0_cost + i1_cost + i2_cost + i3_cost : 0);
880 else
882 old_cost = (i1_cost > 0 && i2_cost > 0 && i3_cost > 0
883 ? i1_cost + i2_cost + i3_cost : 0);
884 i0_cost = 0;
887 else
889 old_cost = (i2_cost > 0 && i3_cost > 0) ? i2_cost + i3_cost : 0;
890 i1_cost = i0_cost = 0;
893 /* If we have split a PARALLEL I2 to I1,I2, we have counted its cost twice;
894 correct that. */
895 if (old_cost && i1 && INSN_UID (i1) == INSN_UID (i2))
896 old_cost -= i1_cost;
899 /* Calculate the replacement insn_rtx_costs. */
900 new_i3_cost = insn_rtx_cost (newpat, optimize_this_for_speed_p);
901 if (newi2pat)
903 new_i2_cost = insn_rtx_cost (newi2pat, optimize_this_for_speed_p);
904 new_cost = (new_i2_cost > 0 && new_i3_cost > 0)
905 ? new_i2_cost + new_i3_cost : 0;
907 else
909 new_cost = new_i3_cost;
910 new_i2_cost = 0;
913 if (undobuf.other_insn)
915 int old_other_cost, new_other_cost;
917 old_other_cost = INSN_COST (undobuf.other_insn);
918 new_other_cost = insn_rtx_cost (newotherpat, optimize_this_for_speed_p);
919 if (old_other_cost > 0 && new_other_cost > 0)
921 old_cost += old_other_cost;
922 new_cost += new_other_cost;
924 else
925 old_cost = 0;
928 /* Disallow this combination if both new_cost and old_cost are greater than
929 zero, and new_cost is greater than old cost. */
930 int reject = old_cost > 0 && new_cost > old_cost;
932 if (dump_file)
934 fprintf (dump_file, "%s combination of insns ",
935 reject ? "rejecting" : "allowing");
936 if (i0)
937 fprintf (dump_file, "%d, ", INSN_UID (i0));
938 if (i1 && INSN_UID (i1) != INSN_UID (i2))
939 fprintf (dump_file, "%d, ", INSN_UID (i1));
940 fprintf (dump_file, "%d and %d\n", INSN_UID (i2), INSN_UID (i3));
942 fprintf (dump_file, "original costs ");
943 if (i0)
944 fprintf (dump_file, "%d + ", i0_cost);
945 if (i1 && INSN_UID (i1) != INSN_UID (i2))
946 fprintf (dump_file, "%d + ", i1_cost);
947 fprintf (dump_file, "%d + %d = %d\n", i2_cost, i3_cost, old_cost);
949 if (newi2pat)
950 fprintf (dump_file, "replacement costs %d + %d = %d\n",
951 new_i2_cost, new_i3_cost, new_cost);
952 else
953 fprintf (dump_file, "replacement cost %d\n", new_cost);
956 if (reject)
957 return false;
959 /* Update the uid_insn_cost array with the replacement costs. */
960 INSN_COST (i2) = new_i2_cost;
961 INSN_COST (i3) = new_i3_cost;
962 if (i1)
964 INSN_COST (i1) = 0;
965 if (i0)
966 INSN_COST (i0) = 0;
969 return true;
973 /* Delete any insns that copy a register to itself. */
975 static void
976 delete_noop_moves (void)
978 rtx_insn *insn, *next;
979 basic_block bb;
981 FOR_EACH_BB_FN (bb, cfun)
983 for (insn = BB_HEAD (bb); insn != NEXT_INSN (BB_END (bb)); insn = next)
985 next = NEXT_INSN (insn);
986 if (INSN_P (insn) && noop_move_p (insn))
988 if (dump_file)
989 fprintf (dump_file, "deleting noop move %d\n", INSN_UID (insn));
991 delete_insn_and_edges (insn);
998 /* Return false if we do not want to (or cannot) combine DEF. */
999 static bool
1000 can_combine_def_p (df_ref def)
1002 /* Do not consider if it is pre/post modification in MEM. */
1003 if (DF_REF_FLAGS (def) & DF_REF_PRE_POST_MODIFY)
1004 return false;
1006 unsigned int regno = DF_REF_REGNO (def);
1008 /* Do not combine frame pointer adjustments. */
1009 if ((regno == FRAME_POINTER_REGNUM
1010 && (!reload_completed || frame_pointer_needed))
1011 #if !HARD_FRAME_POINTER_IS_FRAME_POINTER
1012 || (regno == HARD_FRAME_POINTER_REGNUM
1013 && (!reload_completed || frame_pointer_needed))
1014 #endif
1015 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
1016 || (regno == ARG_POINTER_REGNUM && fixed_regs[regno])
1017 #endif
1019 return false;
1021 return true;
1024 /* Return false if we do not want to (or cannot) combine USE. */
1025 static bool
1026 can_combine_use_p (df_ref use)
1028 /* Do not consider the usage of the stack pointer by function call. */
1029 if (DF_REF_FLAGS (use) & DF_REF_CALL_STACK_USAGE)
1030 return false;
1032 return true;
1035 /* Fill in log links field for all insns. */
1037 static void
1038 create_log_links (void)
1040 basic_block bb;
1041 rtx_insn **next_use;
1042 rtx_insn *insn;
1043 df_ref def, use;
1045 next_use = XCNEWVEC (rtx_insn *, max_reg_num ());
1047 /* Pass through each block from the end, recording the uses of each
1048 register and establishing log links when def is encountered.
1049 Note that we do not clear the next_use array in order to save time,
1050 so we have to test whether the use is in the same basic block as the def.
1052 There are a few cases below when we do not consider the definition or
1053 usage -- these are taken from what the original flow.c did. Don't ask me
1054 why it is done this way; I don't know, and if it works, I don't want to know. */
1056 FOR_EACH_BB_FN (bb, cfun)
1058 FOR_BB_INSNS_REVERSE (bb, insn)
1060 if (!NONDEBUG_INSN_P (insn))
1061 continue;
1063 /* Log links are created only once. */
1064 gcc_assert (!LOG_LINKS (insn));
1066 FOR_EACH_INSN_DEF (def, insn)
1068 unsigned int regno = DF_REF_REGNO (def);
1069 rtx_insn *use_insn;
1071 if (!next_use[regno])
1072 continue;
1074 if (!can_combine_def_p (def))
1075 continue;
1077 use_insn = next_use[regno];
1078 next_use[regno] = NULL;
1080 if (BLOCK_FOR_INSN (use_insn) != bb)
1081 continue;
1083 /* flow.c claimed:
1085 We don't build a LOG_LINK for hard registers contained
1086 in ASM_OPERANDs. If these registers get replaced,
1087 we might wind up changing the semantics of the insn,
1088 even if reload can make what appear to be valid
1089 assignments later. */
1090 if (regno < FIRST_PSEUDO_REGISTER
1091 && asm_noperands (PATTERN (use_insn)) >= 0)
1092 continue;
1094 /* Don't add duplicate links between instructions. */
1095 struct insn_link *links;
1096 FOR_EACH_LOG_LINK (links, use_insn)
1097 if (insn == links->insn && regno == links->regno)
1098 break;
1100 if (!links)
1101 LOG_LINKS (use_insn)
1102 = alloc_insn_link (insn, regno, LOG_LINKS (use_insn));
1105 FOR_EACH_INSN_USE (use, insn)
1106 if (can_combine_use_p (use))
1107 next_use[DF_REF_REGNO (use)] = insn;
1111 free (next_use);
1114 /* Walk the LOG_LINKS of insn B to see if we find a reference to A. Return
1115 true if we found a LOG_LINK that proves that A feeds B. This only works
1116 if there are no instructions between A and B which could have a link
1117 depending on A, since in that case we would not record a link for B.
1118 We also check the implicit dependency created by a cc0 setter/user
1119 pair. */
1121 static bool
1122 insn_a_feeds_b (rtx_insn *a, rtx_insn *b)
1124 struct insn_link *links;
1125 FOR_EACH_LOG_LINK (links, b)
1126 if (links->insn == a)
1127 return true;
1128 #ifdef HAVE_cc0
1129 if (sets_cc0_p (a))
1130 return true;
1131 #endif
1132 return false;
1135 /* Main entry point for combiner. F is the first insn of the function.
1136 NREGS is the first unused pseudo-reg number.
1138 Return nonzero if the combiner has turned an indirect jump
1139 instruction into a direct jump. */
1140 static int
1141 combine_instructions (rtx_insn *f, unsigned int nregs)
1143 rtx_insn *insn, *next;
1144 #ifdef HAVE_cc0
1145 rtx_insn *prev;
1146 #endif
1147 struct insn_link *links, *nextlinks;
1148 rtx_insn *first;
1149 basic_block last_bb;
1151 int new_direct_jump_p = 0;
1153 for (first = f; first && !INSN_P (first); )
1154 first = NEXT_INSN (first);
1155 if (!first)
1156 return 0;
1158 combine_attempts = 0;
1159 combine_merges = 0;
1160 combine_extras = 0;
1161 combine_successes = 0;
1163 rtl_hooks = combine_rtl_hooks;
1165 reg_stat.safe_grow_cleared (nregs);
1167 init_recog_no_volatile ();
1169 /* Allocate array for insn info. */
1170 max_uid_known = get_max_uid ();
1171 uid_log_links = XCNEWVEC (struct insn_link *, max_uid_known + 1);
1172 uid_insn_cost = XCNEWVEC (int, max_uid_known + 1);
1173 gcc_obstack_init (&insn_link_obstack);
1175 nonzero_bits_mode = mode_for_size (HOST_BITS_PER_WIDE_INT, MODE_INT, 0);
1177 /* Don't use reg_stat[].nonzero_bits when computing it. This can cause
1178 problems when, for example, we have j <<= 1 in a loop. */
1180 nonzero_sign_valid = 0;
1181 label_tick = label_tick_ebb_start = 1;
1183 /* Scan all SETs and see if we can deduce anything about what
1184 bits are known to be zero for some registers and how many copies
1185 of the sign bit are known to exist for those registers.
1187 Also set any known values so that we can use them while searching
1188 for what bits are known to be set. */
1190 setup_incoming_promotions (first);
1191 /* Allow the entry block and the first block to fall into the same EBB.
1192 Conceptually the incoming promotions are assigned to the entry block. */
1193 last_bb = ENTRY_BLOCK_PTR_FOR_FN (cfun);
1195 create_log_links ();
1196 FOR_EACH_BB_FN (this_basic_block, cfun)
1198 optimize_this_for_speed_p = optimize_bb_for_speed_p (this_basic_block);
1199 last_call_luid = 0;
1200 mem_last_set = -1;
1202 label_tick++;
1203 if (!single_pred_p (this_basic_block)
1204 || single_pred (this_basic_block) != last_bb)
1205 label_tick_ebb_start = label_tick;
1206 last_bb = this_basic_block;
1208 FOR_BB_INSNS (this_basic_block, insn)
1209 if (INSN_P (insn) && BLOCK_FOR_INSN (insn))
1211 #ifdef AUTO_INC_DEC
1212 rtx links;
1213 #endif
1215 subst_low_luid = DF_INSN_LUID (insn);
1216 subst_insn = insn;
1218 note_stores (PATTERN (insn), set_nonzero_bits_and_sign_copies,
1219 insn);
1220 record_dead_and_set_regs (insn);
1222 #ifdef AUTO_INC_DEC
1223 for (links = REG_NOTES (insn); links; links = XEXP (links, 1))
1224 if (REG_NOTE_KIND (links) == REG_INC)
1225 set_nonzero_bits_and_sign_copies (XEXP (links, 0), NULL_RTX,
1226 insn);
1227 #endif
1229 /* Record the current insn_rtx_cost of this instruction. */
1230 if (NONJUMP_INSN_P (insn))
1231 INSN_COST (insn) = insn_rtx_cost (PATTERN (insn),
1232 optimize_this_for_speed_p);
1233 if (dump_file)
1234 fprintf (dump_file, "insn_cost %d: %d\n",
1235 INSN_UID (insn), INSN_COST (insn));
1239 nonzero_sign_valid = 1;
1241 /* Now scan all the insns in forward order. */
1242 label_tick = label_tick_ebb_start = 1;
1243 init_reg_last ();
1244 setup_incoming_promotions (first);
1245 last_bb = ENTRY_BLOCK_PTR_FOR_FN (cfun);
1246 int max_combine = PARAM_VALUE (PARAM_MAX_COMBINE_INSNS);
1248 FOR_EACH_BB_FN (this_basic_block, cfun)
1250 rtx_insn *last_combined_insn = NULL;
1251 optimize_this_for_speed_p = optimize_bb_for_speed_p (this_basic_block);
1252 last_call_luid = 0;
1253 mem_last_set = -1;
1255 label_tick++;
1256 if (!single_pred_p (this_basic_block)
1257 || single_pred (this_basic_block) != last_bb)
1258 label_tick_ebb_start = label_tick;
1259 last_bb = this_basic_block;
1261 rtl_profile_for_bb (this_basic_block);
1262 for (insn = BB_HEAD (this_basic_block);
1263 insn != NEXT_INSN (BB_END (this_basic_block));
1264 insn = next ? next : NEXT_INSN (insn))
1266 next = 0;
1267 if (!NONDEBUG_INSN_P (insn))
1268 continue;
1270 while (last_combined_insn
1271 && last_combined_insn->deleted ())
1272 last_combined_insn = PREV_INSN (last_combined_insn);
1273 if (last_combined_insn == NULL_RTX
1274 || BARRIER_P (last_combined_insn)
1275 || BLOCK_FOR_INSN (last_combined_insn) != this_basic_block
1276 || DF_INSN_LUID (last_combined_insn) <= DF_INSN_LUID (insn))
1277 last_combined_insn = insn;
1279 /* See if we know about function return values before this
1280 insn based upon SUBREG flags. */
1281 check_promoted_subreg (insn, PATTERN (insn));
1283 /* See if we can find hardregs and subreg of pseudos in
1284 narrower modes. This could help turn TRUNCATEs
1285 into SUBREGs. */
1286 note_uses (&PATTERN (insn), record_truncated_values, NULL);
1288 /* Try this insn with each insn it links back to. */
1290 FOR_EACH_LOG_LINK (links, insn)
1291 if ((next = try_combine (insn, links->insn, NULL,
1292 NULL, &new_direct_jump_p,
1293 last_combined_insn)) != 0)
1295 statistics_counter_event (cfun, "two-insn combine", 1);
1296 goto retry;
1299 /* Try each sequence of three linked insns ending with this one. */
1301 if (max_combine >= 3)
1302 FOR_EACH_LOG_LINK (links, insn)
1304 rtx_insn *link = links->insn;
1306 /* If the linked insn has been replaced by a note, then there
1307 is no point in pursuing this chain any further. */
1308 if (NOTE_P (link))
1309 continue;
1311 FOR_EACH_LOG_LINK (nextlinks, link)
1312 if ((next = try_combine (insn, link, nextlinks->insn,
1313 NULL, &new_direct_jump_p,
1314 last_combined_insn)) != 0)
1316 statistics_counter_event (cfun, "three-insn combine", 1);
1317 goto retry;
1321 #ifdef HAVE_cc0
1322 /* Try to combine a jump insn that uses CC0
1323 with a preceding insn that sets CC0, and maybe with its
1324 logical predecessor as well.
1325 This is how we make decrement-and-branch insns.
1326 We need this special code because data flow connections
1327 via CC0 do not get entered in LOG_LINKS. */
1329 if (JUMP_P (insn)
1330 && (prev = prev_nonnote_insn (insn)) != 0
1331 && NONJUMP_INSN_P (prev)
1332 && sets_cc0_p (PATTERN (prev)))
1334 if ((next = try_combine (insn, prev, NULL, NULL,
1335 &new_direct_jump_p,
1336 last_combined_insn)) != 0)
1337 goto retry;
1339 FOR_EACH_LOG_LINK (nextlinks, prev)
1340 if ((next = try_combine (insn, prev, nextlinks->insn,
1341 NULL, &new_direct_jump_p,
1342 last_combined_insn)) != 0)
1343 goto retry;
1346 /* Do the same for an insn that explicitly references CC0. */
1347 if (NONJUMP_INSN_P (insn)
1348 && (prev = prev_nonnote_insn (insn)) != 0
1349 && NONJUMP_INSN_P (prev)
1350 && sets_cc0_p (PATTERN (prev))
1351 && GET_CODE (PATTERN (insn)) == SET
1352 && reg_mentioned_p (cc0_rtx, SET_SRC (PATTERN (insn))))
1354 if ((next = try_combine (insn, prev, NULL, NULL,
1355 &new_direct_jump_p,
1356 last_combined_insn)) != 0)
1357 goto retry;
1359 FOR_EACH_LOG_LINK (nextlinks, prev)
1360 if ((next = try_combine (insn, prev, nextlinks->insn,
1361 NULL, &new_direct_jump_p,
1362 last_combined_insn)) != 0)
1363 goto retry;
1366 /* Finally, see if any of the insns that this insn links to
1367 explicitly references CC0. If so, try this insn, that insn,
1368 and its predecessor if it sets CC0. */
1369 FOR_EACH_LOG_LINK (links, insn)
1370 if (NONJUMP_INSN_P (links->insn)
1371 && GET_CODE (PATTERN (links->insn)) == SET
1372 && reg_mentioned_p (cc0_rtx, SET_SRC (PATTERN (links->insn)))
1373 && (prev = prev_nonnote_insn (links->insn)) != 0
1374 && NONJUMP_INSN_P (prev)
1375 && sets_cc0_p (PATTERN (prev))
1376 && (next = try_combine (insn, links->insn,
1377 prev, NULL, &new_direct_jump_p,
1378 last_combined_insn)) != 0)
1379 goto retry;
1380 #endif
1382 /* Try combining an insn with two different insns whose results it
1383 uses. */
1384 if (max_combine >= 3)
1385 FOR_EACH_LOG_LINK (links, insn)
1386 for (nextlinks = links->next; nextlinks;
1387 nextlinks = nextlinks->next)
1388 if ((next = try_combine (insn, links->insn,
1389 nextlinks->insn, NULL,
1390 &new_direct_jump_p,
1391 last_combined_insn)) != 0)
1394 statistics_counter_event (cfun, "three-insn combine", 1);
1395 goto retry;
1398 /* Try four-instruction combinations. */
1399 if (max_combine >= 4)
1400 FOR_EACH_LOG_LINK (links, insn)
1402 struct insn_link *next1;
1403 rtx_insn *link = links->insn;
1405 /* If the linked insn has been replaced by a note, then there
1406 is no point in pursuing this chain any further. */
1407 if (NOTE_P (link))
1408 continue;
1410 FOR_EACH_LOG_LINK (next1, link)
1412 rtx_insn *link1 = next1->insn;
1413 if (NOTE_P (link1))
1414 continue;
1415 /* I0 -> I1 -> I2 -> I3. */
1416 FOR_EACH_LOG_LINK (nextlinks, link1)
1417 if ((next = try_combine (insn, link, link1,
1418 nextlinks->insn,
1419 &new_direct_jump_p,
1420 last_combined_insn)) != 0)
1422 statistics_counter_event (cfun, "four-insn combine", 1);
1423 goto retry;
1425 /* I0, I1 -> I2, I2 -> I3. */
1426 for (nextlinks = next1->next; nextlinks;
1427 nextlinks = nextlinks->next)
1428 if ((next = try_combine (insn, link, link1,
1429 nextlinks->insn,
1430 &new_direct_jump_p,
1431 last_combined_insn)) != 0)
1433 statistics_counter_event (cfun, "four-insn combine", 1);
1434 goto retry;
1438 for (next1 = links->next; next1; next1 = next1->next)
1440 rtx_insn *link1 = next1->insn;
1441 if (NOTE_P (link1))
1442 continue;
1443 /* I0 -> I2; I1, I2 -> I3. */
1444 FOR_EACH_LOG_LINK (nextlinks, link)
1445 if ((next = try_combine (insn, link, link1,
1446 nextlinks->insn,
1447 &new_direct_jump_p,
1448 last_combined_insn)) != 0)
1450 statistics_counter_event (cfun, "four-insn combine", 1);
1451 goto retry;
1453 /* I0 -> I1; I1, I2 -> I3. */
1454 FOR_EACH_LOG_LINK (nextlinks, link1)
1455 if ((next = try_combine (insn, link, link1,
1456 nextlinks->insn,
1457 &new_direct_jump_p,
1458 last_combined_insn)) != 0)
1460 statistics_counter_event (cfun, "four-insn combine", 1);
1461 goto retry;
1466 /* Try this insn with each REG_EQUAL note it links back to. */
1467 FOR_EACH_LOG_LINK (links, insn)
1469 rtx set, note;
1470 rtx_insn *temp = links->insn;
1471 if ((set = single_set (temp)) != 0
1472 && (note = find_reg_equal_equiv_note (temp)) != 0
1473 && (note = XEXP (note, 0), GET_CODE (note)) != EXPR_LIST
1474 /* Avoid using a register that may already have been marked
1475 dead by an earlier instruction. */
1476 && ! unmentioned_reg_p (note, SET_SRC (set))
1477 && (GET_MODE (note) == VOIDmode
1478 ? SCALAR_INT_MODE_P (GET_MODE (SET_DEST (set)))
1479 : GET_MODE (SET_DEST (set)) == GET_MODE (note)))
1481 /* Temporarily replace the set's source with the
1482 contents of the REG_EQUAL note. The insn will
1483 be deleted or recognized by try_combine. */
1484 rtx orig = SET_SRC (set);
1485 SET_SRC (set) = note;
1486 i2mod = temp;
1487 i2mod_old_rhs = copy_rtx (orig);
1488 i2mod_new_rhs = copy_rtx (note);
1489 next = try_combine (insn, i2mod, NULL, NULL,
1490 &new_direct_jump_p,
1491 last_combined_insn);
1492 i2mod = NULL;
1493 if (next)
1495 statistics_counter_event (cfun, "insn-with-note combine", 1);
1496 goto retry;
1498 SET_SRC (set) = orig;
1502 if (!NOTE_P (insn))
1503 record_dead_and_set_regs (insn);
1505 retry:
1510 default_rtl_profile ();
1511 clear_bb_flags ();
1512 new_direct_jump_p |= purge_all_dead_edges ();
1513 delete_noop_moves ();
1515 /* Clean up. */
1516 obstack_free (&insn_link_obstack, NULL);
1517 free (uid_log_links);
1518 free (uid_insn_cost);
1519 reg_stat.release ();
1522 struct undo *undo, *next;
1523 for (undo = undobuf.frees; undo; undo = next)
1525 next = undo->next;
1526 free (undo);
1528 undobuf.frees = 0;
1531 total_attempts += combine_attempts;
1532 total_merges += combine_merges;
1533 total_extras += combine_extras;
1534 total_successes += combine_successes;
1536 nonzero_sign_valid = 0;
1537 rtl_hooks = general_rtl_hooks;
1539 /* Make recognizer allow volatile MEMs again. */
1540 init_recog ();
1542 return new_direct_jump_p;
1545 /* Wipe the last_xxx fields of reg_stat in preparation for another pass. */
1547 static void
1548 init_reg_last (void)
1550 unsigned int i;
1551 reg_stat_type *p;
1553 FOR_EACH_VEC_ELT (reg_stat, i, p)
1554 memset (p, 0, offsetof (reg_stat_type, sign_bit_copies));
1557 /* Set up any promoted values for incoming argument registers. */
1559 static void
1560 setup_incoming_promotions (rtx_insn *first)
1562 tree arg;
1563 bool strictly_local = false;
1565 for (arg = DECL_ARGUMENTS (current_function_decl); arg;
1566 arg = DECL_CHAIN (arg))
1568 rtx x, reg = DECL_INCOMING_RTL (arg);
1569 int uns1, uns3;
1570 machine_mode mode1, mode2, mode3, mode4;
1572 /* Only continue if the incoming argument is in a register. */
1573 if (!REG_P (reg))
1574 continue;
1576 /* Determine, if possible, whether all call sites of the current
1577 function lie within the current compilation unit. (This does
1578 take into account the exporting of a function via taking its
1579 address, and so forth.) */
1580 strictly_local = cgraph_node::local_info (current_function_decl)->local;
1582 /* The mode and signedness of the argument before any promotions happen
1583 (equal to the mode of the pseudo holding it at that stage). */
1584 mode1 = TYPE_MODE (TREE_TYPE (arg));
1585 uns1 = TYPE_UNSIGNED (TREE_TYPE (arg));
1587 /* The mode and signedness of the argument after any source language and
1588 TARGET_PROMOTE_PROTOTYPES-driven promotions. */
1589 mode2 = TYPE_MODE (DECL_ARG_TYPE (arg));
1590 uns3 = TYPE_UNSIGNED (DECL_ARG_TYPE (arg));
1592 /* The mode and signedness of the argument as it is actually passed,
1593 see assign_parm_setup_reg in function.c. */
1594 mode3 = promote_function_mode (TREE_TYPE (arg), mode1, &uns3,
1595 TREE_TYPE (cfun->decl), 0);
1597 /* The mode of the register in which the argument is being passed. */
1598 mode4 = GET_MODE (reg);
1600 /* Eliminate sign extensions in the callee when:
1601 (a) A mode promotion has occurred; */
1602 if (mode1 == mode3)
1603 continue;
1604 /* (b) The mode of the register is the same as the mode of
1605 the argument as it is passed; */
1606 if (mode3 != mode4)
1607 continue;
1608 /* (c) There's no language level extension; */
1609 if (mode1 == mode2)
1611 /* (c.1) All callers are from the current compilation unit. If that's
1612 the case we don't have to rely on an ABI, we only have to know
1613 what we're generating right now, and we know that we will do the
1614 mode1 to mode2 promotion with the given sign. */
1615 else if (!strictly_local)
1616 continue;
1617 /* (c.2) The combination of the two promotions is useful. This is
1618 true when the signs match, or if the first promotion is unsigned.
1619 In the latter case, (sign_extend (zero_extend x)) is the same as
1620 (zero_extend (zero_extend x)), so make sure to force UNS3 true. */
1621 else if (uns1)
1622 uns3 = true;
1623 else if (uns3)
1624 continue;
1626 /* Record that the value was promoted from mode1 to mode3,
1627 so that any sign extension at the head of the current
1628 function may be eliminated. */
1629 x = gen_rtx_CLOBBER (mode1, const0_rtx);
1630 x = gen_rtx_fmt_e ((uns3 ? ZERO_EXTEND : SIGN_EXTEND), mode3, x);
1631 record_value_for_reg (reg, first, x);
1635 /* Called via note_stores. If X is a pseudo that is narrower than
1636 HOST_BITS_PER_WIDE_INT and is being set, record what bits are known zero.
1638 If we are setting only a portion of X and we can't figure out what
1639 portion, assume all bits will be used since we don't know what will
1640 be happening.
1642 Similarly, set how many bits of X are known to be copies of the sign bit
1643 at all locations in the function. This is the smallest number implied
1644 by any set of X. */
1646 static void
1647 set_nonzero_bits_and_sign_copies (rtx x, const_rtx set, void *data)
1649 rtx_insn *insn = (rtx_insn *) data;
1650 unsigned int num;
1652 if (REG_P (x)
1653 && REGNO (x) >= FIRST_PSEUDO_REGISTER
1654 /* If this register is undefined at the start of the file, we can't
1655 say what its contents were. */
1656 && ! REGNO_REG_SET_P
1657 (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb), REGNO (x))
1658 && HWI_COMPUTABLE_MODE_P (GET_MODE (x)))
1660 reg_stat_type *rsp = &reg_stat[REGNO (x)];
1662 if (set == 0 || GET_CODE (set) == CLOBBER)
1664 rsp->nonzero_bits = GET_MODE_MASK (GET_MODE (x));
1665 rsp->sign_bit_copies = 1;
1666 return;
1669 /* If this register is being initialized using itself, and the
1670 register is uninitialized in this basic block, and there are
1671 no LOG_LINKS which set the register, then part of the
1672 register is uninitialized. In that case we can't assume
1673 anything about the number of nonzero bits.
1675 ??? We could do better if we checked this in
1676 reg_{nonzero_bits,num_sign_bit_copies}_for_combine. Then we
1677 could avoid making assumptions about the insn which initially
1678 sets the register, while still using the information in other
1679 insns. We would have to be careful to check every insn
1680 involved in the combination. */
1682 if (insn
1683 && reg_referenced_p (x, PATTERN (insn))
1684 && !REGNO_REG_SET_P (DF_LR_IN (BLOCK_FOR_INSN (insn)),
1685 REGNO (x)))
1687 struct insn_link *link;
1689 FOR_EACH_LOG_LINK (link, insn)
1690 if (dead_or_set_p (link->insn, x))
1691 break;
1692 if (!link)
1694 rsp->nonzero_bits = GET_MODE_MASK (GET_MODE (x));
1695 rsp->sign_bit_copies = 1;
1696 return;
1700 /* If this is a complex assignment, see if we can convert it into a
1701 simple assignment. */
1702 set = expand_field_assignment (set);
1704 /* If this is a simple assignment, or we have a paradoxical SUBREG,
1705 set what we know about X. */
1707 if (SET_DEST (set) == x
1708 || (paradoxical_subreg_p (SET_DEST (set))
1709 && SUBREG_REG (SET_DEST (set)) == x))
1711 rtx src = SET_SRC (set);
1713 #ifdef SHORT_IMMEDIATES_SIGN_EXTEND
1714 /* If X is narrower than a word and SRC is a non-negative
1715 constant that would appear negative in the mode of X,
1716 sign-extend it for use in reg_stat[].nonzero_bits because some
1717 machines (maybe most) will actually do the sign-extension
1718 and this is the conservative approach.
1720 ??? For 2.5, try to tighten up the MD files in this regard
1721 instead of this kludge. */
1723 if (GET_MODE_PRECISION (GET_MODE (x)) < BITS_PER_WORD
1724 && CONST_INT_P (src)
1725 && INTVAL (src) > 0
1726 && val_signbit_known_set_p (GET_MODE (x), INTVAL (src)))
1727 src = GEN_INT (INTVAL (src) | ~GET_MODE_MASK (GET_MODE (x)));
1728 #endif
1730 /* Don't call nonzero_bits if it cannot change anything. */
1731 if (rsp->nonzero_bits != ~(unsigned HOST_WIDE_INT) 0)
1732 rsp->nonzero_bits |= nonzero_bits (src, nonzero_bits_mode);
1733 num = num_sign_bit_copies (SET_SRC (set), GET_MODE (x));
1734 if (rsp->sign_bit_copies == 0
1735 || rsp->sign_bit_copies > num)
1736 rsp->sign_bit_copies = num;
1738 else
1740 rsp->nonzero_bits = GET_MODE_MASK (GET_MODE (x));
1741 rsp->sign_bit_copies = 1;
1746 /* See if INSN can be combined into I3. PRED, PRED2, SUCC and SUCC2 are
1747 optionally insns that were previously combined into I3 or that will be
1748 combined into the merger of INSN and I3. The order is PRED, PRED2,
1749 INSN, SUCC, SUCC2, I3.
1751 Return 0 if the combination is not allowed for any reason.
1753 If the combination is allowed, *PDEST will be set to the single
1754 destination of INSN and *PSRC to the single source, and this function
1755 will return 1. */
1757 static int
1758 can_combine_p (rtx_insn *insn, rtx_insn *i3, rtx_insn *pred ATTRIBUTE_UNUSED,
1759 rtx_insn *pred2 ATTRIBUTE_UNUSED, rtx_insn *succ, rtx_insn *succ2,
1760 rtx *pdest, rtx *psrc)
1762 int i;
1763 const_rtx set = 0;
1764 rtx src, dest;
1765 rtx_insn *p;
1766 #ifdef AUTO_INC_DEC
1767 rtx link;
1768 #endif
1769 bool all_adjacent = true;
1770 int (*is_volatile_p) (const_rtx);
1772 if (succ)
1774 if (succ2)
1776 if (next_active_insn (succ2) != i3)
1777 all_adjacent = false;
1778 if (next_active_insn (succ) != succ2)
1779 all_adjacent = false;
1781 else if (next_active_insn (succ) != i3)
1782 all_adjacent = false;
1783 if (next_active_insn (insn) != succ)
1784 all_adjacent = false;
1786 else if (next_active_insn (insn) != i3)
1787 all_adjacent = false;
1789 /* Can combine only if previous insn is a SET of a REG, a SUBREG or CC0,
1790 or a PARALLEL consisting of such a SET and CLOBBERs.
1792 If INSN has CLOBBER parallel parts, ignore them for our processing.
1793 By definition, these happen during the execution of the insn. When it
1794 is merged with another insn, all bets are off. If they are, in fact,
1795 needed and aren't also supplied in I3, they may be added by
1796 recog_for_combine. Otherwise, it won't match.
1798 We can also ignore a SET whose SET_DEST is mentioned in a REG_UNUSED
1799 note.
1801 Get the source and destination of INSN. If more than one, can't
1802 combine. */
1804 if (GET_CODE (PATTERN (insn)) == SET)
1805 set = PATTERN (insn);
1806 else if (GET_CODE (PATTERN (insn)) == PARALLEL
1807 && GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == SET)
1809 for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
1811 rtx elt = XVECEXP (PATTERN (insn), 0, i);
1813 switch (GET_CODE (elt))
1815 /* This is important to combine floating point insns
1816 for the SH4 port. */
1817 case USE:
1818 /* Combining an isolated USE doesn't make sense.
1819 We depend here on combinable_i3pat to reject them. */
1820 /* The code below this loop only verifies that the inputs of
1821 the SET in INSN do not change. We call reg_set_between_p
1822 to verify that the REG in the USE does not change between
1823 I3 and INSN.
1824 If the USE in INSN was for a pseudo register, the matching
1825 insn pattern will likely match any register; combining this
1826 with any other USE would only be safe if we knew that the
1827 used registers have identical values, or if there was
1828 something to tell them apart, e.g. different modes. For
1829 now, we forgo such complicated tests and simply disallow
1830 combining of USES of pseudo registers with any other USE. */
1831 if (REG_P (XEXP (elt, 0))
1832 && GET_CODE (PATTERN (i3)) == PARALLEL)
1834 rtx i3pat = PATTERN (i3);
1835 int i = XVECLEN (i3pat, 0) - 1;
1836 unsigned int regno = REGNO (XEXP (elt, 0));
1840 rtx i3elt = XVECEXP (i3pat, 0, i);
1842 if (GET_CODE (i3elt) == USE
1843 && REG_P (XEXP (i3elt, 0))
1844 && (REGNO (XEXP (i3elt, 0)) == regno
1845 ? reg_set_between_p (XEXP (elt, 0),
1846 PREV_INSN (insn), i3)
1847 : regno >= FIRST_PSEUDO_REGISTER))
1848 return 0;
1850 while (--i >= 0);
1852 break;
1854 /* We can ignore CLOBBERs. */
1855 case CLOBBER:
1856 break;
1858 case SET:
1859 /* Ignore SETs whose result isn't used, but not those that
1860 have side-effects. */
1861 if (find_reg_note (insn, REG_UNUSED, SET_DEST (elt))
1862 && insn_nothrow_p (insn)
1863 && !side_effects_p (elt))
1864 break;
1866 /* If we have already found a SET, this is a second one and
1867 so we cannot combine with this insn. */
1868 if (set)
1869 return 0;
1871 set = elt;
1872 break;
1874 default:
1875 /* Anything else means we can't combine. */
1876 return 0;
1880 if (set == 0
1881 /* If SET_SRC is an ASM_OPERANDS we can't throw away these CLOBBERs,
1882 so don't do anything with it. */
1883 || GET_CODE (SET_SRC (set)) == ASM_OPERANDS)
1884 return 0;
1886 else
1887 return 0;
1889 if (set == 0)
1890 return 0;
1892 /* The simplification in expand_field_assignment may call back to
1893 get_last_value, so set safe guard here. */
1894 subst_low_luid = DF_INSN_LUID (insn);
1896 set = expand_field_assignment (set);
1897 src = SET_SRC (set), dest = SET_DEST (set);
1899 /* Don't eliminate a store in the stack pointer. */
1900 if (dest == stack_pointer_rtx
1901 /* Don't combine with an insn that sets a register to itself if it has
1902 a REG_EQUAL note. This may be part of a LIBCALL sequence. */
1903 || (rtx_equal_p (src, dest) && find_reg_note (insn, REG_EQUAL, NULL_RTX))
1904 /* Can't merge an ASM_OPERANDS. */
1905 || GET_CODE (src) == ASM_OPERANDS
1906 /* Can't merge a function call. */
1907 || GET_CODE (src) == CALL
1908 /* Don't eliminate a function call argument. */
1909 || (CALL_P (i3)
1910 && (find_reg_fusage (i3, USE, dest)
1911 || (REG_P (dest)
1912 && REGNO (dest) < FIRST_PSEUDO_REGISTER
1913 && global_regs[REGNO (dest)])))
1914 /* Don't substitute into an incremented register. */
1915 || FIND_REG_INC_NOTE (i3, dest)
1916 || (succ && FIND_REG_INC_NOTE (succ, dest))
1917 || (succ2 && FIND_REG_INC_NOTE (succ2, dest))
1918 /* Don't substitute into a non-local goto, this confuses CFG. */
1919 || (JUMP_P (i3) && find_reg_note (i3, REG_NON_LOCAL_GOTO, NULL_RTX))
1920 /* Make sure that DEST is not used after SUCC but before I3. */
1921 || (!all_adjacent
1922 && ((succ2
1923 && (reg_used_between_p (dest, succ2, i3)
1924 || reg_used_between_p (dest, succ, succ2)))
1925 || (!succ2 && succ && reg_used_between_p (dest, succ, i3))))
1926 /* Make sure that the value that is to be substituted for the register
1927 does not use any registers whose values alter in between. However,
1928 if the insns are adjacent, a use can't cross a set even though we
1929 think it might (this can happen for a sequence of insns each setting
1930 the same destination; last_set of that register might point to
1931 a NOTE). If INSN has a REG_EQUIV note, the register is always
1932 equivalent to the memory so the substitution is valid even if there
1933 are intervening stores. Also, don't move a volatile asm or
1934 UNSPEC_VOLATILE across any other insns. */
1935 || (! all_adjacent
1936 && (((!MEM_P (src)
1937 || ! find_reg_note (insn, REG_EQUIV, src))
1938 && use_crosses_set_p (src, DF_INSN_LUID (insn)))
1939 || (GET_CODE (src) == ASM_OPERANDS && MEM_VOLATILE_P (src))
1940 || GET_CODE (src) == UNSPEC_VOLATILE))
1941 /* Don't combine across a CALL_INSN, because that would possibly
1942 change whether the life span of some REGs crosses calls or not,
1943 and it is a pain to update that information.
1944 Exception: if source is a constant, moving it later can't hurt.
1945 Accept that as a special case. */
1946 || (DF_INSN_LUID (insn) < last_call_luid && ! CONSTANT_P (src)))
1947 return 0;
1949 /* DEST must either be a REG or CC0. */
1950 if (REG_P (dest))
1952 /* If register alignment is being enforced for multi-word items in all
1953 cases except for parameters, it is possible to have a register copy
1954 insn referencing a hard register that is not allowed to contain the
1955 mode being copied and which would not be valid as an operand of most
1956 insns. Eliminate this problem by not combining with such an insn.
1958 Also, on some machines we don't want to extend the life of a hard
1959 register. */
1961 if (REG_P (src)
1962 && ((REGNO (dest) < FIRST_PSEUDO_REGISTER
1963 && ! HARD_REGNO_MODE_OK (REGNO (dest), GET_MODE (dest)))
1964 /* Don't extend the life of a hard register unless it is
1965 user variable (if we have few registers) or it can't
1966 fit into the desired register (meaning something special
1967 is going on).
1968 Also avoid substituting a return register into I3, because
1969 reload can't handle a conflict with constraints of other
1970 inputs. */
1971 || (REGNO (src) < FIRST_PSEUDO_REGISTER
1972 && ! HARD_REGNO_MODE_OK (REGNO (src), GET_MODE (src)))))
1973 return 0;
1975 else if (GET_CODE (dest) != CC0)
1976 return 0;
1979 if (GET_CODE (PATTERN (i3)) == PARALLEL)
1980 for (i = XVECLEN (PATTERN (i3), 0) - 1; i >= 0; i--)
1981 if (GET_CODE (XVECEXP (PATTERN (i3), 0, i)) == CLOBBER)
1983 rtx reg = XEXP (XVECEXP (PATTERN (i3), 0, i), 0);
1985 /* If the clobber represents an earlyclobber operand, we must not
1986 substitute an expression containing the clobbered register.
1987 As we do not analyze the constraint strings here, we have to
1988 make the conservative assumption. However, if the register is
1989 a fixed hard reg, the clobber cannot represent any operand;
1990 we leave it up to the machine description to either accept or
1991 reject use-and-clobber patterns. */
1992 if (!REG_P (reg)
1993 || REGNO (reg) >= FIRST_PSEUDO_REGISTER
1994 || !fixed_regs[REGNO (reg)])
1995 if (reg_overlap_mentioned_p (reg, src))
1996 return 0;
1999 /* If INSN contains anything volatile, or is an `asm' (whether volatile
2000 or not), reject, unless nothing volatile comes between it and I3. */
2002 if (GET_CODE (src) == ASM_OPERANDS || volatile_refs_p (src))
2004 /* Make sure neither succ nor succ2 contains a volatile reference. */
2005 if (succ2 != 0 && volatile_refs_p (PATTERN (succ2)))
2006 return 0;
2007 if (succ != 0 && volatile_refs_p (PATTERN (succ)))
2008 return 0;
2009 /* We'll check insns between INSN and I3 below. */
2012 /* If INSN is an asm, and DEST is a hard register, reject, since it has
2013 to be an explicit register variable, and was chosen for a reason. */
2015 if (GET_CODE (src) == ASM_OPERANDS
2016 && REG_P (dest) && REGNO (dest) < FIRST_PSEUDO_REGISTER)
2017 return 0;
2019 /* If INSN contains volatile references (specifically volatile MEMs),
2020 we cannot combine across any other volatile references.
2021 Even if INSN doesn't contain volatile references, any intervening
2022 volatile insn might affect machine state. */
2024 is_volatile_p = volatile_refs_p (PATTERN (insn))
2025 ? volatile_refs_p
2026 : volatile_insn_p;
2028 for (p = NEXT_INSN (insn); p != i3; p = NEXT_INSN (p))
2029 if (INSN_P (p) && p != succ && p != succ2 && is_volatile_p (PATTERN (p)))
2030 return 0;
2032 /* If INSN contains an autoincrement or autodecrement, make sure that
2033 register is not used between there and I3, and not already used in
2034 I3 either. Neither must it be used in PRED or SUCC, if they exist.
2035 Also insist that I3 not be a jump; if it were one
2036 and the incremented register were spilled, we would lose. */
2038 #ifdef AUTO_INC_DEC
2039 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2040 if (REG_NOTE_KIND (link) == REG_INC
2041 && (JUMP_P (i3)
2042 || reg_used_between_p (XEXP (link, 0), insn, i3)
2043 || (pred != NULL_RTX
2044 && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (pred)))
2045 || (pred2 != NULL_RTX
2046 && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (pred2)))
2047 || (succ != NULL_RTX
2048 && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (succ)))
2049 || (succ2 != NULL_RTX
2050 && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (succ2)))
2051 || reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (i3))))
2052 return 0;
2053 #endif
2055 #ifdef HAVE_cc0
2056 /* Don't combine an insn that follows a CC0-setting insn.
2057 An insn that uses CC0 must not be separated from the one that sets it.
2058 We do, however, allow I2 to follow a CC0-setting insn if that insn
2059 is passed as I1; in that case it will be deleted also.
2060 We also allow combining in this case if all the insns are adjacent
2061 because that would leave the two CC0 insns adjacent as well.
2062 It would be more logical to test whether CC0 occurs inside I1 or I2,
2063 but that would be much slower, and this ought to be equivalent. */
2065 p = prev_nonnote_insn (insn);
2066 if (p && p != pred && NONJUMP_INSN_P (p) && sets_cc0_p (PATTERN (p))
2067 && ! all_adjacent)
2068 return 0;
2069 #endif
2071 /* If we get here, we have passed all the tests and the combination is
2072 to be allowed. */
2074 *pdest = dest;
2075 *psrc = src;
2077 return 1;
2080 /* LOC is the location within I3 that contains its pattern or the component
2081 of a PARALLEL of the pattern. We validate that it is valid for combining.
2083 One problem is if I3 modifies its output, as opposed to replacing it
2084 entirely, we can't allow the output to contain I2DEST, I1DEST or I0DEST as
2085 doing so would produce an insn that is not equivalent to the original insns.
2087 Consider:
2089 (set (reg:DI 101) (reg:DI 100))
2090 (set (subreg:SI (reg:DI 101) 0) <foo>)
2092 This is NOT equivalent to:
2094 (parallel [(set (subreg:SI (reg:DI 100) 0) <foo>)
2095 (set (reg:DI 101) (reg:DI 100))])
2097 Not only does this modify 100 (in which case it might still be valid
2098 if 100 were dead in I2), it sets 101 to the ORIGINAL value of 100.
2100 We can also run into a problem if I2 sets a register that I1
2101 uses and I1 gets directly substituted into I3 (not via I2). In that
2102 case, we would be getting the wrong value of I2DEST into I3, so we
2103 must reject the combination. This case occurs when I2 and I1 both
2104 feed into I3, rather than when I1 feeds into I2, which feeds into I3.
2105 If I1_NOT_IN_SRC is nonzero, it means that finding I1 in the source
2106 of a SET must prevent combination from occurring. The same situation
2107 can occur for I0, in which case I0_NOT_IN_SRC is set.
2109 Before doing the above check, we first try to expand a field assignment
2110 into a set of logical operations.
2112 If PI3_DEST_KILLED is nonzero, it is a pointer to a location in which
2113 we place a register that is both set and used within I3. If more than one
2114 such register is detected, we fail.
2116 Return 1 if the combination is valid, zero otherwise. */
2118 static int
2119 combinable_i3pat (rtx_insn *i3, rtx *loc, rtx i2dest, rtx i1dest, rtx i0dest,
2120 int i1_not_in_src, int i0_not_in_src, rtx *pi3dest_killed)
2122 rtx x = *loc;
2124 if (GET_CODE (x) == SET)
2126 rtx set = x;
2127 rtx dest = SET_DEST (set);
2128 rtx src = SET_SRC (set);
2129 rtx inner_dest = dest;
2130 rtx subdest;
2132 while (GET_CODE (inner_dest) == STRICT_LOW_PART
2133 || GET_CODE (inner_dest) == SUBREG
2134 || GET_CODE (inner_dest) == ZERO_EXTRACT)
2135 inner_dest = XEXP (inner_dest, 0);
2137 /* Check for the case where I3 modifies its output, as discussed
2138 above. We don't want to prevent pseudos from being combined
2139 into the address of a MEM, so only prevent the combination if
2140 i1 or i2 set the same MEM. */
2141 if ((inner_dest != dest &&
2142 (!MEM_P (inner_dest)
2143 || rtx_equal_p (i2dest, inner_dest)
2144 || (i1dest && rtx_equal_p (i1dest, inner_dest))
2145 || (i0dest && rtx_equal_p (i0dest, inner_dest)))
2146 && (reg_overlap_mentioned_p (i2dest, inner_dest)
2147 || (i1dest && reg_overlap_mentioned_p (i1dest, inner_dest))
2148 || (i0dest && reg_overlap_mentioned_p (i0dest, inner_dest))))
2150 /* This is the same test done in can_combine_p except we can't test
2151 all_adjacent; we don't have to, since this instruction will stay
2152 in place, thus we are not considering increasing the lifetime of
2153 INNER_DEST.
2155 Also, if this insn sets a function argument, combining it with
2156 something that might need a spill could clobber a previous
2157 function argument; the all_adjacent test in can_combine_p also
2158 checks this; here, we do a more specific test for this case. */
2160 || (REG_P (inner_dest)
2161 && REGNO (inner_dest) < FIRST_PSEUDO_REGISTER
2162 && (! HARD_REGNO_MODE_OK (REGNO (inner_dest),
2163 GET_MODE (inner_dest))))
2164 || (i1_not_in_src && reg_overlap_mentioned_p (i1dest, src))
2165 || (i0_not_in_src && reg_overlap_mentioned_p (i0dest, src)))
2166 return 0;
2168 /* If DEST is used in I3, it is being killed in this insn, so
2169 record that for later. We have to consider paradoxical
2170 subregs here, since they kill the whole register, but we
2171 ignore partial subregs, STRICT_LOW_PART, etc.
2172 Never add REG_DEAD notes for the FRAME_POINTER_REGNUM or the
2173 STACK_POINTER_REGNUM, since these are always considered to be
2174 live. Similarly for ARG_POINTER_REGNUM if it is fixed. */
2175 subdest = dest;
2176 if (GET_CODE (subdest) == SUBREG
2177 && (GET_MODE_SIZE (GET_MODE (subdest))
2178 >= GET_MODE_SIZE (GET_MODE (SUBREG_REG (subdest)))))
2179 subdest = SUBREG_REG (subdest);
2180 if (pi3dest_killed
2181 && REG_P (subdest)
2182 && reg_referenced_p (subdest, PATTERN (i3))
2183 && REGNO (subdest) != FRAME_POINTER_REGNUM
2184 #if !HARD_FRAME_POINTER_IS_FRAME_POINTER
2185 && REGNO (subdest) != HARD_FRAME_POINTER_REGNUM
2186 #endif
2187 #if ARG_POINTER_REGNUM != FRAME_POINTER_REGNUM
2188 && (REGNO (subdest) != ARG_POINTER_REGNUM
2189 || ! fixed_regs [REGNO (subdest)])
2190 #endif
2191 && REGNO (subdest) != STACK_POINTER_REGNUM)
2193 if (*pi3dest_killed)
2194 return 0;
2196 *pi3dest_killed = subdest;
2200 else if (GET_CODE (x) == PARALLEL)
2202 int i;
2204 for (i = 0; i < XVECLEN (x, 0); i++)
2205 if (! combinable_i3pat (i3, &XVECEXP (x, 0, i), i2dest, i1dest, i0dest,
2206 i1_not_in_src, i0_not_in_src, pi3dest_killed))
2207 return 0;
2210 return 1;
2213 /* Return 1 if X is an arithmetic expression that contains a multiplication
2214 or a division. We don't count multiplications by powers of two here. */
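/* A couple of made-up examples: contains_muldiv of
     (plus:SI (mult:SI (reg 100) (const_int 8)) (reg 101))
   is 0, because 8 is a power of two, while contains_muldiv of
     (plus:SI (mult:SI (reg 100) (const_int 3)) (reg 101))
   is 1.  */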
2216 static int
2217 contains_muldiv (rtx x)
2219 switch (GET_CODE (x))
2221 case MOD: case DIV: case UMOD: case UDIV:
2222 return 1;
2224 case MULT:
2225 return ! (CONST_INT_P (XEXP (x, 1))
2226 && exact_log2 (UINTVAL (XEXP (x, 1))) >= 0);
2227 default:
2228 if (BINARY_P (x))
2229 return contains_muldiv (XEXP (x, 0))
2230 || contains_muldiv (XEXP (x, 1));
2232 if (UNARY_P (x))
2233 return contains_muldiv (XEXP (x, 0));
2235 return 0;
2239 /* Determine whether INSN can be used in a combination. Return nonzero if
2240 not. This is used in try_combine to detect early some cases where we
2241 can't perform combinations. */
2243 static int
2244 cant_combine_insn_p (rtx_insn *insn)
2246 rtx set;
2247 rtx src, dest;
2249 /* If this isn't really an insn, we can't do anything.
2250 This can occur when flow deletes an insn that it has merged into an
2251 auto-increment address. */
2252 if (! INSN_P (insn))
2253 return 1;
2255 /* Never combine loads and stores involving hard regs that are likely
2256 to be spilled. The register allocator can usually handle such
2257 reg-reg moves by tying. If we allow the combiner to make
2258 substitutions of likely-spilled regs, reload might die.
2259 As an exception, we allow combinations involving fixed regs; these are
2260 not available to the register allocator so there's no risk involved. */
2262 set = single_set (insn);
2263 if (! set)
2264 return 0;
2265 src = SET_SRC (set);
2266 dest = SET_DEST (set);
2267 if (GET_CODE (src) == SUBREG)
2268 src = SUBREG_REG (src);
2269 if (GET_CODE (dest) == SUBREG)
2270 dest = SUBREG_REG (dest);
2271 if (REG_P (src) && REG_P (dest)
2272 && ((HARD_REGISTER_P (src)
2273 && ! TEST_HARD_REG_BIT (fixed_reg_set, REGNO (src))
2274 && targetm.class_likely_spilled_p (REGNO_REG_CLASS (REGNO (src))))
2275 || (HARD_REGISTER_P (dest)
2276 && ! TEST_HARD_REG_BIT (fixed_reg_set, REGNO (dest))
2277 && targetm.class_likely_spilled_p (REGNO_REG_CLASS (REGNO (dest))))))
2278 return 1;
2280 return 0;
2283 struct likely_spilled_retval_info
2285 unsigned regno, nregs;
2286 unsigned mask;
2289 /* Called via note_stores by likely_spilled_retval_p. Remove from info->mask
2290 hard registers that are known to be written to / clobbered in full. */
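/* Hypothetical walk-through: suppose the return value occupies hard regs
   4..7, so info->regno == 4, info->nregs == 4 and info->mask == 0xf.
   If X sets hard regs 5..6 in full, new_mask is 0x3 shifted left by one,
   i.e. 0x6, and info->mask becomes 0x9: only regs 4 and 7 are still of
   interest.  */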
2291 static void
2292 likely_spilled_retval_1 (rtx x, const_rtx set, void *data)
2294 struct likely_spilled_retval_info *const info =
2295 (struct likely_spilled_retval_info *) data;
2296 unsigned regno, nregs;
2297 unsigned new_mask;
2299 if (!REG_P (XEXP (set, 0)))
2300 return;
2301 regno = REGNO (x);
2302 if (regno >= info->regno + info->nregs)
2303 return;
2304 nregs = hard_regno_nregs[regno][GET_MODE (x)];
2305 if (regno + nregs <= info->regno)
2306 return;
2307 new_mask = (2U << (nregs - 1)) - 1;
2308 if (regno < info->regno)
2309 new_mask >>= info->regno - regno;
2310 else
2311 new_mask <<= regno - info->regno;
2312 info->mask &= ~new_mask;
2315 /* Return nonzero iff part of the return value is live during INSN, and
2316 it is likely spilled. This can happen when more than one insn is needed
2317 to copy the return value, e.g. when we consider combining into the
2318 second copy insn for a complex value. */
2320 static int
2321 likely_spilled_retval_p (rtx_insn *insn)
2323 rtx_insn *use = BB_END (this_basic_block);
2324 rtx reg;
2325 rtx_insn *p;
2326 unsigned regno, nregs;
2327 /* We assume here that no machine mode needs more than
2328 32 hard registers when the value overlaps with a register
2329 for which TARGET_FUNCTION_VALUE_REGNO_P is true. */
2330 unsigned mask;
2331 struct likely_spilled_retval_info info;
2333 if (!NONJUMP_INSN_P (use) || GET_CODE (PATTERN (use)) != USE || insn == use)
2334 return 0;
2335 reg = XEXP (PATTERN (use), 0);
2336 if (!REG_P (reg) || !targetm.calls.function_value_regno_p (REGNO (reg)))
2337 return 0;
2338 regno = REGNO (reg);
2339 nregs = hard_regno_nregs[regno][GET_MODE (reg)];
2340 if (nregs == 1)
2341 return 0;
2342 mask = (2U << (nregs - 1)) - 1;
2344 /* Disregard parts of the return value that are set later. */
2345 info.regno = regno;
2346 info.nregs = nregs;
2347 info.mask = mask;
2348 for (p = PREV_INSN (use); info.mask && p != insn; p = PREV_INSN (p))
2349 if (INSN_P (p))
2350 note_stores (PATTERN (p), likely_spilled_retval_1, &info);
2351 mask = info.mask;
2353 /* Check if any of the (probably) live return value registers is
2354 likely spilled. */
2355 nregs --;
2358 if ((mask & 1 << nregs)
2359 && targetm.class_likely_spilled_p (REGNO_REG_CLASS (regno + nregs)))
2360 return 1;
2361 } while (nregs--);
2362 return 0;
2365 /* Adjust INSN after we made a change to its destination.
2367 Changing the destination can invalidate notes that say something about
2368 the results of the insn and a LOG_LINK pointing to the insn. */
2370 static void
2371 adjust_for_new_dest (rtx_insn *insn)
2373 /* For notes, be conservative and simply remove them. */
2374 remove_reg_equal_equiv_notes (insn);
2376 /* The new insn will have a destination that was previously the destination
2377 of an insn just above it. Call distribute_links to make a LOG_LINK from
2378 the next use of that destination. */
2380 rtx set = single_set (insn);
2381 gcc_assert (set);
2383 rtx reg = SET_DEST (set);
2385 while (GET_CODE (reg) == ZERO_EXTRACT
2386 || GET_CODE (reg) == STRICT_LOW_PART
2387 || GET_CODE (reg) == SUBREG)
2388 reg = XEXP (reg, 0);
2389 gcc_assert (REG_P (reg));
2391 distribute_links (alloc_insn_link (insn, REGNO (reg), NULL));
2393 df_insn_rescan (insn);
2396 /* Return TRUE if combine can reuse reg X in mode MODE.
2397 ADDED_SETS is nonzero if the original set is still required. */
2398 static bool
2399 can_change_dest_mode (rtx x, int added_sets, machine_mode mode)
2401 unsigned int regno;
2403 if (!REG_P (x))
2404 return false;
2406 regno = REGNO (x);
2407 /* Allow hard registers if the new mode is legal, and occupies no more
2408 registers than the old mode. */
2409 if (regno < FIRST_PSEUDO_REGISTER)
2410 return (HARD_REGNO_MODE_OK (regno, mode)
2411 && (hard_regno_nregs[regno][GET_MODE (x)]
2412 >= hard_regno_nregs[regno][mode]));
2414 /* Or a pseudo that is only used once. */
2415 return (REG_N_SETS (regno) == 1 && !added_sets
2416 && !REG_USERVAR_P (x));
2420 /* Check whether X, the destination of a set, refers to part of
2421 the register specified by REG. */
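/* For example (made-up register numbers): with REG being (reg:SI 100),
   an X of (strict_low_part (subreg:HI (reg:SI 100) 0)) refers to part of
   the register and this returns true, whereas an X of (reg:SI 100) itself
   fails the SUBREG test and returns false.  */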
2423 static bool
2424 reg_subword_p (rtx x, rtx reg)
2426 /* Check that reg is an integer mode register. */
2427 if (!REG_P (reg) || GET_MODE_CLASS (GET_MODE (reg)) != MODE_INT)
2428 return false;
2430 if (GET_CODE (x) == STRICT_LOW_PART
2431 || GET_CODE (x) == ZERO_EXTRACT)
2432 x = XEXP (x, 0);
2434 return GET_CODE (x) == SUBREG
2435 && SUBREG_REG (x) == reg
2436 && GET_MODE_CLASS (GET_MODE (x)) == MODE_INT;
2439 /* Delete the unconditional jump INSN and adjust the CFG correspondingly.
2440 Note that the INSN should be deleted *after* removing dead edges, so
2441 that the kept edge is the fallthrough edge for a (set (pc) (pc))
2442 but not for a (set (pc) (label_ref FOO)). */
2444 static void
2445 update_cfg_for_uncondjump (rtx_insn *insn)
2447 basic_block bb = BLOCK_FOR_INSN (insn);
2448 gcc_assert (BB_END (bb) == insn);
2450 purge_dead_edges (bb);
2452 delete_insn (insn);
2453 if (EDGE_COUNT (bb->succs) == 1)
2455 rtx_insn *insn;
2457 single_succ_edge (bb)->flags |= EDGE_FALLTHRU;
2459 /* Remove barriers from the footer if there are any. */
2460 for (insn = BB_FOOTER (bb); insn; insn = NEXT_INSN (insn))
2461 if (BARRIER_P (insn))
2463 if (PREV_INSN (insn))
2464 SET_NEXT_INSN (PREV_INSN (insn)) = NEXT_INSN (insn);
2465 else
2466 BB_FOOTER (bb) = NEXT_INSN (insn);
2467 if (NEXT_INSN (insn))
2468 SET_PREV_INSN (NEXT_INSN (insn)) = PREV_INSN (insn);
2470 else if (LABEL_P (insn))
2471 break;
2475 #ifndef HAVE_cc0
2476 /* Return whether INSN is a PARALLEL of exactly N register SETs followed
2477 by an arbitrary number of CLOBBERs. */
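/* E.g. (made-up registers), with N == 2:
     (parallel [(set (reg:SI 100) (plus:SI (reg:SI 101) (reg:SI 102)))
                (set (reg:SI 103) (reg:SI 104))
                (clobber (scratch:SI))])
   satisfies this predicate; it would not if either destination were a
   MEM or if a CLOBBER preceded one of the SETs.  */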
2478 static bool
2479 is_parallel_of_n_reg_sets (rtx_insn *insn, int n)
2481 rtx pat = PATTERN (insn);
2483 if (GET_CODE (pat) != PARALLEL)
2484 return false;
2486 int len = XVECLEN (pat, 0);
2487 if (len < n)
2488 return false;
2490 int i;
2491 for (i = 0; i < n; i++)
2492 if (GET_CODE (XVECEXP (pat, 0, i)) != SET
2493 || !REG_P (SET_DEST (XVECEXP (pat, 0, i))))
2494 return false;
2495 for ( ; i < len; i++)
2496 if (GET_CODE (XVECEXP (pat, 0, i)) != CLOBBER)
2497 return false;
2499 return true;
2502 /* Return whether INSN, a PARALLEL of N register SETs (and maybe some
2503 CLOBBERs), can be split into individual SETs in that order, without
2504 changing semantics. */
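/* For instance (made-up registers), the PARALLEL
     (parallel [(set (reg:SI 100) (reg:SI 102))
                (set (reg:SI 101) (plus:SI (reg:SI 100) (const_int 1)))])
   cannot be split in order: the second SET reads reg 100, which the first
   SET writes, so sequential execution would see the new value instead of
   the old one.  */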
2505 static bool
2506 can_split_parallel_of_n_reg_sets (rtx_insn *insn, int n)
2508 if (!insn_nothrow_p (insn))
2509 return false;
2511 rtx pat = PATTERN (insn);
2513 int i, j;
2514 for (i = 0; i < n; i++)
2516 if (side_effects_p (SET_SRC (XVECEXP (pat, 0, i))))
2517 return false;
2519 rtx reg = SET_DEST (XVECEXP (pat, 0, i));
2521 for (j = i + 1; j < n; j++)
2522 if (reg_referenced_p (reg, XVECEXP (pat, 0, j)))
2523 return false;
2526 return true;
2528 #endif
2530 /* Try to combine the insns I0, I1 and I2 into I3.
2531 Here I0, I1 and I2 appear earlier than I3.
2532 I0 and I1 can be zero; then we combine just I2 into I3, or I1 and I2 into
2533 I3.
2535 If we are combining more than two insns and the resulting insn is not
2536 recognized, try splitting it into two insns. If that happens, I2 and I3
2537 are retained and I1/I0 are pseudo-deleted by turning them into a NOTE.
2538 Otherwise, I0, I1 and I2 are pseudo-deleted.
2540 Return 0 if the combination does not work. Then nothing is changed.
2541 If we did the combination, return the insn at which combine should
2542 resume scanning.
2544 Set NEW_DIRECT_JUMP_P to a nonzero value if try_combine creates a
2545 new direct jump instruction.
2547 LAST_COMBINED_INSN is either I3, or some insn after I3 that has
2548 been I3 passed to an earlier try_combine within the same basic
2549 block. */
2551 static rtx_insn *
2552 try_combine (rtx_insn *i3, rtx_insn *i2, rtx_insn *i1, rtx_insn *i0,
2553 int *new_direct_jump_p, rtx_insn *last_combined_insn)
2555 /* New patterns for I3 and I2, respectively. */
2556 rtx newpat, newi2pat = 0;
2557 rtvec newpat_vec_with_clobbers = 0;
2558 int substed_i2 = 0, substed_i1 = 0, substed_i0 = 0;
2559 /* Indicates need to preserve SET in I0, I1 or I2 in I3 if it is not
2560 dead. */
2561 int added_sets_0, added_sets_1, added_sets_2;
2562 /* Total number of SETs to put into I3. */
2563 int total_sets;
2564 /* Nonzero if I2's or I1's body now appears in I3. */
2565 int i2_is_used = 0, i1_is_used = 0;
2566 /* INSN_CODEs for new I3, new I2, and user of condition code. */
2567 int insn_code_number, i2_code_number = 0, other_code_number = 0;
2568 /* Contains I3 if the destination of I3 is used in its source, which means
2569 that the old life of I3 is being killed. If that usage is placed into
2570 I2 and not in I3, a REG_DEAD note must be made. */
2571 rtx i3dest_killed = 0;
2572 /* SET_DEST and SET_SRC of I2, I1 and I0. */
2573 rtx i2dest = 0, i2src = 0, i1dest = 0, i1src = 0, i0dest = 0, i0src = 0;
2574 /* Copy of SET_SRC of I1 and I0, if needed. */
2575 rtx i1src_copy = 0, i0src_copy = 0, i0src_copy2 = 0;
2576 /* Set if I2DEST was reused as a scratch register. */
2577 bool i2scratch = false;
2578 /* The PATTERNs of I0, I1, and I2, or a copy of them in certain cases. */
2579 rtx i0pat = 0, i1pat = 0, i2pat = 0;
2580 /* Indicates if I2DEST or I1DEST is in I2SRC or I1SRC. */
2581 int i2dest_in_i2src = 0, i1dest_in_i1src = 0, i2dest_in_i1src = 0;
2582 int i0dest_in_i0src = 0, i1dest_in_i0src = 0, i2dest_in_i0src = 0;
2583 int i2dest_killed = 0, i1dest_killed = 0, i0dest_killed = 0;
2584 int i1_feeds_i2_n = 0, i0_feeds_i2_n = 0, i0_feeds_i1_n = 0;
2585 /* Notes that must be added to REG_NOTES in I3 and I2. */
2586 rtx new_i3_notes, new_i2_notes;
2587 /* Notes that we substituted I3 into I2 instead of the normal case. */
2588 int i3_subst_into_i2 = 0;
2589 /* Notes that I1, I2 or I3 is a MULT operation. */
2590 int have_mult = 0;
2591 int swap_i2i3 = 0;
2592 int changed_i3_dest = 0;
2594 int maxreg;
2595 rtx_insn *temp_insn;
2596 rtx temp_expr;
2597 struct insn_link *link;
2598 rtx other_pat = 0;
2599 rtx new_other_notes;
2600 int i;
2602 /* Immediately return if any of I0,I1,I2 are the same insn (I3 can
2603 never be). */
2604 if (i1 == i2 || i0 == i2 || (i0 && i0 == i1))
2605 return 0;
2607 /* Only try four-insn combinations when there's high likelihood of
2608 success. Look for simple insns, such as loads of constants or
2609 binary operations involving a constant. */
2610 if (i0)
2612 int i;
2613 int ngood = 0;
2614 int nshift = 0;
2616 if (!flag_expensive_optimizations)
2617 return 0;
2619 for (i = 0; i < 4; i++)
2621 rtx_insn *insn = i == 0 ? i0 : i == 1 ? i1 : i == 2 ? i2 : i3;
2622 rtx set = single_set (insn);
2623 rtx src;
2624 if (!set)
2625 continue;
2626 src = SET_SRC (set);
2627 if (CONSTANT_P (src))
2629 ngood += 2;
2630 break;
2632 else if (BINARY_P (src) && CONSTANT_P (XEXP (src, 1)))
2633 ngood++;
2634 else if (GET_CODE (src) == ASHIFT || GET_CODE (src) == ASHIFTRT
2635 || GET_CODE (src) == LSHIFTRT)
2636 nshift++;
2638 if (ngood < 2 && nshift < 2)
2639 return 0;
2642 /* Exit early if one of the insns involved can't be used for
2643 combinations. */
2644 if (CALL_P (i2)
2645 || (i1 && CALL_P (i1))
2646 || (i0 && CALL_P (i0))
2647 || cant_combine_insn_p (i3)
2648 || cant_combine_insn_p (i2)
2649 || (i1 && cant_combine_insn_p (i1))
2650 || (i0 && cant_combine_insn_p (i0))
2651 || likely_spilled_retval_p (i3))
2652 return 0;
2654 combine_attempts++;
2655 undobuf.other_insn = 0;
2657 /* Reset the hard register usage information. */
2658 CLEAR_HARD_REG_SET (newpat_used_regs);
2660 if (dump_file && (dump_flags & TDF_DETAILS))
2662 if (i0)
2663 fprintf (dump_file, "\nTrying %d, %d, %d -> %d:\n",
2664 INSN_UID (i0), INSN_UID (i1), INSN_UID (i2), INSN_UID (i3));
2665 else if (i1)
2666 fprintf (dump_file, "\nTrying %d, %d -> %d:\n",
2667 INSN_UID (i1), INSN_UID (i2), INSN_UID (i3));
2668 else
2669 fprintf (dump_file, "\nTrying %d -> %d:\n",
2670 INSN_UID (i2), INSN_UID (i3));
2673 /* If multiple insns feed into one of I2 or I3, they can be in any
2674 order. To simplify the code below, reorder them in sequence. */
2675 if (i0 && DF_INSN_LUID (i0) > DF_INSN_LUID (i2))
2676 temp_insn = i2, i2 = i0, i0 = temp_insn;
2677 if (i0 && DF_INSN_LUID (i0) > DF_INSN_LUID (i1))
2678 temp_insn = i1, i1 = i0, i0 = temp_insn;
2679 if (i1 && DF_INSN_LUID (i1) > DF_INSN_LUID (i2))
2680 temp_insn = i1, i1 = i2, i2 = temp_insn;
2682 added_links_insn = 0;
2684 /* First check for one important special case that the code below will
2685 not handle. Namely, the case where I1 is zero, I2 is a PARALLEL
2686 and I3 is a SET whose SET_SRC is a SET_DEST in I2. In that case,
2687 we may be able to replace that destination with the destination of I3.
2688 This occurs in the common code where we compute both a quotient and
2689 remainder into a structure, in which case we want to do the computation
2690 directly into the structure to avoid register-register copies.
2692 Note that this case handles both multiple sets in I2 and cases
2693 where I2 has a number of CLOBBERs inside the PARALLEL.
2695 We make very conservative checks below and only try to handle the
2696 most common cases of this. For example, we only handle the case
2697 where I2 and I3 are adjacent to avoid making difficult register
2698 usage tests. */
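  /* A sketch of the situation, with made-up register numbers:
       I2: (parallel [(set (reg:SI 100) (div:SI (reg:SI 98) (reg:SI 99)))
                      (set (reg:SI 101) (mod:SI (reg:SI 98) (reg:SI 99)))])
       I3: (set (mem:SI (reg:SI 200)) (reg:SI 101))
     with reg 101 dying in I3.  We can then compute the remainder directly
     into (mem:SI (reg:SI 200)) instead of going through reg 101.  */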
2700 if (i1 == 0 && NONJUMP_INSN_P (i3) && GET_CODE (PATTERN (i3)) == SET
2701 && REG_P (SET_SRC (PATTERN (i3)))
2702 && REGNO (SET_SRC (PATTERN (i3))) >= FIRST_PSEUDO_REGISTER
2703 && find_reg_note (i3, REG_DEAD, SET_SRC (PATTERN (i3)))
2704 && GET_CODE (PATTERN (i2)) == PARALLEL
2705 && ! side_effects_p (SET_DEST (PATTERN (i3)))
2706 /* If the dest of I3 is a ZERO_EXTRACT or STRICT_LOW_PART, the code
2707 below would need to check what is inside (and reg_overlap_mentioned_p
2708 doesn't support those codes anyway). Don't allow those destinations;
2709 the resulting insn isn't likely to be recognized anyway. */
2710 && GET_CODE (SET_DEST (PATTERN (i3))) != ZERO_EXTRACT
2711 && GET_CODE (SET_DEST (PATTERN (i3))) != STRICT_LOW_PART
2712 && ! reg_overlap_mentioned_p (SET_SRC (PATTERN (i3)),
2713 SET_DEST (PATTERN (i3)))
2714 && next_active_insn (i2) == i3)
2716 rtx p2 = PATTERN (i2);
2718 /* Make sure that the destination of I3,
2719 which we are going to substitute into one output of I2,
2720 is not used within another output of I2. We must avoid making this:
2721 (parallel [(set (mem (reg 69)) ...)
2722 (set (reg 69) ...)])
2723 which is not well-defined as to order of actions.
2724 (Besides, reload can't handle output reloads for this.)
2726 The problem can also happen if the dest of I3 is a memory ref,
2727 if another dest in I2 is an indirect memory ref. */
2728 for (i = 0; i < XVECLEN (p2, 0); i++)
2729 if ((GET_CODE (XVECEXP (p2, 0, i)) == SET
2730 || GET_CODE (XVECEXP (p2, 0, i)) == CLOBBER)
2731 && reg_overlap_mentioned_p (SET_DEST (PATTERN (i3)),
2732 SET_DEST (XVECEXP (p2, 0, i))))
2733 break;
2735 /* Make sure this PARALLEL is not an asm. We do not allow combining
2736 that usually (see can_combine_p), so do not here either. */
2737 for (i = 0; i < XVECLEN (p2, 0); i++)
2738 if (GET_CODE (XVECEXP (p2, 0, i)) == SET
2739 && GET_CODE (SET_SRC (XVECEXP (p2, 0, i))) == ASM_OPERANDS)
2740 break;
2742 if (i == XVECLEN (p2, 0))
2743 for (i = 0; i < XVECLEN (p2, 0); i++)
2744 if (GET_CODE (XVECEXP (p2, 0, i)) == SET
2745 && SET_DEST (XVECEXP (p2, 0, i)) == SET_SRC (PATTERN (i3)))
2747 combine_merges++;
2749 subst_insn = i3;
2750 subst_low_luid = DF_INSN_LUID (i2);
2752 added_sets_2 = added_sets_1 = added_sets_0 = 0;
2753 i2src = SET_SRC (XVECEXP (p2, 0, i));
2754 i2dest = SET_DEST (XVECEXP (p2, 0, i));
2755 i2dest_killed = dead_or_set_p (i2, i2dest);
2757 /* Replace the dest in I2 with our dest and make the resulting
2758 insn the new pattern for I3. Then skip to where we validate
2759 the pattern. Everything was set up above. */
2760 SUBST (SET_DEST (XVECEXP (p2, 0, i)), SET_DEST (PATTERN (i3)));
2761 newpat = p2;
2762 i3_subst_into_i2 = 1;
2763 goto validate_replacement;
2767 /* If I2 is setting a pseudo to a constant and I3 is setting some
2768 sub-part of it to another constant, merge them by making a new
2769 constant. */
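  /* Hypothetical example: if I2 is (set (reg:SI 100) (const_int 0x11223344))
     and I3 is (set (subreg:HI (reg:SI 100) 0) (const_int 0x5566)), and that
     SUBREG is the low part on this target, the merged constant is
     0x11225566 and the new pattern is a single SET of reg 100.  */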
2770 if (i1 == 0
2771 && (temp_expr = single_set (i2)) != 0
2772 && CONST_SCALAR_INT_P (SET_SRC (temp_expr))
2773 && GET_CODE (PATTERN (i3)) == SET
2774 && CONST_SCALAR_INT_P (SET_SRC (PATTERN (i3)))
2775 && reg_subword_p (SET_DEST (PATTERN (i3)), SET_DEST (temp_expr)))
2777 rtx dest = SET_DEST (PATTERN (i3));
2778 int offset = -1;
2779 int width = 0;
2781 if (GET_CODE (dest) == ZERO_EXTRACT)
2783 if (CONST_INT_P (XEXP (dest, 1))
2784 && CONST_INT_P (XEXP (dest, 2)))
2786 width = INTVAL (XEXP (dest, 1));
2787 offset = INTVAL (XEXP (dest, 2));
2788 dest = XEXP (dest, 0);
2789 if (BITS_BIG_ENDIAN)
2790 offset = GET_MODE_PRECISION (GET_MODE (dest)) - width - offset;
2793 else
2795 if (GET_CODE (dest) == STRICT_LOW_PART)
2796 dest = XEXP (dest, 0);
2797 width = GET_MODE_PRECISION (GET_MODE (dest));
2798 offset = 0;
2801 if (offset >= 0)
2803 /* If this is the low part, we're done. */
2804 if (subreg_lowpart_p (dest))
2806 /* Handle the case where inner is twice the size of outer. */
2807 else if (GET_MODE_PRECISION (GET_MODE (SET_DEST (temp_expr)))
2808 == 2 * GET_MODE_PRECISION (GET_MODE (dest)))
2809 offset += GET_MODE_PRECISION (GET_MODE (dest));
2810 /* Otherwise give up for now. */
2811 else
2812 offset = -1;
2815 if (offset >= 0)
2817 rtx inner = SET_SRC (PATTERN (i3));
2818 rtx outer = SET_SRC (temp_expr);
2820 wide_int o
2821 = wi::insert (std::make_pair (outer, GET_MODE (SET_DEST (temp_expr))),
2822 std::make_pair (inner, GET_MODE (dest)),
2823 offset, width);
2825 combine_merges++;
2826 subst_insn = i3;
2827 subst_low_luid = DF_INSN_LUID (i2);
2828 added_sets_2 = added_sets_1 = added_sets_0 = 0;
2829 i2dest = SET_DEST (temp_expr);
2830 i2dest_killed = dead_or_set_p (i2, i2dest);
2832 /* Replace the source in I2 with the new constant and make the
2833 resulting insn the new pattern for I3. Then skip to where we
2834 validate the pattern. Everything was set up above. */
2835 SUBST (SET_SRC (temp_expr),
2836 immed_wide_int_const (o, GET_MODE (SET_DEST (temp_expr))));
2838 newpat = PATTERN (i2);
2840 /* The dest of I3 has been replaced with the dest of I2. */
2841 changed_i3_dest = 1;
2842 goto validate_replacement;
2846 #ifndef HAVE_cc0
2847 /* If we have no I1 and I2 looks like:
2848 (parallel [(set (reg:CC X) (compare:CC OP (const_int 0)))
2849 (set Y OP)])
2850 make up a dummy I1 that is
2851 (set Y OP)
2852 and change I2 to be
2853 (set (reg:CC X) (compare:CC Y (const_int 0)))
2855 (We can ignore any trailing CLOBBERs.)
2857 This undoes a previous combination and allows us to match a branch-and-
2858 decrement insn. */
2860 if (i1 == 0
2861 && is_parallel_of_n_reg_sets (i2, 2)
2862 && (GET_MODE_CLASS (GET_MODE (SET_DEST (XVECEXP (PATTERN (i2), 0, 0))))
2863 == MODE_CC)
2864 && GET_CODE (SET_SRC (XVECEXP (PATTERN (i2), 0, 0))) == COMPARE
2865 && XEXP (SET_SRC (XVECEXP (PATTERN (i2), 0, 0)), 1) == const0_rtx
2866 && rtx_equal_p (XEXP (SET_SRC (XVECEXP (PATTERN (i2), 0, 0)), 0),
2867 SET_SRC (XVECEXP (PATTERN (i2), 0, 1)))
2868 && !reg_used_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 0)), i2, i3)
2869 && !reg_used_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 1)), i2, i3))
2871 /* We make I1 with the same INSN_UID as I2. This gives it
2872 the same DF_INSN_LUID for value tracking. Our fake I1 will
2873 never appear in the insn stream so giving it the same INSN_UID
2874 as I2 will not cause a problem. */
2876 i1 = gen_rtx_INSN (VOIDmode, NULL, i2, BLOCK_FOR_INSN (i2),
2877 XVECEXP (PATTERN (i2), 0, 1), INSN_LOCATION (i2),
2878 -1, NULL_RTX);
2879 INSN_UID (i1) = INSN_UID (i2);
2881 SUBST (PATTERN (i2), XVECEXP (PATTERN (i2), 0, 0));
2882 SUBST (XEXP (SET_SRC (PATTERN (i2)), 0),
2883 SET_DEST (PATTERN (i1)));
2884 unsigned int regno = REGNO (SET_DEST (PATTERN (i1)));
2885 SUBST_LINK (LOG_LINKS (i2),
2886 alloc_insn_link (i1, regno, LOG_LINKS (i2)));
2889 /* If I2 is a PARALLEL of two SETs of REGs (and perhaps some CLOBBERs),
2890 make those two SETs separate I1 and I2 insns, and make an I0 that is
2891 the original I1. */
2892 if (i0 == 0
2893 && is_parallel_of_n_reg_sets (i2, 2)
2894 && can_split_parallel_of_n_reg_sets (i2, 2)
2895 && !reg_used_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 0)), i2, i3)
2896 && !reg_used_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 1)), i2, i3))
2898 /* If there is no I1, there is no I0 either. */
2899 i0 = i1;
2901 /* We make I1 with the same INSN_UID as I2. This gives it
2902 the same DF_INSN_LUID for value tracking. Our fake I1 will
2903 never appear in the insn stream so giving it the same INSN_UID
2904 as I2 will not cause a problem. */
2906 i1 = gen_rtx_INSN (VOIDmode, NULL, i2, BLOCK_FOR_INSN (i2),
2907 XVECEXP (PATTERN (i2), 0, 0), INSN_LOCATION (i2),
2908 -1, NULL_RTX);
2909 INSN_UID (i1) = INSN_UID (i2);
2911 SUBST (PATTERN (i2), XVECEXP (PATTERN (i2), 0, 1));
2913 #endif
2915 /* Verify that I2 and I1 are valid for combining. */
2916 if (! can_combine_p (i2, i3, i0, i1, NULL, NULL, &i2dest, &i2src)
2917 || (i1 && ! can_combine_p (i1, i3, i0, NULL, i2, NULL,
2918 &i1dest, &i1src))
2919 || (i0 && ! can_combine_p (i0, i3, NULL, NULL, i1, i2,
2920 &i0dest, &i0src)))
2922 undo_all ();
2923 return 0;
2926 /* Record whether I2DEST is used in I2SRC and similarly for the other
2927 cases. Knowing this will help in register status updating below. */
2928 i2dest_in_i2src = reg_overlap_mentioned_p (i2dest, i2src);
2929 i1dest_in_i1src = i1 && reg_overlap_mentioned_p (i1dest, i1src);
2930 i2dest_in_i1src = i1 && reg_overlap_mentioned_p (i2dest, i1src);
2931 i0dest_in_i0src = i0 && reg_overlap_mentioned_p (i0dest, i0src);
2932 i1dest_in_i0src = i0 && reg_overlap_mentioned_p (i1dest, i0src);
2933 i2dest_in_i0src = i0 && reg_overlap_mentioned_p (i2dest, i0src);
2934 i2dest_killed = dead_or_set_p (i2, i2dest);
2935 i1dest_killed = i1 && dead_or_set_p (i1, i1dest);
2936 i0dest_killed = i0 && dead_or_set_p (i0, i0dest);
2938 /* For the earlier insns, determine which of the subsequent ones they
2939 feed. */
2940 i1_feeds_i2_n = i1 && insn_a_feeds_b (i1, i2);
2941 i0_feeds_i1_n = i0 && insn_a_feeds_b (i0, i1);
2942 i0_feeds_i2_n = (i0 && (!i0_feeds_i1_n ? insn_a_feeds_b (i0, i2)
2943 : (!reg_overlap_mentioned_p (i1dest, i0dest)
2944 && reg_overlap_mentioned_p (i0dest, i2src))));
2946 /* Ensure that I3's pattern can be the destination of combines. */
2947 if (! combinable_i3pat (i3, &PATTERN (i3), i2dest, i1dest, i0dest,
2948 i1 && i2dest_in_i1src && !i1_feeds_i2_n,
2949 i0 && ((i2dest_in_i0src && !i0_feeds_i2_n)
2950 || (i1dest_in_i0src && !i0_feeds_i1_n)),
2951 &i3dest_killed))
2953 undo_all ();
2954 return 0;
2957 /* See if any of the insns is a MULT operation. Unless one is, we will
2958 reject a combination that is, since it must be slower. Be conservative
2959 here. */
2960 if (GET_CODE (i2src) == MULT
2961 || (i1 != 0 && GET_CODE (i1src) == MULT)
2962 || (i0 != 0 && GET_CODE (i0src) == MULT)
2963 || (GET_CODE (PATTERN (i3)) == SET
2964 && GET_CODE (SET_SRC (PATTERN (i3))) == MULT))
2965 have_mult = 1;
2967 /* If I3 has an inc, then give up if I1 or I2 uses the reg that is inc'd.
2968 We used to do this EXCEPT in one case: I3 has a post-inc in an
2969 output operand. However, that exception can give rise to insns like
2970 mov r3,(r3)+
2971 which is a famous insn on the PDP-11 where the value of r3 used as the
2972 source was model-dependent. Avoid this sort of thing. */
2974 #if 0
2975 if (!(GET_CODE (PATTERN (i3)) == SET
2976 && REG_P (SET_SRC (PATTERN (i3)))
2977 && MEM_P (SET_DEST (PATTERN (i3)))
2978 && (GET_CODE (XEXP (SET_DEST (PATTERN (i3)), 0)) == POST_INC
2979 || GET_CODE (XEXP (SET_DEST (PATTERN (i3)), 0)) == POST_DEC)))
2980 /* It's not the exception. */
2981 #endif
2982 #ifdef AUTO_INC_DEC
2984 rtx link;
2985 for (link = REG_NOTES (i3); link; link = XEXP (link, 1))
2986 if (REG_NOTE_KIND (link) == REG_INC
2987 && (reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (i2))
2988 || (i1 != 0
2989 && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (i1)))))
2991 undo_all ();
2992 return 0;
2995 #endif
2997 /* See if the SETs in I1 or I2 need to be kept around in the merged
2998 instruction: whenever the value set there is still needed past I3.
2999 For the SET in I2, this is easy: we see if I2DEST dies or is set in I3.
3001 For the SET in I1, we have two cases: if I1 and I2 independently feed
3002 into I3, the set in I1 needs to be kept around unless I1DEST dies
3003 or is set in I3. Otherwise (if I1 feeds I2 which feeds I3), the set
3004 in I1 needs to be kept around unless I1DEST dies or is set in either
3005 I2 or I3. The same considerations apply to I0. */
3007 added_sets_2 = !dead_or_set_p (i3, i2dest);
3009 if (i1)
3010 added_sets_1 = !(dead_or_set_p (i3, i1dest)
3011 || (i1_feeds_i2_n && dead_or_set_p (i2, i1dest)));
3012 else
3013 added_sets_1 = 0;
3015 if (i0)
3016 added_sets_0 = !(dead_or_set_p (i3, i0dest)
3017 || (i0_feeds_i1_n && dead_or_set_p (i1, i0dest))
3018 || ((i0_feeds_i2_n || (i0_feeds_i1_n && i1_feeds_i2_n))
3019 && dead_or_set_p (i2, i0dest)));
3020 else
3021 added_sets_0 = 0;
3023 /* We are about to copy insns for the case where they need to be kept
3024 around. Check that they can be copied in the merged instruction. */
3026 if (targetm.cannot_copy_insn_p
3027 && ((added_sets_2 && targetm.cannot_copy_insn_p (i2))
3028 || (i1 && added_sets_1 && targetm.cannot_copy_insn_p (i1))
3029 || (i0 && added_sets_0 && targetm.cannot_copy_insn_p (i0))))
3031 undo_all ();
3032 return 0;
3035 /* If the set in I2 needs to be kept around, we must make a copy of
3036 PATTERN (I2), so that when we substitute I1SRC for I1DEST in
3037 PATTERN (I2), we are only substituting for the original I1DEST, not into
3038 an already-substituted copy. This also prevents making self-referential
3039 rtx. If I2 is a PARALLEL, we just need the piece that assigns I2SRC to
3040 I2DEST. */
3042 if (added_sets_2)
3044 if (GET_CODE (PATTERN (i2)) == PARALLEL)
3045 i2pat = gen_rtx_SET (VOIDmode, i2dest, copy_rtx (i2src));
3046 else
3047 i2pat = copy_rtx (PATTERN (i2));
3050 if (added_sets_1)
3052 if (GET_CODE (PATTERN (i1)) == PARALLEL)
3053 i1pat = gen_rtx_SET (VOIDmode, i1dest, copy_rtx (i1src));
3054 else
3055 i1pat = copy_rtx (PATTERN (i1));
3058 if (added_sets_0)
3060 if (GET_CODE (PATTERN (i0)) == PARALLEL)
3061 i0pat = gen_rtx_SET (VOIDmode, i0dest, copy_rtx (i0src));
3062 else
3063 i0pat = copy_rtx (PATTERN (i0));
3066 combine_merges++;
3068 /* Substitute in the latest insn for the regs set by the earlier ones. */
3070 maxreg = max_reg_num ();
3072 subst_insn = i3;
3074 #ifndef HAVE_cc0
3075 /* Many machines that don't use CC0 have insns that can both perform an
3076 arithmetic operation and set the condition code. These operations will
3077 be represented as a PARALLEL with the first element of the vector
3078 being a COMPARE of an arithmetic operation with the constant zero.
3079 The second element of the vector will set some pseudo to the result
3080 of the same arithmetic operation. If we simplify the COMPARE, we won't
3081 match such a pattern and so will generate an extra insn. Here we test
3082 for this case, where both the comparison and the operation result are
3083 needed, and make the PARALLEL by just replacing I2DEST in I3SRC with
3084 I2SRC. Later we will make the PARALLEL that contains I2. */
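  /* A sketch of the kind of pattern meant here (registers, modes and the
     operation are made up):
       (parallel [(set (reg:CCZ 17)
                       (compare:CCZ (plus:SI (reg:SI 100) (reg:SI 101))
                                    (const_int 0)))
                  (set (reg:SI 102) (plus:SI (reg:SI 100) (reg:SI 101)))])
     Before combination, I2 computes the PLUS into reg 102 and I3 compares
     reg 102 against zero; the PARALLEL above is what we aim to match.  */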
3086 if (i1 == 0 && added_sets_2 && GET_CODE (PATTERN (i3)) == SET
3087 && GET_CODE (SET_SRC (PATTERN (i3))) == COMPARE
3088 && CONST_INT_P (XEXP (SET_SRC (PATTERN (i3)), 1))
3089 && rtx_equal_p (XEXP (SET_SRC (PATTERN (i3)), 0), i2dest))
3091 rtx newpat_dest;
3092 rtx *cc_use_loc = NULL;
3093 rtx_insn *cc_use_insn = NULL;
3094 rtx op0 = i2src, op1 = XEXP (SET_SRC (PATTERN (i3)), 1);
3095 machine_mode compare_mode, orig_compare_mode;
3096 enum rtx_code compare_code = UNKNOWN, orig_compare_code = UNKNOWN;
3098 newpat = PATTERN (i3);
3099 newpat_dest = SET_DEST (newpat);
3100 compare_mode = orig_compare_mode = GET_MODE (newpat_dest);
3102 if (undobuf.other_insn == 0
3103 && (cc_use_loc = find_single_use (SET_DEST (newpat), i3,
3104 &cc_use_insn)))
3106 compare_code = orig_compare_code = GET_CODE (*cc_use_loc);
3107 compare_code = simplify_compare_const (compare_code,
3108 GET_MODE (i2dest), op0, &op1);
3109 target_canonicalize_comparison (&compare_code, &op0, &op1, 1);
3112 /* Do the rest only if op1 is const0_rtx, which may be the
3113 result of simplification. */
3114 if (op1 == const0_rtx)
3116 /* If a single use of the CC is found, prepare to modify it
3117 when SELECT_CC_MODE returns a new CC-class mode, or when
3118 the above simplify_compare_const() returned a new comparison
3119 operator. undobuf.other_insn is assigned the CC use insn
3120 when modifying it. */
3121 if (cc_use_loc)
3123 #ifdef SELECT_CC_MODE
3124 machine_mode new_mode
3125 = SELECT_CC_MODE (compare_code, op0, op1);
3126 if (new_mode != orig_compare_mode
3127 && can_change_dest_mode (SET_DEST (newpat),
3128 added_sets_2, new_mode))
3130 unsigned int regno = REGNO (newpat_dest);
3131 compare_mode = new_mode;
3132 if (regno < FIRST_PSEUDO_REGISTER)
3133 newpat_dest = gen_rtx_REG (compare_mode, regno);
3134 else
3136 SUBST_MODE (regno_reg_rtx[regno], compare_mode);
3137 newpat_dest = regno_reg_rtx[regno];
3140 #endif
3141 /* Cases for modifying the CC-using comparison. */
3142 if (compare_code != orig_compare_code
3143 /* ??? Do we need to verify the zero rtx? */
3144 && XEXP (*cc_use_loc, 1) == const0_rtx)
3146 /* Replace cc_use_loc with entire new RTX. */
3147 SUBST (*cc_use_loc,
3148 gen_rtx_fmt_ee (compare_code, compare_mode,
3149 newpat_dest, const0_rtx));
3150 undobuf.other_insn = cc_use_insn;
3152 else if (compare_mode != orig_compare_mode)
3154 /* Just replace the CC reg with a new mode. */
3155 SUBST (XEXP (*cc_use_loc, 0), newpat_dest);
3156 undobuf.other_insn = cc_use_insn;
3160 /* Now we modify the current newpat:
3161 First, SET_DEST(newpat) is updated if the CC mode has been
3162 altered. For targets without SELECT_CC_MODE, this should be
3163 optimized away. */
3164 if (compare_mode != orig_compare_mode)
3165 SUBST (SET_DEST (newpat), newpat_dest);
3166 /* This is always done to propagate i2src into newpat. */
3167 SUBST (SET_SRC (newpat),
3168 gen_rtx_COMPARE (compare_mode, op0, op1));
3169 /* Create new version of i2pat if needed; the below PARALLEL
3170 creation needs this to work correctly. */
3171 if (! rtx_equal_p (i2src, op0))
3172 i2pat = gen_rtx_SET (VOIDmode, i2dest, op0);
3173 i2_is_used = 1;
3176 #endif
3178 if (i2_is_used == 0)
3180 /* It is possible that the source of I2 or I1 may be performing
3181 an unneeded operation, such as a ZERO_EXTEND of something
3182 that is known to have the high part zero. Handle that case
3183 by letting subst look at the inner insns.
3185 Another way to do this would be to have a function that tries
3186 to simplify a single insn instead of merging two or more
3187 insns. We don't do this because of the potential of infinite
3188 loops and because of the potential extra memory required.
3189 However, doing it the way we are is a bit of a kludge and
3190 doesn't catch all cases.
3192 But only do this if -fexpensive-optimizations since it slows
3193 things down and doesn't usually win.
3195 This is not done in the COMPARE case above because the
3196 unmodified I2PAT is used in the PARALLEL and so a pattern
3197 with a modified I2SRC would not match. */
3199 if (flag_expensive_optimizations)
3201 /* Pass pc_rtx so no substitutions are done, just
3202 simplifications. */
3203 if (i1)
3205 subst_low_luid = DF_INSN_LUID (i1);
3206 i1src = subst (i1src, pc_rtx, pc_rtx, 0, 0, 0);
3209 subst_low_luid = DF_INSN_LUID (i2);
3210 i2src = subst (i2src, pc_rtx, pc_rtx, 0, 0, 0);
3213 n_occurrences = 0; /* `subst' counts here */
3214 subst_low_luid = DF_INSN_LUID (i2);
3216 /* If I1 feeds into I2 and I1DEST is in I1SRC, we need to make a unique
3217 copy of I2SRC each time we substitute it, in order to avoid creating
3218 self-referential RTL when we will be substituting I1SRC for I1DEST
3219 later. Likewise if I0 feeds into I2, either directly or indirectly
3220 through I1, and I0DEST is in I0SRC. */
3221 newpat = subst (PATTERN (i3), i2dest, i2src, 0, 0,
3222 (i1_feeds_i2_n && i1dest_in_i1src)
3223 || ((i0_feeds_i2_n || (i0_feeds_i1_n && i1_feeds_i2_n))
3224 && i0dest_in_i0src));
3225 substed_i2 = 1;
3227 /* Record whether I2's body now appears within I3's body. */
3228 i2_is_used = n_occurrences;
3231 /* If we already got a failure, don't try to do more. Otherwise, try to
3232 substitute I1 if we have it. */
3234 if (i1 && GET_CODE (newpat) != CLOBBER)
3236 /* Check that an autoincrement side-effect on I1 has not been lost.
3237 This happens if I1DEST is mentioned in I2 and dies there, and
3238 has disappeared from the new pattern. */
3239 if ((FIND_REG_INC_NOTE (i1, NULL_RTX) != 0
3240 && i1_feeds_i2_n
3241 && dead_or_set_p (i2, i1dest)
3242 && !reg_overlap_mentioned_p (i1dest, newpat))
3243 /* Before we can do this substitution, we must redo the test done
3244 above (see detailed comments there) that ensures I1DEST isn't
3245 mentioned in any SETs in NEWPAT that are field assignments. */
3246 || !combinable_i3pat (NULL, &newpat, i1dest, NULL_RTX, NULL_RTX,
3247 0, 0, 0))
3249 undo_all ();
3250 return 0;
3253 n_occurrences = 0;
3254 subst_low_luid = DF_INSN_LUID (i1);
3256 /* If the following substitution will modify I1SRC, make a copy of it
3257 for the case where it is substituted for I1DEST in I2PAT later. */
3258 if (added_sets_2 && i1_feeds_i2_n)
3259 i1src_copy = copy_rtx (i1src);
3261 /* If I0 feeds into I1 and I0DEST is in I0SRC, we need to make a unique
3262 copy of I1SRC each time we substitute it, in order to avoid creating
3263 self-referential RTL when we will be substituting I0SRC for I0DEST
3264 later. */
3265 newpat = subst (newpat, i1dest, i1src, 0, 0,
3266 i0_feeds_i1_n && i0dest_in_i0src);
3267 substed_i1 = 1;
3269 /* Record whether I1's body now appears within I3's body. */
3270 i1_is_used = n_occurrences;
3273 /* Likewise for I0 if we have it. */
3275 if (i0 && GET_CODE (newpat) != CLOBBER)
3277 if ((FIND_REG_INC_NOTE (i0, NULL_RTX) != 0
3278 && ((i0_feeds_i2_n && dead_or_set_p (i2, i0dest))
3279 || (i0_feeds_i1_n && dead_or_set_p (i1, i0dest)))
3280 && !reg_overlap_mentioned_p (i0dest, newpat))
3281 || !combinable_i3pat (NULL, &newpat, i0dest, NULL_RTX, NULL_RTX,
3282 0, 0, 0))
3284 undo_all ();
3285 return 0;
3288 /* If the following substitution will modify I0SRC, make a copy of it
3289 for the case where it is substituted for I0DEST in I1PAT later. */
3290 if (added_sets_1 && i0_feeds_i1_n)
3291 i0src_copy = copy_rtx (i0src);
3292 /* And a copy for I0DEST in I2PAT substitution. */
3293 if (added_sets_2 && ((i0_feeds_i1_n && i1_feeds_i2_n)
3294 || (i0_feeds_i2_n)))
3295 i0src_copy2 = copy_rtx (i0src);
3297 n_occurrences = 0;
3298 subst_low_luid = DF_INSN_LUID (i0);
3299 newpat = subst (newpat, i0dest, i0src, 0, 0, 0);
3300 substed_i0 = 1;
3303 /* Fail if an autoincrement side-effect has been duplicated. Be careful
3304 to count all the ways that I2SRC and I1SRC can be used. */
3305 if ((FIND_REG_INC_NOTE (i2, NULL_RTX) != 0
3306 && i2_is_used + added_sets_2 > 1)
3307 || (i1 != 0 && FIND_REG_INC_NOTE (i1, NULL_RTX) != 0
3308 && (i1_is_used + added_sets_1 + (added_sets_2 && i1_feeds_i2_n)
3309 > 1))
3310 || (i0 != 0 && FIND_REG_INC_NOTE (i0, NULL_RTX) != 0
3311 && (n_occurrences + added_sets_0
3312 + (added_sets_1 && i0_feeds_i1_n)
3313 + (added_sets_2 && i0_feeds_i2_n)
3314 > 1))
3315 /* Fail if we tried to make a new register. */
3316 || max_reg_num () != maxreg
3317 /* Fail if we couldn't do something and have a CLOBBER. */
3318 || GET_CODE (newpat) == CLOBBER
3319 /* Fail if this new pattern is a MULT and we didn't have one before
3320 at the outer level. */
3321 || (GET_CODE (newpat) == SET && GET_CODE (SET_SRC (newpat)) == MULT
3322 && ! have_mult))
3324 undo_all ();
3325 return 0;
3328 /* If the actions of the earlier insns must be kept
3329 in addition to substituting them into the latest one,
3330 we must make a new PARALLEL for the latest insn
3331 to hold the additional SETs.
3333 if (added_sets_0 || added_sets_1 || added_sets_2)
3335 int extra_sets = added_sets_0 + added_sets_1 + added_sets_2;
3336 combine_extras++;
3338 if (GET_CODE (newpat) == PARALLEL)
3340 rtvec old = XVEC (newpat, 0);
3341 total_sets = XVECLEN (newpat, 0) + extra_sets;
3342 newpat = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (total_sets));
3343 memcpy (XVEC (newpat, 0)->elem, &old->elem[0],
3344 sizeof (old->elem[0]) * old->num_elem);
3346 else
3348 rtx old = newpat;
3349 total_sets = 1 + extra_sets;
3350 newpat = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (total_sets));
3351 XVECEXP (newpat, 0, 0) = old;
3354 if (added_sets_0)
3355 XVECEXP (newpat, 0, --total_sets) = i0pat;
3357 if (added_sets_1)
3359 rtx t = i1pat;
3360 if (i0_feeds_i1_n)
3361 t = subst (t, i0dest, i0src_copy ? i0src_copy : i0src, 0, 0, 0);
3363 XVECEXP (newpat, 0, --total_sets) = t;
3365 if (added_sets_2)
3367 rtx t = i2pat;
3368 if (i1_feeds_i2_n)
3369 t = subst (t, i1dest, i1src_copy ? i1src_copy : i1src, 0, 0,
3370 i0_feeds_i1_n && i0dest_in_i0src);
3371 if ((i0_feeds_i1_n && i1_feeds_i2_n) || i0_feeds_i2_n)
3372 t = subst (t, i0dest, i0src_copy2 ? i0src_copy2 : i0src, 0, 0, 0);
3374 XVECEXP (newpat, 0, --total_sets) = t;
3378 validate_replacement:
3380 /* Note which hard regs this insn has as inputs. */
3381 mark_used_regs_combine (newpat);
3383 /* If recog_for_combine fails, it strips existing clobbers. If we'll
3384 consider splitting this pattern, we might need these clobbers. */
3385 if (i1 && GET_CODE (newpat) == PARALLEL
3386 && GET_CODE (XVECEXP (newpat, 0, XVECLEN (newpat, 0) - 1)) == CLOBBER)
3388 int len = XVECLEN (newpat, 0);
3390 newpat_vec_with_clobbers = rtvec_alloc (len);
3391 for (i = 0; i < len; i++)
3392 RTVEC_ELT (newpat_vec_with_clobbers, i) = XVECEXP (newpat, 0, i);
3395 /* We have recognized nothing yet. */
3396 insn_code_number = -1;
3398 /* See if this is a PARALLEL of two SETs where one SET's destination is
3399 a register that is unused and this isn't marked as an instruction that
3400 might trap in an EH region. In that case, we just need the other SET.
3401 We prefer this over the PARALLEL.
3403 This can occur when simplifying a divmod insn. We *must* test for this
3404 case here because the code below that splits two independent SETs doesn't
3405 handle this case correctly when it updates the register status.
3407 It's pointless doing this if we originally had two sets, one from
3408 i3, and one from i2. Combining then splitting the parallel results
3409 in the original i2 again plus an invalid insn (which we delete).
3410 The net effect is only to move instructions around, which makes
3411 debug info less accurate. */
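 /* For example, a divmod insn may leave us with
      (parallel [(set (reg Q) (div (reg A) (reg B)))
                 (set (reg R) (mod (reg A) (reg B)))])
    where (reg R) has a REG_UNUSED note in I3; only the DIV set is kept.  */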
3413 if (!(added_sets_2 && i1 == 0)
3414 && GET_CODE (newpat) == PARALLEL
3415 && XVECLEN (newpat, 0) == 2
3416 && GET_CODE (XVECEXP (newpat, 0, 0)) == SET
3417 && GET_CODE (XVECEXP (newpat, 0, 1)) == SET
3418 && asm_noperands (newpat) < 0)
3420 rtx set0 = XVECEXP (newpat, 0, 0);
3421 rtx set1 = XVECEXP (newpat, 0, 1);
3422 rtx oldpat = newpat;
3424 if (((REG_P (SET_DEST (set1))
3425 && find_reg_note (i3, REG_UNUSED, SET_DEST (set1)))
3426 || (GET_CODE (SET_DEST (set1)) == SUBREG
3427 && find_reg_note (i3, REG_UNUSED, SUBREG_REG (SET_DEST (set1)))))
3428 && insn_nothrow_p (i3)
3429 && !side_effects_p (SET_SRC (set1)))
3431 newpat = set0;
3432 insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
3435 else if (((REG_P (SET_DEST (set0))
3436 && find_reg_note (i3, REG_UNUSED, SET_DEST (set0)))
3437 || (GET_CODE (SET_DEST (set0)) == SUBREG
3438 && find_reg_note (i3, REG_UNUSED,
3439 SUBREG_REG (SET_DEST (set0)))))
3440 && insn_nothrow_p (i3)
3441 && !side_effects_p (SET_SRC (set0)))
3443 newpat = set1;
3444 insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
3446 if (insn_code_number >= 0)
3447 changed_i3_dest = 1;
3450 if (insn_code_number < 0)
3451 newpat = oldpat;
3454 /* Is the result of combination a valid instruction? */
3455 if (insn_code_number < 0)
3456 insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
3458 /* If we were combining three insns and the result is a simple SET
3459 with no ASM_OPERANDS that wasn't recognized, try to split it into two
3460 insns. There are two ways to do this. It can be split using a
3461 machine-specific method (like when you have an addition of a large
3462 constant) or by combine in the function find_split_point. */
3464 if (i1 && insn_code_number < 0 && GET_CODE (newpat) == SET
3465 && asm_noperands (newpat) < 0)
3467 rtx parallel, *split;
3468 rtx_insn *m_split_insn;
3470 /* See if the MD file can split NEWPAT. If it can't, see if letting it
3471 use I2DEST as a scratch register will help. In the latter case,
3472 convert I2DEST to the mode of the source of NEWPAT if we can. */
3474 m_split_insn = combine_split_insns (newpat, i3);
3476 /* We can only use I2DEST as a scratch reg if it doesn't overlap any
3477 inputs of NEWPAT. */
3479 /* ??? If I2DEST is not safe, and I1DEST exists, then it would be
3480 possible to try that as a scratch reg. This would require adding
3481 more code to make it work though. */
3483 if (m_split_insn == 0 && ! reg_overlap_mentioned_p (i2dest, newpat))
3485 machine_mode new_mode = GET_MODE (SET_DEST (newpat));
3487 /* First try to split using the original register as a
3488 scratch register. */
3489 parallel = gen_rtx_PARALLEL (VOIDmode,
3490 gen_rtvec (2, newpat,
3491 gen_rtx_CLOBBER (VOIDmode,
3492 i2dest)));
3493 m_split_insn = combine_split_insns (parallel, i3);
3495 /* If that didn't work, try changing the mode of I2DEST if
3496 we can. */
3497 if (m_split_insn == 0
3498 && new_mode != GET_MODE (i2dest)
3499 && new_mode != VOIDmode
3500 && can_change_dest_mode (i2dest, added_sets_2, new_mode))
3502 machine_mode old_mode = GET_MODE (i2dest);
3503 rtx ni2dest;
3505 if (REGNO (i2dest) < FIRST_PSEUDO_REGISTER)
3506 ni2dest = gen_rtx_REG (new_mode, REGNO (i2dest));
3507 else
3509 SUBST_MODE (regno_reg_rtx[REGNO (i2dest)], new_mode);
3510 ni2dest = regno_reg_rtx[REGNO (i2dest)];
3513 parallel = (gen_rtx_PARALLEL
3514 (VOIDmode,
3515 gen_rtvec (2, newpat,
3516 gen_rtx_CLOBBER (VOIDmode,
3517 ni2dest))));
3518 m_split_insn = combine_split_insns (parallel, i3);
3520 if (m_split_insn == 0
3521 && REGNO (i2dest) >= FIRST_PSEUDO_REGISTER)
3523 struct undo *buf;
3525 adjust_reg_mode (regno_reg_rtx[REGNO (i2dest)], old_mode);
3526 buf = undobuf.undos;
3527 undobuf.undos = buf->next;
3528 buf->next = undobuf.frees;
3529 undobuf.frees = buf;
3533 i2scratch = m_split_insn != 0;
3536 /* If recog_for_combine has discarded clobbers, try to use them
3537 again for the split. */
3538 if (m_split_insn == 0 && newpat_vec_with_clobbers)
3540 parallel = gen_rtx_PARALLEL (VOIDmode, newpat_vec_with_clobbers);
3541 m_split_insn = combine_split_insns (parallel, i3);
3544 if (m_split_insn && NEXT_INSN (m_split_insn) == NULL_RTX)
3546 rtx m_split_pat = PATTERN (m_split_insn);
3547 insn_code_number = recog_for_combine (&m_split_pat, i3, &new_i3_notes);
3548 if (insn_code_number >= 0)
3549 newpat = m_split_pat;
3551 else if (m_split_insn && NEXT_INSN (NEXT_INSN (m_split_insn)) == NULL_RTX
3552 && (next_nonnote_nondebug_insn (i2) == i3
3553 || ! use_crosses_set_p (PATTERN (m_split_insn), DF_INSN_LUID (i2))))
3555 rtx i2set, i3set;
3556 rtx newi3pat = PATTERN (NEXT_INSN (m_split_insn));
3557 newi2pat = PATTERN (m_split_insn);
3559 i3set = single_set (NEXT_INSN (m_split_insn));
3560 i2set = single_set (m_split_insn);
3562 i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);
3564 /* If I2 or I3 has multiple SETs, we won't know how to track
3565 register status, so don't use these insns. If I2's destination
3566 is used between I2 and I3, we also can't use these insns. */
3568 if (i2_code_number >= 0 && i2set && i3set
3569 && (next_nonnote_nondebug_insn (i2) == i3
3570 || ! reg_used_between_p (SET_DEST (i2set), i2, i3)))
3571 insn_code_number = recog_for_combine (&newi3pat, i3,
3572 &new_i3_notes);
3573 if (insn_code_number >= 0)
3574 newpat = newi3pat;
3576 /* It is possible that both insns now set the destination of I3.
3577 If so, we must show an extra use of it. */
3579 if (insn_code_number >= 0)
3581 rtx new_i3_dest = SET_DEST (i3set);
3582 rtx new_i2_dest = SET_DEST (i2set);
3584 while (GET_CODE (new_i3_dest) == ZERO_EXTRACT
3585 || GET_CODE (new_i3_dest) == STRICT_LOW_PART
3586 || GET_CODE (new_i3_dest) == SUBREG)
3587 new_i3_dest = XEXP (new_i3_dest, 0);
3589 while (GET_CODE (new_i2_dest) == ZERO_EXTRACT
3590 || GET_CODE (new_i2_dest) == STRICT_LOW_PART
3591 || GET_CODE (new_i2_dest) == SUBREG)
3592 new_i2_dest = XEXP (new_i2_dest, 0);
3594 if (REG_P (new_i3_dest)
3595 && REG_P (new_i2_dest)
3596 && REGNO (new_i3_dest) == REGNO (new_i2_dest))
3597 INC_REG_N_SETS (REGNO (new_i2_dest), 1);
3601 /* If we can split it and use I2DEST, go ahead and see if that
3602 helps things be recognized. Verify that none of the registers
3603 are set between I2 and I3. */
3604 if (insn_code_number < 0
3605 && (split = find_split_point (&newpat, i3, false)) != 0
3606 #ifdef HAVE_cc0
3607 && REG_P (i2dest)
3608 #endif
3609 /* We need I2DEST in the proper mode. If it is a hard register
3610 or the only use of a pseudo, we can change its mode.
3611 Make sure we don't change a hard register to have a mode that
3612 isn't valid for it, or change the number of registers. */
3613 && (GET_MODE (*split) == GET_MODE (i2dest)
3614 || GET_MODE (*split) == VOIDmode
3615 || can_change_dest_mode (i2dest, added_sets_2,
3616 GET_MODE (*split)))
3617 && (next_nonnote_nondebug_insn (i2) == i3
3618 || ! use_crosses_set_p (*split, DF_INSN_LUID (i2)))
3619 /* We can't overwrite I2DEST if its value is still used by
3620 NEWPAT. */
3621 && ! reg_referenced_p (i2dest, newpat))
3623 rtx newdest = i2dest;
3624 enum rtx_code split_code = GET_CODE (*split);
3625 machine_mode split_mode = GET_MODE (*split);
3626 bool subst_done = false;
3627 newi2pat = NULL_RTX;
3629 i2scratch = true;
3631 /* *SPLIT may be part of I2SRC, so make sure we have the
3632 original expression around for later debug processing.
3633 We should not need I2SRC any more in other cases. */
3634 if (MAY_HAVE_DEBUG_INSNS)
3635 i2src = copy_rtx (i2src);
3636 else
3637 i2src = NULL;
3639 /* Get NEWDEST as a register in the proper mode. We have already
3640 validated that we can do this. */
3641 if (GET_MODE (i2dest) != split_mode && split_mode != VOIDmode)
3643 if (REGNO (i2dest) < FIRST_PSEUDO_REGISTER)
3644 newdest = gen_rtx_REG (split_mode, REGNO (i2dest));
3645 else
3647 SUBST_MODE (regno_reg_rtx[REGNO (i2dest)], split_mode);
3648 newdest = regno_reg_rtx[REGNO (i2dest)];
3652 /* If *SPLIT is a (mult FOO (const_int pow2)), convert it to
3653 an ASHIFT. This can occur if it was inside a PLUS and hence
3654 appeared to be a memory address. This is a kludge. */
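 /* For example, (mult (reg X) (const_int 4)) is rewritten here as
    (ashift (reg X) (const_int 2)).  */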
3655 if (split_code == MULT
3656 && CONST_INT_P (XEXP (*split, 1))
3657 && INTVAL (XEXP (*split, 1)) > 0
3658 && (i = exact_log2 (UINTVAL (XEXP (*split, 1)))) >= 0)
3660 SUBST (*split, gen_rtx_ASHIFT (split_mode,
3661 XEXP (*split, 0), GEN_INT (i)));
3662 /* Update split_code because we may not have a multiply
3663 anymore. */
3664 split_code = GET_CODE (*split);
3667 #ifdef INSN_SCHEDULING
3668 /* If *SPLIT is a paradoxical SUBREG, when we split it, it should
3669 be written as a ZERO_EXTEND. */
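 /* For example, (subreg:SI (mem:QI ADDR) 0) becomes
    (zero_extend:SI (mem:QI ADDR)), or a SIGN_EXTEND if LOAD_EXTEND_OP
    says the target extends QImode loads that way.  */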
3670 if (split_code == SUBREG && MEM_P (SUBREG_REG (*split)))
3672 #ifdef LOAD_EXTEND_OP
3673 /* Or as a SIGN_EXTEND if LOAD_EXTEND_OP says that that's
3674 what it really is. */
3675 if (LOAD_EXTEND_OP (GET_MODE (SUBREG_REG (*split)))
3676 == SIGN_EXTEND)
3677 SUBST (*split, gen_rtx_SIGN_EXTEND (split_mode,
3678 SUBREG_REG (*split)));
3679 else
3680 #endif
3681 SUBST (*split, gen_rtx_ZERO_EXTEND (split_mode,
3682 SUBREG_REG (*split)));
3684 #endif
3686 /* Attempt to split binary operators using arithmetic identities. */
3687 if (BINARY_P (SET_SRC (newpat))
3688 && split_mode == GET_MODE (SET_SRC (newpat))
3689 && ! side_effects_p (SET_SRC (newpat)))
3691 rtx setsrc = SET_SRC (newpat);
3692 machine_mode mode = GET_MODE (setsrc);
3693 enum rtx_code code = GET_CODE (setsrc);
3694 rtx src_op0 = XEXP (setsrc, 0);
3695 rtx src_op1 = XEXP (setsrc, 1);
3697 /* Split "X = Y op Y" as "Z = Y; X = Z op Z". */
3698 if (rtx_equal_p (src_op0, src_op1))
3700 newi2pat = gen_rtx_SET (VOIDmode, newdest, src_op0);
3701 SUBST (XEXP (setsrc, 0), newdest);
3702 SUBST (XEXP (setsrc, 1), newdest);
3703 subst_done = true;
3705 /* Split "((P op Q) op R) op S" where op is PLUS or MULT. */
3706 else if ((code == PLUS || code == MULT)
3707 && GET_CODE (src_op0) == code
3708 && GET_CODE (XEXP (src_op0, 0)) == code
3709 && (INTEGRAL_MODE_P (mode)
3710 || (FLOAT_MODE_P (mode)
3711 && flag_unsafe_math_optimizations)))
3713 rtx p = XEXP (XEXP (src_op0, 0), 0);
3714 rtx q = XEXP (XEXP (src_op0, 0), 1);
3715 rtx r = XEXP (src_op0, 1);
3716 rtx s = src_op1;
3718 /* Split both "((X op Y) op X) op Y" and
3719 "((X op Y) op Y) op X" as "T op T" where T is
3720 "X op Y". */
3721 if ((rtx_equal_p (p,r) && rtx_equal_p (q,s))
3722 || (rtx_equal_p (p,s) && rtx_equal_p (q,r)))
3724 newi2pat = gen_rtx_SET (VOIDmode, newdest,
3725 XEXP (src_op0, 0));
3726 SUBST (XEXP (setsrc, 0), newdest);
3727 SUBST (XEXP (setsrc, 1), newdest);
3728 subst_done = true;
3730 /* Split "((X op X) op Y) op Y" as "T op T" where
3731 T is "X op Y". */
3732 else if (rtx_equal_p (p,q) && rtx_equal_p (r,s))
3734 rtx tmp = simplify_gen_binary (code, mode, p, r);
3735 newi2pat = gen_rtx_SET (VOIDmode, newdest, tmp);
3736 SUBST (XEXP (setsrc, 0), newdest);
3737 SUBST (XEXP (setsrc, 1), newdest);
3738 subst_done = true;
3743 if (!subst_done)
3745 newi2pat = gen_rtx_SET (VOIDmode, newdest, *split);
3746 SUBST (*split, newdest);
3749 i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);
3751 /* recog_for_combine might have added CLOBBERs to newi2pat.
3752 Make sure NEWPAT does not depend on the clobbered regs. */
3753 if (GET_CODE (newi2pat) == PARALLEL)
3754 for (i = XVECLEN (newi2pat, 0) - 1; i >= 0; i--)
3755 if (GET_CODE (XVECEXP (newi2pat, 0, i)) == CLOBBER)
3757 rtx reg = XEXP (XVECEXP (newi2pat, 0, i), 0);
3758 if (reg_overlap_mentioned_p (reg, newpat))
3760 undo_all ();
3761 return 0;
3765 /* If the split point was a MULT and we didn't have one before,
3766 don't use one now. */
3767 if (i2_code_number >= 0 && ! (split_code == MULT && ! have_mult))
3768 insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
3772 /* Check for a case where we loaded from memory in a narrow mode and
3773 then sign extended it, but we need both registers. In that case,
3774 we have a PARALLEL with both loads from the same memory location.
3775 We can split this into a load from memory followed by a register-register
3776 copy. This saves at least one insn, more if register allocation can
3777 eliminate the copy.
3779 We cannot do this if the destination of the first assignment is a
3780 condition code register or cc0. We eliminate this case by making sure
3781 the SET_DEST and SET_SRC have the same mode.
3783 We cannot do this if the destination of the second assignment is
3784 a register that we have already assumed is zero-extended. Similarly
3785 for a SUBREG of such a register. */
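 /* For example,
      (parallel [(set (reg:SI A) (sign_extend:SI (mem:HI M)))
                 (set (reg:HI B) (mem:HI M))])
    becomes I2 = the extending load and I3 = a copy of the low part of
    (reg:SI A) into (reg:HI B).  */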
3787 else if (i1 && insn_code_number < 0 && asm_noperands (newpat) < 0
3788 && GET_CODE (newpat) == PARALLEL
3789 && XVECLEN (newpat, 0) == 2
3790 && GET_CODE (XVECEXP (newpat, 0, 0)) == SET
3791 && GET_CODE (SET_SRC (XVECEXP (newpat, 0, 0))) == SIGN_EXTEND
3792 && (GET_MODE (SET_DEST (XVECEXP (newpat, 0, 0)))
3793 == GET_MODE (SET_SRC (XVECEXP (newpat, 0, 0))))
3794 && GET_CODE (XVECEXP (newpat, 0, 1)) == SET
3795 && rtx_equal_p (SET_SRC (XVECEXP (newpat, 0, 1)),
3796 XEXP (SET_SRC (XVECEXP (newpat, 0, 0)), 0))
3797 && ! use_crosses_set_p (SET_SRC (XVECEXP (newpat, 0, 1)),
3798 DF_INSN_LUID (i2))
3799 && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != ZERO_EXTRACT
3800 && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != STRICT_LOW_PART
3801 && ! (temp_expr = SET_DEST (XVECEXP (newpat, 0, 1)),
3802 (REG_P (temp_expr)
3803 && reg_stat[REGNO (temp_expr)].nonzero_bits != 0
3804 && GET_MODE_PRECISION (GET_MODE (temp_expr)) < BITS_PER_WORD
3805 && GET_MODE_PRECISION (GET_MODE (temp_expr)) < HOST_BITS_PER_INT
3806 && (reg_stat[REGNO (temp_expr)].nonzero_bits
3807 != GET_MODE_MASK (word_mode))))
3808 && ! (GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) == SUBREG
3809 && (temp_expr = SUBREG_REG (SET_DEST (XVECEXP (newpat, 0, 1))),
3810 (REG_P (temp_expr)
3811 && reg_stat[REGNO (temp_expr)].nonzero_bits != 0
3812 && GET_MODE_PRECISION (GET_MODE (temp_expr)) < BITS_PER_WORD
3813 && GET_MODE_PRECISION (GET_MODE (temp_expr)) < HOST_BITS_PER_INT
3814 && (reg_stat[REGNO (temp_expr)].nonzero_bits
3815 != GET_MODE_MASK (word_mode)))))
3816 && ! reg_overlap_mentioned_p (SET_DEST (XVECEXP (newpat, 0, 1)),
3817 SET_SRC (XVECEXP (newpat, 0, 1)))
3818 && ! find_reg_note (i3, REG_UNUSED,
3819 SET_DEST (XVECEXP (newpat, 0, 0))))
3821 rtx ni2dest;
3823 newi2pat = XVECEXP (newpat, 0, 0);
3824 ni2dest = SET_DEST (XVECEXP (newpat, 0, 0));
3825 newpat = XVECEXP (newpat, 0, 1);
3826 SUBST (SET_SRC (newpat),
3827 gen_lowpart (GET_MODE (SET_SRC (newpat)), ni2dest));
3828 i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);
3830 if (i2_code_number >= 0)
3831 insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
3833 if (insn_code_number >= 0)
3834 swap_i2i3 = 1;
3837 /* Similarly, check for a case where we have a PARALLEL of two independent
3838 SETs but we started with three insns. In this case, we can do the sets
3839 as two separate insns. This case occurs when some SET allows two
3840 other insns to combine, but the destination of that SET is still live.
3842 Also do this if we started with two insns and (at least) one of the
3843 resulting sets is a noop; this noop will be deleted later. */
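 /* For example, (parallel [(set (reg A) X) (set (reg B) Y)]) can become
    NEWI2PAT = one of the SETs and NEWPAT = the other, provided neither
    source refers to the other SET's destination.  */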
3845 else if (insn_code_number < 0 && asm_noperands (newpat) < 0
3846 && GET_CODE (newpat) == PARALLEL
3847 && XVECLEN (newpat, 0) == 2
3848 && GET_CODE (XVECEXP (newpat, 0, 0)) == SET
3849 && GET_CODE (XVECEXP (newpat, 0, 1)) == SET
3850 && (i1 || set_noop_p (XVECEXP (newpat, 0, 0))
3851 || set_noop_p (XVECEXP (newpat, 0, 1)))
3852 && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 0))) != ZERO_EXTRACT
3853 && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 0))) != STRICT_LOW_PART
3854 && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != ZERO_EXTRACT
3855 && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != STRICT_LOW_PART
3856 && ! reg_referenced_p (SET_DEST (XVECEXP (newpat, 0, 1)),
3857 XVECEXP (newpat, 0, 0))
3858 && ! reg_referenced_p (SET_DEST (XVECEXP (newpat, 0, 0)),
3859 XVECEXP (newpat, 0, 1))
3860 && ! (contains_muldiv (SET_SRC (XVECEXP (newpat, 0, 0)))
3861 && contains_muldiv (SET_SRC (XVECEXP (newpat, 0, 1)))))
3863 rtx set0 = XVECEXP (newpat, 0, 0);
3864 rtx set1 = XVECEXP (newpat, 0, 1);
3866 /* Normally, it doesn't matter which of the two is done first,
3867 but the one that references cc0 can't be the second, and
3868 one which uses any regs/memory set in between i2 and i3 can't
3869 be first. The PARALLEL might also have been pre-existing in i3,
3870 so we need to make sure that we won't wrongly hoist a SET to i2
3871 that would conflict with a death note present in there. */
3872 if (!use_crosses_set_p (SET_SRC (set1), DF_INSN_LUID (i2))
3873 && !(REG_P (SET_DEST (set1))
3874 && find_reg_note (i2, REG_DEAD, SET_DEST (set1)))
3875 && !(GET_CODE (SET_DEST (set1)) == SUBREG
3876 && find_reg_note (i2, REG_DEAD,
3877 SUBREG_REG (SET_DEST (set1))))
3878 #ifdef HAVE_cc0
3879 && !reg_referenced_p (cc0_rtx, set0)
3880 #endif
3881 /* If I3 is a jump, ensure that set0 is a jump so that
3882 we do not create invalid RTL. */
3883 && (!JUMP_P (i3) || SET_DEST (set0) == pc_rtx)
3886 newi2pat = set1;
3887 newpat = set0;
3889 else if (!use_crosses_set_p (SET_SRC (set0), DF_INSN_LUID (i2))
3890 && !(REG_P (SET_DEST (set0))
3891 && find_reg_note (i2, REG_DEAD, SET_DEST (set0)))
3892 && !(GET_CODE (SET_DEST (set0)) == SUBREG
3893 && find_reg_note (i2, REG_DEAD,
3894 SUBREG_REG (SET_DEST (set0))))
3895 #ifdef HAVE_cc0
3896 && !reg_referenced_p (cc0_rtx, set1)
3897 #endif
3898 /* If I3 is a jump, ensure that set1 is a jump so that
3899 we do not create invalid RTL. */
3900 && (!JUMP_P (i3) || SET_DEST (set1) == pc_rtx)
3903 newi2pat = set0;
3904 newpat = set1;
3906 else
3908 undo_all ();
3909 return 0;
3912 i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);
3914 if (i2_code_number >= 0)
3916 /* recog_for_combine might have added CLOBBERs to newi2pat.
3917 Make sure NEWPAT does not depend on the clobbered regs. */
3918 if (GET_CODE (newi2pat) == PARALLEL)
3920 for (i = XVECLEN (newi2pat, 0) - 1; i >= 0; i--)
3921 if (GET_CODE (XVECEXP (newi2pat, 0, i)) == CLOBBER)
3923 rtx reg = XEXP (XVECEXP (newi2pat, 0, i), 0);
3924 if (reg_overlap_mentioned_p (reg, newpat))
3926 undo_all ();
3927 return 0;
3932 insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
3936 /* If it still isn't recognized, fail and change things back the way they
3937 were. */
3938 if ((insn_code_number < 0
3939 /* Is the result a reasonable ASM_OPERANDS? */
3940 && (! check_asm_operands (newpat) || added_sets_1 || added_sets_2)))
3942 undo_all ();
3943 return 0;
3946 /* If we had to change another insn, make sure it is valid also. */
3947 if (undobuf.other_insn)
3949 CLEAR_HARD_REG_SET (newpat_used_regs);
3951 other_pat = PATTERN (undobuf.other_insn);
3952 other_code_number = recog_for_combine (&other_pat, undobuf.other_insn,
3953 &new_other_notes);
3955 if (other_code_number < 0 && ! check_asm_operands (other_pat))
3957 undo_all ();
3958 return 0;
3962 #ifdef HAVE_cc0
3963 /* If I2 is the CC0 setter and I3 is the CC0 user then check whether
3964 they are adjacent to each other or not. */
3966 rtx_insn *p = prev_nonnote_insn (i3);
3967 if (p && p != i2 && NONJUMP_INSN_P (p) && newi2pat
3968 && sets_cc0_p (newi2pat))
3970 undo_all ();
3971 return 0;
3974 #endif
3976 /* Only allow this combination if insn_rtx_costs reports that the
3977 replacement instructions are cheaper than the originals. */
3978 if (!combine_validate_cost (i0, i1, i2, i3, newpat, newi2pat, other_pat))
3980 undo_all ();
3981 return 0;
3984 if (MAY_HAVE_DEBUG_INSNS)
3986 struct undo *undo;
3988 for (undo = undobuf.undos; undo; undo = undo->next)
3989 if (undo->kind == UNDO_MODE)
3991 rtx reg = *undo->where.r;
3992 machine_mode new_mode = GET_MODE (reg);
3993 machine_mode old_mode = undo->old_contents.m;
3995 /* Temporarily revert mode back. */
3996 adjust_reg_mode (reg, old_mode);
3998 if (reg == i2dest && i2scratch)
4000 /* If we used i2dest as a scratch register with a
4001 different mode, substitute it for the original
4002 i2src while its original mode is temporarily
4003 restored, and then clear i2scratch so that we don't
4004 do it again later. */
4005 propagate_for_debug (i2, last_combined_insn, reg, i2src,
4006 this_basic_block);
4007 i2scratch = false;
4008 /* Put back the new mode. */
4009 adjust_reg_mode (reg, new_mode);
4011 else
4013 rtx tempreg = gen_raw_REG (old_mode, REGNO (reg));
4014 rtx_insn *first, *last;
4016 if (reg == i2dest)
4018 first = i2;
4019 last = last_combined_insn;
4021 else
4023 first = i3;
4024 last = undobuf.other_insn;
4025 gcc_assert (last);
4026 if (DF_INSN_LUID (last)
4027 < DF_INSN_LUID (last_combined_insn))
4028 last = last_combined_insn;
4031 /* We're dealing with a reg that changed mode but not
4032 meaning, so we want to turn it into a subreg for
4033 the new mode. However, because of REG sharing and
4034 because its mode had already changed, we have to do
4035 it in two steps. First, replace any debug uses of
4036 reg, with its original mode temporarily restored,
4037 with this copy we have created; then, replace the
4038 copy with the SUBREG of the original shared reg,
4039 once again changed to the new mode. */
4040 propagate_for_debug (first, last, reg, tempreg,
4041 this_basic_block);
4042 adjust_reg_mode (reg, new_mode);
4043 propagate_for_debug (first, last, tempreg,
4044 lowpart_subreg (old_mode, reg, new_mode),
4045 this_basic_block);
4050 /* If we will be able to accept this, we have made a
4051 change to the destination of I3. This requires us to
4052 do a few adjustments. */
4054 if (changed_i3_dest)
4056 PATTERN (i3) = newpat;
4057 adjust_for_new_dest (i3);
4060 /* We now know that we can do this combination. Merge the insns and
4061 update the status of registers and LOG_LINKS. */
4063 if (undobuf.other_insn)
4065 rtx note, next;
4067 PATTERN (undobuf.other_insn) = other_pat;
4069 /* If any of the notes in OTHER_INSN were REG_DEAD or REG_UNUSED,
4070 ensure that they are still valid. Then add any non-duplicate
4071 notes added by recog_for_combine. */
4072 for (note = REG_NOTES (undobuf.other_insn); note; note = next)
4074 next = XEXP (note, 1);
4076 if ((REG_NOTE_KIND (note) == REG_DEAD
4077 && !reg_referenced_p (XEXP (note, 0),
4078 PATTERN (undobuf.other_insn)))
4079 || (REG_NOTE_KIND (note) == REG_UNUSED
4080 && !reg_set_p (XEXP (note, 0),
4081 PATTERN (undobuf.other_insn))))
4082 remove_note (undobuf.other_insn, note);
4085 distribute_notes (new_other_notes, undobuf.other_insn,
4086 undobuf.other_insn, NULL, NULL_RTX, NULL_RTX,
4087 NULL_RTX);
4090 if (swap_i2i3)
4092 rtx_insn *insn;
4093 struct insn_link *link;
4094 rtx ni2dest;
4096 /* I3 now uses what used to be its destination and which is now
4097 I2's destination. This requires us to do a few adjustments. */
4098 PATTERN (i3) = newpat;
4099 adjust_for_new_dest (i3);
4101 /* We need a LOG_LINK from I3 to I2. But we used to have one,
4102 so we still will.
4104 However, some later insn might be using I2's dest and have
4105 a LOG_LINK pointing at I3. We must remove this link.
4106 The simplest way to remove the link is to point it at I1,
4107 which we know will be a NOTE. */
4109 /* newi2pat is usually a SET here; however, recog_for_combine might
4110 have added some clobbers. */
4111 if (GET_CODE (newi2pat) == PARALLEL)
4112 ni2dest = SET_DEST (XVECEXP (newi2pat, 0, 0));
4113 else
4114 ni2dest = SET_DEST (newi2pat);
4116 for (insn = NEXT_INSN (i3);
4117 insn && (this_basic_block->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
4118 || insn != BB_HEAD (this_basic_block->next_bb));
4119 insn = NEXT_INSN (insn))
4121 if (INSN_P (insn) && reg_referenced_p (ni2dest, PATTERN (insn)))
4123 FOR_EACH_LOG_LINK (link, insn)
4124 if (link->insn == i3)
4125 link->insn = i1;
4127 break;
4133 rtx i3notes, i2notes, i1notes = 0, i0notes = 0;
4134 struct insn_link *i3links, *i2links, *i1links = 0, *i0links = 0;
4135 rtx midnotes = 0;
4136 int from_luid;
4137 /* Compute which registers we expect to eliminate. newi2pat may be setting
4138 either i3dest or i2dest, so we must check it. */
4139 rtx elim_i2 = ((newi2pat && reg_set_p (i2dest, newi2pat))
4140 || i2dest_in_i2src || i2dest_in_i1src || i2dest_in_i0src
4141 || !i2dest_killed
4142 ? 0 : i2dest);
4143 /* For i1, we need to compute both local elimination and global
4144 elimination information with respect to newi2pat because i1dest
4145 may be the same as i3dest, in which case newi2pat may be setting
4146 i1dest. Global information is used when distributing REG_DEAD
4147 note for i2 and i3, in which case it does matter if newi2pat sets
4148 i1dest or not.
4150 Local information is used when distributing REG_DEAD note for i1,
4151 in which case it doesn't matter if newi2pat sets i1dest or not.
4152 See PR62151, if we have four insns combination:
4153 i0: r0 <- i0src
4154 i1: r1 <- i1src (using r0)
4155 REG_DEAD (r0)
4156 i2: r0 <- i2src (using r1)
4157 i3: r3 <- i3src (using r0)
4158 ix: using r0
4159 From i1's point of view, r0 is eliminated, no matter if it is set
4160 by newi2pat or not. In other words, REG_DEAD info for r0 in i1
4161 should be discarded.
4163 Note local information only affects cases in forms like "I1->I2->I3",
4164 "I0->I1->I2->I3" or "I0&I1->I2, I2->I3". For other cases like
4165 "I0->I1, I1&I2->I3" or "I1&I2->I3", newi2pat won't set i1dest or
4166 i0dest anyway. */
4167 rtx local_elim_i1 = (i1 == 0 || i1dest_in_i1src || i1dest_in_i0src
4168 || !i1dest_killed
4169 ? 0 : i1dest);
4170 rtx elim_i1 = (local_elim_i1 == 0
4171 || (newi2pat && reg_set_p (i1dest, newi2pat))
4172 ? 0 : i1dest);
4173 /* Same case as i1. */
4174 rtx local_elim_i0 = (i0 == 0 || i0dest_in_i0src || !i0dest_killed
4175 ? 0 : i0dest);
4176 rtx elim_i0 = (local_elim_i0 == 0
4177 || (newi2pat && reg_set_p (i0dest, newi2pat))
4178 ? 0 : i0dest);
4180 /* Get the old REG_NOTES and LOG_LINKS from all our insns and
4181 clear them. */
4182 i3notes = REG_NOTES (i3), i3links = LOG_LINKS (i3);
4183 i2notes = REG_NOTES (i2), i2links = LOG_LINKS (i2);
4184 if (i1)
4185 i1notes = REG_NOTES (i1), i1links = LOG_LINKS (i1);
4186 if (i0)
4187 i0notes = REG_NOTES (i0), i0links = LOG_LINKS (i0);
4189 /* Ensure that we do not have something that should not be shared but
4190 occurs multiple times in the new insns. Check this by first
4191 resetting all the `used' flags and then copying anything that is shared. */
4193 reset_used_flags (i3notes);
4194 reset_used_flags (i2notes);
4195 reset_used_flags (i1notes);
4196 reset_used_flags (i0notes);
4197 reset_used_flags (newpat);
4198 reset_used_flags (newi2pat);
4199 if (undobuf.other_insn)
4200 reset_used_flags (PATTERN (undobuf.other_insn));
4202 i3notes = copy_rtx_if_shared (i3notes);
4203 i2notes = copy_rtx_if_shared (i2notes);
4204 i1notes = copy_rtx_if_shared (i1notes);
4205 i0notes = copy_rtx_if_shared (i0notes);
4206 newpat = copy_rtx_if_shared (newpat);
4207 newi2pat = copy_rtx_if_shared (newi2pat);
4208 if (undobuf.other_insn)
4209 reset_used_flags (PATTERN (undobuf.other_insn));
4211 INSN_CODE (i3) = insn_code_number;
4212 PATTERN (i3) = newpat;
4214 if (CALL_P (i3) && CALL_INSN_FUNCTION_USAGE (i3))
4216 rtx call_usage = CALL_INSN_FUNCTION_USAGE (i3);
4218 reset_used_flags (call_usage);
4219 call_usage = copy_rtx (call_usage);
4221 if (substed_i2)
4223 /* I2SRC must still be meaningful at this point. Some splitting
4224 operations can invalidate I2SRC, but those operations do not
4225 apply to calls. */
4226 gcc_assert (i2src);
4227 replace_rtx (call_usage, i2dest, i2src);
4230 if (substed_i1)
4231 replace_rtx (call_usage, i1dest, i1src);
4232 if (substed_i0)
4233 replace_rtx (call_usage, i0dest, i0src);
4235 CALL_INSN_FUNCTION_USAGE (i3) = call_usage;
4238 if (undobuf.other_insn)
4239 INSN_CODE (undobuf.other_insn) = other_code_number;
4241 /* We had one special case above where I2 had more than one set and
4242 we replaced a destination of one of those sets with the destination
4243 of I3. In that case, we have to update LOG_LINKS of insns later
4244 in this basic block. Note that this (expensive) case is rare.
4246 Also, in this case, we must pretend that all REG_NOTEs for I2
4247 actually came from I3, so that REG_UNUSED notes from I2 will be
4248 properly handled. */
4250 if (i3_subst_into_i2)
4252 for (i = 0; i < XVECLEN (PATTERN (i2), 0); i++)
4253 if ((GET_CODE (XVECEXP (PATTERN (i2), 0, i)) == SET
4254 || GET_CODE (XVECEXP (PATTERN (i2), 0, i)) == CLOBBER)
4255 && REG_P (SET_DEST (XVECEXP (PATTERN (i2), 0, i)))
4256 && SET_DEST (XVECEXP (PATTERN (i2), 0, i)) != i2dest
4257 && ! find_reg_note (i2, REG_UNUSED,
4258 SET_DEST (XVECEXP (PATTERN (i2), 0, i))))
4259 for (temp_insn = NEXT_INSN (i2);
4260 temp_insn
4261 && (this_basic_block->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
4262 || BB_HEAD (this_basic_block) != temp_insn);
4263 temp_insn = NEXT_INSN (temp_insn))
4264 if (temp_insn != i3 && INSN_P (temp_insn))
4265 FOR_EACH_LOG_LINK (link, temp_insn)
4266 if (link->insn == i2)
4267 link->insn = i3;
4269 if (i3notes)
4271 rtx link = i3notes;
4272 while (XEXP (link, 1))
4273 link = XEXP (link, 1);
4274 XEXP (link, 1) = i2notes;
4276 else
4277 i3notes = i2notes;
4278 i2notes = 0;
4281 LOG_LINKS (i3) = NULL;
4282 REG_NOTES (i3) = 0;
4283 LOG_LINKS (i2) = NULL;
4284 REG_NOTES (i2) = 0;
4286 if (newi2pat)
4288 if (MAY_HAVE_DEBUG_INSNS && i2scratch)
4289 propagate_for_debug (i2, last_combined_insn, i2dest, i2src,
4290 this_basic_block);
4291 INSN_CODE (i2) = i2_code_number;
4292 PATTERN (i2) = newi2pat;
4294 else
4296 if (MAY_HAVE_DEBUG_INSNS && i2src)
4297 propagate_for_debug (i2, last_combined_insn, i2dest, i2src,
4298 this_basic_block);
4299 SET_INSN_DELETED (i2);
4302 if (i1)
4304 LOG_LINKS (i1) = NULL;
4305 REG_NOTES (i1) = 0;
4306 if (MAY_HAVE_DEBUG_INSNS)
4307 propagate_for_debug (i1, last_combined_insn, i1dest, i1src,
4308 this_basic_block);
4309 SET_INSN_DELETED (i1);
4312 if (i0)
4314 LOG_LINKS (i0) = NULL;
4315 REG_NOTES (i0) = 0;
4316 if (MAY_HAVE_DEBUG_INSNS)
4317 propagate_for_debug (i0, last_combined_insn, i0dest, i0src,
4318 this_basic_block);
4319 SET_INSN_DELETED (i0);
4322 /* Get death notes for everything that is now used in either I3 or
4323 I2 and used to die in a previous insn. If we built two new
4324 patterns, move from I1 to I2 then I2 to I3 so that we get the
4325 proper movement on registers that I2 modifies. */
4327 if (i0)
4328 from_luid = DF_INSN_LUID (i0);
4329 else if (i1)
4330 from_luid = DF_INSN_LUID (i1);
4331 else
4332 from_luid = DF_INSN_LUID (i2);
4333 if (newi2pat)
4334 move_deaths (newi2pat, NULL_RTX, from_luid, i2, &midnotes);
4335 move_deaths (newpat, newi2pat, from_luid, i3, &midnotes);
4337 /* Distribute all the LOG_LINKS and REG_NOTES from I1, I2, and I3. */
4338 if (i3notes)
4339 distribute_notes (i3notes, i3, i3, newi2pat ? i2 : NULL,
4340 elim_i2, elim_i1, elim_i0);
4341 if (i2notes)
4342 distribute_notes (i2notes, i2, i3, newi2pat ? i2 : NULL,
4343 elim_i2, elim_i1, elim_i0);
4344 if (i1notes)
4345 distribute_notes (i1notes, i1, i3, newi2pat ? i2 : NULL,
4346 elim_i2, local_elim_i1, local_elim_i0);
4347 if (i0notes)
4348 distribute_notes (i0notes, i0, i3, newi2pat ? i2 : NULL,
4349 elim_i2, elim_i1, local_elim_i0);
4350 if (midnotes)
4351 distribute_notes (midnotes, NULL, i3, newi2pat ? i2 : NULL,
4352 elim_i2, elim_i1, elim_i0);
4354 /* Distribute any notes added to I2 or I3 by recog_for_combine. We
4355 know these are REG_UNUSED and want them to go to the desired insn,
4356 so we always pass it as i3. */
4358 if (newi2pat && new_i2_notes)
4359 distribute_notes (new_i2_notes, i2, i2, NULL, NULL_RTX, NULL_RTX,
4360 NULL_RTX);
4362 if (new_i3_notes)
4363 distribute_notes (new_i3_notes, i3, i3, NULL, NULL_RTX, NULL_RTX,
4364 NULL_RTX);
4366 /* If I3DEST was used in I3SRC, it really died in I3. We may need to
4367 put a REG_DEAD note for it somewhere. If NEWI2PAT exists and sets
4368 I3DEST, the death must be somewhere before I2, not I3. If we passed I3
4369 in that case, it might delete I2. Similarly for I2 and I1.
4370 Show an additional death due to the REG_DEAD note we make here. If
4371 we discard it in distribute_notes, we will decrement it again. */
4373 if (i3dest_killed)
4375 rtx new_note = alloc_reg_note (REG_DEAD, i3dest_killed, NULL_RTX);
4376 if (newi2pat && reg_set_p (i3dest_killed, newi2pat))
4377 distribute_notes (new_note, NULL, i2, NULL, elim_i2,
4378 elim_i1, elim_i0);
4379 else
4380 distribute_notes (new_note, NULL, i3, newi2pat ? i2 : NULL,
4381 elim_i2, elim_i1, elim_i0);
4384 if (i2dest_in_i2src)
4386 rtx new_note = alloc_reg_note (REG_DEAD, i2dest, NULL_RTX);
4387 if (newi2pat && reg_set_p (i2dest, newi2pat))
4388 distribute_notes (new_note, NULL, i2, NULL, NULL_RTX,
4389 NULL_RTX, NULL_RTX);
4390 else
4391 distribute_notes (new_note, NULL, i3, newi2pat ? i2 : NULL,
4392 NULL_RTX, NULL_RTX, NULL_RTX);
4395 if (i1dest_in_i1src)
4397 rtx new_note = alloc_reg_note (REG_DEAD, i1dest, NULL_RTX);
4398 if (newi2pat && reg_set_p (i1dest, newi2pat))
4399 distribute_notes (new_note, NULL, i2, NULL, NULL_RTX,
4400 NULL_RTX, NULL_RTX);
4401 else
4402 distribute_notes (new_note, NULL, i3, newi2pat ? i2 : NULL,
4403 NULL_RTX, NULL_RTX, NULL_RTX);
4406 if (i0dest_in_i0src)
4408 rtx new_note = alloc_reg_note (REG_DEAD, i0dest, NULL_RTX);
4409 if (newi2pat && reg_set_p (i0dest, newi2pat))
4410 distribute_notes (new_note, NULL, i2, NULL, NULL_RTX,
4411 NULL_RTX, NULL_RTX);
4412 else
4413 distribute_notes (new_note, NULL, i3, newi2pat ? i2 : NULL,
4414 NULL_RTX, NULL_RTX, NULL_RTX);
4417 distribute_links (i3links);
4418 distribute_links (i2links);
4419 distribute_links (i1links);
4420 distribute_links (i0links);
4422 if (REG_P (i2dest))
4424 struct insn_link *link;
4425 rtx_insn *i2_insn = 0;
4426 rtx i2_val = 0, set;
4428 /* The insn that used to set this register doesn't exist, and
4429 this life of the register may not exist either. See if one of
4430 I3's links points to an insn that sets I2DEST. If it does,
4431 that is now the last known value for I2DEST. If we don't update
4432 this and I2 set the register to a value that depended on its old
4433 contents, we will get confused. If this insn is used, things
4434 will be set correctly in combine_instructions. */
4435 FOR_EACH_LOG_LINK (link, i3)
4436 if ((set = single_set (link->insn)) != 0
4437 && rtx_equal_p (i2dest, SET_DEST (set)))
4438 i2_insn = link->insn, i2_val = SET_SRC (set);
4440 record_value_for_reg (i2dest, i2_insn, i2_val);
4442 /* If the reg formerly set in I2 died only once and that was in I3,
4443 zero its use count so it won't make `reload' do any work. */
4444 if (! added_sets_2
4445 && (newi2pat == 0 || ! reg_mentioned_p (i2dest, newi2pat))
4446 && ! i2dest_in_i2src)
4447 INC_REG_N_SETS (REGNO (i2dest), -1);
4450 if (i1 && REG_P (i1dest))
4452 struct insn_link *link;
4453 rtx_insn *i1_insn = 0;
4454 rtx i1_val = 0, set;
4456 FOR_EACH_LOG_LINK (link, i3)
4457 if ((set = single_set (link->insn)) != 0
4458 && rtx_equal_p (i1dest, SET_DEST (set)))
4459 i1_insn = link->insn, i1_val = SET_SRC (set);
4461 record_value_for_reg (i1dest, i1_insn, i1_val);
4463 if (! added_sets_1 && ! i1dest_in_i1src)
4464 INC_REG_N_SETS (REGNO (i1dest), -1);
4467 if (i0 && REG_P (i0dest))
4469 struct insn_link *link;
4470 rtx_insn *i0_insn = 0;
4471 rtx i0_val = 0, set;
4473 FOR_EACH_LOG_LINK (link, i3)
4474 if ((set = single_set (link->insn)) != 0
4475 && rtx_equal_p (i0dest, SET_DEST (set)))
4476 i0_insn = link->insn, i0_val = SET_SRC (set);
4478 record_value_for_reg (i0dest, i0_insn, i0_val);
4480 if (! added_sets_0 && ! i0dest_in_i0src)
4481 INC_REG_N_SETS (REGNO (i0dest), -1);
4484 /* Update reg_stat[].nonzero_bits et al for any changes that may have
4485 been made to this insn. The order is important, because newi2pat
4486 can affect nonzero_bits of newpat. */
4487 if (newi2pat)
4488 note_stores (newi2pat, set_nonzero_bits_and_sign_copies, NULL);
4489 note_stores (newpat, set_nonzero_bits_and_sign_copies, NULL);
4492 if (undobuf.other_insn != NULL_RTX)
4494 if (dump_file)
4496 fprintf (dump_file, "modifying other_insn ");
4497 dump_insn_slim (dump_file, undobuf.other_insn);
4499 df_insn_rescan (undobuf.other_insn);
4502 if (i0 && !(NOTE_P (i0) && (NOTE_KIND (i0) == NOTE_INSN_DELETED)))
4504 if (dump_file)
4506 fprintf (dump_file, "modifying insn i0 ");
4507 dump_insn_slim (dump_file, i0);
4509 df_insn_rescan (i0);
4512 if (i1 && !(NOTE_P (i1) && (NOTE_KIND (i1) == NOTE_INSN_DELETED)))
4514 if (dump_file)
4516 fprintf (dump_file, "modifying insn i1 ");
4517 dump_insn_slim (dump_file, i1);
4519 df_insn_rescan (i1);
4522 if (i2 && !(NOTE_P (i2) && (NOTE_KIND (i2) == NOTE_INSN_DELETED)))
4524 if (dump_file)
4526 fprintf (dump_file, "modifying insn i2 ");
4527 dump_insn_slim (dump_file, i2);
4529 df_insn_rescan (i2);
4532 if (i3 && !(NOTE_P (i3) && (NOTE_KIND (i3) == NOTE_INSN_DELETED)))
4534 if (dump_file)
4536 fprintf (dump_file, "modifying insn i3 ");
4537 dump_insn_slim (dump_file, i3);
4539 df_insn_rescan (i3);
4542 /* Set new_direct_jump_p if a new return or simple jump instruction
4543 has been created. Adjust the CFG accordingly. */
4544 if (returnjump_p (i3) || any_uncondjump_p (i3))
4546 *new_direct_jump_p = 1;
4547 mark_jump_label (PATTERN (i3), i3, 0);
4548 update_cfg_for_uncondjump (i3);
4551 if (undobuf.other_insn != NULL_RTX
4552 && (returnjump_p (undobuf.other_insn)
4553 || any_uncondjump_p (undobuf.other_insn)))
4555 *new_direct_jump_p = 1;
4556 update_cfg_for_uncondjump (undobuf.other_insn);
4559 /* A noop might also need cleaning up of CFG, if it comes from the
4560 simplification of a jump. */
4561 if (JUMP_P (i3)
4562 && GET_CODE (newpat) == SET
4563 && SET_SRC (newpat) == pc_rtx
4564 && SET_DEST (newpat) == pc_rtx)
4566 *new_direct_jump_p = 1;
4567 update_cfg_for_uncondjump (i3);
4570 if (undobuf.other_insn != NULL_RTX
4571 && JUMP_P (undobuf.other_insn)
4572 && GET_CODE (PATTERN (undobuf.other_insn)) == SET
4573 && SET_SRC (PATTERN (undobuf.other_insn)) == pc_rtx
4574 && SET_DEST (PATTERN (undobuf.other_insn)) == pc_rtx)
4576 *new_direct_jump_p = 1;
4577 update_cfg_for_uncondjump (undobuf.other_insn);
4580 combine_successes++;
4581 undo_commit ();
4583 if (added_links_insn
4584 && (newi2pat == 0 || DF_INSN_LUID (added_links_insn) < DF_INSN_LUID (i2))
4585 && DF_INSN_LUID (added_links_insn) < DF_INSN_LUID (i3))
4586 return added_links_insn;
4587 else
4588 return newi2pat ? i2 : i3;
4591 /* Undo all the modifications recorded in undobuf. */
4593 static void
4594 undo_all (void)
4596 struct undo *undo, *next;
4598 for (undo = undobuf.undos; undo; undo = next)
4600 next = undo->next;
4601 switch (undo->kind)
4603 case UNDO_RTX:
4604 *undo->where.r = undo->old_contents.r;
4605 break;
4606 case UNDO_INT:
4607 *undo->where.i = undo->old_contents.i;
4608 break;
4609 case UNDO_MODE:
4610 adjust_reg_mode (*undo->where.r, undo->old_contents.m);
4611 break;
4612 case UNDO_LINKS:
4613 *undo->where.l = undo->old_contents.l;
4614 break;
4615 default:
4616 gcc_unreachable ();
4619 undo->next = undobuf.frees;
4620 undobuf.frees = undo;
4623 undobuf.undos = 0;
4626 /* We've committed to accepting the changes we made. Move all
4627 of the undos to the free list. */
4629 static void
4630 undo_commit (void)
4632 struct undo *undo, *next;
4634 for (undo = undobuf.undos; undo; undo = next)
4636 next = undo->next;
4637 undo->next = undobuf.frees;
4638 undobuf.frees = undo;
4640 undobuf.undos = 0;
4643 /* Find the innermost point within the rtx at LOC, possibly LOC itself,
4644 where we have an arithmetic expression and return that point. LOC will
4645 be inside INSN.
4647 try_combine will call this function to see if an insn can be split into
4648 two insns. */
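 /* For example, in (set (reg A) (plus (reg B) (const_int 0x12345678)))
    the out-of-range constant may be the split point, so that it can be
    computed into a separate register by a new I2.  */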
4650 static rtx *
4651 find_split_point (rtx *loc, rtx_insn *insn, bool set_src)
4653 rtx x = *loc;
4654 enum rtx_code code = GET_CODE (x);
4655 rtx *split;
4656 unsigned HOST_WIDE_INT len = 0;
4657 HOST_WIDE_INT pos = 0;
4658 int unsignedp = 0;
4659 rtx inner = NULL_RTX;
4661 /* First special-case some codes. */
4662 switch (code)
4664 case SUBREG:
4665 #ifdef INSN_SCHEDULING
4666 /* If we are making a paradoxical SUBREG invalid, it becomes a split
4667 point. */
4668 if (MEM_P (SUBREG_REG (x)))
4669 return loc;
4670 #endif
4671 return find_split_point (&SUBREG_REG (x), insn, false);
4673 case MEM:
4674 #ifdef HAVE_lo_sum
4675 /* If we have (mem (const ..)) or (mem (symbol_ref ...)), split it
4676 using LO_SUM and HIGH. */
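 /* For example, (mem (symbol_ref "foo")) is rewritten as
      (mem (lo_sum (high (symbol_ref "foo")) (symbol_ref "foo")))
    and the HIGH part becomes the split point.  */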
4677 if (GET_CODE (XEXP (x, 0)) == CONST
4678 || GET_CODE (XEXP (x, 0)) == SYMBOL_REF)
4680 machine_mode address_mode = get_address_mode (x);
4682 SUBST (XEXP (x, 0),
4683 gen_rtx_LO_SUM (address_mode,
4684 gen_rtx_HIGH (address_mode, XEXP (x, 0)),
4685 XEXP (x, 0)));
4686 return &XEXP (XEXP (x, 0), 0);
4688 #endif
4690 /* If we have a PLUS whose second operand is a constant and the
4691 address is not valid, perhaps we can split it up using
4692 the machine-specific way to split large constants. We use
4693 the first pseudo-reg (one of the virtual regs) as a placeholder;
4694 it will not remain in the result. */
4695 if (GET_CODE (XEXP (x, 0)) == PLUS
4696 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
4697 && ! memory_address_addr_space_p (GET_MODE (x), XEXP (x, 0),
4698 MEM_ADDR_SPACE (x)))
4700 rtx reg = regno_reg_rtx[FIRST_PSEUDO_REGISTER];
4701 rtx_insn *seq = combine_split_insns (gen_rtx_SET (VOIDmode, reg,
4702 XEXP (x, 0)),
4703 subst_insn);
4705 /* This should have produced two insns, each of which sets our
4706 placeholder. If the source of the second is a valid address,
4707 we can put both sources together and make a split point
4708 in the middle. */
4710 if (seq
4711 && NEXT_INSN (seq) != NULL_RTX
4712 && NEXT_INSN (NEXT_INSN (seq)) == NULL_RTX
4713 && NONJUMP_INSN_P (seq)
4714 && GET_CODE (PATTERN (seq)) == SET
4715 && SET_DEST (PATTERN (seq)) == reg
4716 && ! reg_mentioned_p (reg,
4717 SET_SRC (PATTERN (seq)))
4718 && NONJUMP_INSN_P (NEXT_INSN (seq))
4719 && GET_CODE (PATTERN (NEXT_INSN (seq))) == SET
4720 && SET_DEST (PATTERN (NEXT_INSN (seq))) == reg
4721 && memory_address_addr_space_p
4722 (GET_MODE (x), SET_SRC (PATTERN (NEXT_INSN (seq))),
4723 MEM_ADDR_SPACE (x)))
4725 rtx src1 = SET_SRC (PATTERN (seq));
4726 rtx src2 = SET_SRC (PATTERN (NEXT_INSN (seq)));
4728 /* Replace the placeholder in SRC2 with SRC1. If we can
4729 find where in SRC2 it was placed, that can become our
4730 split point and we can replace this address with SRC2.
4731 Just try two obvious places. */
4733 src2 = replace_rtx (src2, reg, src1);
4734 split = 0;
4735 if (XEXP (src2, 0) == src1)
4736 split = &XEXP (src2, 0);
4737 else if (GET_RTX_FORMAT (GET_CODE (XEXP (src2, 0)))[0] == 'e'
4738 && XEXP (XEXP (src2, 0), 0) == src1)
4739 split = &XEXP (XEXP (src2, 0), 0);
4741 if (split)
4743 SUBST (XEXP (x, 0), src2);
4744 return split;
4748 /* If that didn't work, perhaps the first operand is complex and
4749 needs to be computed separately, so make a split point there.
4750 This will occur on machines that just support REG + CONST
4751 and have a constant moved through some previous computation. */
4753 else if (!OBJECT_P (XEXP (XEXP (x, 0), 0))
4754 && ! (GET_CODE (XEXP (XEXP (x, 0), 0)) == SUBREG
4755 && OBJECT_P (SUBREG_REG (XEXP (XEXP (x, 0), 0)))))
4756 return &XEXP (XEXP (x, 0), 0);
4759 /* If we have a PLUS whose first operand is complex, try computing it
4760 separately by making a split there. */
4761 if (GET_CODE (XEXP (x, 0)) == PLUS
4762 && ! memory_address_addr_space_p (GET_MODE (x), XEXP (x, 0),
4763 MEM_ADDR_SPACE (x))
4764 && ! OBJECT_P (XEXP (XEXP (x, 0), 0))
4765 && ! (GET_CODE (XEXP (XEXP (x, 0), 0)) == SUBREG
4766 && OBJECT_P (SUBREG_REG (XEXP (XEXP (x, 0), 0)))))
4767 return &XEXP (XEXP (x, 0), 0);
4768 break;
4770 case SET:
4771 #ifdef HAVE_cc0
4772 /* If SET_DEST is CC0 and SET_SRC is not an operand, a COMPARE, or a
4773 ZERO_EXTRACT, the most likely reason why this doesn't match is that
4774 we need to put the operand into a register. So split at that
4775 point. */
4777 if (SET_DEST (x) == cc0_rtx
4778 && GET_CODE (SET_SRC (x)) != COMPARE
4779 && GET_CODE (SET_SRC (x)) != ZERO_EXTRACT
4780 && !OBJECT_P (SET_SRC (x))
4781 && ! (GET_CODE (SET_SRC (x)) == SUBREG
4782 && OBJECT_P (SUBREG_REG (SET_SRC (x)))))
4783 return &SET_SRC (x);
4784 #endif
4786 /* See if we can split SET_SRC as it stands. */
4787 split = find_split_point (&SET_SRC (x), insn, true);
4788 if (split && split != &SET_SRC (x))
4789 return split;
4791 /* See if we can split SET_DEST as it stands. */
4792 split = find_split_point (&SET_DEST (x), insn, false);
4793 if (split && split != &SET_DEST (x))
4794 return split;
4796 /* See if this is a bitfield assignment with everything constant. If
4797 so, this is an IOR of an AND, so split it into that. */
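 /* For example, storing VAL into a LEN-bit field at position POS of
    (reg D) becomes
      (set (reg D) (ior (and (reg D) (const_int ~(MASK << POS)))
                        (const_int VAL << POS)))
    where MASK has the low LEN bits set (POS adjusted for BITS_BIG_ENDIAN).  */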
4798 if (GET_CODE (SET_DEST (x)) == ZERO_EXTRACT
4799 && HWI_COMPUTABLE_MODE_P (GET_MODE (XEXP (SET_DEST (x), 0)))
4800 && CONST_INT_P (XEXP (SET_DEST (x), 1))
4801 && CONST_INT_P (XEXP (SET_DEST (x), 2))
4802 && CONST_INT_P (SET_SRC (x))
4803 && ((INTVAL (XEXP (SET_DEST (x), 1))
4804 + INTVAL (XEXP (SET_DEST (x), 2)))
4805 <= GET_MODE_PRECISION (GET_MODE (XEXP (SET_DEST (x), 0))))
4806 && ! side_effects_p (XEXP (SET_DEST (x), 0)))
4808 HOST_WIDE_INT pos = INTVAL (XEXP (SET_DEST (x), 2));
4809 unsigned HOST_WIDE_INT len = INTVAL (XEXP (SET_DEST (x), 1));
4810 unsigned HOST_WIDE_INT src = INTVAL (SET_SRC (x));
4811 rtx dest = XEXP (SET_DEST (x), 0);
4812 machine_mode mode = GET_MODE (dest);
4813 unsigned HOST_WIDE_INT mask
4814 = ((unsigned HOST_WIDE_INT) 1 << len) - 1;
4815 rtx or_mask;
4817 if (BITS_BIG_ENDIAN)
4818 pos = GET_MODE_PRECISION (mode) - len - pos;
4820 or_mask = gen_int_mode (src << pos, mode);
4821 if (src == mask)
4822 SUBST (SET_SRC (x),
4823 simplify_gen_binary (IOR, mode, dest, or_mask));
4824 else
4826 rtx negmask = gen_int_mode (~(mask << pos), mode);
4827 SUBST (SET_SRC (x),
4828 simplify_gen_binary (IOR, mode,
4829 simplify_gen_binary (AND, mode,
4830 dest, negmask),
4831 or_mask));
4834 SUBST (SET_DEST (x), dest);
4836 split = find_split_point (&SET_SRC (x), insn, true);
4837 if (split && split != &SET_SRC (x))
4838 return split;
4841 /* Otherwise, see if this is an operation that we can split into two.
4842 If so, try to split that. */
4843 code = GET_CODE (SET_SRC (x));
4845 switch (code)
4847 case AND:
4848 /* If we are AND'ing with a large constant that is only a single
4849 bit and the result is only being used in a context where we
4850 need to know if it is zero or nonzero, replace it with a bit
4851 extraction. This will avoid the large constant, which might
4852 have taken more than one insn to make. If the constant were
4853 not a valid argument to the AND but took only one insn to make,
4854 this is no worse, but if it took more than one insn, it will
4855 be better. */
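 /* For example, (set (reg T) (and (reg X) (const_int 0x8000))), where
    (reg T) is used only in a comparison against zero, is replaced by a
    single-bit extraction of bit 15 of (reg X).  */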
4857 if (CONST_INT_P (XEXP (SET_SRC (x), 1))
4858 && REG_P (XEXP (SET_SRC (x), 0))
4859 && (pos = exact_log2 (UINTVAL (XEXP (SET_SRC (x), 1)))) >= 7
4860 && REG_P (SET_DEST (x))
4861 && (split = find_single_use (SET_DEST (x), insn, NULL)) != 0
4862 && (GET_CODE (*split) == EQ || GET_CODE (*split) == NE)
4863 && XEXP (*split, 0) == SET_DEST (x)
4864 && XEXP (*split, 1) == const0_rtx)
4866 rtx extraction = make_extraction (GET_MODE (SET_DEST (x)),
4867 XEXP (SET_SRC (x), 0),
4868 pos, NULL_RTX, 1, 1, 0, 0);
4869 if (extraction != 0)
4871 SUBST (SET_SRC (x), extraction);
4872 return find_split_point (loc, insn, false);
4875 break;
4877 case NE:
4878 /* If STORE_FLAG_VALUE is -1 and this is (NE X 0) with only one bit of X
4879 known to be on, this can be converted into a NEG of a shift. */
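 /* For example, if only bit 5 of (reg X) can be nonzero,
    (ne (reg X) (const_int 0)) becomes
    (neg (lshiftrt (reg X) (const_int 5))).  */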
4880 if (STORE_FLAG_VALUE == -1 && XEXP (SET_SRC (x), 1) == const0_rtx
4881 && GET_MODE (SET_SRC (x)) == GET_MODE (XEXP (SET_SRC (x), 0))
4882 && 1 <= (pos = exact_log2
4883 (nonzero_bits (XEXP (SET_SRC (x), 0),
4884 GET_MODE (XEXP (SET_SRC (x), 0))))))
4886 machine_mode mode = GET_MODE (XEXP (SET_SRC (x), 0));
4888 SUBST (SET_SRC (x),
4889 gen_rtx_NEG (mode,
4890 gen_rtx_LSHIFTRT (mode,
4891 XEXP (SET_SRC (x), 0),
4892 GEN_INT (pos))));
4894 split = find_split_point (&SET_SRC (x), insn, true);
4895 if (split && split != &SET_SRC (x))
4896 return split;
4898 break;
4900 case SIGN_EXTEND:
4901 inner = XEXP (SET_SRC (x), 0);
4903 /* We can't optimize if either mode is a partial integer
4904 mode as we don't know how many bits are significant
4905 in those modes. */
4906 if (GET_MODE_CLASS (GET_MODE (inner)) == MODE_PARTIAL_INT
4907 || GET_MODE_CLASS (GET_MODE (SET_SRC (x))) == MODE_PARTIAL_INT)
4908 break;
4910 pos = 0;
4911 len = GET_MODE_PRECISION (GET_MODE (inner));
4912 unsignedp = 0;
4913 break;
4915 case SIGN_EXTRACT:
4916 case ZERO_EXTRACT:
4917 if (CONST_INT_P (XEXP (SET_SRC (x), 1))
4918 && CONST_INT_P (XEXP (SET_SRC (x), 2)))
4920 inner = XEXP (SET_SRC (x), 0);
4921 len = INTVAL (XEXP (SET_SRC (x), 1));
4922 pos = INTVAL (XEXP (SET_SRC (x), 2));
4924 if (BITS_BIG_ENDIAN)
4925 pos = GET_MODE_PRECISION (GET_MODE (inner)) - len - pos;
4926 unsignedp = (code == ZERO_EXTRACT);
4928 break;
4930 default:
4931 break;
4934 if (len && pos >= 0
4935 && pos + len <= GET_MODE_PRECISION (GET_MODE (inner)))
4937 machine_mode mode = GET_MODE (SET_SRC (x));
4939 /* For unsigned, we have a choice of a shift followed by an
4940 AND or two shifts. Use two shifts for field sizes where the
4941 constant might be too large. We assume here that we can
4942 always at least get 8-bit constants in an AND insn, which is
4943 true for every current RISC. */
4945 if (unsignedp && len <= 8)
4947 unsigned HOST_WIDE_INT mask
4948 = ((unsigned HOST_WIDE_INT) 1 << len) - 1;
4949 SUBST (SET_SRC (x),
4950 gen_rtx_AND (mode,
4951 gen_rtx_LSHIFTRT
4952 (mode, gen_lowpart (mode, inner),
4953 GEN_INT (pos)),
4954 gen_int_mode (mask, mode)));
4956 split = find_split_point (&SET_SRC (x), insn, true);
4957 if (split && split != &SET_SRC (x))
4958 return split;
4960 else
4962 SUBST (SET_SRC (x),
4963 gen_rtx_fmt_ee
4964 (unsignedp ? LSHIFTRT : ASHIFTRT, mode,
4965 gen_rtx_ASHIFT (mode,
4966 gen_lowpart (mode, inner),
4967 GEN_INT (GET_MODE_PRECISION (mode)
4968 - len - pos)),
4969 GEN_INT (GET_MODE_PRECISION (mode) - len)));
4971 split = find_split_point (&SET_SRC (x), insn, true);
4972 if (split && split != &SET_SRC (x))
4973 return split;
4977 /* See if this is a simple operation with a constant as the second
4978 operand. It might be that this constant is out of range and hence
4979 could be used as a split point. */
4980 if (BINARY_P (SET_SRC (x))
4981 && CONSTANT_P (XEXP (SET_SRC (x), 1))
4982 && (OBJECT_P (XEXP (SET_SRC (x), 0))
4983 || (GET_CODE (XEXP (SET_SRC (x), 0)) == SUBREG
4984 && OBJECT_P (SUBREG_REG (XEXP (SET_SRC (x), 0))))))
4985 return &XEXP (SET_SRC (x), 1);
4987 /* Finally, see if this is a simple operation with its first operand
4988 not in a register. The operation might require this operand in a
4989 register, so return it as a split point. We can always do this
4990 because if the first operand were another operation, we would have
4991 already found it as a split point. */
4992 if ((BINARY_P (SET_SRC (x)) || UNARY_P (SET_SRC (x)))
4993 && ! register_operand (XEXP (SET_SRC (x), 0), VOIDmode))
4994 return &XEXP (SET_SRC (x), 0);
4996 return 0;
4998 case AND:
4999 case IOR:
5000 /* We write NOR as (and (not A) (not B)), but if we don't have a NOR,
5001 it is better to write this as (not (ior A B)) so we can split it.
5002 Similarly for IOR. */
5003 if (GET_CODE (XEXP (x, 0)) == NOT && GET_CODE (XEXP (x, 1)) == NOT)
5005 SUBST (*loc,
5006 gen_rtx_NOT (GET_MODE (x),
5007 gen_rtx_fmt_ee (code == IOR ? AND : IOR,
5008 GET_MODE (x),
5009 XEXP (XEXP (x, 0), 0),
5010 XEXP (XEXP (x, 1), 0))));
5011 return find_split_point (loc, insn, set_src);
5014 /* Many RISC machines have a large set of logical insns. If the
5015 second operand is a NOT, put it first so we will try to split the
5016 other operand first. */
5017 if (GET_CODE (XEXP (x, 1)) == NOT)
5019 rtx tem = XEXP (x, 0);
5020 SUBST (XEXP (x, 0), XEXP (x, 1));
5021 SUBST (XEXP (x, 1), tem);
5023 break;
5025 case PLUS:
5026 case MINUS:
5027 /* Canonicalization can produce (minus A (mult B C)), where C is a
5028 constant. It may be better to try splitting (plus (mult B -C) A)
5029 instead if this isn't a multiply by a power of two. */
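 /* For example, (minus (reg A) (mult (reg B) (const_int 3))) may split
    better as (plus (mult (reg B) (const_int -3)) (reg A)).  */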
5030 if (set_src && code == MINUS && GET_CODE (XEXP (x, 1)) == MULT
5031 && GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT
5032 && exact_log2 (INTVAL (XEXP (XEXP (x, 1), 1))) < 0)
5034 machine_mode mode = GET_MODE (x);
5035 unsigned HOST_WIDE_INT this_int = INTVAL (XEXP (XEXP (x, 1), 1));
5036 HOST_WIDE_INT other_int = trunc_int_for_mode (-this_int, mode);
5037 SUBST (*loc, gen_rtx_PLUS (mode,
5038 gen_rtx_MULT (mode,
5039 XEXP (XEXP (x, 1), 0),
5040 gen_int_mode (other_int,
5041 mode)),
5042 XEXP (x, 0)));
5043 return find_split_point (loc, insn, set_src);
5046 /* Split at a multiply-accumulate instruction. However if this is
5047 the SET_SRC, we likely do not have such an instruction and it's
5048 worthless to try this split. */
5049 if (!set_src && GET_CODE (XEXP (x, 0)) == MULT)
5050 return loc;
5052 default:
5053 break;
5056 /* Otherwise, select our actions depending on our rtx class. */
5057 switch (GET_RTX_CLASS (code))
5059 case RTX_BITFIELD_OPS: /* This is ZERO_EXTRACT and SIGN_EXTRACT. */
5060 case RTX_TERNARY:
5061 split = find_split_point (&XEXP (x, 2), insn, false);
5062 if (split)
5063 return split;
5064 /* ... fall through ... */
5065 case RTX_BIN_ARITH:
5066 case RTX_COMM_ARITH:
5067 case RTX_COMPARE:
5068 case RTX_COMM_COMPARE:
5069 split = find_split_point (&XEXP (x, 1), insn, false);
5070 if (split)
5071 return split;
5072 /* ... fall through ... */
5073 case RTX_UNARY:
5074 /* Some machines have (and (shift ...) ...) insns. If X is not
5075 an AND, but XEXP (X, 0) is, use it as our split point. */
5076 if (GET_CODE (x) != AND && GET_CODE (XEXP (x, 0)) == AND)
5077 return &XEXP (x, 0);
5079 split = find_split_point (&XEXP (x, 0), insn, false);
5080 if (split)
5081 return split;
5082 return loc;
5084 default:
5085 /* Otherwise, we don't have a split point. */
5086 return 0;
5090 /* Throughout X, replace FROM with TO, and return the result.
5091 The result is TO if X is FROM;
5092 otherwise the result is X, but its contents may have been modified.
5093 If they were modified, a record was made in undobuf so that
5094 undo_all will (among other things) return X to its original state.
5096 If the number of changes necessary is too much to record to undo,
5097 the excess changes are not made, so the result is invalid.
5098 The changes already made can still be undone.
5099 undobuf.num_undo is incremented for such changes, so by testing that
5100 the caller can tell whether the result is valid.
5102 `n_occurrences' is incremented each time FROM is replaced.
5104 IN_DEST is nonzero if we are processing the SET_DEST of a SET.
5106 IN_COND is nonzero if we are at the top level of a condition.
5108 UNIQUE_COPY is nonzero if each substitution must be unique. We do this
5109 by copying if `n_occurrences' is nonzero. */
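 /* For example, when combining I2 = (set (reg 100) (plus (reg 101)
    (const_int 4))) into I3, try_combine calls subst with FROM = (reg 100)
    and TO = (plus (reg 101) (const_int 4)).  */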
5111 static rtx
5112 subst (rtx x, rtx from, rtx to, int in_dest, int in_cond, int unique_copy)
5114 enum rtx_code code = GET_CODE (x);
5115 machine_mode op0_mode = VOIDmode;
5116 const char *fmt;
5117 int len, i;
5118 rtx new_rtx;
5120 /* Two expressions are equal if they are identical copies of a shared
5121 RTX or if they are both registers with the same register number
5122 and mode. */
5124 #define COMBINE_RTX_EQUAL_P(X,Y) \
5125 ((X) == (Y) \
5126 || (REG_P (X) && REG_P (Y) \
5127 && REGNO (X) == REGNO (Y) && GET_MODE (X) == GET_MODE (Y)))
5129 /* Do not substitute into clobbers of regs -- this will never result in
5130 valid RTL. */
5131 if (GET_CODE (x) == CLOBBER && REG_P (XEXP (x, 0)))
5132 return x;
5134 if (! in_dest && COMBINE_RTX_EQUAL_P (x, from))
5136 n_occurrences++;
5137 return (unique_copy && n_occurrences > 1 ? copy_rtx (to) : to);
5140 /* If X and FROM are the same register but different modes, they
5141 will not have been seen as equal above. However, the log links code
5142 will make a LOG_LINKS entry for that case. If we do nothing, we
5143 will try to rerecognize our original insn and, when it succeeds,
5144 we will delete the feeding insn, which is incorrect.
5146 So force this insn not to match in this (rare) case. */
5147 if (! in_dest && code == REG && REG_P (from)
5148 && reg_overlap_mentioned_p (x, from))
5149 return gen_rtx_CLOBBER (GET_MODE (x), const0_rtx);
5151 /* If this is an object, we are done unless it is a MEM or LO_SUM, both
5152 of which may contain things that can be combined. */
5153 if (code != MEM && code != LO_SUM && OBJECT_P (x))
5154 return x;
5156 /* It is possible to have a subexpression appear twice in the insn.
5157 Suppose that FROM is a register that appears within TO.
5158 Then, after that subexpression has been scanned once by `subst',
5159 the second time it is scanned, TO may be found. If we were
5160 to scan TO here, we would find FROM within it and create a
5161 self-referent rtl structure which is completely wrong. */
5162 if (COMBINE_RTX_EQUAL_P (x, to))
5163 return to;
5165 /* Parallel asm_operands need special attention because all of the
5166 inputs are shared across the arms. Furthermore, unsharing the
5167 rtl results in recognition failures. Failure to handle this case
5168 specially can result in circular rtl.
5170 Solve this by doing a normal pass across the first entry of the
5171 parallel, and only processing the SET_DESTs of the subsequent
5172 entries. Ug. */
5174 if (code == PARALLEL
5175 && GET_CODE (XVECEXP (x, 0, 0)) == SET
5176 && GET_CODE (SET_SRC (XVECEXP (x, 0, 0))) == ASM_OPERANDS)
5178 new_rtx = subst (XVECEXP (x, 0, 0), from, to, 0, 0, unique_copy);
5180 /* If this substitution failed, this whole thing fails. */
5181 if (GET_CODE (new_rtx) == CLOBBER
5182 && XEXP (new_rtx, 0) == const0_rtx)
5183 return new_rtx;
5185 SUBST (XVECEXP (x, 0, 0), new_rtx);
5187 for (i = XVECLEN (x, 0) - 1; i >= 1; i--)
5189 rtx dest = SET_DEST (XVECEXP (x, 0, i));
5191 if (!REG_P (dest)
5192 && GET_CODE (dest) != CC0
5193 && GET_CODE (dest) != PC)
5195 new_rtx = subst (dest, from, to, 0, 0, unique_copy);
5197 /* If this substitution failed, this whole thing fails. */
5198 if (GET_CODE (new_rtx) == CLOBBER
5199 && XEXP (new_rtx, 0) == const0_rtx)
5200 return new_rtx;
5202 SUBST (SET_DEST (XVECEXP (x, 0, i)), new_rtx);
5206 else
5208 len = GET_RTX_LENGTH (code);
5209 fmt = GET_RTX_FORMAT (code);
5211 /* We don't need to process a SET_DEST that is a register, CC0,
5212 or PC, so set up to skip this common case. All other cases
5213 where we want to suppress replacing something inside a
5214 SET_SRC are handled via the IN_DEST operand. */
5215 if (code == SET
5216 && (REG_P (SET_DEST (x))
5217 || GET_CODE (SET_DEST (x)) == CC0
5218 || GET_CODE (SET_DEST (x)) == PC))
5219 fmt = "ie";
5221 /* Get the mode of operand 0 in case X is now a SIGN_EXTEND of a
5222 constant. */
5223 if (fmt[0] == 'e')
5224 op0_mode = GET_MODE (XEXP (x, 0));
5226 for (i = 0; i < len; i++)
5228 if (fmt[i] == 'E')
5230 int j;
5231 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
5233 if (COMBINE_RTX_EQUAL_P (XVECEXP (x, i, j), from))
5235 new_rtx = (unique_copy && n_occurrences
5236 ? copy_rtx (to) : to);
5237 n_occurrences++;
5239 else
5241 new_rtx = subst (XVECEXP (x, i, j), from, to, 0, 0,
5242 unique_copy);
5244 /* If this substitution failed, this whole thing
5245 fails. */
5246 if (GET_CODE (new_rtx) == CLOBBER
5247 && XEXP (new_rtx, 0) == const0_rtx)
5248 return new_rtx;
5251 SUBST (XVECEXP (x, i, j), new_rtx);
5254 else if (fmt[i] == 'e')
5256 /* If this is a register being set, ignore it. */
5257 new_rtx = XEXP (x, i);
5258 if (in_dest
5259 && i == 0
5260 && (((code == SUBREG || code == ZERO_EXTRACT)
5261 && REG_P (new_rtx))
5262 || code == STRICT_LOW_PART))
5265 else if (COMBINE_RTX_EQUAL_P (XEXP (x, i), from))
5267 /* In general, don't install a subreg involving two
5268 modes not tieable. It can worsen register
5269 allocation, and can even make invalid reload
5270 insns, since the reg inside may need to be copied
5271 from in the outside mode, and that may be invalid
5272 if it is an fp reg copied in integer mode.
5274 We allow two exceptions to this: It is valid if
5275 it is inside another SUBREG and the mode of that
5276 SUBREG and the mode of the inside of TO is
5277 tieable and it is valid if X is a SET that copies
5278 FROM to CC0. */
5280 if (GET_CODE (to) == SUBREG
5281 && ! MODES_TIEABLE_P (GET_MODE (to),
5282 GET_MODE (SUBREG_REG (to)))
5283 && ! (code == SUBREG
5284 && MODES_TIEABLE_P (GET_MODE (x),
5285 GET_MODE (SUBREG_REG (to))))
5286 #ifdef HAVE_cc0
5287 && ! (code == SET && i == 1 && XEXP (x, 0) == cc0_rtx)
5288 #endif
5290 return gen_rtx_CLOBBER (VOIDmode, const0_rtx);
5292 if (code == SUBREG
5293 && REG_P (to)
5294 && REGNO (to) < FIRST_PSEUDO_REGISTER
5295 && simplify_subreg_regno (REGNO (to), GET_MODE (to),
5296 SUBREG_BYTE (x),
5297 GET_MODE (x)) < 0)
5298 return gen_rtx_CLOBBER (VOIDmode, const0_rtx);
5300 new_rtx = (unique_copy && n_occurrences ? copy_rtx (to) : to);
5301 n_occurrences++;
5303 else
5304 /* If we are in a SET_DEST, suppress most cases unless we
5305 have gone inside a MEM, in which case we want to
5306 simplify the address. We assume here that things that
5307 are actually part of the destination have their inner
5308 parts in the first expression. This is true for SUBREG,
5309 STRICT_LOW_PART, and ZERO_EXTRACT, which are the only
5310 things aside from REG and MEM that should appear in a
5311 SET_DEST. */
5312 new_rtx = subst (XEXP (x, i), from, to,
5313 (((in_dest
5314 && (code == SUBREG || code == STRICT_LOW_PART
5315 || code == ZERO_EXTRACT))
5316 || code == SET)
5317 && i == 0),
5318 code == IF_THEN_ELSE && i == 0,
5319 unique_copy);
5321 /* If we found that we will have to reject this combination,
5322 indicate that by returning the CLOBBER ourselves, rather than
5323 an expression containing it. This will speed things up as
5324 well as prevent accidents where two CLOBBERs are considered
5325 to be equal, thus producing an incorrect simplification. */
5327 if (GET_CODE (new_rtx) == CLOBBER && XEXP (new_rtx, 0) == const0_rtx)
5328 return new_rtx;
5330 if (GET_CODE (x) == SUBREG && CONST_SCALAR_INT_P (new_rtx))
5332 machine_mode mode = GET_MODE (x);
5334 x = simplify_subreg (GET_MODE (x), new_rtx,
5335 GET_MODE (SUBREG_REG (x)),
5336 SUBREG_BYTE (x));
5337 if (! x)
5338 x = gen_rtx_CLOBBER (mode, const0_rtx);
5340 else if (CONST_SCALAR_INT_P (new_rtx)
5341 && GET_CODE (x) == ZERO_EXTEND)
5343 x = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
5344 new_rtx, GET_MODE (XEXP (x, 0)));
5345 gcc_assert (x);
5347 else
5348 SUBST (XEXP (x, i), new_rtx);
5353 /* Check whether we are loading something from the constant pool via float
5354 extension; in that case we would undo the compress_float_constant
5355 optimization and degenerate the constant load into an immediate value. */
5356 if (GET_CODE (x) == FLOAT_EXTEND
5357 && MEM_P (XEXP (x, 0))
5358 && MEM_READONLY_P (XEXP (x, 0)))
5360 rtx tmp = avoid_constant_pool_reference (x);
5361 if (x != tmp)
5362 return x;
5365 /* Try to simplify X. If the simplification changed the code, it is likely
5366 that further simplification will help, so loop, but limit the number
5367 of repetitions that will be performed. */
5369 for (i = 0; i < 4; i++)
5371 /* If X is sufficiently simple, don't bother trying to do anything
5372 with it. */
5373 if (code != CONST_INT && code != REG && code != CLOBBER)
5374 x = combine_simplify_rtx (x, op0_mode, in_dest, in_cond);
5376 if (GET_CODE (x) == code)
5377 break;
5379 code = GET_CODE (x);
5381 /* We no longer know the original mode of operand 0 since we
5382 have changed the form of X.  */
5383 op0_mode = VOIDmode;
5386 return x;
5389 /* Simplify X, a piece of RTL. We just operate on the expression at the
5390 outer level; call `subst' to simplify recursively. Return the new
5391 expression.
5393 OP0_MODE is the original mode of XEXP (x, 0). IN_DEST is nonzero
5394 if we are inside a SET_DEST. IN_COND is nonzero if we are at the top level
5395 of a condition. */
5397 static rtx
5398 combine_simplify_rtx (rtx x, machine_mode op0_mode, int in_dest,
5399 int in_cond)
5401 enum rtx_code code = GET_CODE (x);
5402 machine_mode mode = GET_MODE (x);
5403 rtx temp;
5404 int i;
5406 /* If this is a commutative operation, put a constant last and a complex
5407 expression first. We don't need to do this for comparisons here. */
5408 if (COMMUTATIVE_ARITH_P (x)
5409 && swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
5411 temp = XEXP (x, 0);
5412 SUBST (XEXP (x, 0), XEXP (x, 1));
5413 SUBST (XEXP (x, 1), temp);
5416 /* If this is a simple operation applied to an IF_THEN_ELSE, try
5417 applying it to the arms of the IF_THEN_ELSE. This often simplifies
5418 things. Check for cases where both arms are testing the same
5419 condition.
5421 Don't do anything if all operands are very simple. */
5423 if ((BINARY_P (x)
5424 && ((!OBJECT_P (XEXP (x, 0))
5425 && ! (GET_CODE (XEXP (x, 0)) == SUBREG
5426 && OBJECT_P (SUBREG_REG (XEXP (x, 0)))))
5427 || (!OBJECT_P (XEXP (x, 1))
5428 && ! (GET_CODE (XEXP (x, 1)) == SUBREG
5429 && OBJECT_P (SUBREG_REG (XEXP (x, 1)))))))
5430 || (UNARY_P (x)
5431 && (!OBJECT_P (XEXP (x, 0))
5432 && ! (GET_CODE (XEXP (x, 0)) == SUBREG
5433 && OBJECT_P (SUBREG_REG (XEXP (x, 0)))))))
5435 rtx cond, true_rtx, false_rtx;
5437 cond = if_then_else_cond (x, &true_rtx, &false_rtx);
5438 if (cond != 0
5439 /* If everything is a comparison, what we have is highly unlikely
5440 to be simpler, so don't use it. */
5441 && ! (COMPARISON_P (x)
5442 && (COMPARISON_P (true_rtx) || COMPARISON_P (false_rtx))))
5444 rtx cop1 = const0_rtx;
5445 enum rtx_code cond_code = simplify_comparison (NE, &cond, &cop1);
5447 if (cond_code == NE && COMPARISON_P (cond))
5448 return x;
5450 /* Simplify the alternative arms; this may collapse the true and
5451 false arms to store-flag values. Be careful to use copy_rtx
5452 here since true_rtx or false_rtx might share RTL with x as a
5453 result of the if_then_else_cond call above. */
5454 true_rtx = subst (copy_rtx (true_rtx), pc_rtx, pc_rtx, 0, 0, 0);
5455 false_rtx = subst (copy_rtx (false_rtx), pc_rtx, pc_rtx, 0, 0, 0);
5457 /* If true_rtx and false_rtx are not general_operands, an if_then_else
5458 is unlikely to be simpler. */
5459 if (general_operand (true_rtx, VOIDmode)
5460 && general_operand (false_rtx, VOIDmode))
5462 enum rtx_code reversed;
5464 /* Restarting if we generate a store-flag expression will cause
5465 us to loop. Just drop through in this case. */
5467 /* If the result values are STORE_FLAG_VALUE and zero, we can
5468 just make the comparison operation. */
5469 if (true_rtx == const_true_rtx && false_rtx == const0_rtx)
5470 x = simplify_gen_relational (cond_code, mode, VOIDmode,
5471 cond, cop1);
5472 else if (true_rtx == const0_rtx && false_rtx == const_true_rtx
5473 && ((reversed = reversed_comparison_code_parts
5474 (cond_code, cond, cop1, NULL))
5475 != UNKNOWN))
5476 x = simplify_gen_relational (reversed, mode, VOIDmode,
5477 cond, cop1);
5479 /* Likewise, we can make the negate of a comparison operation
5480 if the result values are - STORE_FLAG_VALUE and zero. */
5481 else if (CONST_INT_P (true_rtx)
5482 && INTVAL (true_rtx) == - STORE_FLAG_VALUE
5483 && false_rtx == const0_rtx)
5484 x = simplify_gen_unary (NEG, mode,
5485 simplify_gen_relational (cond_code,
5486 mode, VOIDmode,
5487 cond, cop1),
5488 mode);
5489 else if (CONST_INT_P (false_rtx)
5490 && INTVAL (false_rtx) == - STORE_FLAG_VALUE
5491 && true_rtx == const0_rtx
5492 && ((reversed = reversed_comparison_code_parts
5493 (cond_code, cond, cop1, NULL))
5494 != UNKNOWN))
5495 x = simplify_gen_unary (NEG, mode,
5496 simplify_gen_relational (reversed,
5497 mode, VOIDmode,
5498 cond, cop1),
5499 mode);
5500 else
5501 return gen_rtx_IF_THEN_ELSE (mode,
5502 simplify_gen_relational (cond_code,
5503 mode,
5504 VOIDmode,
5505 cond,
5506 cop1),
5507 true_rtx, false_rtx);
5509 code = GET_CODE (x);
5510 op0_mode = VOIDmode;
5515 /* Try to fold this expression in case we have constants that weren't
5516 present before. */
5517 temp = 0;
5518 switch (GET_RTX_CLASS (code))
5520 case RTX_UNARY:
5521 if (op0_mode == VOIDmode)
5522 op0_mode = GET_MODE (XEXP (x, 0));
5523 temp = simplify_unary_operation (code, mode, XEXP (x, 0), op0_mode);
5524 break;
5525 case RTX_COMPARE:
5526 case RTX_COMM_COMPARE:
5528 machine_mode cmp_mode = GET_MODE (XEXP (x, 0));
5529 if (cmp_mode == VOIDmode)
5531 cmp_mode = GET_MODE (XEXP (x, 1));
5532 if (cmp_mode == VOIDmode)
5533 cmp_mode = op0_mode;
5535 temp = simplify_relational_operation (code, mode, cmp_mode,
5536 XEXP (x, 0), XEXP (x, 1));
5538 break;
5539 case RTX_COMM_ARITH:
5540 case RTX_BIN_ARITH:
5541 temp = simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
5542 break;
5543 case RTX_BITFIELD_OPS:
5544 case RTX_TERNARY:
5545 temp = simplify_ternary_operation (code, mode, op0_mode, XEXP (x, 0),
5546 XEXP (x, 1), XEXP (x, 2));
5547 break;
5548 default:
5549 break;
5552 if (temp)
5554 x = temp;
5555 code = GET_CODE (temp);
5556 op0_mode = VOIDmode;
5557 mode = GET_MODE (temp);
5560 /* First see if we can apply the inverse distributive law. */
5561 if (code == PLUS || code == MINUS
5562 || code == AND || code == IOR || code == XOR)
5564 x = apply_distributive_law (x);
5565 code = GET_CODE (x);
5566 op0_mode = VOIDmode;
5569 /* If CODE is an associative operation not otherwise handled, see if we
5570 can associate some operands. This can win if they are constants or
5571 if they are logically related (i.e. (a & b) & a). */
5572 if ((code == PLUS || code == MINUS || code == MULT || code == DIV
5573 || code == AND || code == IOR || code == XOR
5574 || code == SMAX || code == SMIN || code == UMAX || code == UMIN)
5575 && ((INTEGRAL_MODE_P (mode) && code != DIV)
5576 || (flag_associative_math && FLOAT_MODE_P (mode))))
5578 if (GET_CODE (XEXP (x, 0)) == code)
5580 rtx other = XEXP (XEXP (x, 0), 0);
5581 rtx inner_op0 = XEXP (XEXP (x, 0), 1);
5582 rtx inner_op1 = XEXP (x, 1);
5583 rtx inner;
5585 /* Make sure we pass the constant operand if any as the second
5586 one if this is a commutative operation. */
5587 if (CONSTANT_P (inner_op0) && COMMUTATIVE_ARITH_P (x))
5589 rtx tem = inner_op0;
5590 inner_op0 = inner_op1;
5591 inner_op1 = tem;
5593 inner = simplify_binary_operation (code == MINUS ? PLUS
5594 : code == DIV ? MULT
5595 : code,
5596 mode, inner_op0, inner_op1);
5598 /* For commutative operations, try the other pair if that one
5599 didn't simplify. */
5600 if (inner == 0 && COMMUTATIVE_ARITH_P (x))
5602 other = XEXP (XEXP (x, 0), 1);
5603 inner = simplify_binary_operation (code, mode,
5604 XEXP (XEXP (x, 0), 0),
5605 XEXP (x, 1));
5608 if (inner)
5609 return simplify_gen_binary (code, mode, other, inner);
5613 /* A little bit of algebraic simplification here. */
5614 switch (code)
5616 case MEM:
5617 /* Ensure that our address has any ASHIFTs converted to MULT in case
5618 address-recognizing predicates are called later. */
5619 temp = make_compound_operation (XEXP (x, 0), MEM);
5620 SUBST (XEXP (x, 0), temp);
5621 break;
5623 case SUBREG:
5624 if (op0_mode == VOIDmode)
5625 op0_mode = GET_MODE (SUBREG_REG (x));
5627 /* See if this can be moved to simplify_subreg. */
5628 if (CONSTANT_P (SUBREG_REG (x))
5629 && subreg_lowpart_offset (mode, op0_mode) == SUBREG_BYTE (x)
5630 /* Don't call gen_lowpart if the inner mode
5631 is VOIDmode and we cannot simplify it, as SUBREG without
5632 inner mode is invalid. */
5633 && (GET_MODE (SUBREG_REG (x)) != VOIDmode
5634 || gen_lowpart_common (mode, SUBREG_REG (x))))
5635 return gen_lowpart (mode, SUBREG_REG (x));
5637 if (GET_MODE_CLASS (GET_MODE (SUBREG_REG (x))) == MODE_CC)
5638 break;
5640 rtx temp;
5641 temp = simplify_subreg (mode, SUBREG_REG (x), op0_mode,
5642 SUBREG_BYTE (x));
5643 if (temp)
5644 return temp;
5646 /* If op is known to have all lower bits zero, the result is zero. */
5647 if (!in_dest
5648 && SCALAR_INT_MODE_P (mode)
5649 && SCALAR_INT_MODE_P (op0_mode)
5650 && GET_MODE_PRECISION (mode) < GET_MODE_PRECISION (op0_mode)
5651 && subreg_lowpart_offset (mode, op0_mode) == SUBREG_BYTE (x)
5652 && HWI_COMPUTABLE_MODE_P (op0_mode)
5653 && (nonzero_bits (SUBREG_REG (x), op0_mode)
5654 & GET_MODE_MASK (mode)) == 0)
5655 return CONST0_RTX (mode);
5658 /* Don't change the mode of the MEM if that would change the meaning
5659 of the address. */
5660 if (MEM_P (SUBREG_REG (x))
5661 && (MEM_VOLATILE_P (SUBREG_REG (x))
5662 || mode_dependent_address_p (XEXP (SUBREG_REG (x), 0),
5663 MEM_ADDR_SPACE (SUBREG_REG (x)))))
5664 return gen_rtx_CLOBBER (mode, const0_rtx);
5666 /* Note that we cannot do any narrowing for non-constants since
5667 we might have been counting on using the fact that some bits were
5668 zero. We now do this in the SET. */
5670 break;
5672 case NEG:
5673 temp = expand_compound_operation (XEXP (x, 0));
5675 /* For C equal to the width of MODE minus 1, (neg (ashiftrt X C)) can be
5676 replaced by (lshiftrt X C). This will convert
5677 (neg (sign_extract X 1 Y)) to (zero_extract X 1 Y). */
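      /* Worked example (illustrative): in SImode, (ashiftrt X 31) is -1 when
	 the sign bit of X is set and 0 otherwise, so its negation is 1 or 0,
	 which is exactly (lshiftrt X 31).  */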
5679 if (GET_CODE (temp) == ASHIFTRT
5680 && CONST_INT_P (XEXP (temp, 1))
5681 && INTVAL (XEXP (temp, 1)) == GET_MODE_PRECISION (mode) - 1)
5682 return simplify_shift_const (NULL_RTX, LSHIFTRT, mode, XEXP (temp, 0),
5683 INTVAL (XEXP (temp, 1)));
5685 /* If X has only a single bit that might be nonzero, say, bit I, convert
5686 (neg X) to (ashiftrt (ashift X C-I) C-I) where C is the bitsize of
5687 MODE minus 1. This will convert (neg (zero_extract X 1 Y)) to
5688 (sign_extract X 1 Y). But only do this if TEMP isn't a register
5689 or a SUBREG of one since we'd be making the expression more
5690 complex if it was just a register. */
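      /* Illustrative example: in SImode with only bit 3 possibly nonzero,
	 X is either 0 or 8, so (neg X) is 0 or -8, and
	 (ashiftrt (ashift X 28) 28) yields the same two values.  */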
5692 if (!REG_P (temp)
5693 && ! (GET_CODE (temp) == SUBREG
5694 && REG_P (SUBREG_REG (temp)))
5695 && (i = exact_log2 (nonzero_bits (temp, mode))) >= 0)
5697 rtx temp1 = simplify_shift_const
5698 (NULL_RTX, ASHIFTRT, mode,
5699 simplify_shift_const (NULL_RTX, ASHIFT, mode, temp,
5700 GET_MODE_PRECISION (mode) - 1 - i),
5701 GET_MODE_PRECISION (mode) - 1 - i);
5703 /* If all we did was surround TEMP with the two shifts, we
5704 haven't improved anything, so don't use it. Otherwise,
5705 we are better off with TEMP1. */
5706 if (GET_CODE (temp1) != ASHIFTRT
5707 || GET_CODE (XEXP (temp1, 0)) != ASHIFT
5708 || XEXP (XEXP (temp1, 0), 0) != temp)
5709 return temp1;
5711 break;
5713 case TRUNCATE:
5714 /* We can't handle truncation to a partial integer mode here
5715 because we don't know the real bitsize of the partial
5716 integer mode. */
5717 if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
5718 break;
5720 if (HWI_COMPUTABLE_MODE_P (mode))
5721 SUBST (XEXP (x, 0),
5722 force_to_mode (XEXP (x, 0), GET_MODE (XEXP (x, 0)),
5723 GET_MODE_MASK (mode), 0));
5725 /* We can truncate a constant value and return it. */
5726 if (CONST_INT_P (XEXP (x, 0)))
5727 return gen_int_mode (INTVAL (XEXP (x, 0)), mode);
5729 /* Similarly to what we do in simplify-rtx.c, a truncate of a register
5730 whose value is a comparison can be replaced with a subreg if
5731 STORE_FLAG_VALUE permits. */
5732 if (HWI_COMPUTABLE_MODE_P (mode)
5733 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0
5734 && (temp = get_last_value (XEXP (x, 0)))
5735 && COMPARISON_P (temp))
5736 return gen_lowpart (mode, XEXP (x, 0));
5737 break;
5739 case CONST:
5740 /* (const (const X)) can become (const X). Do it this way rather than
5741 returning the inner CONST since CONST can be shared with a
5742 REG_EQUAL note. */
5743 if (GET_CODE (XEXP (x, 0)) == CONST)
5744 SUBST (XEXP (x, 0), XEXP (XEXP (x, 0), 0));
5745 break;
5747 #ifdef HAVE_lo_sum
5748 case LO_SUM:
5749 /* Convert (lo_sum (high FOO) FOO) to FOO. This is necessary so we
5750 can add in an offset. find_split_point will split this address up
5751 again if it doesn't match. */
5752 if (GET_CODE (XEXP (x, 0)) == HIGH
5753 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
5754 return XEXP (x, 1);
5755 break;
5756 #endif
5758 case PLUS:
5759 /* (plus (xor (and <foo> (const_int pow2 - 1)) <c>) <-c>)
5760 when c is (const_int (pow2 + 1) / 2) is a sign extension of a
5761 bit-field and can be replaced by either a sign_extend or a
5762 sign_extract. The `and' may be a zero_extend and the two
5763 <c>, -<c> constants may be reversed. */
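      /* Worked example (illustrative): for a 4-bit field, pow2 is 16 and
	 c is 8, so (plus (xor (and X 15) 8) -8) maps 0..7 to 0..7 and
	 8..15 to -8..-1, i.e. it sign-extends the low 4 bits of X.  */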
5764 if (GET_CODE (XEXP (x, 0)) == XOR
5765 && CONST_INT_P (XEXP (x, 1))
5766 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
5767 && INTVAL (XEXP (x, 1)) == -INTVAL (XEXP (XEXP (x, 0), 1))
5768 && ((i = exact_log2 (UINTVAL (XEXP (XEXP (x, 0), 1)))) >= 0
5769 || (i = exact_log2 (UINTVAL (XEXP (x, 1)))) >= 0)
5770 && HWI_COMPUTABLE_MODE_P (mode)
5771 && ((GET_CODE (XEXP (XEXP (x, 0), 0)) == AND
5772 && CONST_INT_P (XEXP (XEXP (XEXP (x, 0), 0), 1))
5773 && (UINTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1))
5774 == ((unsigned HOST_WIDE_INT) 1 << (i + 1)) - 1))
5775 || (GET_CODE (XEXP (XEXP (x, 0), 0)) == ZERO_EXTEND
5776 && (GET_MODE_PRECISION (GET_MODE (XEXP (XEXP (XEXP (x, 0), 0), 0)))
5777 == (unsigned int) i + 1))))
5778 return simplify_shift_const
5779 (NULL_RTX, ASHIFTRT, mode,
5780 simplify_shift_const (NULL_RTX, ASHIFT, mode,
5781 XEXP (XEXP (XEXP (x, 0), 0), 0),
5782 GET_MODE_PRECISION (mode) - (i + 1)),
5783 GET_MODE_PRECISION (mode) - (i + 1));
5785 /* If only the low-order bit of X is possibly nonzero, (plus x -1)
5786 can become (ashiftrt (ashift (xor x 1) C) C) where C is
5787 the bitsize of the mode - 1. This allows simplification of
5788 "a = (b & 8) == 0;" */
5789 if (XEXP (x, 1) == constm1_rtx
5790 && !REG_P (XEXP (x, 0))
5791 && ! (GET_CODE (XEXP (x, 0)) == SUBREG
5792 && REG_P (SUBREG_REG (XEXP (x, 0))))
5793 && nonzero_bits (XEXP (x, 0), mode) == 1)
5794 return simplify_shift_const (NULL_RTX, ASHIFTRT, mode,
5795 simplify_shift_const (NULL_RTX, ASHIFT, mode,
5796 gen_rtx_XOR (mode, XEXP (x, 0), const1_rtx),
5797 GET_MODE_PRECISION (mode) - 1),
5798 GET_MODE_PRECISION (mode) - 1);
5800 /* If we are adding two things that have no bits in common, convert
5801 the addition into an IOR. This will often be further simplified,
5802 for example in cases like ((a & 1) + (a & 2)), which can
5803 become a & 3. */
5805 if (HWI_COMPUTABLE_MODE_P (mode)
5806 && (nonzero_bits (XEXP (x, 0), mode)
5807 & nonzero_bits (XEXP (x, 1), mode)) == 0)
5809 /* Try to simplify the expression further. */
5810 rtx tor = simplify_gen_binary (IOR, mode, XEXP (x, 0), XEXP (x, 1));
5811 temp = combine_simplify_rtx (tor, VOIDmode, in_dest, 0);
5813 /* If we could, great. If not, do not go ahead with the IOR
5814 replacement, since PLUS appears in many special purpose
5815 address arithmetic instructions. */
5816 if (GET_CODE (temp) != CLOBBER
5817 && (GET_CODE (temp) != IOR
5818 || ((XEXP (temp, 0) != XEXP (x, 0)
5819 || XEXP (temp, 1) != XEXP (x, 1))
5820 && (XEXP (temp, 0) != XEXP (x, 1)
5821 || XEXP (temp, 1) != XEXP (x, 0)))))
5822 return temp;
5824 break;
5826 case MINUS:
5827 /* (minus <foo> (and <foo> (const_int -pow2))) becomes
5828 (and <foo> (const_int pow2-1)) */
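      /* Illustrative example: (minus X (and X -16)) subtracts from X its
	 value rounded down to a multiple of 16, leaving only the low four
	 bits, i.e. (and X 15).  */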
5829 if (GET_CODE (XEXP (x, 1)) == AND
5830 && CONST_INT_P (XEXP (XEXP (x, 1), 1))
5831 && exact_log2 (-UINTVAL (XEXP (XEXP (x, 1), 1))) >= 0
5832 && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
5833 return simplify_and_const_int (NULL_RTX, mode, XEXP (x, 0),
5834 -INTVAL (XEXP (XEXP (x, 1), 1)) - 1);
5835 break;
5837 case MULT:
5838 /* If we have (mult (plus A B) C), apply the distributive law and then
5839 the inverse distributive law to see if things simplify. This
5840 occurs mostly in addresses, often when unrolling loops. */
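      /* Illustrative example: (mult (plus A B) 4) is first distributed into
	 (plus (mult A 4) (mult B 4)); if either product then folds, for
	 instance because A is a constant, the whole address simplifies.  */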
5842 if (GET_CODE (XEXP (x, 0)) == PLUS)
5844 rtx result = distribute_and_simplify_rtx (x, 0);
5845 if (result)
5846 return result;
5849 /* Try to simplify a*(b/c) as (a*b)/c. */
5850 if (FLOAT_MODE_P (mode) && flag_associative_math
5851 && GET_CODE (XEXP (x, 0)) == DIV)
5853 rtx tem = simplify_binary_operation (MULT, mode,
5854 XEXP (XEXP (x, 0), 0),
5855 XEXP (x, 1));
5856 if (tem)
5857 return simplify_gen_binary (DIV, mode, tem, XEXP (XEXP (x, 0), 1));
5859 break;
5861 case UDIV:
5862 /* If this is a divide by a power of two, treat it as a shift if
5863 its first operand is a shift. */
5864 if (CONST_INT_P (XEXP (x, 1))
5865 && (i = exact_log2 (UINTVAL (XEXP (x, 1)))) >= 0
5866 && (GET_CODE (XEXP (x, 0)) == ASHIFT
5867 || GET_CODE (XEXP (x, 0)) == LSHIFTRT
5868 || GET_CODE (XEXP (x, 0)) == ASHIFTRT
5869 || GET_CODE (XEXP (x, 0)) == ROTATE
5870 || GET_CODE (XEXP (x, 0)) == ROTATERT))
5871 return simplify_shift_const (NULL_RTX, LSHIFTRT, mode, XEXP (x, 0), i);
5872 break;
5874 case EQ: case NE:
5875 case GT: case GTU: case GE: case GEU:
5876 case LT: case LTU: case LE: case LEU:
5877 case UNEQ: case LTGT:
5878 case UNGT: case UNGE:
5879 case UNLT: case UNLE:
5880 case UNORDERED: case ORDERED:
5881 /* If the first operand is a condition code, we can't do anything
5882 with it. */
5883 if (GET_CODE (XEXP (x, 0)) == COMPARE
5884 || (GET_MODE_CLASS (GET_MODE (XEXP (x, 0))) != MODE_CC
5885 && ! CC0_P (XEXP (x, 0))))
5887 rtx op0 = XEXP (x, 0);
5888 rtx op1 = XEXP (x, 1);
5889 enum rtx_code new_code;
5891 if (GET_CODE (op0) == COMPARE)
5892 op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);
5894 /* Simplify our comparison, if possible. */
5895 new_code = simplify_comparison (code, &op0, &op1);
5897 /* If STORE_FLAG_VALUE is 1, we can convert (ne x 0) to simply X
5898 if only the low-order bit is possibly nonzero in X (such as when
5899 X is a ZERO_EXTRACT of one bit). Similarly, we can convert EQ to
5900 (xor X 1) or (minus 1 X); we use the former. Finally, if X is
5901 known to be either 0 or -1, NE becomes a NEG and EQ becomes
5902 (plus X 1).
5904 Remove any ZERO_EXTRACT we made when thinking this was a
5905 comparison. It may now be simpler to use, e.g., an AND. If a
5906 ZERO_EXTRACT is indeed appropriate, it will be placed back by
5907 the call to make_compound_operation in the SET case.
5909 Don't apply these optimizations if the caller would
5910 prefer a comparison rather than a value.
5911 E.g., for the condition in an IF_THEN_ELSE most targets need
5912 an explicit comparison. */
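	  /* Concrete cases (illustrative), assuming STORE_FLAG_VALUE == 1:
	     if X is known to be 0 or 1, (ne X 0) becomes X and (eq X 0)
	     becomes (xor X 1); if X is known to be 0 or -1, (ne X 0)
	     becomes (neg X) and (eq X 0) becomes (plus X 1).  */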
5914 if (in_cond)
5917 else if (STORE_FLAG_VALUE == 1
5918 && new_code == NE && GET_MODE_CLASS (mode) == MODE_INT
5919 && op1 == const0_rtx
5920 && mode == GET_MODE (op0)
5921 && nonzero_bits (op0, mode) == 1)
5922 return gen_lowpart (mode,
5923 expand_compound_operation (op0));
5925 else if (STORE_FLAG_VALUE == 1
5926 && new_code == NE && GET_MODE_CLASS (mode) == MODE_INT
5927 && op1 == const0_rtx
5928 && mode == GET_MODE (op0)
5929 && (num_sign_bit_copies (op0, mode)
5930 == GET_MODE_PRECISION (mode)))
5932 op0 = expand_compound_operation (op0);
5933 return simplify_gen_unary (NEG, mode,
5934 gen_lowpart (mode, op0),
5935 mode);
5938 else if (STORE_FLAG_VALUE == 1
5939 && new_code == EQ && GET_MODE_CLASS (mode) == MODE_INT
5940 && op1 == const0_rtx
5941 && mode == GET_MODE (op0)
5942 && nonzero_bits (op0, mode) == 1)
5944 op0 = expand_compound_operation (op0);
5945 return simplify_gen_binary (XOR, mode,
5946 gen_lowpart (mode, op0),
5947 const1_rtx);
5950 else if (STORE_FLAG_VALUE == 1
5951 && new_code == EQ && GET_MODE_CLASS (mode) == MODE_INT
5952 && op1 == const0_rtx
5953 && mode == GET_MODE (op0)
5954 && (num_sign_bit_copies (op0, mode)
5955 == GET_MODE_PRECISION (mode)))
5957 op0 = expand_compound_operation (op0);
5958 return plus_constant (mode, gen_lowpart (mode, op0), 1);
5961 /* If STORE_FLAG_VALUE is -1, we have cases similar to
5962 those above. */
5963 if (in_cond)
5966 else if (STORE_FLAG_VALUE == -1
5967 && new_code == NE && GET_MODE_CLASS (mode) == MODE_INT
5968 && op1 == const0_rtx
5969 && mode == GET_MODE (op0)
5970 && (num_sign_bit_copies (op0, mode)
5971 == GET_MODE_PRECISION (mode)))
5972 return gen_lowpart (mode,
5973 expand_compound_operation (op0));
5975 else if (STORE_FLAG_VALUE == -1
5976 && new_code == NE && GET_MODE_CLASS (mode) == MODE_INT
5977 && op1 == const0_rtx
5978 && mode == GET_MODE (op0)
5979 && nonzero_bits (op0, mode) == 1)
5981 op0 = expand_compound_operation (op0);
5982 return simplify_gen_unary (NEG, mode,
5983 gen_lowpart (mode, op0),
5984 mode);
5987 else if (STORE_FLAG_VALUE == -1
5988 && new_code == EQ && GET_MODE_CLASS (mode) == MODE_INT
5989 && op1 == const0_rtx
5990 && mode == GET_MODE (op0)
5991 && (num_sign_bit_copies (op0, mode)
5992 == GET_MODE_PRECISION (mode)))
5994 op0 = expand_compound_operation (op0);
5995 return simplify_gen_unary (NOT, mode,
5996 gen_lowpart (mode, op0),
5997 mode);
6000 /* If X is 0/1, (eq X 0) is X-1. */
6001 else if (STORE_FLAG_VALUE == -1
6002 && new_code == EQ && GET_MODE_CLASS (mode) == MODE_INT
6003 && op1 == const0_rtx
6004 && mode == GET_MODE (op0)
6005 && nonzero_bits (op0, mode) == 1)
6007 op0 = expand_compound_operation (op0);
6008 return plus_constant (mode, gen_lowpart (mode, op0), -1);
6011 /* If STORE_FLAG_VALUE says to just test the sign bit and X has just
6012 one bit that might be nonzero, we can convert (ne x 0) to
6013 (ashift x c) where C puts the bit in the sign bit. Remove any
6014 AND with STORE_FLAG_VALUE when we are done, since we are only
6015 going to test the sign bit. */
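      /* Illustrative example: in SImode, if STORE_FLAG_VALUE is the sign bit
	 and only bit 3 of X can be nonzero, (ne X 0) becomes (ashift X 28),
	 which moves that bit into the sign position.  */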
6016 if (new_code == NE && GET_MODE_CLASS (mode) == MODE_INT
6017 && HWI_COMPUTABLE_MODE_P (mode)
6018 && val_signbit_p (mode, STORE_FLAG_VALUE)
6019 && op1 == const0_rtx
6020 && mode == GET_MODE (op0)
6021 && (i = exact_log2 (nonzero_bits (op0, mode))) >= 0)
6023 x = simplify_shift_const (NULL_RTX, ASHIFT, mode,
6024 expand_compound_operation (op0),
6025 GET_MODE_PRECISION (mode) - 1 - i);
6026 if (GET_CODE (x) == AND && XEXP (x, 1) == const_true_rtx)
6027 return XEXP (x, 0);
6028 else
6029 return x;
6032 /* If the code changed, return a whole new comparison.
6033 We also need to avoid using SUBST in cases where
6034 simplify_comparison has widened a comparison with a CONST_INT,
6035 since in that case the wider CONST_INT may fail the sanity
6036 checks in do_SUBST. */
6037 if (new_code != code
6038 || (CONST_INT_P (op1)
6039 && GET_MODE (op0) != GET_MODE (XEXP (x, 0))
6040 && GET_MODE (op0) != GET_MODE (XEXP (x, 1))))
6041 return gen_rtx_fmt_ee (new_code, mode, op0, op1);
6043 /* Otherwise, keep this operation, but maybe change its operands.
6044 This also converts (ne (compare FOO BAR) 0) to (ne FOO BAR). */
6045 SUBST (XEXP (x, 0), op0);
6046 SUBST (XEXP (x, 1), op1);
6048 break;
6050 case IF_THEN_ELSE:
6051 return simplify_if_then_else (x);
6053 case ZERO_EXTRACT:
6054 case SIGN_EXTRACT:
6055 case ZERO_EXTEND:
6056 case SIGN_EXTEND:
6057 /* If we are processing SET_DEST, we are done. */
6058 if (in_dest)
6059 return x;
6061 return expand_compound_operation (x);
6063 case SET:
6064 return simplify_set (x);
6066 case AND:
6067 case IOR:
6068 return simplify_logical (x);
6070 case ASHIFT:
6071 case LSHIFTRT:
6072 case ASHIFTRT:
6073 case ROTATE:
6074 case ROTATERT:
6075 /* If this is a shift by a constant amount, simplify it. */
6076 if (CONST_INT_P (XEXP (x, 1)))
6077 return simplify_shift_const (x, code, mode, XEXP (x, 0),
6078 INTVAL (XEXP (x, 1)));
6080 else if (SHIFT_COUNT_TRUNCATED && !REG_P (XEXP (x, 1)))
6081 SUBST (XEXP (x, 1),
6082 force_to_mode (XEXP (x, 1), GET_MODE (XEXP (x, 1)),
6083 ((unsigned HOST_WIDE_INT) 1
6084 << exact_log2 (GET_MODE_BITSIZE (GET_MODE (x))))
6085 - 1,
6086 0));
6087 break;
6089 default:
6090 break;
6093 return x;
6096 /* Simplify X, an IF_THEN_ELSE expression. Return the new expression. */
6098 static rtx
6099 simplify_if_then_else (rtx x)
6101 machine_mode mode = GET_MODE (x);
6102 rtx cond = XEXP (x, 0);
6103 rtx true_rtx = XEXP (x, 1);
6104 rtx false_rtx = XEXP (x, 2);
6105 enum rtx_code true_code = GET_CODE (cond);
6106 int comparison_p = COMPARISON_P (cond);
6107 rtx temp;
6108 int i;
6109 enum rtx_code false_code;
6110 rtx reversed;
6112 /* Simplify storing of the truth value. */
6113 if (comparison_p && true_rtx == const_true_rtx && false_rtx == const0_rtx)
6114 return simplify_gen_relational (true_code, mode, VOIDmode,
6115 XEXP (cond, 0), XEXP (cond, 1));
6117 /* Also when the truth value has to be reversed. */
6118 if (comparison_p
6119 && true_rtx == const0_rtx && false_rtx == const_true_rtx
6120 && (reversed = reversed_comparison (cond, mode)))
6121 return reversed;
6123 /* Sometimes we can simplify the arm of an IF_THEN_ELSE if a register used
6124 in it is being compared against certain values. Get the true and false
6125 comparisons and see if that says anything about the value of each arm. */
6127 if (comparison_p
6128 && ((false_code = reversed_comparison_code (cond, NULL))
6129 != UNKNOWN)
6130 && REG_P (XEXP (cond, 0)))
6132 HOST_WIDE_INT nzb;
6133 rtx from = XEXP (cond, 0);
6134 rtx true_val = XEXP (cond, 1);
6135 rtx false_val = true_val;
6136 int swapped = 0;
6138 /* If FALSE_CODE is EQ, swap the codes and arms. */
6140 if (false_code == EQ)
6142 swapped = 1, true_code = EQ, false_code = NE;
6143 temp = true_rtx, true_rtx = false_rtx, false_rtx = temp;
6146 /* If we are comparing against zero and the expression being tested has
6147 only a single bit that might be nonzero, that is its value when it is
6148 not equal to zero. Similarly if it is known to be -1 or 0. */
6150 if (true_code == EQ && true_val == const0_rtx
6151 && exact_log2 (nzb = nonzero_bits (from, GET_MODE (from))) >= 0)
6153 false_code = EQ;
6154 false_val = gen_int_mode (nzb, GET_MODE (from));
6156 else if (true_code == EQ && true_val == const0_rtx
6157 && (num_sign_bit_copies (from, GET_MODE (from))
6158 == GET_MODE_PRECISION (GET_MODE (from))))
6160 false_code = EQ;
6161 false_val = constm1_rtx;
6164 /* Now simplify an arm if we know the value of the register in the
6165 branch and it is used in the arm. Be careful due to the potential
6166 of locally-shared RTL. */
6168 if (reg_mentioned_p (from, true_rtx))
6169 true_rtx = subst (known_cond (copy_rtx (true_rtx), true_code,
6170 from, true_val),
6171 pc_rtx, pc_rtx, 0, 0, 0);
6172 if (reg_mentioned_p (from, false_rtx))
6173 false_rtx = subst (known_cond (copy_rtx (false_rtx), false_code,
6174 from, false_val),
6175 pc_rtx, pc_rtx, 0, 0, 0);
6177 SUBST (XEXP (x, 1), swapped ? false_rtx : true_rtx);
6178 SUBST (XEXP (x, 2), swapped ? true_rtx : false_rtx);
6180 true_rtx = XEXP (x, 1);
6181 false_rtx = XEXP (x, 2);
6182 true_code = GET_CODE (cond);
6185 /* If we have (if_then_else FOO (pc) (label_ref BAR)) and FOO can be
6186 reversed, do so to avoid needing two sets of patterns for
6187 subtract-and-branch insns. Similarly if we have a constant in the true
6188 arm, the false arm is the same as the first operand of the comparison, or
6189 the false arm is more complicated than the true arm. */
6191 if (comparison_p
6192 && reversed_comparison_code (cond, NULL) != UNKNOWN
6193 && (true_rtx == pc_rtx
6194 || (CONSTANT_P (true_rtx)
6195 && !CONST_INT_P (false_rtx) && false_rtx != pc_rtx)
6196 || true_rtx == const0_rtx
6197 || (OBJECT_P (true_rtx) && !OBJECT_P (false_rtx))
6198 || (GET_CODE (true_rtx) == SUBREG && OBJECT_P (SUBREG_REG (true_rtx))
6199 && !OBJECT_P (false_rtx))
6200 || reg_mentioned_p (true_rtx, false_rtx)
6201 || rtx_equal_p (false_rtx, XEXP (cond, 0))))
6203 true_code = reversed_comparison_code (cond, NULL);
6204 SUBST (XEXP (x, 0), reversed_comparison (cond, GET_MODE (cond)));
6205 SUBST (XEXP (x, 1), false_rtx);
6206 SUBST (XEXP (x, 2), true_rtx);
6208 temp = true_rtx, true_rtx = false_rtx, false_rtx = temp;
6209 cond = XEXP (x, 0);
6211 /* It is possible that the conditional has been simplified out. */
6212 true_code = GET_CODE (cond);
6213 comparison_p = COMPARISON_P (cond);
6216 /* If the two arms are identical, we don't need the comparison. */
6218 if (rtx_equal_p (true_rtx, false_rtx) && ! side_effects_p (cond))
6219 return true_rtx;
6221 /* Convert a == b ? b : a to "a". */
6222 if (true_code == EQ && ! side_effects_p (cond)
6223 && !HONOR_NANS (mode)
6224 && rtx_equal_p (XEXP (cond, 0), false_rtx)
6225 && rtx_equal_p (XEXP (cond, 1), true_rtx))
6226 return false_rtx;
6227 else if (true_code == NE && ! side_effects_p (cond)
6228 && !HONOR_NANS (mode)
6229 && rtx_equal_p (XEXP (cond, 0), true_rtx)
6230 && rtx_equal_p (XEXP (cond, 1), false_rtx))
6231 return true_rtx;
6233 /* Look for cases where we have (abs x) or (neg (abs X)). */
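  /* Illustrative example: (if_then_else (ge X 0) X (neg X)) becomes
     (abs X), while (if_then_else (lt X 0) X (neg X)) becomes
     (neg (abs X)).  */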
6235 if (GET_MODE_CLASS (mode) == MODE_INT
6236 && comparison_p
6237 && XEXP (cond, 1) == const0_rtx
6238 && GET_CODE (false_rtx) == NEG
6239 && rtx_equal_p (true_rtx, XEXP (false_rtx, 0))
6240 && rtx_equal_p (true_rtx, XEXP (cond, 0))
6241 && ! side_effects_p (true_rtx))
6242 switch (true_code)
6244 case GT:
6245 case GE:
6246 return simplify_gen_unary (ABS, mode, true_rtx, mode);
6247 case LT:
6248 case LE:
6249 return
6250 simplify_gen_unary (NEG, mode,
6251 simplify_gen_unary (ABS, mode, true_rtx, mode),
6252 mode);
6253 default:
6254 break;
6257 /* Look for MIN or MAX. */
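  /* Illustrative example: (if_then_else (ge A B) A B) becomes (smax A B),
     and (if_then_else (ltu A B) A B) becomes (umin A B).  */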
6259 if ((! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
6260 && comparison_p
6261 && rtx_equal_p (XEXP (cond, 0), true_rtx)
6262 && rtx_equal_p (XEXP (cond, 1), false_rtx)
6263 && ! side_effects_p (cond))
6264 switch (true_code)
6266 case GE:
6267 case GT:
6268 return simplify_gen_binary (SMAX, mode, true_rtx, false_rtx);
6269 case LE:
6270 case LT:
6271 return simplify_gen_binary (SMIN, mode, true_rtx, false_rtx);
6272 case GEU:
6273 case GTU:
6274 return simplify_gen_binary (UMAX, mode, true_rtx, false_rtx);
6275 case LEU:
6276 case LTU:
6277 return simplify_gen_binary (UMIN, mode, true_rtx, false_rtx);
6278 default:
6279 break;
6282 /* If we have (if_then_else COND (OP Z C1) Z) and OP is an identity when its
6283 second operand is zero, this can be done as (OP Z (mult COND C2)) where
6284 C2 = C1 * STORE_FLAG_VALUE. Similarly if OP has an outer ZERO_EXTEND or
6285 SIGN_EXTEND as long as Z is already extended (so we don't destroy it).
6286 We can do this kind of thing in some cases when STORE_FLAG_VALUE is
6287 neither 1 nor -1, but it isn't worth checking for. */
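  /* Illustrative example, assuming STORE_FLAG_VALUE == 1: since PLUS is an
     identity when its second operand is zero,
     (if_then_else COND (plus Z (const_int 4)) Z) can be rewritten as
     (plus Z (mult COND (const_int 4))), because COND is either 1 or 0.  */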
6289 if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
6290 && comparison_p
6291 && GET_MODE_CLASS (mode) == MODE_INT
6292 && ! side_effects_p (x))
6294 rtx t = make_compound_operation (true_rtx, SET);
6295 rtx f = make_compound_operation (false_rtx, SET);
6296 rtx cond_op0 = XEXP (cond, 0);
6297 rtx cond_op1 = XEXP (cond, 1);
6298 enum rtx_code op = UNKNOWN, extend_op = UNKNOWN;
6299 machine_mode m = mode;
6300 rtx z = 0, c1 = NULL_RTX;
6302 if ((GET_CODE (t) == PLUS || GET_CODE (t) == MINUS
6303 || GET_CODE (t) == IOR || GET_CODE (t) == XOR
6304 || GET_CODE (t) == ASHIFT
6305 || GET_CODE (t) == LSHIFTRT || GET_CODE (t) == ASHIFTRT)
6306 && rtx_equal_p (XEXP (t, 0), f))
6307 c1 = XEXP (t, 1), op = GET_CODE (t), z = f;
6309 /* If an identity-zero op is commutative, check whether there
6310 would be a match if we swapped the operands. */
6311 else if ((GET_CODE (t) == PLUS || GET_CODE (t) == IOR
6312 || GET_CODE (t) == XOR)
6313 && rtx_equal_p (XEXP (t, 1), f))
6314 c1 = XEXP (t, 0), op = GET_CODE (t), z = f;
6315 else if (GET_CODE (t) == SIGN_EXTEND
6316 && (GET_CODE (XEXP (t, 0)) == PLUS
6317 || GET_CODE (XEXP (t, 0)) == MINUS
6318 || GET_CODE (XEXP (t, 0)) == IOR
6319 || GET_CODE (XEXP (t, 0)) == XOR
6320 || GET_CODE (XEXP (t, 0)) == ASHIFT
6321 || GET_CODE (XEXP (t, 0)) == LSHIFTRT
6322 || GET_CODE (XEXP (t, 0)) == ASHIFTRT)
6323 && GET_CODE (XEXP (XEXP (t, 0), 0)) == SUBREG
6324 && subreg_lowpart_p (XEXP (XEXP (t, 0), 0))
6325 && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 0)), f)
6326 && (num_sign_bit_copies (f, GET_MODE (f))
6327 > (unsigned int)
6328 (GET_MODE_PRECISION (mode)
6329 - GET_MODE_PRECISION (GET_MODE (XEXP (XEXP (t, 0), 0))))))
6331 c1 = XEXP (XEXP (t, 0), 1); z = f; op = GET_CODE (XEXP (t, 0));
6332 extend_op = SIGN_EXTEND;
6333 m = GET_MODE (XEXP (t, 0));
6335 else if (GET_CODE (t) == SIGN_EXTEND
6336 && (GET_CODE (XEXP (t, 0)) == PLUS
6337 || GET_CODE (XEXP (t, 0)) == IOR
6338 || GET_CODE (XEXP (t, 0)) == XOR)
6339 && GET_CODE (XEXP (XEXP (t, 0), 1)) == SUBREG
6340 && subreg_lowpart_p (XEXP (XEXP (t, 0), 1))
6341 && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 1)), f)
6342 && (num_sign_bit_copies (f, GET_MODE (f))
6343 > (unsigned int)
6344 (GET_MODE_PRECISION (mode)
6345 - GET_MODE_PRECISION (GET_MODE (XEXP (XEXP (t, 0), 1))))))
6347 c1 = XEXP (XEXP (t, 0), 0); z = f; op = GET_CODE (XEXP (t, 0));
6348 extend_op = SIGN_EXTEND;
6349 m = GET_MODE (XEXP (t, 0));
6351 else if (GET_CODE (t) == ZERO_EXTEND
6352 && (GET_CODE (XEXP (t, 0)) == PLUS
6353 || GET_CODE (XEXP (t, 0)) == MINUS
6354 || GET_CODE (XEXP (t, 0)) == IOR
6355 || GET_CODE (XEXP (t, 0)) == XOR
6356 || GET_CODE (XEXP (t, 0)) == ASHIFT
6357 || GET_CODE (XEXP (t, 0)) == LSHIFTRT
6358 || GET_CODE (XEXP (t, 0)) == ASHIFTRT)
6359 && GET_CODE (XEXP (XEXP (t, 0), 0)) == SUBREG
6360 && HWI_COMPUTABLE_MODE_P (mode)
6361 && subreg_lowpart_p (XEXP (XEXP (t, 0), 0))
6362 && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 0)), f)
6363 && ((nonzero_bits (f, GET_MODE (f))
6364 & ~GET_MODE_MASK (GET_MODE (XEXP (XEXP (t, 0), 0))))
6365 == 0))
6367 c1 = XEXP (XEXP (t, 0), 1); z = f; op = GET_CODE (XEXP (t, 0));
6368 extend_op = ZERO_EXTEND;
6369 m = GET_MODE (XEXP (t, 0));
6371 else if (GET_CODE (t) == ZERO_EXTEND
6372 && (GET_CODE (XEXP (t, 0)) == PLUS
6373 || GET_CODE (XEXP (t, 0)) == IOR
6374 || GET_CODE (XEXP (t, 0)) == XOR)
6375 && GET_CODE (XEXP (XEXP (t, 0), 1)) == SUBREG
6376 && HWI_COMPUTABLE_MODE_P (mode)
6377 && subreg_lowpart_p (XEXP (XEXP (t, 0), 1))
6378 && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 1)), f)
6379 && ((nonzero_bits (f, GET_MODE (f))
6380 & ~GET_MODE_MASK (GET_MODE (XEXP (XEXP (t, 0), 1))))
6381 == 0))
6383 c1 = XEXP (XEXP (t, 0), 0); z = f; op = GET_CODE (XEXP (t, 0));
6384 extend_op = ZERO_EXTEND;
6385 m = GET_MODE (XEXP (t, 0));
6388 if (z)
6390 temp = subst (simplify_gen_relational (true_code, m, VOIDmode,
6391 cond_op0, cond_op1),
6392 pc_rtx, pc_rtx, 0, 0, 0);
6393 temp = simplify_gen_binary (MULT, m, temp,
6394 simplify_gen_binary (MULT, m, c1,
6395 const_true_rtx));
6396 temp = subst (temp, pc_rtx, pc_rtx, 0, 0, 0);
6397 temp = simplify_gen_binary (op, m, gen_lowpart (m, z), temp);
6399 if (extend_op != UNKNOWN)
6400 temp = simplify_gen_unary (extend_op, mode, temp, m);
6402 return temp;
6406 /* If we have (if_then_else (ne A 0) C1 0) and either A is known to be 0 or
6407 1 and C1 is a single bit or A is known to be 0 or -1 and C1 is the
6408 negation of a single bit, we can convert this operation to a shift. We
6409 can actually do this more generally, but it doesn't seem worth it. */
6411 if (true_code == NE && XEXP (cond, 1) == const0_rtx
6412 && false_rtx == const0_rtx && CONST_INT_P (true_rtx)
6413 && ((1 == nonzero_bits (XEXP (cond, 0), mode)
6414 && (i = exact_log2 (UINTVAL (true_rtx))) >= 0)
6415 || ((num_sign_bit_copies (XEXP (cond, 0), mode)
6416 == GET_MODE_PRECISION (mode))
6417 && (i = exact_log2 (-UINTVAL (true_rtx))) >= 0)))
6418 return
6419 simplify_shift_const (NULL_RTX, ASHIFT, mode,
6420 gen_lowpart (mode, XEXP (cond, 0)), i);
6422 /* (IF_THEN_ELSE (NE REG 0) (8) (0)) is REG when nonzero_bits (REG) == 8. */
6423 if (true_code == NE && XEXP (cond, 1) == const0_rtx
6424 && false_rtx == const0_rtx && CONST_INT_P (true_rtx)
6425 && GET_MODE (XEXP (cond, 0)) == mode
6426 && (UINTVAL (true_rtx) & GET_MODE_MASK (mode))
6427 == nonzero_bits (XEXP (cond, 0), mode)
6428 && (i = exact_log2 (UINTVAL (true_rtx) & GET_MODE_MASK (mode))) >= 0)
6429 return XEXP (cond, 0);
6431 return x;
6434 /* Simplify X, a SET expression. Return the new expression. */
6436 static rtx
6437 simplify_set (rtx x)
6439 rtx src = SET_SRC (x);
6440 rtx dest = SET_DEST (x);
6441 machine_mode mode
6442 = GET_MODE (src) != VOIDmode ? GET_MODE (src) : GET_MODE (dest);
6443 rtx_insn *other_insn;
6444 rtx *cc_use;
6446 /* (set (pc) (return)) gets written as (return). */
6447 if (GET_CODE (dest) == PC && ANY_RETURN_P (src))
6448 return src;
6450 /* Now that we know for sure which bits of SRC we are using, see if we can
6451 simplify the expression for the object knowing that we only need the
6452 low-order bits. */
6454 if (GET_MODE_CLASS (mode) == MODE_INT && HWI_COMPUTABLE_MODE_P (mode))
6456 src = force_to_mode (src, mode, ~(unsigned HOST_WIDE_INT) 0, 0);
6457 SUBST (SET_SRC (x), src);
6460 /* If we are setting CC0 or if the source is a COMPARE, look for the use of
6461 the comparison result and try to simplify it unless we already have used
6462 undobuf.other_insn. */
6463 if ((GET_MODE_CLASS (mode) == MODE_CC
6464 || GET_CODE (src) == COMPARE
6465 || CC0_P (dest))
6466 && (cc_use = find_single_use (dest, subst_insn, &other_insn)) != 0
6467 && (undobuf.other_insn == 0 || other_insn == undobuf.other_insn)
6468 && COMPARISON_P (*cc_use)
6469 && rtx_equal_p (XEXP (*cc_use, 0), dest))
6471 enum rtx_code old_code = GET_CODE (*cc_use);
6472 enum rtx_code new_code;
6473 rtx op0, op1, tmp;
6474 int other_changed = 0;
6475 rtx inner_compare = NULL_RTX;
6476 machine_mode compare_mode = GET_MODE (dest);
6478 if (GET_CODE (src) == COMPARE)
6480 op0 = XEXP (src, 0), op1 = XEXP (src, 1);
6481 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
6483 inner_compare = op0;
6484 op0 = XEXP (inner_compare, 0), op1 = XEXP (inner_compare, 1);
6487 else
6488 op0 = src, op1 = CONST0_RTX (GET_MODE (src));
6490 tmp = simplify_relational_operation (old_code, compare_mode, VOIDmode,
6491 op0, op1);
6492 if (!tmp)
6493 new_code = old_code;
6494 else if (!CONSTANT_P (tmp))
6496 new_code = GET_CODE (tmp);
6497 op0 = XEXP (tmp, 0);
6498 op1 = XEXP (tmp, 1);
6500 else
6502 rtx pat = PATTERN (other_insn);
6503 undobuf.other_insn = other_insn;
6504 SUBST (*cc_use, tmp);
6506 /* Attempt to simplify CC user. */
6507 if (GET_CODE (pat) == SET)
6509 rtx new_rtx = simplify_rtx (SET_SRC (pat));
6510 if (new_rtx != NULL_RTX)
6511 SUBST (SET_SRC (pat), new_rtx);
6514 /* Convert X into a no-op move. */
6515 SUBST (SET_DEST (x), pc_rtx);
6516 SUBST (SET_SRC (x), pc_rtx);
6517 return x;
6520 /* Simplify our comparison, if possible. */
6521 new_code = simplify_comparison (new_code, &op0, &op1);
6523 #ifdef SELECT_CC_MODE
6524 /* If this machine has CC modes other than CCmode, check to see if we
6525 need to use a different CC mode here. */
6526 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
6527 compare_mode = GET_MODE (op0);
6528 else if (inner_compare
6529 && GET_MODE_CLASS (GET_MODE (inner_compare)) == MODE_CC
6530 && new_code == old_code
6531 && op0 == XEXP (inner_compare, 0)
6532 && op1 == XEXP (inner_compare, 1))
6533 compare_mode = GET_MODE (inner_compare);
6534 else
6535 compare_mode = SELECT_CC_MODE (new_code, op0, op1);
6537 #ifndef HAVE_cc0
6538 /* If the mode changed, we have to change SET_DEST, the mode in the
6539 compare, and the mode in the place SET_DEST is used. If SET_DEST is
6540 a hard register, just build new versions with the proper mode. If it
6541 is a pseudo, we lose unless it is the only time we set the pseudo, in
6542 which case we can safely change its mode. */
6543 if (compare_mode != GET_MODE (dest))
6545 if (can_change_dest_mode (dest, 0, compare_mode))
6547 unsigned int regno = REGNO (dest);
6548 rtx new_dest;
6550 if (regno < FIRST_PSEUDO_REGISTER)
6551 new_dest = gen_rtx_REG (compare_mode, regno);
6552 else
6554 SUBST_MODE (regno_reg_rtx[regno], compare_mode);
6555 new_dest = regno_reg_rtx[regno];
6558 SUBST (SET_DEST (x), new_dest);
6559 SUBST (XEXP (*cc_use, 0), new_dest);
6560 other_changed = 1;
6562 dest = new_dest;
6565 #endif /* cc0 */
6566 #endif /* SELECT_CC_MODE */
6568 /* If the code changed, we have to build a new comparison in
6569 undobuf.other_insn. */
6570 if (new_code != old_code)
6572 int other_changed_previously = other_changed;
6573 unsigned HOST_WIDE_INT mask;
6574 rtx old_cc_use = *cc_use;
6576 SUBST (*cc_use, gen_rtx_fmt_ee (new_code, GET_MODE (*cc_use),
6577 dest, const0_rtx));
6578 other_changed = 1;
6580 /* If the only change we made was to change an EQ into an NE or
6581 vice versa, OP0 has only one bit that might be nonzero, and OP1
6582 is zero, check if changing the user of the condition code will
6583 produce a valid insn. If it won't, we can keep the original code
6584 in that insn by surrounding our operation with an XOR. */
6586 if (((old_code == NE && new_code == EQ)
6587 || (old_code == EQ && new_code == NE))
6588 && ! other_changed_previously && op1 == const0_rtx
6589 && HWI_COMPUTABLE_MODE_P (GET_MODE (op0))
6590 && exact_log2 (mask = nonzero_bits (op0, GET_MODE (op0))) >= 0)
6592 rtx pat = PATTERN (other_insn), note = 0;
6594 if ((recog_for_combine (&pat, other_insn, &note) < 0
6595 && ! check_asm_operands (pat)))
6597 *cc_use = old_cc_use;
6598 other_changed = 0;
6600 op0 = simplify_gen_binary (XOR, GET_MODE (op0), op0,
6601 gen_int_mode (mask,
6602 GET_MODE (op0)));
6607 if (other_changed)
6608 undobuf.other_insn = other_insn;
6610 /* Otherwise, if we didn't previously have a COMPARE in the
6611 correct mode, we need one. */
6612 if (GET_CODE (src) != COMPARE || GET_MODE (src) != compare_mode)
6614 SUBST (SET_SRC (x), gen_rtx_COMPARE (compare_mode, op0, op1));
6615 src = SET_SRC (x);
6617 else if (GET_MODE (op0) == compare_mode && op1 == const0_rtx)
6619 SUBST (SET_SRC (x), op0);
6620 src = SET_SRC (x);
6622 /* Otherwise, update the COMPARE if needed. */
6623 else if (XEXP (src, 0) != op0 || XEXP (src, 1) != op1)
6625 SUBST (SET_SRC (x), gen_rtx_COMPARE (compare_mode, op0, op1));
6626 src = SET_SRC (x);
6629 else
6631 /* Get SET_SRC in a form where we have placed back any
6632 compound expressions. Then do the checks below. */
6633 src = make_compound_operation (src, SET);
6634 SUBST (SET_SRC (x), src);
6637 /* If we have (set x (subreg:m1 (op:m2 ...) 0)) with OP being some operation,
6638 and X being a REG or (subreg (reg)), we may be able to convert this to
6639 (set (subreg:m2 x) (op)).
6641 We can always do this if M1 is narrower than M2 because that means that
6642 we only care about the low bits of the result.
6644 However, on machines without WORD_REGISTER_OPERATIONS defined, we cannot
6645 perform a narrower operation than requested since the high-order bits will
6646 be undefined. On machines where it is defined, this transformation is safe
6647 as long as M1 and M2 have the same number of words. */
6649 if (GET_CODE (src) == SUBREG && subreg_lowpart_p (src)
6650 && !OBJECT_P (SUBREG_REG (src))
6651 && (((GET_MODE_SIZE (GET_MODE (src)) + (UNITS_PER_WORD - 1))
6652 / UNITS_PER_WORD)
6653 == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (src)))
6654 + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD))
6655 #ifndef WORD_REGISTER_OPERATIONS
6656 && (GET_MODE_SIZE (GET_MODE (src))
6657 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (src))))
6658 #endif
6659 #ifdef CANNOT_CHANGE_MODE_CLASS
6660 && ! (REG_P (dest) && REGNO (dest) < FIRST_PSEUDO_REGISTER
6661 && REG_CANNOT_CHANGE_MODE_P (REGNO (dest),
6662 GET_MODE (SUBREG_REG (src)),
6663 GET_MODE (src)))
6664 #endif
6665 && (REG_P (dest)
6666 || (GET_CODE (dest) == SUBREG
6667 && REG_P (SUBREG_REG (dest)))))
6669 SUBST (SET_DEST (x),
6670 gen_lowpart (GET_MODE (SUBREG_REG (src)),
6671 dest));
6672 SUBST (SET_SRC (x), SUBREG_REG (src));
6674 src = SET_SRC (x), dest = SET_DEST (x);
6677 #ifdef HAVE_cc0
6678 /* If we have (set (cc0) (subreg ...)), we try to remove the subreg
6679 in SRC. */
6680 if (dest == cc0_rtx
6681 && GET_CODE (src) == SUBREG
6682 && subreg_lowpart_p (src)
6683 && (GET_MODE_PRECISION (GET_MODE (src))
6684 < GET_MODE_PRECISION (GET_MODE (SUBREG_REG (src)))))
6686 rtx inner = SUBREG_REG (src);
6687 machine_mode inner_mode = GET_MODE (inner);
6689 /* Here we make sure that we don't have a sign bit on. */
6690 if (val_signbit_known_clear_p (GET_MODE (src),
6691 nonzero_bits (inner, inner_mode)))
6693 SUBST (SET_SRC (x), inner);
6694 src = SET_SRC (x);
6697 #endif
6699 #ifdef LOAD_EXTEND_OP
6700 /* If we have (set FOO (subreg:M (mem:N BAR) 0)) with M wider than N, this
6701 would require a paradoxical subreg. Replace the subreg with a
6702 zero_extend to avoid the reload that would otherwise be required. */
6704 if (GET_CODE (src) == SUBREG && subreg_lowpart_p (src)
6705 && INTEGRAL_MODE_P (GET_MODE (SUBREG_REG (src)))
6706 && LOAD_EXTEND_OP (GET_MODE (SUBREG_REG (src))) != UNKNOWN
6707 && SUBREG_BYTE (src) == 0
6708 && paradoxical_subreg_p (src)
6709 && MEM_P (SUBREG_REG (src)))
6711 SUBST (SET_SRC (x),
6712 gen_rtx_fmt_e (LOAD_EXTEND_OP (GET_MODE (SUBREG_REG (src))),
6713 GET_MODE (src), SUBREG_REG (src)));
6715 src = SET_SRC (x);
6717 #endif
6719 /* If we don't have a conditional move, SET_SRC is an IF_THEN_ELSE, and we
6720 are comparing an item known to be 0 or -1 against 0, use a logical
6721 operation instead. Check for one of the arms being an IOR of the other
6722 arm with some value. We compute three terms to be IOR'ed together. In
6723 practice, at most two will be nonzero. Then we do the IOR's. */
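  /* Illustrative example: if A is known to be 0 or -1, then
     (if_then_else (ne A 0) B C) can be computed without a conditional
     move as (ior (and A B) (and (not A) C)).  */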
6725 if (GET_CODE (dest) != PC
6726 && GET_CODE (src) == IF_THEN_ELSE
6727 && GET_MODE_CLASS (GET_MODE (src)) == MODE_INT
6728 && (GET_CODE (XEXP (src, 0)) == EQ || GET_CODE (XEXP (src, 0)) == NE)
6729 && XEXP (XEXP (src, 0), 1) == const0_rtx
6730 && GET_MODE (src) == GET_MODE (XEXP (XEXP (src, 0), 0))
6731 #ifdef HAVE_conditional_move
6732 && ! can_conditionally_move_p (GET_MODE (src))
6733 #endif
6734 && (num_sign_bit_copies (XEXP (XEXP (src, 0), 0),
6735 GET_MODE (XEXP (XEXP (src, 0), 0)))
6736 == GET_MODE_PRECISION (GET_MODE (XEXP (XEXP (src, 0), 0))))
6737 && ! side_effects_p (src))
6739 rtx true_rtx = (GET_CODE (XEXP (src, 0)) == NE
6740 ? XEXP (src, 1) : XEXP (src, 2));
6741 rtx false_rtx = (GET_CODE (XEXP (src, 0)) == NE
6742 ? XEXP (src, 2) : XEXP (src, 1));
6743 rtx term1 = const0_rtx, term2, term3;
6745 if (GET_CODE (true_rtx) == IOR
6746 && rtx_equal_p (XEXP (true_rtx, 0), false_rtx))
6747 term1 = false_rtx, true_rtx = XEXP (true_rtx, 1), false_rtx = const0_rtx;
6748 else if (GET_CODE (true_rtx) == IOR
6749 && rtx_equal_p (XEXP (true_rtx, 1), false_rtx))
6750 term1 = false_rtx, true_rtx = XEXP (true_rtx, 0), false_rtx = const0_rtx;
6751 else if (GET_CODE (false_rtx) == IOR
6752 && rtx_equal_p (XEXP (false_rtx, 0), true_rtx))
6753 term1 = true_rtx, false_rtx = XEXP (false_rtx, 1), true_rtx = const0_rtx;
6754 else if (GET_CODE (false_rtx) == IOR
6755 && rtx_equal_p (XEXP (false_rtx, 1), true_rtx))
6756 term1 = true_rtx, false_rtx = XEXP (false_rtx, 0), true_rtx = const0_rtx;
6758 term2 = simplify_gen_binary (AND, GET_MODE (src),
6759 XEXP (XEXP (src, 0), 0), true_rtx);
6760 term3 = simplify_gen_binary (AND, GET_MODE (src),
6761 simplify_gen_unary (NOT, GET_MODE (src),
6762 XEXP (XEXP (src, 0), 0),
6763 GET_MODE (src)),
6764 false_rtx);
6766 SUBST (SET_SRC (x),
6767 simplify_gen_binary (IOR, GET_MODE (src),
6768 simplify_gen_binary (IOR, GET_MODE (src),
6769 term1, term2),
6770 term3));
6772 src = SET_SRC (x);
6775 /* If either SRC or DEST is a CLOBBER of (const_int 0), make this
6776 whole thing fail. */
6777 if (GET_CODE (src) == CLOBBER && XEXP (src, 0) == const0_rtx)
6778 return src;
6779 else if (GET_CODE (dest) == CLOBBER && XEXP (dest, 0) == const0_rtx)
6780 return dest;
6781 else
6782 /* Convert this into a field assignment operation, if possible. */
6783 return make_field_assignment (x);
6786 /* Simplify X, an AND, IOR, or XOR operation, and return the simplified
6787 result. */
6789 static rtx
6790 simplify_logical (rtx x)
6792 machine_mode mode = GET_MODE (x);
6793 rtx op0 = XEXP (x, 0);
6794 rtx op1 = XEXP (x, 1);
6796 switch (GET_CODE (x))
6798 case AND:
6799 /* We can call simplify_and_const_int only if we don't lose
6800 any (sign) bits when converting INTVAL (op1) to
6801 "unsigned HOST_WIDE_INT". */
6802 if (CONST_INT_P (op1)
6803 && (HWI_COMPUTABLE_MODE_P (mode)
6804 || INTVAL (op1) > 0))
6806 x = simplify_and_const_int (x, mode, op0, INTVAL (op1));
6807 if (GET_CODE (x) != AND)
6808 return x;
6810 op0 = XEXP (x, 0);
6811 op1 = XEXP (x, 1);
6814 /* If we have any of (and (ior A B) C) or (and (xor A B) C),
6815 apply the distributive law and then the inverse distributive
6816 law to see if things simplify. */
6817 if (GET_CODE (op0) == IOR || GET_CODE (op0) == XOR)
6819 rtx result = distribute_and_simplify_rtx (x, 0);
6820 if (result)
6821 return result;
6823 if (GET_CODE (op1) == IOR || GET_CODE (op1) == XOR)
6825 rtx result = distribute_and_simplify_rtx (x, 1);
6826 if (result)
6827 return result;
6829 break;
6831 case IOR:
6832 /* If we have (ior (and A B) C), apply the distributive law and then
6833 the inverse distributive law to see if things simplify. */
6835 if (GET_CODE (op0) == AND)
6837 rtx result = distribute_and_simplify_rtx (x, 0);
6838 if (result)
6839 return result;
6842 if (GET_CODE (op1) == AND)
6844 rtx result = distribute_and_simplify_rtx (x, 1);
6845 if (result)
6846 return result;
6848 break;
6850 default:
6851 gcc_unreachable ();
6854 return x;
6857 /* We consider ZERO_EXTRACT, SIGN_EXTRACT, and SIGN_EXTEND as "compound
6858 operations" because they can be replaced with two more basic operations.
6859 ZERO_EXTEND is also considered "compound" because it can be replaced with
6860 an AND operation, which is simpler, though only one operation.
6862 The function expand_compound_operation is called with an rtx expression
6863 and will convert it to the appropriate shifts and AND operations,
6864 simplifying at each stage.
6866 The function make_compound_operation is called to convert an expression
6867 consisting of shifts and ANDs into the equivalent compound expression.
6868 It is the inverse of this function, loosely speaking. */
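/* Illustrative examples (with the default bit numbering):
   (sign_extract:SI X (const_int 8) (const_int 0)) corresponds to
   (ashiftrt:SI (ashift:SI X (const_int 24)) (const_int 24)), and a
   ZERO_EXTEND from QImode to SImode corresponds to an AND with
   (const_int 255).  */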
6870 static rtx
6871 expand_compound_operation (rtx x)
6873 unsigned HOST_WIDE_INT pos = 0, len;
6874 int unsignedp = 0;
6875 unsigned int modewidth;
6876 rtx tem;
6878 switch (GET_CODE (x))
6880 case ZERO_EXTEND:
6881 unsignedp = 1;
6882 case SIGN_EXTEND:
6883 /* We can't necessarily use a const_int for a multiword mode;
6884 it depends on implicitly extending the value.
6885 Since we don't know the right way to extend it,
6886 we can't tell whether the implicit way is right.
6888 Even for a mode that is no wider than a const_int,
6889 we can't win, because we need to sign extend one of its bits through
6890 the rest of it, and we don't know which bit. */
6891 if (CONST_INT_P (XEXP (x, 0)))
6892 return x;
6894 /* Return if (subreg:MODE FROM 0) is not a safe replacement for
6895 (zero_extend:MODE FROM) or (sign_extend:MODE FROM). It is for any MEM
6896 because (SUBREG (MEM...)) is guaranteed to cause the MEM to be
6897 reloaded.  If not for that, MEMs would very rarely be safe.
6899 Reject MODEs bigger than a word, because we might not be able
6900 to reference a two-register group starting with an arbitrary register
6901 (and currently gen_lowpart might crash for a SUBREG). */
6903 if (GET_MODE_SIZE (GET_MODE (XEXP (x, 0))) > UNITS_PER_WORD)
6904 return x;
6906 /* Reject MODEs that aren't scalar integers because turning vector
6907 or complex modes into shifts causes problems. */
6909 if (! SCALAR_INT_MODE_P (GET_MODE (XEXP (x, 0))))
6910 return x;
6912 len = GET_MODE_PRECISION (GET_MODE (XEXP (x, 0)));
6913 /* If the inner object has VOIDmode (the only way this can happen
6914 is if it is an ASM_OPERANDS), we can't do anything since we don't
6915 know how much masking to do. */
6916 if (len == 0)
6917 return x;
6919 break;
6921 case ZERO_EXTRACT:
6922 unsignedp = 1;
6924 /* ... fall through ... */
6926 case SIGN_EXTRACT:
6927 /* If the operand is a CLOBBER, just return it. */
6928 if (GET_CODE (XEXP (x, 0)) == CLOBBER)
6929 return XEXP (x, 0);
6931 if (!CONST_INT_P (XEXP (x, 1))
6932 || !CONST_INT_P (XEXP (x, 2))
6933 || GET_MODE (XEXP (x, 0)) == VOIDmode)
6934 return x;
6936 /* Reject MODEs that aren't scalar integers because turning vector
6937 or complex modes into shifts causes problems. */
6939 if (! SCALAR_INT_MODE_P (GET_MODE (XEXP (x, 0))))
6940 return x;
6942 len = INTVAL (XEXP (x, 1));
6943 pos = INTVAL (XEXP (x, 2));
6945 /* This should stay within the object being extracted, fail otherwise. */
6946 if (len + pos > GET_MODE_PRECISION (GET_MODE (XEXP (x, 0))))
6947 return x;
6949 if (BITS_BIG_ENDIAN)
6950 pos = GET_MODE_PRECISION (GET_MODE (XEXP (x, 0))) - len - pos;
6952 break;
6954 default:
6955 return x;
6957 /* Convert sign extension to zero extension, if we know that the high
6958 bit is not set, as this is easier to optimize. It will be converted
6959 back to a cheaper alternative in make_extraction. */
6960 if (GET_CODE (x) == SIGN_EXTEND
6961 && (HWI_COMPUTABLE_MODE_P (GET_MODE (x))
6962 && ((nonzero_bits (XEXP (x, 0), GET_MODE (XEXP (x, 0)))
6963 & ~(((unsigned HOST_WIDE_INT)
6964 GET_MODE_MASK (GET_MODE (XEXP (x, 0))))
6965 >> 1))
6966 == 0)))
6968 rtx temp = gen_rtx_ZERO_EXTEND (GET_MODE (x), XEXP (x, 0));
6969 rtx temp2 = expand_compound_operation (temp);
6971 /* Make sure this is a profitable operation. */
6972 if (set_src_cost (x, optimize_this_for_speed_p)
6973 > set_src_cost (temp2, optimize_this_for_speed_p))
6974 return temp2;
6975 else if (set_src_cost (x, optimize_this_for_speed_p)
6976 > set_src_cost (temp, optimize_this_for_speed_p))
6977 return temp;
6978 else
6979 return x;
6982 /* We can optimize some special cases of ZERO_EXTEND. */
6983 if (GET_CODE (x) == ZERO_EXTEND)
6985 /* (zero_extend:DI (truncate:SI foo:DI)) is just foo:DI if we
6986 know that the last value didn't have any inappropriate bits
6987 set. */
6988 if (GET_CODE (XEXP (x, 0)) == TRUNCATE
6989 && GET_MODE (XEXP (XEXP (x, 0), 0)) == GET_MODE (x)
6990 && HWI_COMPUTABLE_MODE_P (GET_MODE (x))
6991 && (nonzero_bits (XEXP (XEXP (x, 0), 0), GET_MODE (x))
6992 & ~GET_MODE_MASK (GET_MODE (XEXP (x, 0)))) == 0)
6993 return XEXP (XEXP (x, 0), 0);
6995 /* Likewise for (zero_extend:DI (subreg:SI foo:DI 0)). */
6996 if (GET_CODE (XEXP (x, 0)) == SUBREG
6997 && GET_MODE (SUBREG_REG (XEXP (x, 0))) == GET_MODE (x)
6998 && subreg_lowpart_p (XEXP (x, 0))
6999 && HWI_COMPUTABLE_MODE_P (GET_MODE (x))
7000 && (nonzero_bits (SUBREG_REG (XEXP (x, 0)), GET_MODE (x))
7001 & ~GET_MODE_MASK (GET_MODE (XEXP (x, 0)))) == 0)
7002 return SUBREG_REG (XEXP (x, 0));
7004 /* (zero_extend:DI (truncate:SI foo:DI)) is just foo:DI when foo
7005 is a comparison and STORE_FLAG_VALUE permits. This is like
7006 the first case, but it works even when GET_MODE (x) is larger
7007 than HOST_WIDE_INT. */
7008 if (GET_CODE (XEXP (x, 0)) == TRUNCATE
7009 && GET_MODE (XEXP (XEXP (x, 0), 0)) == GET_MODE (x)
7010 && COMPARISON_P (XEXP (XEXP (x, 0), 0))
7011 && (GET_MODE_PRECISION (GET_MODE (XEXP (x, 0)))
7012 <= HOST_BITS_PER_WIDE_INT)
7013 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (GET_MODE (XEXP (x, 0)))) == 0)
7014 return XEXP (XEXP (x, 0), 0);
7016 /* Likewise for (zero_extend:DI (subreg:SI foo:DI 0)). */
7017 if (GET_CODE (XEXP (x, 0)) == SUBREG
7018 && GET_MODE (SUBREG_REG (XEXP (x, 0))) == GET_MODE (x)
7019 && subreg_lowpart_p (XEXP (x, 0))
7020 && COMPARISON_P (SUBREG_REG (XEXP (x, 0)))
7021 && (GET_MODE_PRECISION (GET_MODE (XEXP (x, 0)))
7022 <= HOST_BITS_PER_WIDE_INT)
7023 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (GET_MODE (XEXP (x, 0)))) == 0)
7024 return SUBREG_REG (XEXP (x, 0));
7028 /* If we reach here, we want to return a pair of shifts. The inner
7029 shift is a left shift of BITSIZE - POS - LEN bits. The outer
7030 shift is a right shift of BITSIZE - LEN bits. It is arithmetic or
7031 logical depending on the value of UNSIGNEDP.
7033 If this was a ZERO_EXTEND or ZERO_EXTRACT, this pair of shifts will be
7034 converted into an AND of a shift.
7036 We must check for the case where the left shift would have a negative
7037 count. This can happen in a case like (x >> 31) & 255 on machines
7038 that can't shift by a constant. On those machines, we would first
7039 combine the shift with the AND to produce a variable-position
7040 extraction. Then the constant of 31 would be substituted in
7041 to produce such a position. */
7043 modewidth = GET_MODE_PRECISION (GET_MODE (x));
7044 if (modewidth >= pos + len)
7046 machine_mode mode = GET_MODE (x);
7047 tem = gen_lowpart (mode, XEXP (x, 0));
7048 if (!tem || GET_CODE (tem) == CLOBBER)
7049 return x;
7050 tem = simplify_shift_const (NULL_RTX, ASHIFT, mode,
7051 tem, modewidth - pos - len);
7052 tem = simplify_shift_const (NULL_RTX, unsignedp ? LSHIFTRT : ASHIFTRT,
7053 mode, tem, modewidth - len);
7055 else if (unsignedp && len < HOST_BITS_PER_WIDE_INT)
7056 tem = simplify_and_const_int (NULL_RTX, GET_MODE (x),
7057 simplify_shift_const (NULL_RTX, LSHIFTRT,
7058 GET_MODE (x),
7059 XEXP (x, 0), pos),
7060 ((unsigned HOST_WIDE_INT) 1 << len) - 1);
7061 else
7062 /* Any other cases we can't handle. */
7063 return x;
7065 /* If we couldn't do this for some reason, return the original
7066 expression. */
7067 if (GET_CODE (tem) == CLOBBER)
7068 return x;
7070 return tem;
7073 /* X is a SET which contains an assignment of one object into
7074 a part of another (such as a bit-field assignment, STRICT_LOW_PART,
7075 or certain SUBREGS). If possible, convert it into a series of
7076 logical operations.
7078 We half-heartedly support variable positions, but do not at all
7079 support variable lengths. */
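/* For illustration, assuming SImode, !BITS_BIG_ENDIAN and no further
   folding by the simplify_gen_* routines, an assignment such as

	(set (zero_extract:SI (reg:SI R) (const_int 8) (const_int 4)) Y)

   is rewritten as roughly

	(set (reg:SI R)
	     (ior:SI (and:SI (not:SI (ashift:SI (const_int 255)
						(const_int 4)))
			     (reg:SI R))
		     (ashift:SI (and:SI Y' (const_int 255))
				(const_int 4))))

   where Y' is Y taken in SImode: clear the field, then IOR in the
   masked and shifted source.  */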
7081 static const_rtx
7082 expand_field_assignment (const_rtx x)
7084 rtx inner;
7085 rtx pos; /* Always counts from low bit. */
7086 int len;
7087 rtx mask, cleared, masked;
7088 machine_mode compute_mode;
7090 /* Loop until we find something we can't simplify. */
7091 while (1)
7093 if (GET_CODE (SET_DEST (x)) == STRICT_LOW_PART
7094 && GET_CODE (XEXP (SET_DEST (x), 0)) == SUBREG)
7096 inner = SUBREG_REG (XEXP (SET_DEST (x), 0));
7097 len = GET_MODE_PRECISION (GET_MODE (XEXP (SET_DEST (x), 0)));
7098 pos = GEN_INT (subreg_lsb (XEXP (SET_DEST (x), 0)));
7100 else if (GET_CODE (SET_DEST (x)) == ZERO_EXTRACT
7101 && CONST_INT_P (XEXP (SET_DEST (x), 1)))
7103 inner = XEXP (SET_DEST (x), 0);
7104 len = INTVAL (XEXP (SET_DEST (x), 1));
7105 pos = XEXP (SET_DEST (x), 2);
7107 /* A constant position should stay within the width of INNER. */
7108 if (CONST_INT_P (pos)
7109 && INTVAL (pos) + len > GET_MODE_PRECISION (GET_MODE (inner)))
7110 break;
7112 if (BITS_BIG_ENDIAN)
7114 if (CONST_INT_P (pos))
7115 pos = GEN_INT (GET_MODE_PRECISION (GET_MODE (inner)) - len
7116 - INTVAL (pos));
7117 else if (GET_CODE (pos) == MINUS
7118 && CONST_INT_P (XEXP (pos, 1))
7119 && (INTVAL (XEXP (pos, 1))
7120 == GET_MODE_PRECISION (GET_MODE (inner)) - len))
7121 /* If position is ADJUST - X, new position is X. */
7122 pos = XEXP (pos, 0);
7123 else
7125 HOST_WIDE_INT prec = GET_MODE_PRECISION (GET_MODE (inner));
7126 pos = simplify_gen_binary (MINUS, GET_MODE (pos),
7127 gen_int_mode (prec - len,
7128 GET_MODE (pos)),
7129 pos);
7134 /* A SUBREG between two modes that occupy the same number of words
7135 can be done by moving the SUBREG to the source. */
7136 else if (GET_CODE (SET_DEST (x)) == SUBREG
7137 /* We need SUBREGs to compute nonzero_bits properly. */
7138 && nonzero_sign_valid
7139 && (((GET_MODE_SIZE (GET_MODE (SET_DEST (x)))
7140 + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)
7141 == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (SET_DEST (x))))
7142 + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)))
7144 x = gen_rtx_SET (VOIDmode, SUBREG_REG (SET_DEST (x)),
7145 gen_lowpart
7146 (GET_MODE (SUBREG_REG (SET_DEST (x))),
7147 SET_SRC (x)));
7148 continue;
7150 else
7151 break;
7153 while (GET_CODE (inner) == SUBREG && subreg_lowpart_p (inner))
7154 inner = SUBREG_REG (inner);
7156 compute_mode = GET_MODE (inner);
7158 /* Don't attempt bitwise arithmetic on non-scalar integer modes. */
7159 if (! SCALAR_INT_MODE_P (compute_mode))
7161 machine_mode imode;
7163 /* Don't do anything for vector or complex integral types. */
7164 if (! FLOAT_MODE_P (compute_mode))
7165 break;
7167 /* Try to find an integral mode to pun with. */
7168 imode = mode_for_size (GET_MODE_BITSIZE (compute_mode), MODE_INT, 0);
7169 if (imode == BLKmode)
7170 break;
7172 compute_mode = imode;
7173 inner = gen_lowpart (imode, inner);
7176 /* Compute a mask of LEN bits, if we can do this on the host machine. */
7177 if (len >= HOST_BITS_PER_WIDE_INT)
7178 break;
7180 /* Now compute the equivalent expression. Make a copy of INNER
7181 for the SET_DEST in case it is a MEM into which we will substitute;
7182 we don't want shared RTL in that case. */
7183 mask = gen_int_mode (((unsigned HOST_WIDE_INT) 1 << len) - 1,
7184 compute_mode);
7185 cleared = simplify_gen_binary (AND, compute_mode,
7186 simplify_gen_unary (NOT, compute_mode,
7187 simplify_gen_binary (ASHIFT,
7188 compute_mode,
7189 mask, pos),
7190 compute_mode),
7191 inner);
7192 masked = simplify_gen_binary (ASHIFT, compute_mode,
7193 simplify_gen_binary (
7194 AND, compute_mode,
7195 gen_lowpart (compute_mode, SET_SRC (x)),
7196 mask),
7197 pos);
7199 x = gen_rtx_SET (VOIDmode, copy_rtx (inner),
7200 simplify_gen_binary (IOR, compute_mode,
7201 cleared, masked));
7204 return x;
7207 /* Return an RTX for a reference to LEN bits of INNER. If POS_RTX is nonzero,
7208 it is an RTX that represents the (variable) starting position; otherwise,
7209 POS is the (constant) starting bit position. Both are counted from the LSB.
7211 UNSIGNEDP is nonzero for an unsigned reference and zero for a signed one.
7213 IN_DEST is nonzero if this is a reference in the destination of a SET.
7214 This is used when a ZERO_ or SIGN_EXTRACT isn't needed. If nonzero,
7215 a STRICT_LOW_PART will be used, if zero, ZERO_EXTEND or SIGN_EXTEND will
7216 be used.
7218 IN_COMPARE is nonzero if we are in a COMPARE. This means that a
7219 ZERO_EXTRACT should be built even for bits starting at bit 0.
7221 MODE is the desired mode of the result (if IN_DEST == 0).
7223 The result is an RTX for the extraction or NULL_RTX if the target
7224 can't handle it. */
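/* Two illustrative cases, assuming typical targets: with UNSIGNEDP,
   POS == 0 and neither IN_DEST nor IN_COMPARE, the function returns 0,
   because such a field reference is just an AND and is better left
   alone.  A signed 8-bit field at bit 0 of a (reg:SI R), on targets
   where SImode-to-QImode truncation is a no-op, typically comes back
   as (sign_extend:SI (subreg:QI (reg:SI R) 0)).  */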
7226 static rtx
7227 make_extraction (machine_mode mode, rtx inner, HOST_WIDE_INT pos,
7228 rtx pos_rtx, unsigned HOST_WIDE_INT len, int unsignedp,
7229 int in_dest, int in_compare)
7231 /* This mode describes the size of the storage area
7232 to fetch the overall value from. Within that, we
7233 ignore the POS lowest bits, etc. */
7234 machine_mode is_mode = GET_MODE (inner);
7235 machine_mode inner_mode;
7236 machine_mode wanted_inner_mode;
7237 machine_mode wanted_inner_reg_mode = word_mode;
7238 machine_mode pos_mode = word_mode;
7239 machine_mode extraction_mode = word_mode;
7240 machine_mode tmode = mode_for_size (len, MODE_INT, 1);
7241 rtx new_rtx = 0;
7242 rtx orig_pos_rtx = pos_rtx;
7243 HOST_WIDE_INT orig_pos;
7245 if (pos_rtx && CONST_INT_P (pos_rtx))
7246 pos = INTVAL (pos_rtx), pos_rtx = 0;
7248 if (GET_CODE (inner) == SUBREG && subreg_lowpart_p (inner))
7250 /* If going from (subreg:SI (mem:QI ...)) to (mem:QI ...),
7251 consider just the QI as the memory to extract from.
7252 The subreg adds or removes high bits; its mode is
7253 irrelevant to the meaning of this extraction,
7254 since POS and LEN count from the lsb. */
7255 if (MEM_P (SUBREG_REG (inner)))
7256 is_mode = GET_MODE (SUBREG_REG (inner));
7257 inner = SUBREG_REG (inner);
7259 else if (GET_CODE (inner) == ASHIFT
7260 && CONST_INT_P (XEXP (inner, 1))
7261 && pos_rtx == 0 && pos == 0
7262 && len > UINTVAL (XEXP (inner, 1)))
7264 /* We're extracting the least significant bits of an rtx
7265 (ashift X (const_int C)), where LEN > C. Extract the
7266 least significant (LEN - C) bits of X, giving an rtx
7267 whose mode is MODE, then shift it left C times. */
7268 new_rtx = make_extraction (mode, XEXP (inner, 0),
7269 0, 0, len - INTVAL (XEXP (inner, 1)),
7270 unsignedp, in_dest, in_compare);
7271 if (new_rtx != 0)
7272 return gen_rtx_ASHIFT (mode, new_rtx, XEXP (inner, 1));
7274 else if (GET_CODE (inner) == TRUNCATE)
7275 inner = XEXP (inner, 0);
7277 inner_mode = GET_MODE (inner);
7279 /* See if this can be done without an extraction. We never can if the
7280 width of the field is not the same as that of some integer mode. For
7281 registers, we can only avoid the extraction if the position is at the
7282 low-order bit and this is either not in the destination or we have the
7283 appropriate STRICT_LOW_PART operation available.
7285 For MEM, we can avoid an extract if the field starts on an appropriate
7286 boundary and we can change the mode of the memory reference. */
7288 if (tmode != BLKmode
7289 && ((pos_rtx == 0 && (pos % BITS_PER_WORD) == 0
7290 && !MEM_P (inner)
7291 && (inner_mode == tmode
7292 || !REG_P (inner)
7293 || TRULY_NOOP_TRUNCATION_MODES_P (tmode, inner_mode)
7294 || reg_truncated_to_mode (tmode, inner))
7295 && (! in_dest
7296 || (REG_P (inner)
7297 && have_insn_for (STRICT_LOW_PART, tmode))))
7298 || (MEM_P (inner) && pos_rtx == 0
7299 && (pos
7300 % (STRICT_ALIGNMENT ? GET_MODE_ALIGNMENT (tmode)
7301 : BITS_PER_UNIT)) == 0
7302 /* We can't do this if we are widening INNER_MODE (it
7303 may not be aligned, for one thing). */
7304 && GET_MODE_PRECISION (inner_mode) >= GET_MODE_PRECISION (tmode)
7305 && (inner_mode == tmode
7306 || (! mode_dependent_address_p (XEXP (inner, 0),
7307 MEM_ADDR_SPACE (inner))
7308 && ! MEM_VOLATILE_P (inner))))))
7310 /* If INNER is a MEM, make a new MEM that encompasses just the desired
7311 field. If the original and current mode are the same, we need not
7312 adjust the offset. Otherwise, we do if bytes big endian.
7314 If INNER is not a MEM, get a piece consisting of just the field
7315 of interest (in this case POS % BITS_PER_WORD must be 0). */
7317 if (MEM_P (inner))
7319 HOST_WIDE_INT offset;
7321 /* POS counts from lsb, but make OFFSET count in memory order. */
7322 if (BYTES_BIG_ENDIAN)
7323 offset = (GET_MODE_PRECISION (is_mode) - len - pos) / BITS_PER_UNIT;
7324 else
7325 offset = pos / BITS_PER_UNIT;
7327 new_rtx = adjust_address_nv (inner, tmode, offset);
7329 else if (REG_P (inner))
7331 if (tmode != inner_mode)
7333 /* We can't call gen_lowpart in a DEST since we
7334 always want a SUBREG (see below) and it would sometimes
7335 return a new hard register. */
7336 if (pos || in_dest)
7338 HOST_WIDE_INT final_word = pos / BITS_PER_WORD;
7340 if (WORDS_BIG_ENDIAN
7341 && GET_MODE_SIZE (inner_mode) > UNITS_PER_WORD)
7342 final_word = ((GET_MODE_SIZE (inner_mode)
7343 - GET_MODE_SIZE (tmode))
7344 / UNITS_PER_WORD) - final_word;
7346 final_word *= UNITS_PER_WORD;
7347 if (BYTES_BIG_ENDIAN
7348 && GET_MODE_SIZE (inner_mode) > GET_MODE_SIZE (tmode))
7349 final_word += (GET_MODE_SIZE (inner_mode)
7350 - GET_MODE_SIZE (tmode)) % UNITS_PER_WORD;
7352 /* Avoid creating invalid subregs, for example when
7353 simplifying (x>>32)&255. */
7354 if (!validate_subreg (tmode, inner_mode, inner, final_word))
7355 return NULL_RTX;
7357 new_rtx = gen_rtx_SUBREG (tmode, inner, final_word);
7359 else
7360 new_rtx = gen_lowpart (tmode, inner);
7362 else
7363 new_rtx = inner;
7365 else
7366 new_rtx = force_to_mode (inner, tmode,
7367 len >= HOST_BITS_PER_WIDE_INT
7368 ? ~(unsigned HOST_WIDE_INT) 0
7369 : ((unsigned HOST_WIDE_INT) 1 << len) - 1,
7372 /* If this extraction is going into the destination of a SET,
7373 make a STRICT_LOW_PART unless we made a MEM. */
7375 if (in_dest)
7376 return (MEM_P (new_rtx) ? new_rtx
7377 : (GET_CODE (new_rtx) != SUBREG
7378 ? gen_rtx_CLOBBER (tmode, const0_rtx)
7379 : gen_rtx_STRICT_LOW_PART (VOIDmode, new_rtx)));
7381 if (mode == tmode)
7382 return new_rtx;
7384 if (CONST_SCALAR_INT_P (new_rtx))
7385 return simplify_unary_operation (unsignedp ? ZERO_EXTEND : SIGN_EXTEND,
7386 mode, new_rtx, tmode);
7388 /* If we know that no extraneous bits are set, and that the high
7389 bit is not set, convert the extraction to the cheaper of
7390 sign and zero extension, which are equivalent in these cases. */
7391 if (flag_expensive_optimizations
7392 && (HWI_COMPUTABLE_MODE_P (tmode)
7393 && ((nonzero_bits (new_rtx, tmode)
7394 & ~(((unsigned HOST_WIDE_INT)GET_MODE_MASK (tmode)) >> 1))
7395 == 0)))
7397 rtx temp = gen_rtx_ZERO_EXTEND (mode, new_rtx);
7398 rtx temp1 = gen_rtx_SIGN_EXTEND (mode, new_rtx);
7400 /* Prefer ZERO_EXTENSION, since it gives more information to
7401 backends. */
7402 if (set_src_cost (temp, optimize_this_for_speed_p)
7403 <= set_src_cost (temp1, optimize_this_for_speed_p))
7404 return temp;
7405 return temp1;
7408 /* Otherwise, sign- or zero-extend unless we already are in the
7409 proper mode. */
7411 return (gen_rtx_fmt_e (unsignedp ? ZERO_EXTEND : SIGN_EXTEND,
7412 mode, new_rtx));
7415 /* Unless this is a COMPARE or we have a funny memory reference,
7416 don't do anything with zero-extending field extracts starting at
7417 the low-order bit since they are simple AND operations. */
7418 if (pos_rtx == 0 && pos == 0 && ! in_dest
7419 && ! in_compare && unsignedp)
7420 return 0;
7422 /* If INNER is a MEM, reject this if we would be spanning bytes or
7423 if the position is not a constant and the length is not 1. In all
7424 other cases, we would only be going outside our object in cases when
7425 an original shift would have been undefined. */
7426 if (MEM_P (inner)
7427 && ((pos_rtx == 0 && pos + len > GET_MODE_PRECISION (is_mode))
7428 || (pos_rtx != 0 && len != 1)))
7429 return 0;
7431 enum extraction_pattern pattern = (in_dest ? EP_insv
7432 : unsignedp ? EP_extzv : EP_extv);
7434 /* If INNER is not from memory, we want it to have the mode of a register
7435 extraction pattern's structure operand, or word_mode if there is no
7436 such pattern. The same applies to extraction_mode and pos_mode
7437 and their respective operands.
7439 For memory, assume that the desired extraction_mode and pos_mode
7440 are the same as for a register operation, since at present we don't
7441 have named patterns for aligned memory structures. */
7442 struct extraction_insn insn;
7443 if (get_best_reg_extraction_insn (&insn, pattern,
7444 GET_MODE_BITSIZE (inner_mode), mode))
7446 wanted_inner_reg_mode = insn.struct_mode;
7447 pos_mode = insn.pos_mode;
7448 extraction_mode = insn.field_mode;
7451 /* Never narrow an object, since that might not be safe. */
7453 if (mode != VOIDmode
7454 && GET_MODE_SIZE (extraction_mode) < GET_MODE_SIZE (mode))
7455 extraction_mode = mode;
7457 if (!MEM_P (inner))
7458 wanted_inner_mode = wanted_inner_reg_mode;
7459 else
7461 /* Be careful not to go beyond the extracted object and maintain the
7462 natural alignment of the memory. */
7463 wanted_inner_mode = smallest_mode_for_size (len, MODE_INT);
7464 while (pos % GET_MODE_BITSIZE (wanted_inner_mode) + len
7465 > GET_MODE_BITSIZE (wanted_inner_mode))
7467 wanted_inner_mode = GET_MODE_WIDER_MODE (wanted_inner_mode);
7468 gcc_assert (wanted_inner_mode != VOIDmode);
7472 orig_pos = pos;
7474 if (BITS_BIG_ENDIAN)
7476 /* POS is passed as if BITS_BIG_ENDIAN == 0, so we need to convert it to
7477 BITS_BIG_ENDIAN style. If position is constant, compute new
7478 position. Otherwise, build subtraction.
7479 Note that POS is relative to the mode of the original argument.
7480 If it's a MEM we need to recompute POS relative to that.
7481 However, if we're extracting from (or inserting into) a register,
7482 we want to recompute POS relative to wanted_inner_mode. */
7483 int width = (MEM_P (inner)
7484 ? GET_MODE_BITSIZE (is_mode)
7485 : GET_MODE_BITSIZE (wanted_inner_mode));
7487 if (pos_rtx == 0)
7488 pos = width - len - pos;
7489 else
7490 pos_rtx
7491 = gen_rtx_MINUS (GET_MODE (pos_rtx),
7492 gen_int_mode (width - len, GET_MODE (pos_rtx)),
7493 pos_rtx);
7494 /* POS may be less than 0 now, but we check for that below.
7495 Note that it can only be less than 0 if !MEM_P (inner). */
7498 /* If INNER has a wider mode, and this is a constant extraction, try to
7499 make it smaller and adjust the byte to point to the byte containing
7500 the value. */
7501 if (wanted_inner_mode != VOIDmode
7502 && inner_mode != wanted_inner_mode
7503 && ! pos_rtx
7504 && GET_MODE_SIZE (wanted_inner_mode) < GET_MODE_SIZE (is_mode)
7505 && MEM_P (inner)
7506 && ! mode_dependent_address_p (XEXP (inner, 0), MEM_ADDR_SPACE (inner))
7507 && ! MEM_VOLATILE_P (inner))
7509 int offset = 0;
7511 /* The computations below will be correct if the machine is big
7512 endian in both bits and bytes or little endian in bits and bytes.
7513 If it is mixed, we must adjust. */
7515 /* If bytes are big endian and we had a paradoxical SUBREG, we must
7516 adjust OFFSET to compensate. */
7517 if (BYTES_BIG_ENDIAN
7518 && GET_MODE_SIZE (inner_mode) < GET_MODE_SIZE (is_mode))
7519 offset -= GET_MODE_SIZE (is_mode) - GET_MODE_SIZE (inner_mode);
7521 /* We can now move to the desired byte. */
7522 offset += (pos / GET_MODE_BITSIZE (wanted_inner_mode))
7523 * GET_MODE_SIZE (wanted_inner_mode);
7524 pos %= GET_MODE_BITSIZE (wanted_inner_mode);
7526 if (BYTES_BIG_ENDIAN != BITS_BIG_ENDIAN
7527 && is_mode != wanted_inner_mode)
7528 offset = (GET_MODE_SIZE (is_mode)
7529 - GET_MODE_SIZE (wanted_inner_mode) - offset);
7531 inner = adjust_address_nv (inner, wanted_inner_mode, offset);
7534 /* If INNER is not memory, get it into the proper mode. If we are changing
7535 its mode, POS must be a constant and smaller than the size of the new
7536 mode. */
7537 else if (!MEM_P (inner))
7539 /* On the LHS, don't create paradoxical subregs implicitly truncating
7540 the register unless TRULY_NOOP_TRUNCATION. */
7541 if (in_dest
7542 && !TRULY_NOOP_TRUNCATION_MODES_P (GET_MODE (inner),
7543 wanted_inner_mode))
7544 return NULL_RTX;
7546 if (GET_MODE (inner) != wanted_inner_mode
7547 && (pos_rtx != 0
7548 || orig_pos + len > GET_MODE_BITSIZE (wanted_inner_mode)))
7549 return NULL_RTX;
7551 if (orig_pos < 0)
7552 return NULL_RTX;
7554 inner = force_to_mode (inner, wanted_inner_mode,
7555 pos_rtx
7556 || len + orig_pos >= HOST_BITS_PER_WIDE_INT
7557 ? ~(unsigned HOST_WIDE_INT) 0
7558 : ((((unsigned HOST_WIDE_INT) 1 << len) - 1)
7559 << orig_pos),
7563 /* Adjust mode of POS_RTX, if needed. If we want a wider mode, we
7564 have to zero extend. Otherwise, we can just use a SUBREG. */
7565 if (pos_rtx != 0
7566 && GET_MODE_SIZE (pos_mode) > GET_MODE_SIZE (GET_MODE (pos_rtx)))
7568 rtx temp = simplify_gen_unary (ZERO_EXTEND, pos_mode, pos_rtx,
7569 GET_MODE (pos_rtx));
7571 /* If we know that no extraneous bits are set, and that the high
7572 bit is not set, convert the extraction to the cheaper of
7573 sign and zero extension, which are equivalent in these
7574 cases. */
7575 if (flag_expensive_optimizations
7576 && (HWI_COMPUTABLE_MODE_P (GET_MODE (pos_rtx))
7577 && ((nonzero_bits (pos_rtx, GET_MODE (pos_rtx))
7578 & ~(((unsigned HOST_WIDE_INT)
7579 GET_MODE_MASK (GET_MODE (pos_rtx)))
7580 >> 1))
7581 == 0)))
7583 rtx temp1 = simplify_gen_unary (SIGN_EXTEND, pos_mode, pos_rtx,
7584 GET_MODE (pos_rtx));
7586 /* Prefer ZERO_EXTENSION, since it gives more information to
7587 backends. */
7588 if (set_src_cost (temp1, optimize_this_for_speed_p)
7589 < set_src_cost (temp, optimize_this_for_speed_p))
7590 temp = temp1;
7592 pos_rtx = temp;
7595 /* Make POS_RTX unless we already have it and it is correct. If we don't
7596 have a POS_RTX but we do have an ORIG_POS_RTX, the latter must
7597 be a CONST_INT. */
7598 if (pos_rtx == 0 && orig_pos_rtx != 0 && INTVAL (orig_pos_rtx) == pos)
7599 pos_rtx = orig_pos_rtx;
7601 else if (pos_rtx == 0)
7602 pos_rtx = GEN_INT (pos);
7604 /* Make the required operation. See if we can use existing rtx. */
7605 new_rtx = gen_rtx_fmt_eee (unsignedp ? ZERO_EXTRACT : SIGN_EXTRACT,
7606 extraction_mode, inner, GEN_INT (len), pos_rtx);
7607 if (! in_dest)
7608 new_rtx = gen_lowpart (mode, new_rtx);
7610 return new_rtx;
7613 /* See if X contains an ASHIFT of COUNT or more bits that can be commuted
7614 with any other operations in X. Return X without that shift if so. */
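/* For example, extract_left_shift on

	(plus:SI (ashift:SI X (const_int 3)) (const_int 8))

   with COUNT == 3 yields (plus:SI X (const_int 1)): the shift is
   stripped and the constant is scaled down by the same amount.  With
   (const_int 9) instead, low-order bits would be lost, so 0 is
   returned.  */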
7616 static rtx
7617 extract_left_shift (rtx x, int count)
7619 enum rtx_code code = GET_CODE (x);
7620 machine_mode mode = GET_MODE (x);
7621 rtx tem;
7623 switch (code)
7625 case ASHIFT:
7626 /* This is the shift itself. If it is wide enough, we will return
7627 either the value being shifted if the shift count is equal to
7628 COUNT or a shift for the difference. */
7629 if (CONST_INT_P (XEXP (x, 1))
7630 && INTVAL (XEXP (x, 1)) >= count)
7631 return simplify_shift_const (NULL_RTX, ASHIFT, mode, XEXP (x, 0),
7632 INTVAL (XEXP (x, 1)) - count);
7633 break;
7635 case NEG: case NOT:
7636 if ((tem = extract_left_shift (XEXP (x, 0), count)) != 0)
7637 return simplify_gen_unary (code, mode, tem, mode);
7639 break;
7641 case PLUS: case IOR: case XOR: case AND:
7642 /* If we can safely shift this constant and we find the inner shift,
7643 make a new operation. */
7644 if (CONST_INT_P (XEXP (x, 1))
7645 && (UINTVAL (XEXP (x, 1))
7646 & ((((unsigned HOST_WIDE_INT) 1 << count)) - 1)) == 0
7647 && (tem = extract_left_shift (XEXP (x, 0), count)) != 0)
7649 HOST_WIDE_INT val = INTVAL (XEXP (x, 1)) >> count;
7650 return simplify_gen_binary (code, mode, tem,
7651 gen_int_mode (val, mode));
7653 break;
7655 default:
7656 break;
7659 return 0;
7662 /* Look at the expression rooted at X. Look for expressions
7663 equivalent to ZERO_EXTRACT, SIGN_EXTRACT, ZERO_EXTEND, SIGN_EXTEND.
7664 Form these expressions.
7666 Return the new rtx, usually just X.
7668 Also, for machines like the VAX that don't have logical shift insns,
7669 try to convert logical to arithmetic shift operations in cases where
7670 they are equivalent. This undoes the canonicalizations to logical
7671 shifts done elsewhere.
7673 We try, as much as possible, to re-use rtl expressions to save memory.
7675 IN_CODE says what kind of expression we are processing. Normally, it is
7676 SET. In a memory address (inside a MEM, PLUS or MINUS, the latter two
7677 being kludges), it is MEM. When processing the arguments of a comparison
7678 or a COMPARE against zero, it is COMPARE. */
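/* For instance, with IN_CODE == SET,

	(and:SI (lshiftrt:SI X (const_int 3)) (const_int 7))

   is normally rebuilt as

	(zero_extract:SI X (const_int 3) (const_int 3)),

   i.e. a 3-bit field starting at bit 3 (the exact form can vary with
   the target's extraction patterns; see make_extraction).  */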
7681 make_compound_operation (rtx x, enum rtx_code in_code)
7683 enum rtx_code code = GET_CODE (x);
7684 machine_mode mode = GET_MODE (x);
7685 int mode_width = GET_MODE_PRECISION (mode);
7686 rtx rhs, lhs;
7687 enum rtx_code next_code;
7688 int i, j;
7689 rtx new_rtx = 0;
7690 rtx tem;
7691 const char *fmt;
7693 /* Select the code to be used in recursive calls. Once we are inside an
7694 address, we stay there. If we have a comparison, set to COMPARE,
7695 but once inside, go back to our default of SET. */
7697 next_code = (code == MEM ? MEM
7698 : ((code == PLUS || code == MINUS)
7699 && SCALAR_INT_MODE_P (mode)) ? MEM
7700 : ((code == COMPARE || COMPARISON_P (x))
7701 && XEXP (x, 1) == const0_rtx) ? COMPARE
7702 : in_code == COMPARE ? SET : in_code);
7704 /* Process depending on the code of this operation. If NEW is set
7705 nonzero, it will be returned. */
7707 switch (code)
7709 case ASHIFT:
7710 /* Convert shifts by constants into multiplications if inside
7711 an address. */
7712 if (in_code == MEM && CONST_INT_P (XEXP (x, 1))
7713 && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT
7714 && INTVAL (XEXP (x, 1)) >= 0
7715 && SCALAR_INT_MODE_P (mode))
7717 HOST_WIDE_INT count = INTVAL (XEXP (x, 1));
7718 HOST_WIDE_INT multval = (HOST_WIDE_INT) 1 << count;
7720 new_rtx = make_compound_operation (XEXP (x, 0), next_code);
7721 if (GET_CODE (new_rtx) == NEG)
7723 new_rtx = XEXP (new_rtx, 0);
7724 multval = -multval;
7726 multval = trunc_int_for_mode (multval, mode);
7727 new_rtx = gen_rtx_MULT (mode, new_rtx, gen_int_mode (multval, mode));
7729 break;
7731 case PLUS:
7732 lhs = XEXP (x, 0);
7733 rhs = XEXP (x, 1);
7734 lhs = make_compound_operation (lhs, next_code);
7735 rhs = make_compound_operation (rhs, next_code);
7736 if (GET_CODE (lhs) == MULT && GET_CODE (XEXP (lhs, 0)) == NEG
7737 && SCALAR_INT_MODE_P (mode))
7739 tem = simplify_gen_binary (MULT, mode, XEXP (XEXP (lhs, 0), 0),
7740 XEXP (lhs, 1));
7741 new_rtx = simplify_gen_binary (MINUS, mode, rhs, tem);
7743 else if (GET_CODE (lhs) == MULT
7744 && (CONST_INT_P (XEXP (lhs, 1)) && INTVAL (XEXP (lhs, 1)) < 0))
7746 tem = simplify_gen_binary (MULT, mode, XEXP (lhs, 0),
7747 simplify_gen_unary (NEG, mode,
7748 XEXP (lhs, 1),
7749 mode));
7750 new_rtx = simplify_gen_binary (MINUS, mode, rhs, tem);
7752 else
7754 SUBST (XEXP (x, 0), lhs);
7755 SUBST (XEXP (x, 1), rhs);
7756 goto maybe_swap;
7758 x = gen_lowpart (mode, new_rtx);
7759 goto maybe_swap;
7761 case MINUS:
7762 lhs = XEXP (x, 0);
7763 rhs = XEXP (x, 1);
7764 lhs = make_compound_operation (lhs, next_code);
7765 rhs = make_compound_operation (rhs, next_code);
7766 if (GET_CODE (rhs) == MULT && GET_CODE (XEXP (rhs, 0)) == NEG
7767 && SCALAR_INT_MODE_P (mode))
7769 tem = simplify_gen_binary (MULT, mode, XEXP (XEXP (rhs, 0), 0),
7770 XEXP (rhs, 1));
7771 new_rtx = simplify_gen_binary (PLUS, mode, tem, lhs);
7773 else if (GET_CODE (rhs) == MULT
7774 && (CONST_INT_P (XEXP (rhs, 1)) && INTVAL (XEXP (rhs, 1)) < 0))
7776 tem = simplify_gen_binary (MULT, mode, XEXP (rhs, 0),
7777 simplify_gen_unary (NEG, mode,
7778 XEXP (rhs, 1),
7779 mode));
7780 new_rtx = simplify_gen_binary (PLUS, mode, tem, lhs);
7782 else
7784 SUBST (XEXP (x, 0), lhs);
7785 SUBST (XEXP (x, 1), rhs);
7786 return x;
7788 return gen_lowpart (mode, new_rtx);
7790 case AND:
7791 /* If the second operand is not a constant, we can't do anything
7792 with it. */
7793 if (!CONST_INT_P (XEXP (x, 1)))
7794 break;
7796 /* If the constant is a power of two minus one and the first operand
7797 is a logical right shift, make an extraction. */
7798 if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
7799 && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
7801 new_rtx = make_compound_operation (XEXP (XEXP (x, 0), 0), next_code);
7802 new_rtx = make_extraction (mode, new_rtx, 0, XEXP (XEXP (x, 0), 1), i, 1,
7803 0, in_code == COMPARE);
7806 /* Same as previous, but for (subreg (lshiftrt ...)) in first op. */
7807 else if (GET_CODE (XEXP (x, 0)) == SUBREG
7808 && subreg_lowpart_p (XEXP (x, 0))
7809 && GET_CODE (SUBREG_REG (XEXP (x, 0))) == LSHIFTRT
7810 && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
7812 new_rtx = make_compound_operation (XEXP (SUBREG_REG (XEXP (x, 0)), 0),
7813 next_code);
7814 new_rtx = make_extraction (GET_MODE (SUBREG_REG (XEXP (x, 0))), new_rtx, 0,
7815 XEXP (SUBREG_REG (XEXP (x, 0)), 1), i, 1,
7816 0, in_code == COMPARE);
7818 /* Same as previous, but for (xor/ior (lshiftrt...) (lshiftrt...)). */
7819 else if ((GET_CODE (XEXP (x, 0)) == XOR
7820 || GET_CODE (XEXP (x, 0)) == IOR)
7821 && GET_CODE (XEXP (XEXP (x, 0), 0)) == LSHIFTRT
7822 && GET_CODE (XEXP (XEXP (x, 0), 1)) == LSHIFTRT
7823 && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
7825 /* Apply the distributive law, and then try to make extractions. */
7826 new_rtx = gen_rtx_fmt_ee (GET_CODE (XEXP (x, 0)), mode,
7827 gen_rtx_AND (mode, XEXP (XEXP (x, 0), 0),
7828 XEXP (x, 1)),
7829 gen_rtx_AND (mode, XEXP (XEXP (x, 0), 1),
7830 XEXP (x, 1)));
7831 new_rtx = make_compound_operation (new_rtx, in_code);
7834 /* If we have (and (rotate X C) M) and C is larger than the number
7835 of bits in M, this is an extraction. */
7837 else if (GET_CODE (XEXP (x, 0)) == ROTATE
7838 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
7839 && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0
7840 && i <= INTVAL (XEXP (XEXP (x, 0), 1)))
7842 new_rtx = make_compound_operation (XEXP (XEXP (x, 0), 0), next_code);
7843 new_rtx = make_extraction (mode, new_rtx,
7844 (GET_MODE_PRECISION (mode)
7845 - INTVAL (XEXP (XEXP (x, 0), 1))),
7846 NULL_RTX, i, 1, 0, in_code == COMPARE);
7849 /* On machines without logical shifts, if the operand of the AND is
7850 a logical shift and our mask turns off all the propagated sign
7851 bits, we can replace the logical shift with an arithmetic shift. */
7852 else if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
7853 && !have_insn_for (LSHIFTRT, mode)
7854 && have_insn_for (ASHIFTRT, mode)
7855 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
7856 && INTVAL (XEXP (XEXP (x, 0), 1)) >= 0
7857 && INTVAL (XEXP (XEXP (x, 0), 1)) < HOST_BITS_PER_WIDE_INT
7858 && mode_width <= HOST_BITS_PER_WIDE_INT)
7860 unsigned HOST_WIDE_INT mask = GET_MODE_MASK (mode);
7862 mask >>= INTVAL (XEXP (XEXP (x, 0), 1));
7863 if ((INTVAL (XEXP (x, 1)) & ~mask) == 0)
7864 SUBST (XEXP (x, 0),
7865 gen_rtx_ASHIFTRT (mode,
7866 make_compound_operation
7867 (XEXP (XEXP (x, 0), 0), next_code),
7868 XEXP (XEXP (x, 0), 1)));
7871 /* If the constant is one less than a power of two, this might be
7872 representable by an extraction even if no shift is present.
7873 If it doesn't end up being a ZERO_EXTEND, we will ignore it unless
7874 we are in a COMPARE. */
7875 else if ((i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
7876 new_rtx = make_extraction (mode,
7877 make_compound_operation (XEXP (x, 0),
7878 next_code),
7879 0, NULL_RTX, i, 1, 0, in_code == COMPARE);
7881 /* If we are in a comparison and this is an AND with a power of two,
7882 convert this into the appropriate bit extract. */
7883 else if (in_code == COMPARE
7884 && (i = exact_log2 (UINTVAL (XEXP (x, 1)))) >= 0)
7885 new_rtx = make_extraction (mode,
7886 make_compound_operation (XEXP (x, 0),
7887 next_code),
7888 i, NULL_RTX, 1, 1, 0, 1);
7890 break;
7892 case LSHIFTRT:
7893 /* If the sign bit is known to be zero, replace this with an
7894 arithmetic shift. */
7895 if (have_insn_for (ASHIFTRT, mode)
7896 && ! have_insn_for (LSHIFTRT, mode)
7897 && mode_width <= HOST_BITS_PER_WIDE_INT
7898 && (nonzero_bits (XEXP (x, 0), mode) & (1 << (mode_width - 1))) == 0)
7900 new_rtx = gen_rtx_ASHIFTRT (mode,
7901 make_compound_operation (XEXP (x, 0),
7902 next_code),
7903 XEXP (x, 1));
7904 break;
7907 /* ... fall through ... */
7909 case ASHIFTRT:
7910 lhs = XEXP (x, 0);
7911 rhs = XEXP (x, 1);
7913 /* If we have (ashiftrt (ashift foo C1) C2) with C2 >= C1,
7914 this is a SIGN_EXTRACT. */
7915 if (CONST_INT_P (rhs)
7916 && GET_CODE (lhs) == ASHIFT
7917 && CONST_INT_P (XEXP (lhs, 1))
7918 && INTVAL (rhs) >= INTVAL (XEXP (lhs, 1))
7919 && INTVAL (XEXP (lhs, 1)) >= 0
7920 && INTVAL (rhs) < mode_width)
7922 new_rtx = make_compound_operation (XEXP (lhs, 0), next_code);
7923 new_rtx = make_extraction (mode, new_rtx,
7924 INTVAL (rhs) - INTVAL (XEXP (lhs, 1)),
7925 NULL_RTX, mode_width - INTVAL (rhs),
7926 code == LSHIFTRT, 0, in_code == COMPARE);
7927 break;
7930 /* See if we have operations between an ASHIFTRT and an ASHIFT.
7931 If so, try to merge the shifts into a SIGN_EXTEND. We could
7932 also do this for some cases of SIGN_EXTRACT, but it doesn't
7933 seem worth the effort; the case checked for occurs on Alpha. */
7935 if (!OBJECT_P (lhs)
7936 && ! (GET_CODE (lhs) == SUBREG
7937 && (OBJECT_P (SUBREG_REG (lhs))))
7938 && CONST_INT_P (rhs)
7939 && INTVAL (rhs) < HOST_BITS_PER_WIDE_INT
7940 && INTVAL (rhs) < mode_width
7941 && (new_rtx = extract_left_shift (lhs, INTVAL (rhs))) != 0)
7942 new_rtx = make_extraction (mode, make_compound_operation (new_rtx, next_code),
7943 0, NULL_RTX, mode_width - INTVAL (rhs),
7944 code == LSHIFTRT, 0, in_code == COMPARE);
7946 break;
7948 case SUBREG:
7949 /* Call ourselves recursively on the inner expression. If we are
7950 narrowing the object and it has a different RTL code from
7951 what it originally did, do this SUBREG as a force_to_mode. */
7953 rtx inner = SUBREG_REG (x), simplified;
7954 enum rtx_code subreg_code = in_code;
7956 /* If in_code is COMPARE, it isn't always safe to pass it through
7957 to the recursive make_compound_operation call. */
7958 if (subreg_code == COMPARE
7959 && (!subreg_lowpart_p (x)
7960 || GET_CODE (inner) == SUBREG
7961 /* (subreg:SI (and:DI (reg:DI) (const_int 0x800000000)) 0)
7962 is (const_int 0), rather than
7963 (subreg:SI (lshiftrt:DI (reg:DI) (const_int 35)) 0). */
7964 || (GET_CODE (inner) == AND
7965 && CONST_INT_P (XEXP (inner, 1))
7966 && GET_MODE_SIZE (mode) < GET_MODE_SIZE (GET_MODE (inner))
7967 && exact_log2 (UINTVAL (XEXP (inner, 1)))
7968 >= GET_MODE_BITSIZE (mode))))
7969 subreg_code = SET;
7971 tem = make_compound_operation (inner, subreg_code);
7973 simplified
7974 = simplify_subreg (mode, tem, GET_MODE (inner), SUBREG_BYTE (x));
7975 if (simplified)
7976 tem = simplified;
7978 if (GET_CODE (tem) != GET_CODE (inner)
7979 && GET_MODE_SIZE (mode) < GET_MODE_SIZE (GET_MODE (inner))
7980 && subreg_lowpart_p (x))
7982 rtx newer
7983 = force_to_mode (tem, mode, ~(unsigned HOST_WIDE_INT) 0, 0);
7985 /* If we have something other than a SUBREG, we might have
7986 done an expansion, so rerun ourselves. */
7987 if (GET_CODE (newer) != SUBREG)
7988 newer = make_compound_operation (newer, in_code);
7990 /* force_to_mode can expand compounds. If it just re-expanded the
7991 compound, use gen_lowpart to convert to the desired mode. */
7992 if (rtx_equal_p (newer, x)
7993 /* Likewise if it re-expanded the compound only partially.
7994 This happens for SUBREG of ZERO_EXTRACT if they extract
7995 the same number of bits. */
7996 || (GET_CODE (newer) == SUBREG
7997 && (GET_CODE (SUBREG_REG (newer)) == LSHIFTRT
7998 || GET_CODE (SUBREG_REG (newer)) == ASHIFTRT)
7999 && GET_CODE (inner) == AND
8000 && rtx_equal_p (SUBREG_REG (newer), XEXP (inner, 0))))
8001 return gen_lowpart (GET_MODE (x), tem);
8003 return newer;
8006 if (simplified)
8007 return tem;
8009 break;
8011 default:
8012 break;
8015 if (new_rtx)
8017 x = gen_lowpart (mode, new_rtx);
8018 code = GET_CODE (x);
8021 /* Now recursively process each operand of this operation. We need to
8022 handle ZERO_EXTEND specially so that we don't lose track of the
8023 inner mode. */
8024 if (GET_CODE (x) == ZERO_EXTEND)
8026 new_rtx = make_compound_operation (XEXP (x, 0), next_code);
8027 tem = simplify_const_unary_operation (ZERO_EXTEND, GET_MODE (x),
8028 new_rtx, GET_MODE (XEXP (x, 0)));
8029 if (tem)
8030 return tem;
8031 SUBST (XEXP (x, 0), new_rtx);
8032 return x;
8035 fmt = GET_RTX_FORMAT (code);
8036 for (i = 0; i < GET_RTX_LENGTH (code); i++)
8037 if (fmt[i] == 'e')
8039 new_rtx = make_compound_operation (XEXP (x, i), next_code);
8040 SUBST (XEXP (x, i), new_rtx);
8042 else if (fmt[i] == 'E')
8043 for (j = 0; j < XVECLEN (x, i); j++)
8045 new_rtx = make_compound_operation (XVECEXP (x, i, j), next_code);
8046 SUBST (XVECEXP (x, i, j), new_rtx);
8049 maybe_swap:
8050 /* If this is a commutative operation, the changes to the operands
8051 may have made it noncanonical. */
8052 if (COMMUTATIVE_ARITH_P (x)
8053 && swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
8055 tem = XEXP (x, 0);
8056 SUBST (XEXP (x, 0), XEXP (x, 1));
8057 SUBST (XEXP (x, 1), tem);
8060 return x;
8063 /* Given M see if it is a value that would select a field of bits
8064 within an item, but not the entire word. Return -1 if not.
8065 Otherwise, return the starting position of the field, where 0 is the
8066 low-order bit.
8068 *PLEN is set to the length of the field. */
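/* E.g. for M == 0xf0 this returns 4 with *PLEN == 4 (a 4-bit field
   starting at bit 4), while M == 0xa0 returns -1 because the set bits
   are not contiguous.  */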
8070 static int
8071 get_pos_from_mask (unsigned HOST_WIDE_INT m, unsigned HOST_WIDE_INT *plen)
8073 /* Get the bit number of the first 1 bit from the right, -1 if none. */
8074 int pos = m ? ctz_hwi (m) : -1;
8075 int len = 0;
8077 if (pos >= 0)
8078 /* Now shift off the low-order zero bits and see if we have a
8079 power of two minus 1. */
8080 len = exact_log2 ((m >> pos) + 1);
8082 if (len <= 0)
8083 pos = -1;
8085 *plen = len;
8086 return pos;
8089 /* If X refers to a register that equals REG in value, replace these
8090 references with REG. */
8091 static rtx
8092 canon_reg_for_combine (rtx x, rtx reg)
8094 rtx op0, op1, op2;
8095 const char *fmt;
8096 int i;
8097 bool copied;
8099 enum rtx_code code = GET_CODE (x);
8100 switch (GET_RTX_CLASS (code))
8102 case RTX_UNARY:
8103 op0 = canon_reg_for_combine (XEXP (x, 0), reg);
8104 if (op0 != XEXP (x, 0))
8105 return simplify_gen_unary (GET_CODE (x), GET_MODE (x), op0,
8106 GET_MODE (reg));
8107 break;
8109 case RTX_BIN_ARITH:
8110 case RTX_COMM_ARITH:
8111 op0 = canon_reg_for_combine (XEXP (x, 0), reg);
8112 op1 = canon_reg_for_combine (XEXP (x, 1), reg);
8113 if (op0 != XEXP (x, 0) || op1 != XEXP (x, 1))
8114 return simplify_gen_binary (GET_CODE (x), GET_MODE (x), op0, op1);
8115 break;
8117 case RTX_COMPARE:
8118 case RTX_COMM_COMPARE:
8119 op0 = canon_reg_for_combine (XEXP (x, 0), reg);
8120 op1 = canon_reg_for_combine (XEXP (x, 1), reg);
8121 if (op0 != XEXP (x, 0) || op1 != XEXP (x, 1))
8122 return simplify_gen_relational (GET_CODE (x), GET_MODE (x),
8123 GET_MODE (op0), op0, op1);
8124 break;
8126 case RTX_TERNARY:
8127 case RTX_BITFIELD_OPS:
8128 op0 = canon_reg_for_combine (XEXP (x, 0), reg);
8129 op1 = canon_reg_for_combine (XEXP (x, 1), reg);
8130 op2 = canon_reg_for_combine (XEXP (x, 2), reg);
8131 if (op0 != XEXP (x, 0) || op1 != XEXP (x, 1) || op2 != XEXP (x, 2))
8132 return simplify_gen_ternary (GET_CODE (x), GET_MODE (x),
8133 GET_MODE (op0), op0, op1, op2);
8135 case RTX_OBJ:
8136 if (REG_P (x))
8138 if (rtx_equal_p (get_last_value (reg), x)
8139 || rtx_equal_p (reg, get_last_value (x)))
8140 return reg;
8141 else
8142 break;
8145 /* fall through */
8147 default:
8148 fmt = GET_RTX_FORMAT (code);
8149 copied = false;
8150 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
8151 if (fmt[i] == 'e')
8153 rtx op = canon_reg_for_combine (XEXP (x, i), reg);
8154 if (op != XEXP (x, i))
8156 if (!copied)
8158 copied = true;
8159 x = copy_rtx (x);
8161 XEXP (x, i) = op;
8164 else if (fmt[i] == 'E')
8166 int j;
8167 for (j = 0; j < XVECLEN (x, i); j++)
8169 rtx op = canon_reg_for_combine (XVECEXP (x, i, j), reg);
8170 if (op != XVECEXP (x, i, j))
8172 if (!copied)
8174 copied = true;
8175 x = copy_rtx (x);
8177 XVECEXP (x, i, j) = op;
8182 break;
8185 return x;
8188 /* Return X converted to MODE. If the value is already truncated to
8189 MODE we can just return a subreg even though in the general case we
8190 would need an explicit truncation. */
8192 static rtx
8193 gen_lowpart_or_truncate (machine_mode mode, rtx x)
8195 if (!CONST_INT_P (x)
8196 && GET_MODE_SIZE (mode) < GET_MODE_SIZE (GET_MODE (x))
8197 && !TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (x))
8198 && !(REG_P (x) && reg_truncated_to_mode (mode, x)))
8200 /* Bit-cast X into an integer mode. */
8201 if (!SCALAR_INT_MODE_P (GET_MODE (x)))
8202 x = gen_lowpart (int_mode_for_mode (GET_MODE (x)), x);
8203 x = simplify_gen_unary (TRUNCATE, int_mode_for_mode (mode),
8204 x, GET_MODE (x));
8207 return gen_lowpart (mode, x);
8210 /* See if X can be simplified knowing that we will only refer to it in
8211 MODE and will only refer to those bits that are nonzero in MASK.
8212 If other bits are being computed or if masking operations are done
8213 that select a superset of the bits in MASK, they can sometimes be
8214 ignored.
8216 Return a possibly simplified expression, but always convert X to
8217 MODE. If X is a CONST_INT, AND the CONST_INT with MASK.
8219 If JUST_SELECT is nonzero, don't optimize by noticing that bits in MASK
8220 are all off in X. This is used when X will be complemented, by either
8221 NOT, NEG, or XOR. */
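/* A small example of the effect: asking for only the low four bits of

	(and:SI Y (const_int 255))

   with MASK == 0x0f folds the constant to 0x0f, notices that it now
   matches MASK exactly, and therefore drops the AND, returning just Y
   (converted to MODE).  */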
8223 static rtx
8224 force_to_mode (rtx x, machine_mode mode, unsigned HOST_WIDE_INT mask,
8225 int just_select)
8227 enum rtx_code code = GET_CODE (x);
8228 int next_select = just_select || code == XOR || code == NOT || code == NEG;
8229 machine_mode op_mode;
8230 unsigned HOST_WIDE_INT fuller_mask, nonzero;
8231 rtx op0, op1, temp;
8233 /* If this is a CALL or ASM_OPERANDS, don't do anything. Some of the
8234 code below will do the wrong thing since the mode of such an
8235 expression is VOIDmode.
8237 Also do nothing if X is a CLOBBER; this can happen if X was
8238 the return value from a call to gen_lowpart. */
8239 if (code == CALL || code == ASM_OPERANDS || code == CLOBBER)
8240 return x;
8242 /* We want to perform the operation in its present mode unless we know
8243 that the operation is valid in MODE, in which case we do the operation
8244 in MODE. */
8245 op_mode = ((GET_MODE_CLASS (mode) == GET_MODE_CLASS (GET_MODE (x))
8246 && have_insn_for (code, mode))
8247 ? mode : GET_MODE (x));
8249 /* It is not valid to do a right-shift in a narrower mode
8250 than the one it came in with. */
8251 if ((code == LSHIFTRT || code == ASHIFTRT)
8252 && GET_MODE_PRECISION (mode) < GET_MODE_PRECISION (GET_MODE (x)))
8253 op_mode = GET_MODE (x);
8255 /* Truncate MASK to fit OP_MODE. */
8256 if (op_mode)
8257 mask &= GET_MODE_MASK (op_mode);
8259 /* When we have an arithmetic operation, or a shift whose count we
8260 do not know, we need to assume that all bits up to the highest-order
8261 bit in MASK will be needed. This is how we form such a mask. */
8262 if (mask & ((unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)))
8263 fuller_mask = ~(unsigned HOST_WIDE_INT) 0;
8264 else
8265 fuller_mask = (((unsigned HOST_WIDE_INT) 1 << (floor_log2 (mask) + 1))
8266 - 1);
8268 /* Determine what bits of X are guaranteed to be (non)zero. */
8269 nonzero = nonzero_bits (x, mode);
8271 /* If none of the bits in X are needed, return a zero. */
8272 if (!just_select && (nonzero & mask) == 0 && !side_effects_p (x))
8273 x = const0_rtx;
8275 /* If X is a CONST_INT, return a new one. Do this here since the
8276 test below will fail. */
8277 if (CONST_INT_P (x))
8279 if (SCALAR_INT_MODE_P (mode))
8280 return gen_int_mode (INTVAL (x) & mask, mode);
8281 else
8283 x = GEN_INT (INTVAL (x) & mask);
8284 return gen_lowpart_common (mode, x);
8288 /* If X is narrower than MODE and we want all the bits in X's mode, just
8289 get X in the proper mode. */
8290 if (GET_MODE_SIZE (GET_MODE (x)) < GET_MODE_SIZE (mode)
8291 && (GET_MODE_MASK (GET_MODE (x)) & ~mask) == 0)
8292 return gen_lowpart (mode, x);
8294 /* We can ignore the effect of a SUBREG if it narrows the mode or
8295 if the constant masks to zero all the bits the mode doesn't have. */
8296 if (GET_CODE (x) == SUBREG
8297 && subreg_lowpart_p (x)
8298 && ((GET_MODE_SIZE (GET_MODE (x))
8299 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x))))
8300 || (0 == (mask
8301 & GET_MODE_MASK (GET_MODE (x))
8302 & ~GET_MODE_MASK (GET_MODE (SUBREG_REG (x)))))))
8303 return force_to_mode (SUBREG_REG (x), mode, mask, next_select);
8305 /* The arithmetic simplifications here only work for scalar integer modes. */
8306 if (!SCALAR_INT_MODE_P (mode) || !SCALAR_INT_MODE_P (GET_MODE (x)))
8307 return gen_lowpart_or_truncate (mode, x);
8309 switch (code)
8311 case CLOBBER:
8312 /* If X is a (clobber (const_int)), return it since we know we are
8313 generating something that won't match. */
8314 return x;
8316 case SIGN_EXTEND:
8317 case ZERO_EXTEND:
8318 case ZERO_EXTRACT:
8319 case SIGN_EXTRACT:
8320 x = expand_compound_operation (x);
8321 if (GET_CODE (x) != code)
8322 return force_to_mode (x, mode, mask, next_select);
8323 break;
8325 case TRUNCATE:
8326 /* Similarly for a truncate. */
8327 return force_to_mode (XEXP (x, 0), mode, mask, next_select);
8329 case AND:
8330 /* If this is an AND with a constant, convert it into an AND
8331 whose constant is the AND of that constant with MASK. If it
8332 remains an AND of MASK, delete it since it is redundant. */
8334 if (CONST_INT_P (XEXP (x, 1)))
8336 x = simplify_and_const_int (x, op_mode, XEXP (x, 0),
8337 mask & INTVAL (XEXP (x, 1)));
8339 /* If X is still an AND, see if it is an AND with a mask that
8340 is just some low-order bits. If so, and it is MASK, we don't
8341 need it. */
8343 if (GET_CODE (x) == AND && CONST_INT_P (XEXP (x, 1))
8344 && ((INTVAL (XEXP (x, 1)) & GET_MODE_MASK (GET_MODE (x)))
8345 == mask))
8346 x = XEXP (x, 0);
8348 /* If it remains an AND, try making another AND with the bits
8349 in the mode mask that aren't in MASK turned on. If the
8350 constant in the AND is wide enough, this might make a
8351 cheaper constant. */
8353 if (GET_CODE (x) == AND && CONST_INT_P (XEXP (x, 1))
8354 && GET_MODE_MASK (GET_MODE (x)) != mask
8355 && HWI_COMPUTABLE_MODE_P (GET_MODE (x)))
8357 unsigned HOST_WIDE_INT cval
8358 = UINTVAL (XEXP (x, 1))
8359 | (GET_MODE_MASK (GET_MODE (x)) & ~mask);
8360 rtx y;
8362 y = simplify_gen_binary (AND, GET_MODE (x), XEXP (x, 0),
8363 gen_int_mode (cval, GET_MODE (x)));
8364 if (set_src_cost (y, optimize_this_for_speed_p)
8365 < set_src_cost (x, optimize_this_for_speed_p))
8366 x = y;
8369 break;
8372 goto binop;
8374 case PLUS:
8375 /* In (and (plus FOO C1) M), if M is a mask that just turns off
8376 low-order bits (as in an alignment operation) and FOO is already
8377 aligned to that boundary, mask C1 to that boundary as well.
8378 This may eliminate that PLUS and, later, the AND. */
8381 unsigned int width = GET_MODE_PRECISION (mode);
8382 unsigned HOST_WIDE_INT smask = mask;
8384 /* If MODE is narrower than HOST_WIDE_INT and mask is a negative
8385 number, sign extend it. */
8387 if (width < HOST_BITS_PER_WIDE_INT
8388 && (smask & (HOST_WIDE_INT_1U << (width - 1))) != 0)
8389 smask |= HOST_WIDE_INT_M1U << width;
8391 if (CONST_INT_P (XEXP (x, 1))
8392 && exact_log2 (- smask) >= 0
8393 && (nonzero_bits (XEXP (x, 0), mode) & ~smask) == 0
8394 && (INTVAL (XEXP (x, 1)) & ~smask) != 0)
8395 return force_to_mode (plus_constant (GET_MODE (x), XEXP (x, 0),
8396 (INTVAL (XEXP (x, 1)) & smask)),
8397 mode, smask, next_select);
8400 /* ... fall through ... */
8402 case MULT:
8403 /* For PLUS, MINUS and MULT, we need any bits less significant than the
8404 most significant bit in MASK since carries from those bits will
8405 affect the bits we are interested in. */
8406 mask = fuller_mask;
8407 goto binop;
8409 case MINUS:
8410 /* If X is (minus C Y) where C's least set bit is larger than any bit
8411 in the mask, then we may replace with (neg Y). */
8412 if (CONST_INT_P (XEXP (x, 0))
8413 && ((UINTVAL (XEXP (x, 0)) & -UINTVAL (XEXP (x, 0))) > mask))
8415 x = simplify_gen_unary (NEG, GET_MODE (x), XEXP (x, 1),
8416 GET_MODE (x));
8417 return force_to_mode (x, mode, mask, next_select);
8420 /* Similarly, if C contains every bit in the fuller_mask, then we may
8421 replace with (not Y). */
8422 if (CONST_INT_P (XEXP (x, 0))
8423 && ((UINTVAL (XEXP (x, 0)) | fuller_mask) == UINTVAL (XEXP (x, 0))))
8425 x = simplify_gen_unary (NOT, GET_MODE (x),
8426 XEXP (x, 1), GET_MODE (x));
8427 return force_to_mode (x, mode, mask, next_select);
8430 mask = fuller_mask;
8431 goto binop;
8433 case IOR:
8434 case XOR:
8435 /* If X is (ior (lshiftrt FOO C1) C2), try to commute the IOR and
8436 LSHIFTRT so we end up with an (and (lshiftrt (ior ...) ...) ...)
8437 operation which may be a bitfield extraction. Ensure that the
8438 constant we form is not wider than the mode of X. */
8440 if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
8441 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
8442 && INTVAL (XEXP (XEXP (x, 0), 1)) >= 0
8443 && INTVAL (XEXP (XEXP (x, 0), 1)) < HOST_BITS_PER_WIDE_INT
8444 && CONST_INT_P (XEXP (x, 1))
8445 && ((INTVAL (XEXP (XEXP (x, 0), 1))
8446 + floor_log2 (INTVAL (XEXP (x, 1))))
8447 < GET_MODE_PRECISION (GET_MODE (x)))
8448 && (UINTVAL (XEXP (x, 1))
8449 & ~nonzero_bits (XEXP (x, 0), GET_MODE (x))) == 0)
8451 temp = gen_int_mode ((INTVAL (XEXP (x, 1)) & mask)
8452 << INTVAL (XEXP (XEXP (x, 0), 1)),
8453 GET_MODE (x));
8454 temp = simplify_gen_binary (GET_CODE (x), GET_MODE (x),
8455 XEXP (XEXP (x, 0), 0), temp);
8456 x = simplify_gen_binary (LSHIFTRT, GET_MODE (x), temp,
8457 XEXP (XEXP (x, 0), 1));
8458 return force_to_mode (x, mode, mask, next_select);
8461 binop:
8462 /* For most binary operations, just propagate into the operation and
8463 change the mode if we have an operation of that mode. */
8465 op0 = force_to_mode (XEXP (x, 0), mode, mask, next_select);
8466 op1 = force_to_mode (XEXP (x, 1), mode, mask, next_select);
8468 /* If we ended up truncating both operands, truncate the result of the
8469 operation instead. */
8470 if (GET_CODE (op0) == TRUNCATE
8471 && GET_CODE (op1) == TRUNCATE)
8473 op0 = XEXP (op0, 0);
8474 op1 = XEXP (op1, 0);
8477 op0 = gen_lowpart_or_truncate (op_mode, op0);
8478 op1 = gen_lowpart_or_truncate (op_mode, op1);
8480 if (op_mode != GET_MODE (x) || op0 != XEXP (x, 0) || op1 != XEXP (x, 1))
8481 x = simplify_gen_binary (code, op_mode, op0, op1);
8482 break;
8484 case ASHIFT:
8485 /* For left shifts, do the same, but just for the first operand.
8486 However, we cannot do anything with shifts where we cannot
8487 guarantee that the counts are smaller than the size of the mode
8488 because such a count will have a different meaning in a
8489 wider mode. */
8491 if (! (CONST_INT_P (XEXP (x, 1))
8492 && INTVAL (XEXP (x, 1)) >= 0
8493 && INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (mode))
8494 && ! (GET_MODE (XEXP (x, 1)) != VOIDmode
8495 && (nonzero_bits (XEXP (x, 1), GET_MODE (XEXP (x, 1)))
8496 < (unsigned HOST_WIDE_INT) GET_MODE_PRECISION (mode))))
8497 break;
8499 /* If the shift count is a constant and we can do arithmetic in
8500 the mode of the shift, refine which bits we need. Otherwise, use the
8501 conservative form of the mask. */
8502 if (CONST_INT_P (XEXP (x, 1))
8503 && INTVAL (XEXP (x, 1)) >= 0
8504 && INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (op_mode)
8505 && HWI_COMPUTABLE_MODE_P (op_mode))
8506 mask >>= INTVAL (XEXP (x, 1));
8507 else
8508 mask = fuller_mask;
8510 op0 = gen_lowpart_or_truncate (op_mode,
8511 force_to_mode (XEXP (x, 0), op_mode,
8512 mask, next_select));
8514 if (op_mode != GET_MODE (x) || op0 != XEXP (x, 0))
8515 x = simplify_gen_binary (code, op_mode, op0, XEXP (x, 1));
8516 break;
8518 case LSHIFTRT:
8519 /* Here we can only do something if the shift count is a constant,
8520 this shift constant is valid for the host, and we can do arithmetic
8521 in OP_MODE. */
8523 if (CONST_INT_P (XEXP (x, 1))
8524 && INTVAL (XEXP (x, 1)) >= 0
8525 && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT
8526 && HWI_COMPUTABLE_MODE_P (op_mode))
8528 rtx inner = XEXP (x, 0);
8529 unsigned HOST_WIDE_INT inner_mask;
8531 /* Select the mask of the bits we need for the shift operand. */
8532 inner_mask = mask << INTVAL (XEXP (x, 1));
8534 /* We can only change the mode of the shift if we can do arithmetic
8535 in the mode of the shift and INNER_MASK is no wider than the
8536 width of X's mode. */
8537 if ((inner_mask & ~GET_MODE_MASK (GET_MODE (x))) != 0)
8538 op_mode = GET_MODE (x);
8540 inner = force_to_mode (inner, op_mode, inner_mask, next_select);
8542 if (GET_MODE (x) != op_mode || inner != XEXP (x, 0))
8543 x = simplify_gen_binary (LSHIFTRT, op_mode, inner, XEXP (x, 1));
8546 /* If we have (and (lshiftrt FOO C1) C2) where the combination of the
8547 shift and AND produces only copies of the sign bit (C2 is one less
8548 than a power of two), we can do this with just a shift. */
8550 if (GET_CODE (x) == LSHIFTRT
8551 && CONST_INT_P (XEXP (x, 1))
8552 /* The shift puts one of the sign bit copies in the least significant
8553 bit. */
8554 && ((INTVAL (XEXP (x, 1))
8555 + num_sign_bit_copies (XEXP (x, 0), GET_MODE (XEXP (x, 0))))
8556 >= GET_MODE_PRECISION (GET_MODE (x)))
8557 && exact_log2 (mask + 1) >= 0
8558 /* Number of bits left after the shift must be more than the mask
8559 needs. */
8560 && ((INTVAL (XEXP (x, 1)) + exact_log2 (mask + 1))
8561 <= GET_MODE_PRECISION (GET_MODE (x)))
8562 /* Must be more sign bit copies than the mask needs. */
8563 && ((int) num_sign_bit_copies (XEXP (x, 0), GET_MODE (XEXP (x, 0)))
8564 >= exact_log2 (mask + 1)))
8565 x = simplify_gen_binary (LSHIFTRT, GET_MODE (x), XEXP (x, 0),
8566 GEN_INT (GET_MODE_PRECISION (GET_MODE (x))
8567 - exact_log2 (mask + 1)));
8569 goto shiftrt;
8571 case ASHIFTRT:
8572 /* If we are just looking for the sign bit, we don't need this shift at
8573 all, even if it has a variable count. */
8574 if (val_signbit_p (GET_MODE (x), mask))
8575 return force_to_mode (XEXP (x, 0), mode, mask, next_select);
8577 /* If this is a shift by a constant, get a mask that contains those bits
8578 that are not copies of the sign bit. We then have two cases: If
8579 MASK only includes those bits, this can be a logical shift, which may
8580 allow simplifications. If MASK is a single-bit field not within
8581 those bits, we are requesting a copy of the sign bit and hence can
8582 shift the sign bit to the appropriate location. */
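/* For instance, for SImode (ashiftrt FOO 24) the non-sign-copy bits are
   0xff.  With MASK 0x0f the shift can become (lshiftrt FOO 24); with the
   single-bit MASK 0x100, which lies among the sign copies, the sign bit
   is instead moved into place by (lshiftrt FOO 23).  */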
8584 if (CONST_INT_P (XEXP (x, 1)) && INTVAL (XEXP (x, 1)) >= 0
8585 && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT)
8587 int i;
8589 /* If the considered data is wider than HOST_WIDE_INT, we can't
8590 represent a mask for all its bits in a single scalar.
8591 But we only care about the lower bits, so calculate these. */
8593 if (GET_MODE_PRECISION (GET_MODE (x)) > HOST_BITS_PER_WIDE_INT)
8595 nonzero = ~(unsigned HOST_WIDE_INT) 0;
8597 /* GET_MODE_PRECISION (GET_MODE (x)) - INTVAL (XEXP (x, 1))
8598 is the number of bits a full-width mask would have set.
8599 We need only shift if these are fewer than nonzero can
8600 hold. If not, we must keep all bits set in nonzero. */
8602 if (GET_MODE_PRECISION (GET_MODE (x)) - INTVAL (XEXP (x, 1))
8603 < HOST_BITS_PER_WIDE_INT)
8604 nonzero >>= INTVAL (XEXP (x, 1))
8605 + HOST_BITS_PER_WIDE_INT
8606 - GET_MODE_PRECISION (GET_MODE (x));
8608 else
8610 nonzero = GET_MODE_MASK (GET_MODE (x));
8611 nonzero >>= INTVAL (XEXP (x, 1));
8614 if ((mask & ~nonzero) == 0)
8616 x = simplify_shift_const (NULL_RTX, LSHIFTRT, GET_MODE (x),
8617 XEXP (x, 0), INTVAL (XEXP (x, 1)));
8618 if (GET_CODE (x) != ASHIFTRT)
8619 return force_to_mode (x, mode, mask, next_select);
8622 else if ((i = exact_log2 (mask)) >= 0)
8624 x = simplify_shift_const
8625 (NULL_RTX, LSHIFTRT, GET_MODE (x), XEXP (x, 0),
8626 GET_MODE_PRECISION (GET_MODE (x)) - 1 - i);
8628 if (GET_CODE (x) != ASHIFTRT)
8629 return force_to_mode (x, mode, mask, next_select);
8633 /* If MASK is 1, convert this to an LSHIFTRT. This can be done
8634 even if the shift count isn't a constant. */
8635 if (mask == 1)
8636 x = simplify_gen_binary (LSHIFTRT, GET_MODE (x),
8637 XEXP (x, 0), XEXP (x, 1));
8639 shiftrt:
8641 /* If this is a zero- or sign-extension operation that just affects bits
8642 we don't care about, remove it. Be sure the call above returned
8643 something that is still a shift. */
8645 if ((GET_CODE (x) == LSHIFTRT || GET_CODE (x) == ASHIFTRT)
8646 && CONST_INT_P (XEXP (x, 1))
8647 && INTVAL (XEXP (x, 1)) >= 0
8648 && (INTVAL (XEXP (x, 1))
8649 <= GET_MODE_PRECISION (GET_MODE (x)) - (floor_log2 (mask) + 1))
8650 && GET_CODE (XEXP (x, 0)) == ASHIFT
8651 && XEXP (XEXP (x, 0), 1) == XEXP (x, 1))
8652 return force_to_mode (XEXP (XEXP (x, 0), 0), mode, mask,
8653 next_select);
8655 break;
8657 case ROTATE:
8658 case ROTATERT:
8659 /* If the shift count is constant and we can do computations
8660 in the mode of X, compute where the bits we care about are.
8661 Otherwise, we can't do anything. Don't change the mode of
8662 the shift or propagate MODE into the shift, though. */
8663 if (CONST_INT_P (XEXP (x, 1))
8664 && INTVAL (XEXP (x, 1)) >= 0)
8666 temp = simplify_binary_operation (code == ROTATE ? ROTATERT : ROTATE,
8667 GET_MODE (x),
8668 gen_int_mode (mask, GET_MODE (x)),
8669 XEXP (x, 1));
8670 if (temp && CONST_INT_P (temp))
8671 x = simplify_gen_binary (code, GET_MODE (x),
8672 force_to_mode (XEXP (x, 0), GET_MODE (x),
8673 INTVAL (temp), next_select),
8674 XEXP (x, 1));
8676 break;
8678 case NEG:
8679 /* If we just want the low-order bit, the NEG isn't needed since it
8680 won't change the low-order bit. */
8681 if (mask == 1)
8682 return force_to_mode (XEXP (x, 0), mode, mask, just_select);
8684 /* We need any bits less significant than the most significant bit in
8685 MASK since carries from those bits will affect the bits we are
8686 interested in. */
8687 mask = fuller_mask;
8688 goto unop;
8690 case NOT:
8691 /* (not FOO) is (xor FOO CONST), so if FOO is an LSHIFTRT, we can do the
8692 same as the XOR case above. Ensure that the constant we form is not
8693 wider than the mode of X. */
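/* For example, under MASK 0xff, (not (lshiftrt FOO 8)) can be rewritten
   as (lshiftrt (xor FOO 0xff00) 8), since flipping bits 8..15 of FOO and
   then shifting right by 8 yields the complement in the bits we need.  */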
8695 if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
8696 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
8697 && INTVAL (XEXP (XEXP (x, 0), 1)) >= 0
8698 && (INTVAL (XEXP (XEXP (x, 0), 1)) + floor_log2 (mask)
8699 < GET_MODE_PRECISION (GET_MODE (x)))
8700 && INTVAL (XEXP (XEXP (x, 0), 1)) < HOST_BITS_PER_WIDE_INT)
8702 temp = gen_int_mode (mask << INTVAL (XEXP (XEXP (x, 0), 1)),
8703 GET_MODE (x));
8704 temp = simplify_gen_binary (XOR, GET_MODE (x),
8705 XEXP (XEXP (x, 0), 0), temp);
8706 x = simplify_gen_binary (LSHIFTRT, GET_MODE (x),
8707 temp, XEXP (XEXP (x, 0), 1));
8709 return force_to_mode (x, mode, mask, next_select);
8712 /* (and (not FOO) CONST) is (not (or FOO (not CONST))), so we must
8713 use the full mask inside the NOT. */
8714 mask = fuller_mask;
8716 unop:
8717 op0 = gen_lowpart_or_truncate (op_mode,
8718 force_to_mode (XEXP (x, 0), mode, mask,
8719 next_select));
8720 if (op_mode != GET_MODE (x) || op0 != XEXP (x, 0))
8721 x = simplify_gen_unary (code, op_mode, op0, op_mode);
8722 break;
8724 case NE:
8725 /* (and (ne FOO 0) CONST) can be (and FOO CONST) if CONST is included
8726 in STORE_FLAG_VALUE and FOO has a single bit that might be nonzero,
8727 and that single-bit value is equal to STORE_FLAG_VALUE. */
8728 if ((mask & ~STORE_FLAG_VALUE) == 0
8729 && XEXP (x, 1) == const0_rtx
8730 && GET_MODE (XEXP (x, 0)) == mode
8731 && exact_log2 (nonzero_bits (XEXP (x, 0), mode)) >= 0
8732 && (nonzero_bits (XEXP (x, 0), mode)
8733 == (unsigned HOST_WIDE_INT) STORE_FLAG_VALUE))
8734 return force_to_mode (XEXP (x, 0), mode, mask, next_select);
8736 break;
8738 case IF_THEN_ELSE:
8739 /* We have no way of knowing if the IF_THEN_ELSE can itself be
8740 written in a narrower mode. We play it safe and do not do so. */
8742 op0 = gen_lowpart_or_truncate (GET_MODE (x),
8743 force_to_mode (XEXP (x, 1), mode,
8744 mask, next_select));
8745 op1 = gen_lowpart_or_truncate (GET_MODE (x),
8746 force_to_mode (XEXP (x, 2), mode,
8747 mask, next_select));
8748 if (op0 != XEXP (x, 1) || op1 != XEXP (x, 2))
8749 x = simplify_gen_ternary (IF_THEN_ELSE, GET_MODE (x),
8750 GET_MODE (XEXP (x, 0)), XEXP (x, 0),
8751 op0, op1);
8752 break;
8754 default:
8755 break;
8758 /* Ensure we return a value of the proper mode. */
8759 return gen_lowpart_or_truncate (mode, x);
8762 /* Return nonzero if X is an expression that has one of two values depending on
8763 whether some other value is zero or nonzero. In that case, we return the
8764 value that is being tested, *PTRUE is set to the value if the rtx being
8765 returned has a nonzero value, and *PFALSE is set to the other alternative.
8767 If we return zero, we set *PTRUE and *PFALSE to X. */
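/* For example, given (ne REG (const_int 0)) we return REG with *PTRUE set
   to const_true_rtx and *PFALSE to const0_rtx; for an existing
   (if_then_else (eq R (const_int 0)) A B) we return R with *PTRUE = B
   and *PFALSE = A.  */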
8769 static rtx
8770 if_then_else_cond (rtx x, rtx *ptrue, rtx *pfalse)
8772 machine_mode mode = GET_MODE (x);
8773 enum rtx_code code = GET_CODE (x);
8774 rtx cond0, cond1, true0, true1, false0, false1;
8775 unsigned HOST_WIDE_INT nz;
8777 /* If we are comparing a value against zero, we are done. */
8778 if ((code == NE || code == EQ)
8779 && XEXP (x, 1) == const0_rtx)
8781 *ptrue = (code == NE) ? const_true_rtx : const0_rtx;
8782 *pfalse = (code == NE) ? const0_rtx : const_true_rtx;
8783 return XEXP (x, 0);
8786 /* If this is a unary operation whose operand has one of two values, apply
8787 our opcode to compute those values. */
8788 else if (UNARY_P (x)
8789 && (cond0 = if_then_else_cond (XEXP (x, 0), &true0, &false0)) != 0)
8791 *ptrue = simplify_gen_unary (code, mode, true0, GET_MODE (XEXP (x, 0)));
8792 *pfalse = simplify_gen_unary (code, mode, false0,
8793 GET_MODE (XEXP (x, 0)));
8794 return cond0;
8797 /* If this is a COMPARE, do nothing, since the IF_THEN_ELSE we would
8798 make can't possibly match and would suppress other optimizations. */
8799 else if (code == COMPARE)
8802 /* If this is a binary operation, see if either side has only one of two
8803 values. If either one does or if both do and they are conditional on
8804 the same value, compute the new true and false values. */
8805 else if (BINARY_P (x))
8807 cond0 = if_then_else_cond (XEXP (x, 0), &true0, &false0);
8808 cond1 = if_then_else_cond (XEXP (x, 1), &true1, &false1);
8810 if ((cond0 != 0 || cond1 != 0)
8811 && ! (cond0 != 0 && cond1 != 0 && ! rtx_equal_p (cond0, cond1)))
8813 /* If if_then_else_cond returned zero, then true/false are the
8814 same rtl. We must copy one of them to prevent invalid rtl
8815 sharing. */
8816 if (cond0 == 0)
8817 true0 = copy_rtx (true0);
8818 else if (cond1 == 0)
8819 true1 = copy_rtx (true1);
8821 if (COMPARISON_P (x))
8823 *ptrue = simplify_gen_relational (code, mode, VOIDmode,
8824 true0, true1);
8825 *pfalse = simplify_gen_relational (code, mode, VOIDmode,
8826 false0, false1);
8828 else
8830 *ptrue = simplify_gen_binary (code, mode, true0, true1);
8831 *pfalse = simplify_gen_binary (code, mode, false0, false1);
8834 return cond0 ? cond0 : cond1;
8837 /* See if we have PLUS, IOR, XOR, MINUS or UMAX, where one of the
8838 operands is zero when the other is nonzero, and vice-versa,
8839 and STORE_FLAG_VALUE is 1 or -1. */
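/* As an illustration, with STORE_FLAG_VALUE == 1,
   (plus (mult (lt A B) 5) (mult (ge A B) 9)) evaluates to 5 when
   (lt A B) holds and to 9 otherwise, so (lt A B) is returned as the
   condition with *PTRUE of 5 and *PFALSE of 9.  */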
8841 if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
8842 && (code == PLUS || code == IOR || code == XOR || code == MINUS
8843 || code == UMAX)
8844 && GET_CODE (XEXP (x, 0)) == MULT && GET_CODE (XEXP (x, 1)) == MULT)
8846 rtx op0 = XEXP (XEXP (x, 0), 1);
8847 rtx op1 = XEXP (XEXP (x, 1), 1);
8849 cond0 = XEXP (XEXP (x, 0), 0);
8850 cond1 = XEXP (XEXP (x, 1), 0);
8852 if (COMPARISON_P (cond0)
8853 && COMPARISON_P (cond1)
8854 && ((GET_CODE (cond0) == reversed_comparison_code (cond1, NULL)
8855 && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 0))
8856 && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 1)))
8857 || ((swap_condition (GET_CODE (cond0))
8858 == reversed_comparison_code (cond1, NULL))
8859 && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 1))
8860 && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 0))))
8861 && ! side_effects_p (x))
8863 *ptrue = simplify_gen_binary (MULT, mode, op0, const_true_rtx);
8864 *pfalse = simplify_gen_binary (MULT, mode,
8865 (code == MINUS
8866 ? simplify_gen_unary (NEG, mode,
8867 op1, mode)
8868 : op1),
8869 const_true_rtx);
8870 return cond0;
8874 /* Similarly for MULT, AND and UMIN, except that for these the result
8875 is always zero. */
8876 if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
8877 && (code == MULT || code == AND || code == UMIN)
8878 && GET_CODE (XEXP (x, 0)) == MULT && GET_CODE (XEXP (x, 1)) == MULT)
8880 cond0 = XEXP (XEXP (x, 0), 0);
8881 cond1 = XEXP (XEXP (x, 1), 0);
8883 if (COMPARISON_P (cond0)
8884 && COMPARISON_P (cond1)
8885 && ((GET_CODE (cond0) == reversed_comparison_code (cond1, NULL)
8886 && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 0))
8887 && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 1)))
8888 || ((swap_condition (GET_CODE (cond0))
8889 == reversed_comparison_code (cond1, NULL))
8890 && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 1))
8891 && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 0))))
8892 && ! side_effects_p (x))
8894 *ptrue = *pfalse = const0_rtx;
8895 return cond0;
8900 else if (code == IF_THEN_ELSE)
8902 /* If we have IF_THEN_ELSE already, extract the condition and
8903 canonicalize it if it is NE or EQ. */
8904 cond0 = XEXP (x, 0);
8905 *ptrue = XEXP (x, 1), *pfalse = XEXP (x, 2);
8906 if (GET_CODE (cond0) == NE && XEXP (cond0, 1) == const0_rtx)
8907 return XEXP (cond0, 0);
8908 else if (GET_CODE (cond0) == EQ && XEXP (cond0, 1) == const0_rtx)
8910 *ptrue = XEXP (x, 2), *pfalse = XEXP (x, 1);
8911 return XEXP (cond0, 0);
8913 else
8914 return cond0;
8917 /* If X is a SUBREG, we can narrow both the true and false values
8918 of the inner expression, if there is a condition. */
8919 else if (code == SUBREG
8920 && 0 != (cond0 = if_then_else_cond (SUBREG_REG (x),
8921 &true0, &false0)))
8923 true0 = simplify_gen_subreg (mode, true0,
8924 GET_MODE (SUBREG_REG (x)), SUBREG_BYTE (x));
8925 false0 = simplify_gen_subreg (mode, false0,
8926 GET_MODE (SUBREG_REG (x)), SUBREG_BYTE (x));
8927 if (true0 && false0)
8929 *ptrue = true0;
8930 *pfalse = false0;
8931 return cond0;
8935 /* If X is a constant, this isn't special and will cause confusion
8936 if we treat it as such. Likewise if it is equivalent to a constant. */
8937 else if (CONSTANT_P (x)
8938 || ((cond0 = get_last_value (x)) != 0 && CONSTANT_P (cond0)))
8941 /* If we're in BImode, canonicalize on 0 and STORE_FLAG_VALUE, as that
8942 will be least confusing to the rest of the compiler. */
8943 else if (mode == BImode)
8945 *ptrue = GEN_INT (STORE_FLAG_VALUE), *pfalse = const0_rtx;
8946 return x;
8949 /* If X is known to be either 0 or -1, those are the true and
8950 false values when testing X. */
8951 else if (x == constm1_rtx || x == const0_rtx
8952 || (mode != VOIDmode
8953 && num_sign_bit_copies (x, mode) == GET_MODE_PRECISION (mode)))
8955 *ptrue = constm1_rtx, *pfalse = const0_rtx;
8956 return x;
8959 /* Likewise for 0 or a single bit. */
8960 else if (HWI_COMPUTABLE_MODE_P (mode)
8961 && exact_log2 (nz = nonzero_bits (x, mode)) >= 0)
8963 *ptrue = gen_int_mode (nz, mode), *pfalse = const0_rtx;
8964 return x;
8967 /* Otherwise fail; show no condition with true and false values the same. */
8968 *ptrue = *pfalse = x;
8969 return 0;
8972 /* Return the value of expression X given the fact that condition COND
8973 is known to be true when applied to REG as its first operand and VAL
8974 as its second. X is known to not be shared and so can be modified in
8975 place.
8977 We only handle the simplest cases, and specifically those cases that
8978 arise with IF_THEN_ELSE expressions. */
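/* For example, if COND is GE, REG is some register and VAL is const0_rtx,
   then (abs REG) is known to be REG itself; if COND is LT it is known to
   be (neg REG).  */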
8980 static rtx
8981 known_cond (rtx x, enum rtx_code cond, rtx reg, rtx val)
8983 enum rtx_code code = GET_CODE (x);
8984 rtx temp;
8985 const char *fmt;
8986 int i, j;
8988 if (side_effects_p (x))
8989 return x;
8991 /* If either operand of the condition is a floating point value,
8992 then we have to avoid collapsing an EQ comparison. */
8993 if (cond == EQ
8994 && rtx_equal_p (x, reg)
8995 && ! FLOAT_MODE_P (GET_MODE (x))
8996 && ! FLOAT_MODE_P (GET_MODE (val)))
8997 return val;
8999 if (cond == UNEQ && rtx_equal_p (x, reg))
9000 return val;
9002 /* If X is (abs REG) and we know something about REG's relationship
9003 with zero, we may be able to simplify this. */
9005 if (code == ABS && rtx_equal_p (XEXP (x, 0), reg) && val == const0_rtx)
9006 switch (cond)
9008 case GE: case GT: case EQ:
9009 return XEXP (x, 0);
9010 case LT: case LE:
9011 return simplify_gen_unary (NEG, GET_MODE (XEXP (x, 0)),
9012 XEXP (x, 0),
9013 GET_MODE (XEXP (x, 0)));
9014 default:
9015 break;
9018 /* The only other cases we handle are MIN, MAX, and comparisons if the
9019 operands are the same as REG and VAL. */
9021 else if (COMPARISON_P (x) || COMMUTATIVE_ARITH_P (x))
9023 if (rtx_equal_p (XEXP (x, 0), val))
9024 cond = swap_condition (cond), temp = val, val = reg, reg = temp;
9026 if (rtx_equal_p (XEXP (x, 0), reg) && rtx_equal_p (XEXP (x, 1), val))
9028 if (COMPARISON_P (x))
9030 if (comparison_dominates_p (cond, code))
9031 return const_true_rtx;
9033 code = reversed_comparison_code (x, NULL);
9034 if (code != UNKNOWN
9035 && comparison_dominates_p (cond, code))
9036 return const0_rtx;
9037 else
9038 return x;
9040 else if (code == SMAX || code == SMIN
9041 || code == UMIN || code == UMAX)
9043 int unsignedp = (code == UMIN || code == UMAX);
9045 /* Do not reverse the condition when it is NE or EQ.
9046 This is because we cannot conclude anything about
9047 the value of 'SMAX (x, y)' when x is not equal to y,
9048 but we can when x equals y. */
9049 if ((code == SMAX || code == UMAX)
9050 && ! (cond == EQ || cond == NE))
9051 cond = reverse_condition (cond);
9053 switch (cond)
9055 case GE: case GT:
9056 return unsignedp ? x : XEXP (x, 1);
9057 case LE: case LT:
9058 return unsignedp ? x : XEXP (x, 0);
9059 case GEU: case GTU:
9060 return unsignedp ? XEXP (x, 1) : x;
9061 case LEU: case LTU:
9062 return unsignedp ? XEXP (x, 0) : x;
9063 default:
9064 break;
9069 else if (code == SUBREG)
9071 machine_mode inner_mode = GET_MODE (SUBREG_REG (x));
9072 rtx new_rtx, r = known_cond (SUBREG_REG (x), cond, reg, val);
9074 if (SUBREG_REG (x) != r)
9076 /* We must simplify subreg here, before we lose track of the
9077 original inner_mode. */
9078 new_rtx = simplify_subreg (GET_MODE (x), r,
9079 inner_mode, SUBREG_BYTE (x));
9080 if (new_rtx)
9081 return new_rtx;
9082 else
9083 SUBST (SUBREG_REG (x), r);
9086 return x;
9088 /* We don't have to handle SIGN_EXTEND here, because even in the
9089 case of replacing something with a modeless CONST_INT, a
9090 CONST_INT is already (supposed to be) a valid sign extension for
9091 its narrower mode, which implies it's already properly
9092 sign-extended for the wider mode. Now, for ZERO_EXTEND, the
9093 story is different. */
9094 else if (code == ZERO_EXTEND)
9096 machine_mode inner_mode = GET_MODE (XEXP (x, 0));
9097 rtx new_rtx, r = known_cond (XEXP (x, 0), cond, reg, val);
9099 if (XEXP (x, 0) != r)
9101 /* We must simplify the zero_extend here, before we lose
9102 track of the original inner_mode. */
9103 new_rtx = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
9104 r, inner_mode);
9105 if (new_rtx)
9106 return new_rtx;
9107 else
9108 SUBST (XEXP (x, 0), r);
9111 return x;
9114 fmt = GET_RTX_FORMAT (code);
9115 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
9117 if (fmt[i] == 'e')
9118 SUBST (XEXP (x, i), known_cond (XEXP (x, i), cond, reg, val));
9119 else if (fmt[i] == 'E')
9120 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
9121 SUBST (XVECEXP (x, i, j), known_cond (XVECEXP (x, i, j),
9122 cond, reg, val));
9125 return x;
9128 /* See if X and Y are equal for the purposes of seeing if we can rewrite an
9129 assignment as a field assignment. */
9131 static int
9132 rtx_equal_for_field_assignment_p (rtx x, rtx y)
9134 if (x == y || rtx_equal_p (x, y))
9135 return 1;
9137 if (x == 0 || y == 0 || GET_MODE (x) != GET_MODE (y))
9138 return 0;
9140 /* Check for a paradoxical SUBREG of a MEM compared with the MEM.
9141 Note that all SUBREGs of MEM are paradoxical; otherwise they
9142 would have been rewritten. */
9143 if (MEM_P (x) && GET_CODE (y) == SUBREG
9144 && MEM_P (SUBREG_REG (y))
9145 && rtx_equal_p (SUBREG_REG (y),
9146 gen_lowpart (GET_MODE (SUBREG_REG (y)), x)))
9147 return 1;
9149 if (MEM_P (y) && GET_CODE (x) == SUBREG
9150 && MEM_P (SUBREG_REG (x))
9151 && rtx_equal_p (SUBREG_REG (x),
9152 gen_lowpart (GET_MODE (SUBREG_REG (x)), y)))
9153 return 1;
9155 /* We used to see if get_last_value of X and Y were the same but that's
9156 not correct. In one direction, we'll cause the assignment to have
9157 the wrong destination and in the other, we'll import a register into
9158 this insn that might already have been dead. So fail if none of the
9159 above cases are true. */
9160 return 0;
9163 /* See if X, a SET operation, can be rewritten as a bit-field assignment.
9164 Return that assignment if so.
9166 We only handle the most common cases. */
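/* For example, (set DEST (ior (ashift (const_int 1) POS) DEST)) sets a
   single bit of DEST and is typically rewritten as a SET of a one-bit
   extraction of DEST (e.g. a ZERO_EXTRACT at POS) to const1_rtx.  */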
9168 static rtx
9169 make_field_assignment (rtx x)
9171 rtx dest = SET_DEST (x);
9172 rtx src = SET_SRC (x);
9173 rtx assign;
9174 rtx rhs, lhs;
9175 HOST_WIDE_INT c1;
9176 HOST_WIDE_INT pos;
9177 unsigned HOST_WIDE_INT len;
9178 rtx other;
9179 machine_mode mode;
9181 /* If SRC was (and (not (ashift (const_int 1) POS)) DEST), this is
9182 a clear of a one-bit field. We will have changed it to
9183 (and (rotate (const_int -2) POS) DEST), so check for that. Also check
9184 for a SUBREG. */
9186 if (GET_CODE (src) == AND && GET_CODE (XEXP (src, 0)) == ROTATE
9187 && CONST_INT_P (XEXP (XEXP (src, 0), 0))
9188 && INTVAL (XEXP (XEXP (src, 0), 0)) == -2
9189 && rtx_equal_for_field_assignment_p (dest, XEXP (src, 1)))
9191 assign = make_extraction (VOIDmode, dest, 0, XEXP (XEXP (src, 0), 1),
9192 1, 1, 1, 0);
9193 if (assign != 0)
9194 return gen_rtx_SET (VOIDmode, assign, const0_rtx);
9195 return x;
9198 if (GET_CODE (src) == AND && GET_CODE (XEXP (src, 0)) == SUBREG
9199 && subreg_lowpart_p (XEXP (src, 0))
9200 && (GET_MODE_SIZE (GET_MODE (XEXP (src, 0)))
9201 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (XEXP (src, 0)))))
9202 && GET_CODE (SUBREG_REG (XEXP (src, 0))) == ROTATE
9203 && CONST_INT_P (XEXP (SUBREG_REG (XEXP (src, 0)), 0))
9204 && INTVAL (XEXP (SUBREG_REG (XEXP (src, 0)), 0)) == -2
9205 && rtx_equal_for_field_assignment_p (dest, XEXP (src, 1)))
9207 assign = make_extraction (VOIDmode, dest, 0,
9208 XEXP (SUBREG_REG (XEXP (src, 0)), 1),
9209 1, 1, 1, 0);
9210 if (assign != 0)
9211 return gen_rtx_SET (VOIDmode, assign, const0_rtx);
9212 return x;
9215 /* If SRC is (ior (ashift (const_int 1) POS) DEST), this is a set of a
9216 one-bit field. */
9217 if (GET_CODE (src) == IOR && GET_CODE (XEXP (src, 0)) == ASHIFT
9218 && XEXP (XEXP (src, 0), 0) == const1_rtx
9219 && rtx_equal_for_field_assignment_p (dest, XEXP (src, 1)))
9221 assign = make_extraction (VOIDmode, dest, 0, XEXP (XEXP (src, 0), 1),
9222 1, 1, 1, 0);
9223 if (assign != 0)
9224 return gen_rtx_SET (VOIDmode, assign, const1_rtx);
9225 return x;
9228 /* If DEST is already a field assignment, i.e. ZERO_EXTRACT, and the
9229 SRC is an AND with all bits of that field set, then we can discard
9230 the AND. */
9231 if (GET_CODE (dest) == ZERO_EXTRACT
9232 && CONST_INT_P (XEXP (dest, 1))
9233 && GET_CODE (src) == AND
9234 && CONST_INT_P (XEXP (src, 1)))
9236 HOST_WIDE_INT width = INTVAL (XEXP (dest, 1));
9237 unsigned HOST_WIDE_INT and_mask = INTVAL (XEXP (src, 1));
9238 unsigned HOST_WIDE_INT ze_mask;
9240 if (width >= HOST_BITS_PER_WIDE_INT)
9241 ze_mask = -1;
9242 else
9243 ze_mask = ((unsigned HOST_WIDE_INT)1 << width) - 1;
9245 /* Complete overlap. We can remove the source AND. */
9246 if ((and_mask & ze_mask) == ze_mask)
9247 return gen_rtx_SET (VOIDmode, dest, XEXP (src, 0));
9249 /* Partial overlap. We can reduce the source AND. */
9250 if ((and_mask & ze_mask) != and_mask)
9252 mode = GET_MODE (src);
9253 src = gen_rtx_AND (mode, XEXP (src, 0),
9254 gen_int_mode (and_mask & ze_mask, mode));
9255 return gen_rtx_SET (VOIDmode, dest, src);
9259 /* The other case we handle is assignments into a constant-position
9260 field. They look like (ior/xor (and DEST C1) OTHER). If C1 represents
9261 a mask that has all one bits except for a group of zero bits and
9262 OTHER is known to have zeros where C1 has ones, this is such an
9263 assignment. Compute the position and length from C1. Shift OTHER
9264 to the appropriate position, force it to the required mode, and
9265 make the extraction. Check for the AND in both operands. */
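/* As an illustration, with an SImode DEST and
   (ior (and DEST (const_int 0xffff00ff)) OTHER) where OTHER is known to
   be zero outside bits 8..15, C1 gives a position of 8 and a length of 8,
   and the result is roughly a SET of an 8-bit field of DEST at position 8
   to (lshiftrt OTHER 8).  */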
9267 if (GET_CODE (src) != IOR && GET_CODE (src) != XOR)
9268 return x;
9270 rhs = expand_compound_operation (XEXP (src, 0));
9271 lhs = expand_compound_operation (XEXP (src, 1));
9273 if (GET_CODE (rhs) == AND
9274 && CONST_INT_P (XEXP (rhs, 1))
9275 && rtx_equal_for_field_assignment_p (XEXP (rhs, 0), dest))
9276 c1 = INTVAL (XEXP (rhs, 1)), other = lhs;
9277 else if (GET_CODE (lhs) == AND
9278 && CONST_INT_P (XEXP (lhs, 1))
9279 && rtx_equal_for_field_assignment_p (XEXP (lhs, 0), dest))
9280 c1 = INTVAL (XEXP (lhs, 1)), other = rhs;
9281 else
9282 return x;
9284 pos = get_pos_from_mask ((~c1) & GET_MODE_MASK (GET_MODE (dest)), &len);
9285 if (pos < 0 || pos + len > GET_MODE_PRECISION (GET_MODE (dest))
9286 || GET_MODE_PRECISION (GET_MODE (dest)) > HOST_BITS_PER_WIDE_INT
9287 || (c1 & nonzero_bits (other, GET_MODE (dest))) != 0)
9288 return x;
9290 assign = make_extraction (VOIDmode, dest, pos, NULL_RTX, len, 1, 1, 0);
9291 if (assign == 0)
9292 return x;
9294 /* The mode to use for the source is the mode of the assignment, or of
9295 what is inside a possible STRICT_LOW_PART. */
9296 mode = (GET_CODE (assign) == STRICT_LOW_PART
9297 ? GET_MODE (XEXP (assign, 0)) : GET_MODE (assign));
9299 /* Shift OTHER right POS places and make it the source, restricting it
9300 to the proper length and mode. */
9302 src = canon_reg_for_combine (simplify_shift_const (NULL_RTX, LSHIFTRT,
9303 GET_MODE (src),
9304 other, pos),
9305 dest);
9306 src = force_to_mode (src, mode,
9307 GET_MODE_PRECISION (mode) >= HOST_BITS_PER_WIDE_INT
9308 ? ~(unsigned HOST_WIDE_INT) 0
9309 : ((unsigned HOST_WIDE_INT) 1 << len) - 1,
9312 /* If SRC is masked by an AND that does not make a difference in
9313 the value being stored, strip it. */
9314 if (GET_CODE (assign) == ZERO_EXTRACT
9315 && CONST_INT_P (XEXP (assign, 1))
9316 && INTVAL (XEXP (assign, 1)) < HOST_BITS_PER_WIDE_INT
9317 && GET_CODE (src) == AND
9318 && CONST_INT_P (XEXP (src, 1))
9319 && UINTVAL (XEXP (src, 1))
9320 == ((unsigned HOST_WIDE_INT) 1 << INTVAL (XEXP (assign, 1))) - 1)
9321 src = XEXP (src, 0);
9323 return gen_rtx_SET (VOIDmode, assign, src);
9326 /* See if X is of the form (+ (* a c) (* b c)) and convert to (* (+ a b) c)
9327 if so. */
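/* For example, (ior (and A C) (and B C)) becomes (and (ior A B) C), and
   via the XOR/IOR exception below, (xor (ior A C) (ior B C)) becomes
   (and (xor A B) (not C)).  */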
9329 static rtx
9330 apply_distributive_law (rtx x)
9332 enum rtx_code code = GET_CODE (x);
9333 enum rtx_code inner_code;
9334 rtx lhs, rhs, other;
9335 rtx tem;
9337 /* Distributivity is not true for floating point as it can change the
9338 value. So we don't do it unless -funsafe-math-optimizations. */
9339 if (FLOAT_MODE_P (GET_MODE (x))
9340 && ! flag_unsafe_math_optimizations)
9341 return x;
9343 /* The outer operation can only be one of the following: */
9344 if (code != IOR && code != AND && code != XOR
9345 && code != PLUS && code != MINUS)
9346 return x;
9348 lhs = XEXP (x, 0);
9349 rhs = XEXP (x, 1);
9351 /* If either operand is a primitive we can't do anything, so get out
9352 fast. */
9353 if (OBJECT_P (lhs) || OBJECT_P (rhs))
9354 return x;
9356 lhs = expand_compound_operation (lhs);
9357 rhs = expand_compound_operation (rhs);
9358 inner_code = GET_CODE (lhs);
9359 if (inner_code != GET_CODE (rhs))
9360 return x;
9362 /* See if the inner and outer operations distribute. */
9363 switch (inner_code)
9365 case LSHIFTRT:
9366 case ASHIFTRT:
9367 case AND:
9368 case IOR:
9369 /* These all distribute except over PLUS. */
9370 if (code == PLUS || code == MINUS)
9371 return x;
9372 break;
9374 case MULT:
9375 if (code != PLUS && code != MINUS)
9376 return x;
9377 break;
9379 case ASHIFT:
9380 /* This is also a multiply, so it distributes over everything. */
9381 break;
9383 /* This used to handle SUBREG, but this turned out to be counter-
9384 productive, since (subreg (op ...)) usually is not handled by
9385 insn patterns, and this "optimization" therefore transformed
9386 recognizable patterns into unrecognizable ones. Therefore the
9387 SUBREG case was removed from here.
9389 It is possible that distributing SUBREG over arithmetic operations
9390 leads to an intermediate result that can then be optimized further,
9391 e.g. by moving the outer SUBREG to the other side of a SET as done
9392 in simplify_set. This seems to have been the original intent of
9393 handling SUBREGs here.
9395 However, with current GCC this does not appear to actually happen,
9396 at least on major platforms. If some case is found where removing
9397 the SUBREG case here prevents follow-on optimizations, distributing
9398 SUBREGs ought to be re-added at that place, e.g. in simplify_set. */
9400 default:
9401 return x;
9404 /* Set LHS and RHS to the inner operands (A and B in the example
9405 above) and set OTHER to the common operand (C in the example).
9406 There is only one way to do this unless the inner operation is
9407 commutative. */
9408 if (COMMUTATIVE_ARITH_P (lhs)
9409 && rtx_equal_p (XEXP (lhs, 0), XEXP (rhs, 0)))
9410 other = XEXP (lhs, 0), lhs = XEXP (lhs, 1), rhs = XEXP (rhs, 1);
9411 else if (COMMUTATIVE_ARITH_P (lhs)
9412 && rtx_equal_p (XEXP (lhs, 0), XEXP (rhs, 1)))
9413 other = XEXP (lhs, 0), lhs = XEXP (lhs, 1), rhs = XEXP (rhs, 0);
9414 else if (COMMUTATIVE_ARITH_P (lhs)
9415 && rtx_equal_p (XEXP (lhs, 1), XEXP (rhs, 0)))
9416 other = XEXP (lhs, 1), lhs = XEXP (lhs, 0), rhs = XEXP (rhs, 1);
9417 else if (rtx_equal_p (XEXP (lhs, 1), XEXP (rhs, 1)))
9418 other = XEXP (lhs, 1), lhs = XEXP (lhs, 0), rhs = XEXP (rhs, 0);
9419 else
9420 return x;
9422 /* Form the new inner operation, seeing if it simplifies first. */
9423 tem = simplify_gen_binary (code, GET_MODE (x), lhs, rhs);
9425 /* There is one exception to the general way of distributing:
9426 (a | c) ^ (b | c) -> (a ^ b) & ~c */
9427 if (code == XOR && inner_code == IOR)
9429 inner_code = AND;
9430 other = simplify_gen_unary (NOT, GET_MODE (x), other, GET_MODE (x));
9433 /* We may be able to continue distributing the result, so call
9434 ourselves recursively on the inner operation before forming the
9435 outer operation, which we return. */
9436 return simplify_gen_binary (inner_code, GET_MODE (x),
9437 apply_distributive_law (tem), other);
9440 /* See if X is of the form (* (+ A B) C), and if so convert to
9441 (+ (* A C) (* B C)) and try to simplify.
9443 Most of the time, this results in no change. However, if some of
9444 the operands are the same or inverses of each other, simplifications
9445 will result.
9447 For example, (and (ior A B) (not B)) can occur as the result of
9448 expanding a bit field assignment. When we apply the distributive
9449 law to this, we get (ior (and (A (not B))) (and (B (not B)))),
9450 which then simplifies to (and (A (not B))).
9452 Note that no checks happen on the validity of applying the inverse
9453 distributive law. This is pointless since we can do it in the
9454 few places where this routine is called.
9456 N is the index of the term that is decomposed (the arithmetic operation,
9457 i.e. (+ A B) in the first example above). !N is the index of the term that
9458 is distributed, i.e. of C in the first example above. */
9459 static rtx
9460 distribute_and_simplify_rtx (rtx x, int n)
9462 machine_mode mode;
9463 enum rtx_code outer_code, inner_code;
9464 rtx decomposed, distributed, inner_op0, inner_op1, new_op0, new_op1, tmp;
9466 /* Distributivity is not true for floating point as it can change the
9467 value. So we don't do it unless -funsafe-math-optimizations. */
9468 if (FLOAT_MODE_P (GET_MODE (x))
9469 && ! flag_unsafe_math_optimizations)
9470 return NULL_RTX;
9472 decomposed = XEXP (x, n);
9473 if (!ARITHMETIC_P (decomposed))
9474 return NULL_RTX;
9476 mode = GET_MODE (x);
9477 outer_code = GET_CODE (x);
9478 distributed = XEXP (x, !n);
9480 inner_code = GET_CODE (decomposed);
9481 inner_op0 = XEXP (decomposed, 0);
9482 inner_op1 = XEXP (decomposed, 1);
9484 /* Special case (and (xor B C) (not A)), which is equivalent to
9485 (xor (ior A B) (ior A C)) */
9486 if (outer_code == AND && inner_code == XOR && GET_CODE (distributed) == NOT)
9488 distributed = XEXP (distributed, 0);
9489 outer_code = IOR;
9492 if (n == 0)
9494 /* Distribute the second term. */
9495 new_op0 = simplify_gen_binary (outer_code, mode, inner_op0, distributed);
9496 new_op1 = simplify_gen_binary (outer_code, mode, inner_op1, distributed);
9498 else
9500 /* Distribute the first term. */
9501 new_op0 = simplify_gen_binary (outer_code, mode, distributed, inner_op0);
9502 new_op1 = simplify_gen_binary (outer_code, mode, distributed, inner_op1);
9505 tmp = apply_distributive_law (simplify_gen_binary (inner_code, mode,
9506 new_op0, new_op1));
9507 if (GET_CODE (tmp) != outer_code
9508 && (set_src_cost (tmp, optimize_this_for_speed_p)
9509 < set_src_cost (x, optimize_this_for_speed_p)))
9510 return tmp;
9512 return NULL_RTX;
9515 /* Simplify a logical `and' of VAROP with the constant CONSTOP, to be done
9516 in MODE. Return an equivalent form, if different from (and VAROP
9517 (const_int CONSTOP)). Otherwise, return NULL_RTX. */
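/* For example, if VAROP is (neg Y) with Y known to be zero or one and
   CONSTOP is 8, the result is (ashift Y 3), since -Y & 8 equals Y << 3
   for Y in {0, 1}.  */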
9519 static rtx
9520 simplify_and_const_int_1 (machine_mode mode, rtx varop,
9521 unsigned HOST_WIDE_INT constop)
9523 unsigned HOST_WIDE_INT nonzero;
9524 unsigned HOST_WIDE_INT orig_constop;
9525 rtx orig_varop;
9526 int i;
9528 orig_varop = varop;
9529 orig_constop = constop;
9530 if (GET_CODE (varop) == CLOBBER)
9531 return NULL_RTX;
9533 /* Simplify VAROP knowing that we will be only looking at some of the
9534 bits in it.
9536 Note by passing in CONSTOP, we guarantee that the bits not set in
9537 CONSTOP are not significant and will never be examined. We must
9538 ensure that is the case by explicitly masking out those bits
9539 before returning. */
9540 varop = force_to_mode (varop, mode, constop, 0);
9542 /* If VAROP is a CLOBBER, we will fail so return it. */
9543 if (GET_CODE (varop) == CLOBBER)
9544 return varop;
9546 /* If VAROP is a CONST_INT, then we need to apply the mask in CONSTOP
9547 to VAROP and return the new constant. */
9548 if (CONST_INT_P (varop))
9549 return gen_int_mode (INTVAL (varop) & constop, mode);
9551 /* See what bits may be nonzero in VAROP. Unlike the general case of
9552 a call to nonzero_bits, here we don't care about bits outside
9553 MODE. */
9555 nonzero = nonzero_bits (varop, mode) & GET_MODE_MASK (mode);
9557 /* Turn off all bits in the constant that are known to already be zero.
9558 Thus, if the AND isn't needed at all, we will have CONSTOP == NONZERO_BITS
9559 which is tested below. */
9561 constop &= nonzero;
9563 /* If we don't have any bits left, return zero. */
9564 if (constop == 0)
9565 return const0_rtx;
9567 /* If VAROP is a NEG of something known to be zero or 1 and CONSTOP is
9568 a power of two, we can replace this with an ASHIFT. */
9569 if (GET_CODE (varop) == NEG && nonzero_bits (XEXP (varop, 0), mode) == 1
9570 && (i = exact_log2 (constop)) >= 0)
9571 return simplify_shift_const (NULL_RTX, ASHIFT, mode, XEXP (varop, 0), i);
9573 /* If VAROP is an IOR or XOR, apply the AND to both branches of the IOR
9574 or XOR, then try to apply the distributive law. This may eliminate
9575 operations if either branch can be simplified because of the AND.
9576 It may also make some cases more complex, but those cases probably
9577 won't match a pattern either with or without this. */
9579 if (GET_CODE (varop) == IOR || GET_CODE (varop) == XOR)
9580 return
9581 gen_lowpart
9582 (mode,
9583 apply_distributive_law
9584 (simplify_gen_binary (GET_CODE (varop), GET_MODE (varop),
9585 simplify_and_const_int (NULL_RTX,
9586 GET_MODE (varop),
9587 XEXP (varop, 0),
9588 constop),
9589 simplify_and_const_int (NULL_RTX,
9590 GET_MODE (varop),
9591 XEXP (varop, 1),
9592 constop))));
9594 /* If VAROP is PLUS, and the constant is a mask of low bits, distribute
9595 the AND and see if one of the operands simplifies to zero. If so, we
9596 may eliminate it. */
9598 if (GET_CODE (varop) == PLUS
9599 && exact_log2 (constop + 1) >= 0)
9601 rtx o0, o1;
9603 o0 = simplify_and_const_int (NULL_RTX, mode, XEXP (varop, 0), constop);
9604 o1 = simplify_and_const_int (NULL_RTX, mode, XEXP (varop, 1), constop);
9605 if (o0 == const0_rtx)
9606 return o1;
9607 if (o1 == const0_rtx)
9608 return o0;
9611 /* Make a SUBREG if necessary. If we can't make it, fail. */
9612 varop = gen_lowpart (mode, varop);
9613 if (varop == NULL_RTX || GET_CODE (varop) == CLOBBER)
9614 return NULL_RTX;
9616 /* If we are only masking insignificant bits, return VAROP. */
9617 if (constop == nonzero)
9618 return varop;
9620 if (varop == orig_varop && constop == orig_constop)
9621 return NULL_RTX;
9623 /* Otherwise, return an AND. */
9624 return simplify_gen_binary (AND, mode, varop, gen_int_mode (constop, mode));
9628 /* We have X, a logical `and' of VAROP with the constant CONSTOP, to be done
9629 in MODE.
9631 Return an equivalent form, if different from X. Otherwise, return X. If
9632 X is zero, we are to always construct the equivalent form. */
9634 static rtx
9635 simplify_and_const_int (rtx x, machine_mode mode, rtx varop,
9636 unsigned HOST_WIDE_INT constop)
9638 rtx tem = simplify_and_const_int_1 (mode, varop, constop);
9639 if (tem)
9640 return tem;
9642 if (!x)
9643 x = simplify_gen_binary (AND, GET_MODE (varop), varop,
9644 gen_int_mode (constop, mode));
9645 if (GET_MODE (x) != mode)
9646 x = gen_lowpart (mode, x);
9647 return x;
9650 /* Given a REG, X, compute which bits in X can be nonzero.
9651 We don't care about bits outside of those defined in MODE.
9653 For most X this is simply GET_MODE_MASK (GET_MODE (X)), but if X is
9654 a shift, AND, or zero_extract, we can do better. */
9656 static rtx
9657 reg_nonzero_bits_for_combine (const_rtx x, machine_mode mode,
9658 const_rtx known_x ATTRIBUTE_UNUSED,
9659 machine_mode known_mode ATTRIBUTE_UNUSED,
9660 unsigned HOST_WIDE_INT known_ret ATTRIBUTE_UNUSED,
9661 unsigned HOST_WIDE_INT *nonzero)
9663 rtx tem;
9664 reg_stat_type *rsp;
9666 /* If X is a register whose nonzero bits value is current, use it.
9667 Otherwise, if X is a register whose value we can find, use that
9668 value. Otherwise, use the previously-computed global nonzero bits
9669 for this register. */
9671 rsp = &reg_stat[REGNO (x)];
9672 if (rsp->last_set_value != 0
9673 && (rsp->last_set_mode == mode
9674 || (GET_MODE_CLASS (rsp->last_set_mode) == MODE_INT
9675 && GET_MODE_CLASS (mode) == MODE_INT))
9676 && ((rsp->last_set_label >= label_tick_ebb_start
9677 && rsp->last_set_label < label_tick)
9678 || (rsp->last_set_label == label_tick
9679 && DF_INSN_LUID (rsp->last_set) < subst_low_luid)
9680 || (REGNO (x) >= FIRST_PSEUDO_REGISTER
9681 && REG_N_SETS (REGNO (x)) == 1
9682 && !REGNO_REG_SET_P
9683 (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb),
9684 REGNO (x)))))
9686 unsigned HOST_WIDE_INT mask = rsp->last_set_nonzero_bits;
9688 if (GET_MODE_PRECISION (rsp->last_set_mode) < GET_MODE_PRECISION (mode))
9689 /* We don't know anything about the upper bits. */
9690 mask |= GET_MODE_MASK (mode) ^ GET_MODE_MASK (rsp->last_set_mode);
9692 *nonzero &= mask;
9693 return NULL;
9696 tem = get_last_value (x);
9698 if (tem)
9700 #ifdef SHORT_IMMEDIATES_SIGN_EXTEND
9701 /* If X is narrower than MODE and TEM is a non-negative
9702 constant that would appear negative in the mode of X,
9703 sign-extend it for use in reg_nonzero_bits because some
9704 machines (maybe most) will actually do the sign-extension
9705 and this is the conservative approach.
9707 ??? For 2.5, try to tighten up the MD files in this regard
9708 instead of this kludge. */
9710 if (GET_MODE_PRECISION (GET_MODE (x)) < GET_MODE_PRECISION (mode)
9711 && CONST_INT_P (tem)
9712 && INTVAL (tem) > 0
9713 && val_signbit_known_set_p (GET_MODE (x), INTVAL (tem)))
9714 tem = GEN_INT (INTVAL (tem) | ~GET_MODE_MASK (GET_MODE (x)));
9715 #endif
9716 return tem;
9718 else if (nonzero_sign_valid && rsp->nonzero_bits)
9720 unsigned HOST_WIDE_INT mask = rsp->nonzero_bits;
9722 if (GET_MODE_PRECISION (GET_MODE (x)) < GET_MODE_PRECISION (mode))
9723 /* We don't know anything about the upper bits. */
9724 mask |= GET_MODE_MASK (mode) ^ GET_MODE_MASK (GET_MODE (x));
9726 *nonzero &= mask;
9729 return NULL;
9732 /* Return the number of bits at the high-order end of X that are known to
9733 be equal to the sign bit. X will be used in mode MODE; if MODE is
9734 VOIDmode, X will be used in its own mode. The returned value will always
9735 be between 1 and the number of bits in MODE. */
9737 static rtx
9738 reg_num_sign_bit_copies_for_combine (const_rtx x, machine_mode mode,
9739 const_rtx known_x ATTRIBUTE_UNUSED,
9740 machine_mode known_mode
9741 ATTRIBUTE_UNUSED,
9742 unsigned int known_ret ATTRIBUTE_UNUSED,
9743 unsigned int *result)
9745 rtx tem;
9746 reg_stat_type *rsp;
9748 rsp = &reg_stat[REGNO (x)];
9749 if (rsp->last_set_value != 0
9750 && rsp->last_set_mode == mode
9751 && ((rsp->last_set_label >= label_tick_ebb_start
9752 && rsp->last_set_label < label_tick)
9753 || (rsp->last_set_label == label_tick
9754 && DF_INSN_LUID (rsp->last_set) < subst_low_luid)
9755 || (REGNO (x) >= FIRST_PSEUDO_REGISTER
9756 && REG_N_SETS (REGNO (x)) == 1
9757 && !REGNO_REG_SET_P
9758 (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb),
9759 REGNO (x)))))
9761 *result = rsp->last_set_sign_bit_copies;
9762 return NULL;
9765 tem = get_last_value (x);
9766 if (tem != 0)
9767 return tem;
9769 if (nonzero_sign_valid && rsp->sign_bit_copies != 0
9770 && GET_MODE_PRECISION (GET_MODE (x)) == GET_MODE_PRECISION (mode))
9771 *result = rsp->sign_bit_copies;
9773 return NULL;
9776 /* Return the number of "extended" bits there are in X, when interpreted
9777 as a quantity in MODE whose signedness is indicated by UNSIGNEDP. For
9778 unsigned quantities, this is the number of high-order zero bits.
9779 For signed quantities, this is the number of copies of the sign bit
9780 minus 1. In both cases, this function returns the number of "spare"
9781 bits. For example, if two quantities for which this function returns
9782 at least 1 are added, the addition is known not to overflow.
9784 This function will always return 0 unless called during combine, which
9785 implies that it must be called from a define_split. */
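/* For example, for an unsigned SImode quantity whose nonzero bits are
   0xff, this returns 32 - 1 - 7 == 24 spare bits; for a signed quantity
   it is simply num_sign_bit_copies minus one.  */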
9787 unsigned int
9788 extended_count (const_rtx x, machine_mode mode, int unsignedp)
9790 if (nonzero_sign_valid == 0)
9791 return 0;
9793 return (unsignedp
9794 ? (HWI_COMPUTABLE_MODE_P (mode)
9795 ? (unsigned int) (GET_MODE_PRECISION (mode) - 1
9796 - floor_log2 (nonzero_bits (x, mode)))
9797 : 0)
9798 : num_sign_bit_copies (x, mode) - 1);
9801 /* This function is called from `simplify_shift_const' to merge two
9802 outer operations. Specifically, we have already found that we need
9803 to perform operation *POP0 with constant *PCONST0 at the outermost
9804 position. We would now like to also perform OP1 with constant CONST1
9805 (with *POP0 being done last).
9807 Return 1 if we can do the operation and update *POP0 and *PCONST0 with
9808 the resulting operation. *PCOMP_P is set to 1 if we would need to
9809 complement the innermost operand, otherwise it is unchanged.
9811 MODE is the mode in which the operation will be done. No bits outside
9812 the width of this mode matter. It is assumed that the width of this mode
9813 is smaller than or equal to HOST_BITS_PER_WIDE_INT.
9815 If *POP0 or OP1 is UNKNOWN, it means no operation is required. Only NEG, PLUS,
9816 IOR, XOR, and AND are supported. We may set *POP0 to SET if the proper
9817 result is simply *PCONST0.
9819 If the resulting operation cannot be expressed as one operation, we
9820 return 0 and do not change *POP0, *PCONST0, and *PCOMP_P. */
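/* For example, if *POP0 is IOR and OP1 is AND with the same constant C,
   then (ior (and X C) C) is just C, so *POP0 becomes SET and the result
   is simply *PCONST0 (the "(a & b) | b == b" case below).  */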
9822 static int
9823 merge_outer_ops (enum rtx_code *pop0, HOST_WIDE_INT *pconst0, enum rtx_code op1,
9824 HOST_WIDE_INT const1, machine_mode mode, int *pcomp_p)
9825 enum rtx_code op0 = *pop0;
9826 HOST_WIDE_INT const0 = *pconst0;
9828 const0 &= GET_MODE_MASK (mode);
9829 const1 &= GET_MODE_MASK (mode);
9831 /* If OP0 is an AND, clear unimportant bits in CONST1. */
9832 if (op0 == AND)
9833 const1 &= const0;
9835 /* If OP0 or OP1 is UNKNOWN, this is easy. Similarly if they are the same or
9836 if OP0 is SET. */
9838 if (op1 == UNKNOWN || op0 == SET)
9839 return 1;
9841 else if (op0 == UNKNOWN)
9842 op0 = op1, const0 = const1;
9844 else if (op0 == op1)
9846 switch (op0)
9848 case AND:
9849 const0 &= const1;
9850 break;
9851 case IOR:
9852 const0 |= const1;
9853 break;
9854 case XOR:
9855 const0 ^= const1;
9856 break;
9857 case PLUS:
9858 const0 += const1;
9859 break;
9860 case NEG:
9861 op0 = UNKNOWN;
9862 break;
9863 default:
9864 break;
9868 /* Otherwise, if either is a PLUS or NEG, we can't do anything. */
9869 else if (op0 == PLUS || op1 == PLUS || op0 == NEG || op1 == NEG)
9870 return 0;
9872 /* If the two constants aren't the same, we can't do anything. The
9873 remaining six cases can all be done. */
9874 else if (const0 != const1)
9875 return 0;
9877 else
9878 switch (op0)
9880 case IOR:
9881 if (op1 == AND)
9882 /* (a & b) | b == b */
9883 op0 = SET;
9884 else /* op1 == XOR */
9885 /* (a ^ b) | b == a | b */
9887 break;
9889 case XOR:
9890 if (op1 == AND)
9891 /* (a & b) ^ b == (~a) & b */
9892 op0 = AND, *pcomp_p = 1;
9893 else /* op1 == IOR */
9894 /* (a | b) ^ b == a & ~b */
9895 op0 = AND, const0 = ~const0;
9896 break;
9898 case AND:
9899 if (op1 == IOR)
9900 /* (a | b) & b == b */
9901 op0 = SET;
9902 else /* op1 == XOR */
9903 /* (a ^ b) & b == (~a) & b */
9904 *pcomp_p = 1;
9905 break;
9906 default:
9907 break;
9910 /* Check for NO-OP cases. */
9911 const0 &= GET_MODE_MASK (mode);
9912 if (const0 == 0
9913 && (op0 == IOR || op0 == XOR || op0 == PLUS))
9914 op0 = UNKNOWN;
9915 else if (const0 == 0 && op0 == AND)
9916 op0 = SET;
9917 else if ((unsigned HOST_WIDE_INT) const0 == GET_MODE_MASK (mode)
9918 && op0 == AND)
9919 op0 = UNKNOWN;
9921 *pop0 = op0;
9923 /* ??? Slightly redundant with the above mask, but not entirely.
9924 Moving this above means we'd have to sign-extend the mode mask
9925 for the final test. */
9926 if (op0 != UNKNOWN && op0 != NEG)
9927 *pconst0 = trunc_int_for_mode (const0, mode);
9929 return 1;
9932 /* A helper to simplify_shift_const_1 to determine the mode we can perform
9933 the shift in. The original shift operation CODE is performed on OP in
9934 ORIG_MODE. Return the wider mode MODE if we can perform the operation
9935 in that mode. Return ORIG_MODE otherwise. We can also assume that the
9936 result of the shift is subject to operation OUTER_CODE with operand
9937 OUTER_CONST. */
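/* For example, a QImode LSHIFTRT can be performed in SImode when
   nonzero_bits shows that OP has no bits set outside QImode's mask,
   since then no nonzero bits can be shifted in from above.  */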
9939 static machine_mode
9940 try_widen_shift_mode (enum rtx_code code, rtx op, int count,
9941 machine_mode orig_mode, machine_mode mode,
9942 enum rtx_code outer_code, HOST_WIDE_INT outer_const)
9944 if (orig_mode == mode)
9945 return mode;
9946 gcc_assert (GET_MODE_PRECISION (mode) > GET_MODE_PRECISION (orig_mode));
9948 /* In general we can't perform in wider mode for right shift and rotate. */
9949 switch (code)
9951 case ASHIFTRT:
9952 /* We can still widen if the bits brought in from the left are identical
9953 to the sign bit of ORIG_MODE. */
9954 if (num_sign_bit_copies (op, mode)
9955 > (unsigned) (GET_MODE_PRECISION (mode)
9956 - GET_MODE_PRECISION (orig_mode)))
9957 return mode;
9958 return orig_mode;
9960 case LSHIFTRT:
9961 /* Similarly here but with zero bits. */
9962 if (HWI_COMPUTABLE_MODE_P (mode)
9963 && (nonzero_bits (op, mode) & ~GET_MODE_MASK (orig_mode)) == 0)
9964 return mode;
9966 /* We can also widen if the bits brought in will be masked off. This
9967 operation is performed in ORIG_MODE. */
9968 if (outer_code == AND)
9970 int care_bits = low_bitmask_len (orig_mode, outer_const);
9972 if (care_bits >= 0
9973 && GET_MODE_PRECISION (orig_mode) - care_bits >= count)
9974 return mode;
9976 /* fall through */
9978 case ROTATE:
9979 return orig_mode;
9981 case ROTATERT:
9982 gcc_unreachable ();
9984 default:
9985 return mode;
9989 /* Simplify a shift of VAROP by ORIG_COUNT bits. CODE says what kind
9990 of shift. The result of the shift is RESULT_MODE. Return NULL_RTX
9991 if we cannot simplify it. Otherwise, return a simplified value.
9993 The shift is normally computed in the widest mode we find in VAROP, as
9994 long as it isn't a different number of words than RESULT_MODE. Exceptions
9995 are ASHIFTRT and ROTATE, which are always done in their original mode. */
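/* For example, (lshiftrt:SI (lshiftrt:SI X 2) 3) is handled by the loop
   below by merging the two counts into a single (lshiftrt:SI X 5); when
   the shift codes differ, an outer AND mask may also be introduced as
   described in the nested-shift case.  */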
9997 static rtx
9998 simplify_shift_const_1 (enum rtx_code code, machine_mode result_mode,
9999 rtx varop, int orig_count)
10001 enum rtx_code orig_code = code;
10002 rtx orig_varop = varop;
10003 int count;
10004 machine_mode mode = result_mode;
10005 machine_mode shift_mode, tmode;
10006 unsigned int mode_words
10007 = (GET_MODE_SIZE (mode) + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD;
10008 /* We form (outer_op (code varop count) (outer_const)). */
10009 enum rtx_code outer_op = UNKNOWN;
10010 HOST_WIDE_INT outer_const = 0;
10011 int complement_p = 0;
10012 rtx new_rtx, x;
10014 /* Make sure to truncate the "natural" shift on the way in. We don't
10015 want to do this inside the loop as it makes it more difficult to
10016 combine shifts. */
10017 if (SHIFT_COUNT_TRUNCATED)
10018 orig_count &= GET_MODE_BITSIZE (mode) - 1;
10020 /* If we were given an invalid count, don't do anything except exactly
10021 what was requested. */
10023 if (orig_count < 0 || orig_count >= (int) GET_MODE_PRECISION (mode))
10024 return NULL_RTX;
10026 count = orig_count;
10028 /* Unless one of the branches of the `if' in this loop does a `continue',
10029 we will `break' the loop after the `if'. */
10031 while (count != 0)
10033 /* If we have an operand of (clobber (const_int 0)), fail. */
10034 if (GET_CODE (varop) == CLOBBER)
10035 return NULL_RTX;
10037 /* Convert ROTATERT to ROTATE. */
10038 if (code == ROTATERT)
10040 unsigned int bitsize = GET_MODE_PRECISION (result_mode);
10041 code = ROTATE;
10042 if (VECTOR_MODE_P (result_mode))
10043 count = bitsize / GET_MODE_NUNITS (result_mode) - count;
10044 else
10045 count = bitsize - count;
10048 shift_mode = try_widen_shift_mode (code, varop, count, result_mode,
10049 mode, outer_op, outer_const);
10051 /* Handle cases where the count is greater than the size of the mode
10052 minus 1. For ASHIFT, use the size minus one as the count (this can
10053 occur when simplifying (lshiftrt (ashiftrt ..))). For rotates,
10054 take the count modulo the size. For other shifts, the result is
10055 zero.
10057 Since these shifts are being produced by the compiler by combining
10058 multiple operations, each of which are defined, we know what the
10059 result is supposed to be. */
10061 if (count > (GET_MODE_PRECISION (shift_mode) - 1))
10063 if (code == ASHIFTRT)
10064 count = GET_MODE_PRECISION (shift_mode) - 1;
10065 else if (code == ROTATE || code == ROTATERT)
10066 count %= GET_MODE_PRECISION (shift_mode);
10067 else
10069 /* We can't simply return zero because there may be an
10070 outer op. */
10071 varop = const0_rtx;
10072 count = 0;
10073 break;
10077 /* If we discovered we had to complement VAROP, leave. Making a NOT
10078 here would cause an infinite loop. */
10079 if (complement_p)
10080 break;
10082 /* An arithmetic right shift of a quantity known to be -1 or 0
10083 is a no-op. */
10084 if (code == ASHIFTRT
10085 && (num_sign_bit_copies (varop, shift_mode)
10086 == GET_MODE_PRECISION (shift_mode)))
10088 count = 0;
10089 break;
10092 /* If we are doing an arithmetic right shift and discarding all but
10093 the sign bit copies, this is equivalent to doing a shift by the
10094 bitsize minus one. Convert it into that shift because it will often
10095 allow other simplifications. */
10097 if (code == ASHIFTRT
10098 && (count + num_sign_bit_copies (varop, shift_mode)
10099 >= GET_MODE_PRECISION (shift_mode)))
10100 count = GET_MODE_PRECISION (shift_mode) - 1;
10102 /* We simplify the tests below and elsewhere by converting
10103 ASHIFTRT to LSHIFTRT if we know the sign bit is clear.
10104 `make_compound_operation' will convert it to an ASHIFTRT for
10105 those machines (such as VAX) that don't have an LSHIFTRT. */
10106 if (code == ASHIFTRT
10107 && val_signbit_known_clear_p (shift_mode,
10108 nonzero_bits (varop, shift_mode)))
10109 code = LSHIFTRT;
10111 if (((code == LSHIFTRT
10112 && HWI_COMPUTABLE_MODE_P (shift_mode)
10113 && !(nonzero_bits (varop, shift_mode) >> count))
10114 || (code == ASHIFT
10115 && HWI_COMPUTABLE_MODE_P (shift_mode)
10116 && !((nonzero_bits (varop, shift_mode) << count)
10117 & GET_MODE_MASK (shift_mode))))
10118 && !side_effects_p (varop))
10119 varop = const0_rtx;
10121 switch (GET_CODE (varop))
10123 case SIGN_EXTEND:
10124 case ZERO_EXTEND:
10125 case SIGN_EXTRACT:
10126 case ZERO_EXTRACT:
10127 new_rtx = expand_compound_operation (varop);
10128 if (new_rtx != varop)
10130 varop = new_rtx;
10131 continue;
10133 break;
10135 case MEM:
10136 /* If we have (xshiftrt (mem ...) C) and C is MODE_WIDTH
10137 minus the width of a smaller mode, we can do this with a
10138 SIGN_EXTEND or ZERO_EXTEND from the narrower memory location. */
10139 if ((code == ASHIFTRT || code == LSHIFTRT)
10140 && ! mode_dependent_address_p (XEXP (varop, 0),
10141 MEM_ADDR_SPACE (varop))
10142 && ! MEM_VOLATILE_P (varop)
10143 && (tmode = mode_for_size (GET_MODE_BITSIZE (mode) - count,
10144 MODE_INT, 1)) != BLKmode)
10146 new_rtx = adjust_address_nv (varop, tmode,
10147 BYTES_BIG_ENDIAN ? 0
10148 : count / BITS_PER_UNIT);
10150 varop = gen_rtx_fmt_e (code == ASHIFTRT ? SIGN_EXTEND
10151 : ZERO_EXTEND, mode, new_rtx);
10152 count = 0;
10153 continue;
10155 break;
10157 case SUBREG:
10158 /* If VAROP is a SUBREG, strip it as long as the inner operand has
10159 the same number of words as what we've seen so far. Then store
10160 the widest mode in MODE. */
10161 if (subreg_lowpart_p (varop)
10162 && (GET_MODE_SIZE (GET_MODE (SUBREG_REG (varop)))
10163 > GET_MODE_SIZE (GET_MODE (varop)))
10164 && (unsigned int) ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (varop)))
10165 + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)
10166 == mode_words
10167 && GET_MODE_CLASS (GET_MODE (varop)) == MODE_INT
10168 && GET_MODE_CLASS (GET_MODE (SUBREG_REG (varop))) == MODE_INT)
10170 varop = SUBREG_REG (varop);
10171 if (GET_MODE_SIZE (GET_MODE (varop)) > GET_MODE_SIZE (mode))
10172 mode = GET_MODE (varop);
10173 continue;
10175 break;
10177 case MULT:
10178 /* Some machines use MULT instead of ASHIFT because MULT
10179 is cheaper. But it is still better on those machines to
10180 merge two shifts into one. */
10181 if (CONST_INT_P (XEXP (varop, 1))
10182 && exact_log2 (UINTVAL (XEXP (varop, 1))) >= 0)
10184 varop
10185 = simplify_gen_binary (ASHIFT, GET_MODE (varop),
10186 XEXP (varop, 0),
10187 GEN_INT (exact_log2 (
10188 UINTVAL (XEXP (varop, 1)))));
10189 continue;
10191 break;
10193 case UDIV:
10194 /* Similar, for when divides are cheaper. */
10195 if (CONST_INT_P (XEXP (varop, 1))
10196 && exact_log2 (UINTVAL (XEXP (varop, 1))) >= 0)
10198 varop
10199 = simplify_gen_binary (LSHIFTRT, GET_MODE (varop),
10200 XEXP (varop, 0),
10201 GEN_INT (exact_log2 (
10202 UINTVAL (XEXP (varop, 1)))));
10203 continue;
10205 break;
10207 case ASHIFTRT:
10208 /* If we are extracting just the sign bit of an arithmetic
10209 right shift, that shift is not needed. However, the sign
10210 bit of a wider mode may be different from what would be
10211 interpreted as the sign bit in a narrower mode, so, if
10212 the result is narrower, don't discard the shift. */
10213 if (code == LSHIFTRT
10214 && count == (GET_MODE_BITSIZE (result_mode) - 1)
10215 && (GET_MODE_BITSIZE (result_mode)
10216 >= GET_MODE_BITSIZE (GET_MODE (varop))))
10218 varop = XEXP (varop, 0);
10219 continue;
10222 /* ... fall through ... */
10224 case LSHIFTRT:
10225 case ASHIFT:
10226 case ROTATE:
10227 /* Here we have two nested shifts. The result is usually the
10228 AND of a new shift with a mask. We compute the result below. */
10229 if (CONST_INT_P (XEXP (varop, 1))
10230 && INTVAL (XEXP (varop, 1)) >= 0
10231 && INTVAL (XEXP (varop, 1)) < GET_MODE_PRECISION (GET_MODE (varop))
10232 && HWI_COMPUTABLE_MODE_P (result_mode)
10233 && HWI_COMPUTABLE_MODE_P (mode)
10234 && !VECTOR_MODE_P (result_mode))
10236 enum rtx_code first_code = GET_CODE (varop);
10237 unsigned int first_count = INTVAL (XEXP (varop, 1));
10238 unsigned HOST_WIDE_INT mask;
10239 rtx mask_rtx;
10241 /* We have one common special case. We can't do any merging if
10242 the inner code is an ASHIFTRT of a smaller mode. However, if
10243 we have (ashift:M1 (subreg:M1 (ashiftrt:M2 FOO C1) 0) C2)
10244 with C2 == GET_MODE_BITSIZE (M1) - GET_MODE_BITSIZE (M2),
10245 we can convert it to
10246 (ashiftrt:M1 (ashift:M1 (and:M1 (subreg:M1 FOO 0) C3) C2) C1).
10247 This simplifies certain SIGN_EXTEND operations. */
10248 if (code == ASHIFT && first_code == ASHIFTRT
10249 && count == (GET_MODE_PRECISION (result_mode)
10250 - GET_MODE_PRECISION (GET_MODE (varop))))
10252 /* C3 has the low-order C1 bits zero. */
10254 mask = GET_MODE_MASK (mode)
10255 & ~(((unsigned HOST_WIDE_INT) 1 << first_count) - 1);
10257 varop = simplify_and_const_int (NULL_RTX, result_mode,
10258 XEXP (varop, 0), mask);
10259 varop = simplify_shift_const (NULL_RTX, ASHIFT, result_mode,
10260 varop, count);
10261 count = first_count;
10262 code = ASHIFTRT;
10263 continue;
10266 /* If this was (ashiftrt (ashift foo C1) C2) and FOO has more
10267 than C1 high-order bits equal to the sign bit, we can convert
10268 this to either an ASHIFT or an ASHIFTRT depending on the
10269 two counts.
10271 We cannot do this if VAROP's mode is not SHIFT_MODE. */
10273 if (code == ASHIFTRT && first_code == ASHIFT
10274 && GET_MODE (varop) == shift_mode
10275 && (num_sign_bit_copies (XEXP (varop, 0), shift_mode)
10276 > first_count))
10278 varop = XEXP (varop, 0);
10279 count -= first_count;
10280 if (count < 0)
10282 count = -count;
10283 code = ASHIFT;
10286 continue;
10289 /* There are some cases we can't do. If CODE is ASHIFTRT,
10290 we can only do this if FIRST_CODE is also ASHIFTRT.
10292 We can't do the case when CODE is ROTATE and FIRST_CODE is
10293 ASHIFTRT.
10295 If the mode of this shift is not the mode of the outer shift,
10296 we can't do this if either shift is a right shift or ROTATE.
10298 Finally, we can't do any of these if the mode is too wide
10299 unless the codes are the same.
10301 Handle the case where the shift codes are the same
10302 first. */
10304 if (code == first_code)
10306 if (GET_MODE (varop) != result_mode
10307 && (code == ASHIFTRT || code == LSHIFTRT
10308 || code == ROTATE))
10309 break;
10311 count += first_count;
10312 varop = XEXP (varop, 0);
10313 continue;
10316 if (code == ASHIFTRT
10317 || (code == ROTATE && first_code == ASHIFTRT)
10318 || GET_MODE_PRECISION (mode) > HOST_BITS_PER_WIDE_INT
10319 || (GET_MODE (varop) != result_mode
10320 && (first_code == ASHIFTRT || first_code == LSHIFTRT
10321 || first_code == ROTATE
10322 || code == ROTATE)))
10323 break;
10325 /* To compute the mask to apply after the shift, shift the
10326 nonzero bits of the inner shift the same way the
10327 outer shift will. */
10329 mask_rtx = gen_int_mode (nonzero_bits (varop, GET_MODE (varop)),
10330 result_mode);
10332 mask_rtx
10333 = simplify_const_binary_operation (code, result_mode, mask_rtx,
10334 GEN_INT (count));
10336 /* Give up if we can't compute an outer operation to use. */
10337 if (mask_rtx == 0
10338 || !CONST_INT_P (mask_rtx)
10339 || ! merge_outer_ops (&outer_op, &outer_const, AND,
10340 INTVAL (mask_rtx),
10341 result_mode, &complement_p))
10342 break;
10344 /* If the shifts are in the same direction, we add the
10345 counts. Otherwise, we subtract them. */
10346 if ((code == ASHIFTRT || code == LSHIFTRT)
10347 == (first_code == ASHIFTRT || first_code == LSHIFTRT))
10348 count += first_count;
10349 else
10350 count -= first_count;
10352 /* If COUNT is positive, the new shift is usually CODE,
10353 except for the two exceptions below, in which case it is
10354 FIRST_CODE. If the count is negative, FIRST_CODE should
10355 always be used. */
10356 if (count > 0
10357 && ((first_code == ROTATE && code == ASHIFT)
10358 || (first_code == ASHIFTRT && code == LSHIFTRT)))
10359 code = first_code;
10360 else if (count < 0)
10361 code = first_code, count = -count;
10363 varop = XEXP (varop, 0);
10364 continue;
10367 /* If we have (A << B << C) for any shift, we can convert this to
10368 (A << C << B). This wins if A is a constant. Only try this if
10369 B is not a constant. */
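/* E.g., with A == 1 and the outer count C == 3,
(ashift (ashift 1 B) 3) becomes (ashift 8 B), since
(ashift 1 3) folds to the constant 8. */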
10371 else if (GET_CODE (varop) == code
10372 && CONST_INT_P (XEXP (varop, 0))
10373 && !CONST_INT_P (XEXP (varop, 1)))
10375 rtx new_rtx = simplify_const_binary_operation (code, mode,
10376 XEXP (varop, 0),
10377 GEN_INT (count));
10378 varop = gen_rtx_fmt_ee (code, mode, new_rtx, XEXP (varop, 1));
10379 count = 0;
10380 continue;
10382 break;
10384 case NOT:
10385 if (VECTOR_MODE_P (mode))
10386 break;
10388 /* Make this fit the case below. */
10389 varop = gen_rtx_XOR (mode, XEXP (varop, 0), constm1_rtx);
10390 continue;
10392 case IOR:
10393 case AND:
10394 case XOR:
10395 /* If we have (xshiftrt (ior (plus X (const_int -1)) X) C)
10396 with C the size of VAROP - 1 and the shift is logical if
10397 STORE_FLAG_VALUE is 1 and arithmetic if STORE_FLAG_VALUE is -1,
10398 we have an (le X 0) operation. If we have an arithmetic shift
10399 and STORE_FLAG_VALUE is 1 or we have a logical shift with
10400 STORE_FLAG_VALUE of -1, we have a (neg (le X 0)) operation. */
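/* E.g., in a 32-bit mode with C == 31: the sign bit of
(ior (plus X -1) X) is set exactly when X <= 0, since for
X == 0 the PLUS yields -1, for X < 0 X itself has the sign
bit set, and for X > 0 neither term does. */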
10402 if (GET_CODE (varop) == IOR && GET_CODE (XEXP (varop, 0)) == PLUS
10403 && XEXP (XEXP (varop, 0), 1) == constm1_rtx
10404 && (STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
10405 && (code == LSHIFTRT || code == ASHIFTRT)
10406 && count == (GET_MODE_PRECISION (GET_MODE (varop)) - 1)
10407 && rtx_equal_p (XEXP (XEXP (varop, 0), 0), XEXP (varop, 1)))
10409 count = 0;
10410 varop = gen_rtx_LE (GET_MODE (varop), XEXP (varop, 1),
10411 const0_rtx);
10413 if (STORE_FLAG_VALUE == 1 ? code == ASHIFTRT : code == LSHIFTRT)
10414 varop = gen_rtx_NEG (GET_MODE (varop), varop);
10416 continue;
10419 /* If we have (shift (logical)), move the logical to the outside
10420 to allow it to possibly combine with another logical and the
10421 shift to combine with another shift. This also canonicalizes to
10422 what a ZERO_EXTRACT looks like. Also, some machines have
10423 (and (shift)) insns. */
10425 if (CONST_INT_P (XEXP (varop, 1))
10426 /* We can't do this if we have (ashiftrt (xor)) and the
10427 constant has its sign bit set in shift_mode with shift_mode
10428 wider than result_mode. */
10429 && !(code == ASHIFTRT && GET_CODE (varop) == XOR
10430 && result_mode != shift_mode
10431 && 0 > trunc_int_for_mode (INTVAL (XEXP (varop, 1)),
10432 shift_mode))
10433 && (new_rtx = simplify_const_binary_operation
10434 (code, result_mode,
10435 gen_int_mode (INTVAL (XEXP (varop, 1)), result_mode),
10436 GEN_INT (count))) != 0
10437 && CONST_INT_P (new_rtx)
10438 && merge_outer_ops (&outer_op, &outer_const, GET_CODE (varop),
10439 INTVAL (new_rtx), result_mode, &complement_p))
10441 varop = XEXP (varop, 0);
10442 continue;
10445 /* If we can't do that, try to simplify the shift in each arm of the
10446 logical expression, make a new logical expression, and apply
10447 the inverse distributive law. This also can't be done for
10448 (ashiftrt (xor)) where we've widened the shift and the constant
10449 changes the sign bit. */
10450 if (CONST_INT_P (XEXP (varop, 1))
10451 && !(code == ASHIFTRT && GET_CODE (varop) == XOR
10452 && result_mode != shift_mode
10453 && 0 > trunc_int_for_mode (INTVAL (XEXP (varop, 1)),
10454 shift_mode)))
10456 rtx lhs = simplify_shift_const (NULL_RTX, code, shift_mode,
10457 XEXP (varop, 0), count);
10458 rtx rhs = simplify_shift_const (NULL_RTX, code, shift_mode,
10459 XEXP (varop, 1), count);
10461 varop = simplify_gen_binary (GET_CODE (varop), shift_mode,
10462 lhs, rhs);
10463 varop = apply_distributive_law (varop);
10465 count = 0;
10466 continue;
10468 break;
10470 case EQ:
10471 /* Convert (lshiftrt (eq FOO 0) C) to (xor FOO 1) if STORE_FLAG_VALUE
10472 says that the sign bit can be tested, FOO has mode MODE, C is
10473 GET_MODE_PRECISION (MODE) - 1, and only the low-order bit of FOO
10474 may be nonzero. */
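/* E.g., if FOO is known to be 0 or 1 and STORE_FLAG_VALUE is -1:
(eq FOO 0) is -1 (all bits set) when FOO == 0 and 0 otherwise, so a
logical right shift by C leaves 1 when FOO == 0 and 0 when FOO == 1,
which is exactly (xor FOO 1). */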
10475 if (code == LSHIFTRT
10476 && XEXP (varop, 1) == const0_rtx
10477 && GET_MODE (XEXP (varop, 0)) == result_mode
10478 && count == (GET_MODE_PRECISION (result_mode) - 1)
10479 && HWI_COMPUTABLE_MODE_P (result_mode)
10480 && STORE_FLAG_VALUE == -1
10481 && nonzero_bits (XEXP (varop, 0), result_mode) == 1
10482 && merge_outer_ops (&outer_op, &outer_const, XOR, 1, result_mode,
10483 &complement_p))
10485 varop = XEXP (varop, 0);
10486 count = 0;
10487 continue;
10489 break;
10491 case NEG:
10492 /* (lshiftrt (neg A) C) where A is either 0 or 1 and C is one less
10493 than the number of bits in the mode is equivalent to A. */
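/* E.g., in a 32-bit mode: if A == 1, (neg A) is 0xffffffff and
shifting it right logically by 31 gives 1; if A == 0, both sides
are 0. */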
10494 if (code == LSHIFTRT
10495 && count == (GET_MODE_PRECISION (result_mode) - 1)
10496 && nonzero_bits (XEXP (varop, 0), result_mode) == 1)
10498 varop = XEXP (varop, 0);
10499 count = 0;
10500 continue;
10503 /* NEG commutes with ASHIFT since it is multiplication. Move the
10504 NEG outside to allow shifts to combine. */
10505 if (code == ASHIFT
10506 && merge_outer_ops (&outer_op, &outer_const, NEG, 0, result_mode,
10507 &complement_p))
10509 varop = XEXP (varop, 0);
10510 continue;
10512 break;
10514 case PLUS:
10515 /* (lshiftrt (plus A -1) C) where A is either 0 or 1 and C
10516 is one less than the number of bits in the mode is
10517 equivalent to (xor A 1). */
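/* E.g., in a 32-bit mode: if A == 0, (plus A -1) is 0xffffffff and
the logical shift by 31 gives 1; if A == 1, the PLUS gives 0 and the
shift gives 0 -- i.e. (xor A 1). */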
10518 if (code == LSHIFTRT
10519 && count == (GET_MODE_PRECISION (result_mode) - 1)
10520 && XEXP (varop, 1) == constm1_rtx
10521 && nonzero_bits (XEXP (varop, 0), result_mode) == 1
10522 && merge_outer_ops (&outer_op, &outer_const, XOR, 1, result_mode,
10523 &complement_p))
10525 count = 0;
10526 varop = XEXP (varop, 0);
10527 continue;
10530 /* If we have (xshiftrt (plus FOO BAR) C), and the only bits
10531 that might be nonzero in BAR are those being shifted out and those
10532 bits are known zero in FOO, we can replace the PLUS with FOO.
10533 Similarly in the other operand order. This code occurs when
10534 we are computing the size of a variable-size array. */
10536 if ((code == ASHIFTRT || code == LSHIFTRT)
10537 && count < HOST_BITS_PER_WIDE_INT
10538 && nonzero_bits (XEXP (varop, 1), result_mode) >> count == 0
10539 && (nonzero_bits (XEXP (varop, 1), result_mode)
10540 & nonzero_bits (XEXP (varop, 0), result_mode)) == 0)
10542 varop = XEXP (varop, 0);
10543 continue;
10545 else if ((code == ASHIFTRT || code == LSHIFTRT)
10546 && count < HOST_BITS_PER_WIDE_INT
10547 && HWI_COMPUTABLE_MODE_P (result_mode)
10548 && 0 == (nonzero_bits (XEXP (varop, 0), result_mode)
10549 >> count)
10550 && 0 == (nonzero_bits (XEXP (varop, 0), result_mode)
10551 & nonzero_bits (XEXP (varop, 1),
10552 result_mode)))
10554 varop = XEXP (varop, 1);
10555 continue;
10558 /* (ashift (plus foo C) N) is (plus (ashift foo N) C'). */
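/* E.g., (ashift (plus foo 3) 2) becomes (plus (ashift foo 2) 12),
with C' = C << N. */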
10559 if (code == ASHIFT
10560 && CONST_INT_P (XEXP (varop, 1))
10561 && (new_rtx = simplify_const_binary_operation
10562 (ASHIFT, result_mode,
10563 gen_int_mode (INTVAL (XEXP (varop, 1)), result_mode),
10564 GEN_INT (count))) != 0
10565 && CONST_INT_P (new_rtx)
10566 && merge_outer_ops (&outer_op, &outer_const, PLUS,
10567 INTVAL (new_rtx), result_mode, &complement_p))
10569 varop = XEXP (varop, 0);
10570 continue;
10573 /* Check for 'PLUS signbit', which is the canonical form of 'XOR
10574 signbit', and attempt to change the PLUS to an XOR and move it to
10575 the outer operation, as is done above in the AND/IOR/XOR case
10576 for shift (logical). See the logical handling above for the
10577 reasoning behind doing so. */
10578 if (code == LSHIFTRT
10579 && CONST_INT_P (XEXP (varop, 1))
10580 && mode_signbit_p (result_mode, XEXP (varop, 1))
10581 && (new_rtx = simplify_const_binary_operation
10582 (code, result_mode,
10583 gen_int_mode (INTVAL (XEXP (varop, 1)), result_mode),
10584 GEN_INT (count))) != 0
10585 && CONST_INT_P (new_rtx)
10586 && merge_outer_ops (&outer_op, &outer_const, XOR,
10587 INTVAL (new_rtx), result_mode, &complement_p))
10589 varop = XEXP (varop, 0);
10590 continue;
10593 break;
10595 case MINUS:
10596 /* If we have (xshiftrt (minus (ashiftrt X C) X) C)
10597 with C the size of VAROP - 1 and the shift is logical if
10598 STORE_FLAG_VALUE is 1 and arithmetic if STORE_FLAG_VALUE is -1,
10599 we have a (gt X 0) operation. If the shift is arithmetic with
10600 STORE_FLAG_VALUE of 1 or logical with STORE_FLAG_VALUE == -1,
10601 we have a (neg (gt X 0)) operation. */
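/* E.g., in a 32-bit mode with C == 31: the sign bit of
(minus (ashiftrt X 31) X) is set exactly when X > 0, since for
X > 0 the ASHIFTRT is 0 and 0 - X is negative, while for X <= 0
the ASHIFTRT is -1 or 0 and the difference is nonnegative. */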
10603 if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
10604 && GET_CODE (XEXP (varop, 0)) == ASHIFTRT
10605 && count == (GET_MODE_PRECISION (GET_MODE (varop)) - 1)
10606 && (code == LSHIFTRT || code == ASHIFTRT)
10607 && CONST_INT_P (XEXP (XEXP (varop, 0), 1))
10608 && INTVAL (XEXP (XEXP (varop, 0), 1)) == count
10609 && rtx_equal_p (XEXP (XEXP (varop, 0), 0), XEXP (varop, 1)))
10611 count = 0;
10612 varop = gen_rtx_GT (GET_MODE (varop), XEXP (varop, 1),
10613 const0_rtx);
10615 if (STORE_FLAG_VALUE == 1 ? code == ASHIFTRT : code == LSHIFTRT)
10616 varop = gen_rtx_NEG (GET_MODE (varop), varop);
10618 continue;
10620 break;
10622 case TRUNCATE:
10623 /* Change (lshiftrt (truncate (lshiftrt))) to (truncate (lshiftrt))
10624 if the truncate does not affect the value. */
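/* E.g., (lshiftrt:SI (truncate:SI (lshiftrt:DI X 32)) 2) can become
(truncate:SI (lshiftrt:DI X 34)), since after the inner shift the
bits dropped by the truncation no longer affect the low word. */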
10625 if (code == LSHIFTRT
10626 && GET_CODE (XEXP (varop, 0)) == LSHIFTRT
10627 && CONST_INT_P (XEXP (XEXP (varop, 0), 1))
10628 && (INTVAL (XEXP (XEXP (varop, 0), 1))
10629 >= (GET_MODE_PRECISION (GET_MODE (XEXP (varop, 0)))
10630 - GET_MODE_PRECISION (GET_MODE (varop)))))
10632 rtx varop_inner = XEXP (varop, 0);
10634 varop_inner
10635 = gen_rtx_LSHIFTRT (GET_MODE (varop_inner),
10636 XEXP (varop_inner, 0),
10637 GEN_INT
10638 (count + INTVAL (XEXP (varop_inner, 1))));
10639 varop = gen_rtx_TRUNCATE (GET_MODE (varop), varop_inner);
10640 count = 0;
10641 continue;
10643 break;
10645 default:
10646 break;
10649 break;
10652 shift_mode = try_widen_shift_mode (code, varop, count, result_mode, mode,
10653 outer_op, outer_const);
10655 /* We have now finished analyzing the shift. The result should be
10656 a shift of type CODE with SHIFT_MODE shifting VAROP COUNT places. If
10657 OUTER_OP is non-UNKNOWN, it is an operation that needs to be applied
10658 to the result of the shift. OUTER_CONST is the relevant constant,
10659 but we must turn off all bits turned off in the shift. */
10661 if (outer_op == UNKNOWN
10662 && orig_code == code && orig_count == count
10663 && varop == orig_varop
10664 && shift_mode == GET_MODE (varop))
10665 return NULL_RTX;
10667 /* Make a SUBREG if necessary. If we can't make it, fail. */
10668 varop = gen_lowpart (shift_mode, varop);
10669 if (varop == NULL_RTX || GET_CODE (varop) == CLOBBER)
10670 return NULL_RTX;
10672 /* If we have an outer operation and we just made a shift, it is
10673 possible that we could have simplified the shift were it not
10674 for the outer operation. So try to do the simplification
10675 recursively. */
10677 if (outer_op != UNKNOWN)
10678 x = simplify_shift_const_1 (code, shift_mode, varop, count);
10679 else
10680 x = NULL_RTX;
10682 if (x == NULL_RTX)
10683 x = simplify_gen_binary (code, shift_mode, varop, GEN_INT (count));
10685 /* If we were doing an LSHIFTRT in a wider mode than it was originally,
10686 turn off all the bits that the shift would have turned off. */
10687 if (orig_code == LSHIFTRT && result_mode != shift_mode)
10688 x = simplify_and_const_int (NULL_RTX, shift_mode, x,
10689 GET_MODE_MASK (result_mode) >> orig_count);
10691 /* Do the remainder of the processing in RESULT_MODE. */
10692 x = gen_lowpart_or_truncate (result_mode, x);
10694 /* If COMPLEMENT_P is set, we have to complement X before doing the outer
10695 operation. */
10696 if (complement_p)
10697 x = simplify_gen_unary (NOT, result_mode, x, result_mode);
10699 if (outer_op != UNKNOWN)
10701 if (GET_RTX_CLASS (outer_op) != RTX_UNARY
10702 && GET_MODE_PRECISION (result_mode) < HOST_BITS_PER_WIDE_INT)
10703 outer_const = trunc_int_for_mode (outer_const, result_mode);
10705 if (outer_op == AND)
10706 x = simplify_and_const_int (NULL_RTX, result_mode, x, outer_const);
10707 else if (outer_op == SET)
10709 /* This means that we have determined that the result is
10710 equivalent to a constant. This should be rare. */
10711 if (!side_effects_p (x))
10712 x = GEN_INT (outer_const);
10714 else if (GET_RTX_CLASS (outer_op) == RTX_UNARY)
10715 x = simplify_gen_unary (outer_op, result_mode, x, result_mode);
10716 else
10717 x = simplify_gen_binary (outer_op, result_mode, x,
10718 GEN_INT (outer_const));
10721 return x;
10724 /* Simplify a shift of VAROP by COUNT bits. CODE says what kind of shift.
10725 The result of the shift is RESULT_MODE. If we cannot simplify it,
10726 return X or, if it is NULL, synthesize the expression with
10727 simplify_gen_binary. Otherwise, return a simplified value.
10729 The shift is normally computed in the widest mode we find in VAROP, as
10730 long as it isn't a different number of words than RESULT_MODE. Exceptions
10731 are ASHIFTRT and ROTATE, which are always done in their original mode. */
10733 static rtx
10734 simplify_shift_const (rtx x, enum rtx_code code, machine_mode result_mode,
10735 rtx varop, int count)
10737 rtx tem = simplify_shift_const_1 (code, result_mode, varop, count);
10738 if (tem)
10739 return tem;
10741 if (!x)
10742 x = simplify_gen_binary (code, GET_MODE (varop), varop, GEN_INT (count));
10743 if (GET_MODE (x) != result_mode)
10744 x = gen_lowpart (result_mode, x);
10745 return x;
10749 /* Like recog, but we receive the address of a pointer to a new pattern.
10750 We try to match the rtx that the pointer points to.
10751 If that fails, we may try to modify or replace the pattern,
10752 storing the replacement into the same pointer object.
10754 Modifications include deletion or addition of CLOBBERs.
10756 PNOTES is a pointer to a location where any REG_UNUSED notes added for
10757 the CLOBBERs are placed.
10759 The value is the final insn code from the pattern ultimately matched,
10760 or -1. */
10762 static int
10763 recog_for_combine (rtx *pnewpat, rtx_insn *insn, rtx *pnotes)
10765 rtx pat = *pnewpat;
10766 rtx pat_without_clobbers;
10767 int insn_code_number;
10768 int num_clobbers_to_add = 0;
10769 int i;
10770 rtx notes = NULL_RTX;
10771 rtx old_notes, old_pat;
10772 int old_icode;
10774 /* If PAT is a PARALLEL, check to see if it contains the CLOBBER
10775 we use to indicate that something didn't match. If we find such a
10776 thing, force rejection. */
10777 if (GET_CODE (pat) == PARALLEL)
10778 for (i = XVECLEN (pat, 0) - 1; i >= 0; i--)
10779 if (GET_CODE (XVECEXP (pat, 0, i)) == CLOBBER
10780 && XEXP (XVECEXP (pat, 0, i), 0) == const0_rtx)
10781 return -1;
10783 old_pat = PATTERN (insn);
10784 old_notes = REG_NOTES (insn);
10785 PATTERN (insn) = pat;
10786 REG_NOTES (insn) = NULL_RTX;
10788 insn_code_number = recog (pat, insn, &num_clobbers_to_add);
10789 if (dump_file && (dump_flags & TDF_DETAILS))
10791 if (insn_code_number < 0)
10792 fputs ("Failed to match this instruction:\n", dump_file);
10793 else
10794 fputs ("Successfully matched this instruction:\n", dump_file);
10795 print_rtl_single (dump_file, pat);
10798 /* If the pattern wasn't recognized, we may previously have had an insn
10799 that clobbered some register as a side effect, but the combined
10800 insn doesn't need to do that. So try once more without the clobbers
10801 unless this represents an ASM insn. */
10803 if (insn_code_number < 0 && ! check_asm_operands (pat)
10804 && GET_CODE (pat) == PARALLEL)
10806 int pos;
10808 for (pos = 0, i = 0; i < XVECLEN (pat, 0); i++)
10809 if (GET_CODE (XVECEXP (pat, 0, i)) != CLOBBER)
10811 if (i != pos)
10812 SUBST (XVECEXP (pat, 0, pos), XVECEXP (pat, 0, i));
10813 pos++;
10816 SUBST_INT (XVECLEN (pat, 0), pos);
10818 if (pos == 1)
10819 pat = XVECEXP (pat, 0, 0);
10821 PATTERN (insn) = pat;
10822 insn_code_number = recog (pat, insn, &num_clobbers_to_add);
10823 if (dump_file && (dump_flags & TDF_DETAILS))
10825 if (insn_code_number < 0)
10826 fputs ("Failed to match this instruction:\n", dump_file);
10827 else
10828 fputs ("Successfully matched this instruction:\n", dump_file);
10829 print_rtl_single (dump_file, pat);
10833 pat_without_clobbers = pat;
10835 PATTERN (insn) = old_pat;
10836 REG_NOTES (insn) = old_notes;
10838 /* Recognize all noop sets; these will be killed by a follow-up pass. */
10839 if (insn_code_number < 0 && GET_CODE (pat) == SET && set_noop_p (pat))
10840 insn_code_number = NOOP_MOVE_INSN_CODE, num_clobbers_to_add = 0;
10842 /* If we had any clobbers to add, make a new pattern that contains
10843 them. Then check to make sure that all of them are dead. */
10844 if (num_clobbers_to_add)
10846 rtx newpat = gen_rtx_PARALLEL (VOIDmode,
10847 rtvec_alloc (GET_CODE (pat) == PARALLEL
10848 ? (XVECLEN (pat, 0)
10849 + num_clobbers_to_add)
10850 : num_clobbers_to_add + 1));
10852 if (GET_CODE (pat) == PARALLEL)
10853 for (i = 0; i < XVECLEN (pat, 0); i++)
10854 XVECEXP (newpat, 0, i) = XVECEXP (pat, 0, i);
10855 else
10856 XVECEXP (newpat, 0, 0) = pat;
10858 add_clobbers (newpat, insn_code_number);
10860 for (i = XVECLEN (newpat, 0) - num_clobbers_to_add;
10861 i < XVECLEN (newpat, 0); i++)
10863 if (REG_P (XEXP (XVECEXP (newpat, 0, i), 0))
10864 && ! reg_dead_at_p (XEXP (XVECEXP (newpat, 0, i), 0), insn))
10865 return -1;
10866 if (GET_CODE (XEXP (XVECEXP (newpat, 0, i), 0)) != SCRATCH)
10868 gcc_assert (REG_P (XEXP (XVECEXP (newpat, 0, i), 0)));
10869 notes = alloc_reg_note (REG_UNUSED,
10870 XEXP (XVECEXP (newpat, 0, i), 0), notes);
10873 pat = newpat;
10876 if (insn_code_number >= 0
10877 && insn_code_number != NOOP_MOVE_INSN_CODE)
10879 old_pat = PATTERN (insn);
10880 old_notes = REG_NOTES (insn);
10881 old_icode = INSN_CODE (insn);
10882 PATTERN (insn) = pat;
10883 REG_NOTES (insn) = notes;
10885 /* Allow targets to reject combined insn. */
10886 if (!targetm.legitimate_combined_insn (insn))
10888 if (dump_file && (dump_flags & TDF_DETAILS))
10889 fputs ("Instruction not appropriate for target.",
10890 dump_file);
10892 /* Callers expect recog_for_combine to strip
10893 clobbers from the pattern on failure. */
10894 pat = pat_without_clobbers;
10895 notes = NULL_RTX;
10897 insn_code_number = -1;
10900 PATTERN (insn) = old_pat;
10901 REG_NOTES (insn) = old_notes;
10902 INSN_CODE (insn) = old_icode;
10905 *pnewpat = pat;
10906 *pnotes = notes;
10908 return insn_code_number;
10911 /* Like gen_lowpart_general but for use by combine. In combine it
10912 is not possible to create any new pseudoregs. However, it is
10913 safe to create invalid memory addresses, because combine will
10914 try to recognize them and all they will do is make the combine
10915 attempt fail.
10917 If for some reason this cannot do its job, an rtx
10918 (clobber (const_int 0)) is returned.
10919 An insn containing that will not be recognized. */
10921 static rtx
10922 gen_lowpart_for_combine (machine_mode omode, rtx x)
10924 machine_mode imode = GET_MODE (x);
10925 unsigned int osize = GET_MODE_SIZE (omode);
10926 unsigned int isize = GET_MODE_SIZE (imode);
10927 rtx result;
10929 if (omode == imode)
10930 return x;
10932 /* We can only support MODE being wider than a word if X is a
10933 constant integer or has a mode the same size. */
10934 if (GET_MODE_SIZE (omode) > UNITS_PER_WORD
10935 && ! (CONST_SCALAR_INT_P (x) || isize == osize))
10936 goto fail;
10938 /* X might be a paradoxical (subreg (mem)). In that case, gen_lowpart
10939 won't know what to do. So we will strip off the SUBREG here and
10940 process normally. */
10941 if (GET_CODE (x) == SUBREG && MEM_P (SUBREG_REG (x)))
10943 x = SUBREG_REG (x);
10945 /* For use in case we fall down into the address adjustments
10946 further below, we need to adjust the known mode and size of
10947 x; imode and isize, since we just adjusted x. */
10948 imode = GET_MODE (x);
10950 if (imode == omode)
10951 return x;
10953 isize = GET_MODE_SIZE (imode);
10956 result = gen_lowpart_common (omode, x);
10958 if (result)
10959 return result;
10961 if (MEM_P (x))
10963 int offset = 0;
10965 /* Refuse to work on a volatile memory ref or one with a mode-dependent
10966 address. */
10967 if (MEM_VOLATILE_P (x)
10968 || mode_dependent_address_p (XEXP (x, 0), MEM_ADDR_SPACE (x)))
10969 goto fail;
10971 /* If we want to refer to something bigger than the original memref,
10972 generate a paradoxical subreg instead. That will force a reload
10973 of the original memref X. */
10974 if (isize < osize)
10975 return gen_rtx_SUBREG (omode, x, 0);
10977 if (WORDS_BIG_ENDIAN)
10978 offset = MAX (isize, UNITS_PER_WORD) - MAX (osize, UNITS_PER_WORD);
10980 /* Adjust the address so that the address-after-the-data is
10981 unchanged. */
10982 if (BYTES_BIG_ENDIAN)
10983 offset -= MIN (UNITS_PER_WORD, osize) - MIN (UNITS_PER_WORD, isize);
10985 return adjust_address_nv (x, omode, offset);
10988 /* If X is a comparison operator, rewrite it in a new mode. This
10989 probably won't match, but may allow further simplifications. */
10990 else if (COMPARISON_P (x))
10991 return gen_rtx_fmt_ee (GET_CODE (x), omode, XEXP (x, 0), XEXP (x, 1));
10993 /* If we couldn't simplify X any other way, just enclose it in a
10994 SUBREG. Normally, this SUBREG won't match, but some patterns may
10995 include an explicit SUBREG or we may simplify it further in combine. */
10996 else
10998 int offset = 0;
10999 rtx res;
11001 offset = subreg_lowpart_offset (omode, imode);
11002 if (imode == VOIDmode)
11004 imode = int_mode_for_mode (omode);
11005 x = gen_lowpart_common (imode, x);
11006 if (x == NULL)
11007 goto fail;
11009 res = simplify_gen_subreg (omode, x, imode, offset);
11010 if (res)
11011 return res;
11014 fail:
11015 return gen_rtx_CLOBBER (omode, const0_rtx);
11018 /* Try to simplify a comparison between OP0 and a constant OP1,
11019 where CODE is the comparison code that will be tested, into a
11020 (CODE OP0 const0_rtx) form.
11022 The result is a possibly different comparison code to use.
11023 *POP1 may be updated. */
11025 static enum rtx_code
11026 simplify_compare_const (enum rtx_code code, machine_mode mode,
11027 rtx op0, rtx *pop1)
11029 unsigned int mode_width = GET_MODE_PRECISION (mode);
11030 HOST_WIDE_INT const_op = INTVAL (*pop1);
11032 /* Get the constant we are comparing against and turn off all bits
11033 not on in our mode. */
11034 if (mode != VOIDmode)
11035 const_op = trunc_int_for_mode (const_op, mode);
11037 /* If we are comparing against a constant power of two and the value
11038 being compared can only have that single bit nonzero (e.g., it was
11039 `and'ed with that bit), we can replace this with a comparison
11040 with zero. */
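/* E.g., if OP0 is (and X 8), so that only bit 3 can be nonzero,
(eq OP0 8) becomes (ne OP0 0) and (ne OP0 8) becomes (eq OP0 0). */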
11041 if (const_op
11042 && (code == EQ || code == NE || code == GE || code == GEU
11043 || code == LT || code == LTU)
11044 && mode_width - 1 < HOST_BITS_PER_WIDE_INT
11045 && exact_log2 (const_op & GET_MODE_MASK (mode)) >= 0
11046 && (nonzero_bits (op0, mode)
11047 == (unsigned HOST_WIDE_INT) (const_op & GET_MODE_MASK (mode))))
11049 code = (code == EQ || code == GE || code == GEU ? NE : EQ);
11050 const_op = 0;
11053 /* Similarly, if we are comparing a value known to be either -1 or
11054 0 with -1, change it to the opposite comparison against zero. */
11055 if (const_op == -1
11056 && (code == EQ || code == NE || code == GT || code == LE
11057 || code == GEU || code == LTU)
11058 && num_sign_bit_copies (op0, mode) == mode_width)
11060 code = (code == EQ || code == LE || code == GEU ? NE : EQ);
11061 const_op = 0;
11064 /* Do some canonicalizations based on the comparison code. We prefer
11065 comparisons against zero and then prefer equality comparisons.
11066 If we can reduce the size of a constant, we will do that too. */
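/* E.g., (lt X 1) becomes (le X 0), and (ltu X 1) falls through to
become (eq X 0). */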
11067 switch (code)
11069 case LT:
11070 /* < C is equivalent to <= (C - 1) */
11071 if (const_op > 0)
11073 const_op -= 1;
11074 code = LE;
11075 /* ... fall through to LE case below. */
11077 else
11078 break;
11080 case LE:
11081 /* <= C is equivalent to < (C + 1); we do this for C < 0 */
11082 if (const_op < 0)
11084 const_op += 1;
11085 code = LT;
11088 /* If we are doing a <= 0 comparison on a value known to have
11089 a zero sign bit, we can replace this with == 0. */
11090 else if (const_op == 0
11091 && mode_width - 1 < HOST_BITS_PER_WIDE_INT
11092 && (nonzero_bits (op0, mode)
11093 & ((unsigned HOST_WIDE_INT) 1 << (mode_width - 1)))
11094 == 0)
11095 code = EQ;
11096 break;
11098 case GE:
11099 /* >= C is equivalent to > (C - 1). */
11100 if (const_op > 0)
11102 const_op -= 1;
11103 code = GT;
11104 /* ... fall through to GT below. */
11106 else
11107 break;
11109 case GT:
11110 /* > C is equivalent to >= (C + 1); we do this for C < 0. */
11111 if (const_op < 0)
11113 const_op += 1;
11114 code = GE;
11117 /* If we are doing a > 0 comparison on a value known to have
11118 a zero sign bit, we can replace this with != 0. */
11119 else if (const_op == 0
11120 && mode_width - 1 < HOST_BITS_PER_WIDE_INT
11121 && (nonzero_bits (op0, mode)
11122 & ((unsigned HOST_WIDE_INT) 1 << (mode_width - 1)))
11123 == 0)
11124 code = NE;
11125 break;
11127 case LTU:
11128 /* < C is equivalent to <= (C - 1). */
11129 if (const_op > 0)
11131 const_op -= 1;
11132 code = LEU;
11133 /* ... fall through ... */
11135 /* (unsigned) < 0x80000000 is equivalent to >= 0. */
11136 else if (mode_width - 1 < HOST_BITS_PER_WIDE_INT
11137 && (unsigned HOST_WIDE_INT) const_op
11138 == (unsigned HOST_WIDE_INT) 1 << (mode_width - 1))
11140 const_op = 0;
11141 code = GE;
11142 break;
11144 else
11145 break;
11147 case LEU:
11148 /* unsigned <= 0 is equivalent to == 0 */
11149 if (const_op == 0)
11150 code = EQ;
11151 /* (unsigned) <= 0x7fffffff is equivalent to >= 0. */
11152 else if (mode_width - 1 < HOST_BITS_PER_WIDE_INT
11153 && (unsigned HOST_WIDE_INT) const_op
11154 == ((unsigned HOST_WIDE_INT) 1 << (mode_width - 1)) - 1)
11156 const_op = 0;
11157 code = GE;
11159 break;
11161 case GEU:
11162 /* >= C is equivalent to > (C - 1). */
11163 if (const_op > 1)
11165 const_op -= 1;
11166 code = GTU;
11167 /* ... fall through ... */
11170 /* (unsigned) >= 0x80000000 is equivalent to < 0. */
11171 else if (mode_width - 1 < HOST_BITS_PER_WIDE_INT
11172 && (unsigned HOST_WIDE_INT) const_op
11173 == (unsigned HOST_WIDE_INT) 1 << (mode_width - 1))
11175 const_op = 0;
11176 code = LT;
11177 break;
11179 else
11180 break;
11182 case GTU:
11183 /* unsigned > 0 is equivalent to != 0 */
11184 if (const_op == 0)
11185 code = NE;
11186 /* (unsigned) > 0x7fffffff is equivalent to < 0. */
11187 else if (mode_width - 1 < HOST_BITS_PER_WIDE_INT
11188 && (unsigned HOST_WIDE_INT) const_op
11189 == ((unsigned HOST_WIDE_INT) 1 << (mode_width - 1)) - 1)
11191 const_op = 0;
11192 code = LT;
11194 break;
11196 default:
11197 break;
11200 *pop1 = GEN_INT (const_op);
11201 return code;
11204 /* Simplify a comparison between *POP0 and *POP1 where CODE is the
11205 comparison code that will be tested.
11207 The result is a possibly different comparison code to use. *POP0 and
11208 *POP1 may be updated.
11210 It is possible that we might detect that a comparison is either always
11211 true or always false. However, we do not perform general constant
11212 folding in combine, so this knowledge isn't useful. Such tautologies
11213 should have been detected earlier. Hence we ignore all such cases. */
11215 static enum rtx_code
11216 simplify_comparison (enum rtx_code code, rtx *pop0, rtx *pop1)
11218 rtx op0 = *pop0;
11219 rtx op1 = *pop1;
11220 rtx tem, tem1;
11221 int i;
11222 machine_mode mode, tmode;
11224 /* Try a few ways of applying the same transformation to both operands. */
11225 while (1)
11227 #ifndef WORD_REGISTER_OPERATIONS
11228 /* The test below this one won't handle SIGN_EXTENDs on these machines,
11229 so check specially. */
11230 if (code != GTU && code != GEU && code != LTU && code != LEU
11231 && GET_CODE (op0) == ASHIFTRT && GET_CODE (op1) == ASHIFTRT
11232 && GET_CODE (XEXP (op0, 0)) == ASHIFT
11233 && GET_CODE (XEXP (op1, 0)) == ASHIFT
11234 && GET_CODE (XEXP (XEXP (op0, 0), 0)) == SUBREG
11235 && GET_CODE (XEXP (XEXP (op1, 0), 0)) == SUBREG
11236 && (GET_MODE (SUBREG_REG (XEXP (XEXP (op0, 0), 0)))
11237 == GET_MODE (SUBREG_REG (XEXP (XEXP (op1, 0), 0))))
11238 && CONST_INT_P (XEXP (op0, 1))
11239 && XEXP (op0, 1) == XEXP (op1, 1)
11240 && XEXP (op0, 1) == XEXP (XEXP (op0, 0), 1)
11241 && XEXP (op0, 1) == XEXP (XEXP (op1, 0), 1)
11242 && (INTVAL (XEXP (op0, 1))
11243 == (GET_MODE_PRECISION (GET_MODE (op0))
11244 - (GET_MODE_PRECISION
11245 (GET_MODE (SUBREG_REG (XEXP (XEXP (op0, 0), 0))))))))
11247 op0 = SUBREG_REG (XEXP (XEXP (op0, 0), 0));
11248 op1 = SUBREG_REG (XEXP (XEXP (op1, 0), 0));
11250 #endif
11252 /* If both operands are the same constant shift, see if we can ignore the
11253 shift. We can if the shift is a rotate or if the bits shifted out of
11254 this shift are known to be zero for both inputs and if the type of
11255 comparison is compatible with the shift. */
11256 if (GET_CODE (op0) == GET_CODE (op1)
11257 && HWI_COMPUTABLE_MODE_P (GET_MODE (op0))
11258 && ((GET_CODE (op0) == ROTATE && (code == NE || code == EQ))
11259 || ((GET_CODE (op0) == LSHIFTRT || GET_CODE (op0) == ASHIFT)
11260 && (code != GT && code != LT && code != GE && code != LE))
11261 || (GET_CODE (op0) == ASHIFTRT
11262 && (code != GTU && code != LTU
11263 && code != GEU && code != LEU)))
11264 && CONST_INT_P (XEXP (op0, 1))
11265 && INTVAL (XEXP (op0, 1)) >= 0
11266 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT
11267 && XEXP (op0, 1) == XEXP (op1, 1))
11269 machine_mode mode = GET_MODE (op0);
11270 unsigned HOST_WIDE_INT mask = GET_MODE_MASK (mode);
11271 int shift_count = INTVAL (XEXP (op0, 1));
11273 if (GET_CODE (op0) == LSHIFTRT || GET_CODE (op0) == ASHIFTRT)
11274 mask &= (mask >> shift_count) << shift_count;
11275 else if (GET_CODE (op0) == ASHIFT)
11276 mask = (mask & (mask << shift_count)) >> shift_count;
11278 if ((nonzero_bits (XEXP (op0, 0), mode) & ~mask) == 0
11279 && (nonzero_bits (XEXP (op1, 0), mode) & ~mask) == 0)
11280 op0 = XEXP (op0, 0), op1 = XEXP (op1, 0);
11281 else
11282 break;
11285 /* If both operands are AND's of a paradoxical SUBREG by constant, the
11286 SUBREGs are of the same mode, and, in both cases, the AND would
11287 be redundant if the comparison was done in the narrower mode,
11288 do the comparison in the narrower mode (e.g., we are AND'ing with 1
11289 and the operand's possibly nonzero bits are 0xffffff01; in that case
11290 if we only care about QImode, we don't need the AND). This case
11291 occurs if the output mode of an scc insn is not SImode and
11292 STORE_FLAG_VALUE == 1 (e.g., the 386).
11294 Similarly, check for a case where the AND's are ZERO_EXTEND
11295 operations from some narrower mode even though a SUBREG is not
11296 present. */
11298 else if (GET_CODE (op0) == AND && GET_CODE (op1) == AND
11299 && CONST_INT_P (XEXP (op0, 1))
11300 && CONST_INT_P (XEXP (op1, 1)))
11302 rtx inner_op0 = XEXP (op0, 0);
11303 rtx inner_op1 = XEXP (op1, 0);
11304 HOST_WIDE_INT c0 = INTVAL (XEXP (op0, 1));
11305 HOST_WIDE_INT c1 = INTVAL (XEXP (op1, 1));
11306 int changed = 0;
11308 if (paradoxical_subreg_p (inner_op0)
11309 && GET_CODE (inner_op1) == SUBREG
11310 && (GET_MODE (SUBREG_REG (inner_op0))
11311 == GET_MODE (SUBREG_REG (inner_op1)))
11312 && (GET_MODE_PRECISION (GET_MODE (SUBREG_REG (inner_op0)))
11313 <= HOST_BITS_PER_WIDE_INT)
11314 && (0 == ((~c0) & nonzero_bits (SUBREG_REG (inner_op0),
11315 GET_MODE (SUBREG_REG (inner_op0)))))
11316 && (0 == ((~c1) & nonzero_bits (SUBREG_REG (inner_op1),
11317 GET_MODE (SUBREG_REG (inner_op1))))))
11319 op0 = SUBREG_REG (inner_op0);
11320 op1 = SUBREG_REG (inner_op1);
11322 /* The resulting comparison is always unsigned since we masked
11323 off the original sign bit. */
11324 code = unsigned_condition (code);
11326 changed = 1;
11329 else if (c0 == c1)
11330 for (tmode = GET_CLASS_NARROWEST_MODE
11331 (GET_MODE_CLASS (GET_MODE (op0)));
11332 tmode != GET_MODE (op0); tmode = GET_MODE_WIDER_MODE (tmode))
11333 if ((unsigned HOST_WIDE_INT) c0 == GET_MODE_MASK (tmode))
11335 op0 = gen_lowpart (tmode, inner_op0);
11336 op1 = gen_lowpart (tmode, inner_op1);
11337 code = unsigned_condition (code);
11338 changed = 1;
11339 break;
11342 if (! changed)
11343 break;
11346 /* If both operands are NOT, we can strip off the outer operation
11347 and adjust the comparison code for swapped operands; similarly for
11348 NEG, except that this must be an equality comparison. */
11349 else if ((GET_CODE (op0) == NOT && GET_CODE (op1) == NOT)
11350 || (GET_CODE (op0) == NEG && GET_CODE (op1) == NEG
11351 && (code == EQ || code == NE)))
11352 op0 = XEXP (op0, 0), op1 = XEXP (op1, 0), code = swap_condition (code);
11354 else
11355 break;
11358 /* If the first operand is a constant, swap the operands and adjust the
11359 comparison code appropriately, but don't do this if the second operand
11360 is already a constant integer. */
11361 if (swap_commutative_operands_p (op0, op1))
11363 tem = op0, op0 = op1, op1 = tem;
11364 code = swap_condition (code);
11367 /* We now enter a loop during which we will try to simplify the comparison.
11368 For the most part, we only are concerned with comparisons with zero,
11369 but some things may really be comparisons with zero but not start
11370 out looking that way. */
11372 while (CONST_INT_P (op1))
11374 machine_mode mode = GET_MODE (op0);
11375 unsigned int mode_width = GET_MODE_PRECISION (mode);
11376 unsigned HOST_WIDE_INT mask = GET_MODE_MASK (mode);
11377 int equality_comparison_p;
11378 int sign_bit_comparison_p;
11379 int unsigned_comparison_p;
11380 HOST_WIDE_INT const_op;
11382 /* We only want to handle integral modes. This catches VOIDmode,
11383 CCmode, and the floating-point modes. An exception is that we
11384 can handle VOIDmode if OP0 is a COMPARE or a comparison
11385 operation. */
11387 if (GET_MODE_CLASS (mode) != MODE_INT
11388 && ! (mode == VOIDmode
11389 && (GET_CODE (op0) == COMPARE || COMPARISON_P (op0))))
11390 break;
11392 /* Try to simplify the compare to constant, possibly changing the
11393 comparison op, and/or changing op1 to zero. */
11394 code = simplify_compare_const (code, mode, op0, &op1);
11395 const_op = INTVAL (op1);
11397 /* Compute some predicates to simplify code below. */
11399 equality_comparison_p = (code == EQ || code == NE);
11400 sign_bit_comparison_p = ((code == LT || code == GE) && const_op == 0);
11401 unsigned_comparison_p = (code == LTU || code == LEU || code == GTU
11402 || code == GEU);
11404 /* If this is a sign bit comparison and we can do arithmetic in
11405 MODE, say that we will only be needing the sign bit of OP0. */
11406 if (sign_bit_comparison_p && HWI_COMPUTABLE_MODE_P (mode))
11407 op0 = force_to_mode (op0, mode,
11408 (unsigned HOST_WIDE_INT) 1
11409 << (GET_MODE_PRECISION (mode) - 1), 0);
11412 /* Now try cases based on the opcode of OP0. If none of the cases
11413 does a "continue", we exit this loop immediately after the
11414 switch. */
11416 switch (GET_CODE (op0))
11418 case ZERO_EXTRACT:
11419 /* If we are extracting a single bit from a variable position in
11420 a constant that has only a single bit set and are comparing it
11421 with zero, we can convert this into an equality comparison
11422 between the position and the location of the single bit. */
11423 /* Except we can't if SHIFT_COUNT_TRUNCATED is set, since we might
11424 have already reduced the shift count modulo the word size. */
11425 if (!SHIFT_COUNT_TRUNCATED
11426 && CONST_INT_P (XEXP (op0, 0))
11427 && XEXP (op0, 1) == const1_rtx
11428 && equality_comparison_p && const_op == 0
11429 && (i = exact_log2 (UINTVAL (XEXP (op0, 0)))) >= 0)
11431 if (BITS_BIG_ENDIAN)
11432 i = BITS_PER_WORD - 1 - i;
11434 op0 = XEXP (op0, 2);
11435 op1 = GEN_INT (i);
11436 const_op = i;
11438 /* Result is nonzero iff shift count is equal to I. */
11439 code = reverse_condition (code);
11440 continue;
11443 /* ... fall through ... */
11445 case SIGN_EXTRACT:
11446 tem = expand_compound_operation (op0);
11447 if (tem != op0)
11449 op0 = tem;
11450 continue;
11452 break;
11454 case NOT:
11455 /* If testing for equality, we can take the NOT of the constant. */
11456 if (equality_comparison_p
11457 && (tem = simplify_unary_operation (NOT, mode, op1, mode)) != 0)
11459 op0 = XEXP (op0, 0);
11460 op1 = tem;
11461 continue;
11464 /* If just looking at the sign bit, reverse the sense of the
11465 comparison. */
11466 if (sign_bit_comparison_p)
11468 op0 = XEXP (op0, 0);
11469 code = (code == GE ? LT : GE);
11470 continue;
11472 break;
11474 case NEG:
11475 /* If testing for equality, we can take the NEG of the constant. */
11476 if (equality_comparison_p
11477 && (tem = simplify_unary_operation (NEG, mode, op1, mode)) != 0)
11479 op0 = XEXP (op0, 0);
11480 op1 = tem;
11481 continue;
11484 /* The remaining cases only apply to comparisons with zero. */
11485 if (const_op != 0)
11486 break;
11488 /* When X is ABS or is known positive,
11489 (neg X) is < 0 if and only if X != 0. */
11491 if (sign_bit_comparison_p
11492 && (GET_CODE (XEXP (op0, 0)) == ABS
11493 || (mode_width <= HOST_BITS_PER_WIDE_INT
11494 && (nonzero_bits (XEXP (op0, 0), mode)
11495 & ((unsigned HOST_WIDE_INT) 1 << (mode_width - 1)))
11496 == 0)))
11498 op0 = XEXP (op0, 0);
11499 code = (code == LT ? NE : EQ);
11500 continue;
11503 /* If we have NEG of something whose two high-order bits are the
11504 same, we know that "(-a) < 0" is equivalent to "a > 0". */
11505 if (num_sign_bit_copies (op0, mode) >= 2)
11507 op0 = XEXP (op0, 0);
11508 code = swap_condition (code);
11509 continue;
11511 break;
11513 case ROTATE:
11514 /* If we are testing equality and our count is a constant, we
11515 can perform the inverse operation on our RHS. */
11516 if (equality_comparison_p && CONST_INT_P (XEXP (op0, 1))
11517 && (tem = simplify_binary_operation (ROTATERT, mode,
11518 op1, XEXP (op0, 1))) != 0)
11520 op0 = XEXP (op0, 0);
11521 op1 = tem;
11522 continue;
11525 /* If we are doing a < 0 or >= 0 comparison, it means we are testing
11526 a particular bit. Convert it to an AND of a constant of that
11527 bit. This will be converted into a ZERO_EXTRACT. */
11528 if (const_op == 0 && sign_bit_comparison_p
11529 && CONST_INT_P (XEXP (op0, 1))
11530 && mode_width <= HOST_BITS_PER_WIDE_INT)
11532 op0 = simplify_and_const_int (NULL_RTX, mode, XEXP (op0, 0),
11533 ((unsigned HOST_WIDE_INT) 1
11534 << (mode_width - 1
11535 - INTVAL (XEXP (op0, 1)))));
11536 code = (code == LT ? NE : EQ);
11537 continue;
11540 /* Fall through. */
11542 case ABS:
11543 /* ABS is ignorable inside an equality comparison with zero. */
11544 if (const_op == 0 && equality_comparison_p)
11546 op0 = XEXP (op0, 0);
11547 continue;
11549 break;
11551 case SIGN_EXTEND:
11552 /* Can simplify (compare (zero/sign_extend FOO) CONST) to
11553 (compare FOO CONST) if CONST fits in FOO's mode and we
11554 are either testing inequality or have an unsigned
11555 comparison with ZERO_EXTEND or a signed comparison with
11556 SIGN_EXTEND. But don't do it if we don't have a compare
11557 insn of the given mode, since we'd have to revert it
11558 later on, and then we wouldn't know whether to sign- or
11559 zero-extend. */
11560 mode = GET_MODE (XEXP (op0, 0));
11561 if (GET_MODE_CLASS (mode) == MODE_INT
11562 && ! unsigned_comparison_p
11563 && HWI_COMPUTABLE_MODE_P (mode)
11564 && trunc_int_for_mode (const_op, mode) == const_op
11565 && have_insn_for (COMPARE, mode))
11567 op0 = XEXP (op0, 0);
11568 continue;
11570 break;
11572 case SUBREG:
11573 /* Check for the case where we are comparing A - C1 with C2, that is
11575 (subreg:MODE (plus (A) (-C1))) op (C2)
11577 with C1 a constant, and try to lift the SUBREG, i.e. to do the
11578 comparison in the wider mode. One of the following two conditions
11579 must be true in order for this to be valid:
11581 1. The mode extension results in the same bit pattern being added
11582 on both sides and the comparison is equality or unsigned. As
11583 C2 has been truncated to fit in MODE, the pattern can only be
11584 all 0s or all 1s.
11586 2. The mode extension results in the sign bit being copied on
11587 each side.
11589 The difficulty here is that we have predicates for A but not for
11590 (A - C1) so we need to check that C1 is within proper bounds so
11591 as to perturb A as little as possible. */
11593 if (mode_width <= HOST_BITS_PER_WIDE_INT
11594 && subreg_lowpart_p (op0)
11595 && GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op0))) > mode_width
11596 && GET_CODE (SUBREG_REG (op0)) == PLUS
11597 && CONST_INT_P (XEXP (SUBREG_REG (op0), 1)))
11599 machine_mode inner_mode = GET_MODE (SUBREG_REG (op0));
11600 rtx a = XEXP (SUBREG_REG (op0), 0);
11601 HOST_WIDE_INT c1 = -INTVAL (XEXP (SUBREG_REG (op0), 1));
11603 if ((c1 > 0
11604 && (unsigned HOST_WIDE_INT) c1
11605 < (unsigned HOST_WIDE_INT) 1 << (mode_width - 1)
11606 && (equality_comparison_p || unsigned_comparison_p)
11607 /* (A - C1) zero-extends if it is positive and sign-extends
11608 if it is negative, C2 both zero- and sign-extends. */
11609 && ((0 == (nonzero_bits (a, inner_mode)
11610 & ~GET_MODE_MASK (mode))
11611 && const_op >= 0)
11612 /* (A - C1) sign-extends if it is positive and 1-extends
11613 if it is negative, C2 both sign- and 1-extends. */
11614 || (num_sign_bit_copies (a, inner_mode)
11615 > (unsigned int) (GET_MODE_PRECISION (inner_mode)
11616 - mode_width)
11617 && const_op < 0)))
11618 || ((unsigned HOST_WIDE_INT) c1
11619 < (unsigned HOST_WIDE_INT) 1 << (mode_width - 2)
11620 /* (A - C1) always sign-extends, like C2. */
11621 && num_sign_bit_copies (a, inner_mode)
11622 > (unsigned int) (GET_MODE_PRECISION (inner_mode)
11623 - (mode_width - 1))))
11625 op0 = SUBREG_REG (op0);
11626 continue;
11630 /* If the inner mode is narrower and we are extracting the low part,
11631 we can treat the SUBREG as if it were a ZERO_EXTEND. */
11632 if (subreg_lowpart_p (op0)
11633 && GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op0))) < mode_width)
11634 /* Fall through */ ;
11635 else
11636 break;
11638 /* ... fall through ... */
11640 case ZERO_EXTEND:
11641 mode = GET_MODE (XEXP (op0, 0));
11642 if (GET_MODE_CLASS (mode) == MODE_INT
11643 && (unsigned_comparison_p || equality_comparison_p)
11644 && HWI_COMPUTABLE_MODE_P (mode)
11645 && (unsigned HOST_WIDE_INT) const_op <= GET_MODE_MASK (mode)
11646 && const_op >= 0
11647 && have_insn_for (COMPARE, mode))
11649 op0 = XEXP (op0, 0);
11650 continue;
11652 break;
11654 case PLUS:
11655 /* (eq (plus X A) B) -> (eq X (minus B A)). We can only do
11656 this for equality comparisons due to pathological cases involving
11657 overflows. */
11658 if (equality_comparison_p
11659 && 0 != (tem = simplify_binary_operation (MINUS, mode,
11660 op1, XEXP (op0, 1))))
11662 op0 = XEXP (op0, 0);
11663 op1 = tem;
11664 continue;
11667 /* (plus (abs X) (const_int -1)) is < 0 if and only if X == 0. */
11668 if (const_op == 0 && XEXP (op0, 1) == constm1_rtx
11669 && GET_CODE (XEXP (op0, 0)) == ABS && sign_bit_comparison_p)
11671 op0 = XEXP (XEXP (op0, 0), 0);
11672 code = (code == LT ? EQ : NE);
11673 continue;
11675 break;
11677 case MINUS:
11678 /* We used to optimize signed comparisons against zero, but that
11679 was incorrect. Unsigned comparisons against zero (GTU, LEU)
11680 arrive here as equality comparisons, or (GEU, LTU) are
11681 optimized away. No need to special-case them. */
11683 /* (eq (minus A B) C) -> (eq A (plus B C)) or
11684 (eq B (minus A C)), whichever simplifies. We can only do
11685 this for equality comparisons due to pathological cases involving
11686 overflows. */
11687 if (equality_comparison_p
11688 && 0 != (tem = simplify_binary_operation (PLUS, mode,
11689 XEXP (op0, 1), op1)))
11691 op0 = XEXP (op0, 0);
11692 op1 = tem;
11693 continue;
11696 if (equality_comparison_p
11697 && 0 != (tem = simplify_binary_operation (MINUS, mode,
11698 XEXP (op0, 0), op1)))
11700 op0 = XEXP (op0, 1);
11701 op1 = tem;
11702 continue;
11705 /* The sign bit of (minus (ashiftrt X C) X), where C is the number
11706 of bits in X minus 1, is one iff X > 0. */
11707 if (sign_bit_comparison_p && GET_CODE (XEXP (op0, 0)) == ASHIFTRT
11708 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
11709 && UINTVAL (XEXP (XEXP (op0, 0), 1)) == mode_width - 1
11710 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), XEXP (op0, 1)))
11712 op0 = XEXP (op0, 1);
11713 code = (code == GE ? LE : GT);
11714 continue;
11716 break;
11718 case XOR:
11719 /* (eq (xor A B) C) -> (eq A (xor B C)). This is a simplification
11720 if C is zero or B is a constant. */
11721 if (equality_comparison_p
11722 && 0 != (tem = simplify_binary_operation (XOR, mode,
11723 XEXP (op0, 1), op1)))
11725 op0 = XEXP (op0, 0);
11726 op1 = tem;
11727 continue;
11729 break;
11731 case EQ: case NE:
11732 case UNEQ: case LTGT:
11733 case LT: case LTU: case UNLT: case LE: case LEU: case UNLE:
11734 case GT: case GTU: case UNGT: case GE: case GEU: case UNGE:
11735 case UNORDERED: case ORDERED:
11736 /* We can't do anything if OP0 is a condition code value, rather
11737 than an actual data value. */
11738 if (const_op != 0
11739 || CC0_P (XEXP (op0, 0))
11740 || GET_MODE_CLASS (GET_MODE (XEXP (op0, 0))) == MODE_CC)
11741 break;
11743 /* Get the two operands being compared. */
11744 if (GET_CODE (XEXP (op0, 0)) == COMPARE)
11745 tem = XEXP (XEXP (op0, 0), 0), tem1 = XEXP (XEXP (op0, 0), 1);
11746 else
11747 tem = XEXP (op0, 0), tem1 = XEXP (op0, 1);
11749 /* Check for the cases where we simply want the result of the
11750 earlier test or the opposite of that result. */
11751 if (code == NE || code == EQ
11752 || (val_signbit_known_set_p (GET_MODE (op0), STORE_FLAG_VALUE)
11753 && (code == LT || code == GE)))
11755 enum rtx_code new_code;
11756 if (code == LT || code == NE)
11757 new_code = GET_CODE (op0);
11758 else
11759 new_code = reversed_comparison_code (op0, NULL);
11761 if (new_code != UNKNOWN)
11763 code = new_code;
11764 op0 = tem;
11765 op1 = tem1;
11766 continue;
11769 break;
11771 case IOR:
11772 /* The sign bit of (ior (plus X (const_int -1)) X) is nonzero
11773 iff X <= 0. */
11774 if (sign_bit_comparison_p && GET_CODE (XEXP (op0, 0)) == PLUS
11775 && XEXP (XEXP (op0, 0), 1) == constm1_rtx
11776 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), XEXP (op0, 1)))
11778 op0 = XEXP (op0, 1);
11779 code = (code == GE ? GT : LE);
11780 continue;
11782 break;
11784 case AND:
11785 /* Convert (and (xshift 1 X) Y) to (and (lshiftrt Y X) 1). This
11786 will be converted to a ZERO_EXTRACT later. */
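/* E.g., testing (ne (and (ashift 1 X) Y) 0) -- i.e. bit X of Y --
becomes (ne (and (lshiftrt Y X) 1) 0). */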
11787 if (const_op == 0 && equality_comparison_p
11788 && GET_CODE (XEXP (op0, 0)) == ASHIFT
11789 && XEXP (XEXP (op0, 0), 0) == const1_rtx)
11791 op0 = gen_rtx_LSHIFTRT (mode, XEXP (op0, 1),
11792 XEXP (XEXP (op0, 0), 1));
11793 op0 = simplify_and_const_int (NULL_RTX, mode, op0, 1);
11794 continue;
11797 /* If we are comparing (and (lshiftrt X C1) C2) for equality with
11798 zero and X is a comparison and C1 and C2 describe only bits set
11799 in STORE_FLAG_VALUE, we can compare with X. */
11800 if (const_op == 0 && equality_comparison_p
11801 && mode_width <= HOST_BITS_PER_WIDE_INT
11802 && CONST_INT_P (XEXP (op0, 1))
11803 && GET_CODE (XEXP (op0, 0)) == LSHIFTRT
11804 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
11805 && INTVAL (XEXP (XEXP (op0, 0), 1)) >= 0
11806 && INTVAL (XEXP (XEXP (op0, 0), 1)) < HOST_BITS_PER_WIDE_INT)
11808 mask = ((INTVAL (XEXP (op0, 1)) & GET_MODE_MASK (mode))
11809 << INTVAL (XEXP (XEXP (op0, 0), 1)));
11810 if ((~STORE_FLAG_VALUE & mask) == 0
11811 && (COMPARISON_P (XEXP (XEXP (op0, 0), 0))
11812 || ((tem = get_last_value (XEXP (XEXP (op0, 0), 0))) != 0
11813 && COMPARISON_P (tem))))
11815 op0 = XEXP (XEXP (op0, 0), 0);
11816 continue;
11820 /* If we are doing an equality comparison of an AND of a bit equal
11821 to the sign bit, replace this with a LT or GE comparison of
11822 the underlying value. */
11823 if (equality_comparison_p
11824 && const_op == 0
11825 && CONST_INT_P (XEXP (op0, 1))
11826 && mode_width <= HOST_BITS_PER_WIDE_INT
11827 && ((INTVAL (XEXP (op0, 1)) & GET_MODE_MASK (mode))
11828 == (unsigned HOST_WIDE_INT) 1 << (mode_width - 1)))
11830 op0 = XEXP (op0, 0);
11831 code = (code == EQ ? GE : LT);
11832 continue;
11835 /* If this AND operation is really a ZERO_EXTEND from a narrower
11836 mode, the constant fits within that mode, and this is either an
11837 equality or unsigned comparison, try to do this comparison in
11838 the narrower mode.
11840 Note that in:
11842 (ne:DI (and:DI (reg:DI 4) (const_int 0xffffffff)) (const_int 0))
11843 -> (ne:DI (reg:SI 4) (const_int 0))
11845 the transformation is invalid unless TRULY_NOOP_TRUNCATION
11846 allows it or the register is known to hold a value of the
11847 required mode. */
11848 if ((equality_comparison_p || unsigned_comparison_p)
11849 && CONST_INT_P (XEXP (op0, 1))
11850 && (i = exact_log2 ((UINTVAL (XEXP (op0, 1))
11851 & GET_MODE_MASK (mode))
11852 + 1)) >= 0
11853 && const_op >> i == 0
11854 && (tmode = mode_for_size (i, MODE_INT, 1)) != BLKmode
11855 && (TRULY_NOOP_TRUNCATION_MODES_P (tmode, GET_MODE (op0))
11856 || (REG_P (XEXP (op0, 0))
11857 && reg_truncated_to_mode (tmode, XEXP (op0, 0)))))
11859 op0 = gen_lowpart (tmode, XEXP (op0, 0));
11860 continue;
11863 /* If this is (and:M1 (subreg:M2 X 0) (const_int C1)) where C1
11864 fits in both M1 and M2 and the SUBREG is either paradoxical
11865 or represents the low part, permute the SUBREG and the AND
11866 and try again. */
11867 if (GET_CODE (XEXP (op0, 0)) == SUBREG)
11869 unsigned HOST_WIDE_INT c1;
11870 tmode = GET_MODE (SUBREG_REG (XEXP (op0, 0)));
11871 /* Require an integral mode, to avoid creating something like
11872 (AND:SF ...). */
11873 if (SCALAR_INT_MODE_P (tmode)
11874 /* It is unsafe to commute the AND into the SUBREG if the
11875 SUBREG is paradoxical and WORD_REGISTER_OPERATIONS is
11876 not defined. As originally written the upper bits
11877 have a defined value due to the AND operation.
11878 However, if we commute the AND inside the SUBREG then
11879 they no longer have defined values and the meaning of
11880 the code has been changed. */
11881 && (0
11882 #ifdef WORD_REGISTER_OPERATIONS
11883 || (mode_width > GET_MODE_PRECISION (tmode)
11884 && mode_width <= BITS_PER_WORD)
11885 #endif
11886 || (mode_width <= GET_MODE_PRECISION (tmode)
11887 && subreg_lowpart_p (XEXP (op0, 0))))
11888 && CONST_INT_P (XEXP (op0, 1))
11889 && mode_width <= HOST_BITS_PER_WIDE_INT
11890 && HWI_COMPUTABLE_MODE_P (tmode)
11891 && ((c1 = INTVAL (XEXP (op0, 1))) & ~mask) == 0
11892 && (c1 & ~GET_MODE_MASK (tmode)) == 0
11893 && c1 != mask
11894 && c1 != GET_MODE_MASK (tmode))
11896 op0 = simplify_gen_binary (AND, tmode,
11897 SUBREG_REG (XEXP (op0, 0)),
11898 gen_int_mode (c1, tmode));
11899 op0 = gen_lowpart (mode, op0);
11900 continue;
11904 /* Convert (ne (and (not X) 1) 0) to (eq (and X 1) 0). */
11905 if (const_op == 0 && equality_comparison_p
11906 && XEXP (op0, 1) == const1_rtx
11907 && GET_CODE (XEXP (op0, 0)) == NOT)
11909 op0 = simplify_and_const_int (NULL_RTX, mode,
11910 XEXP (XEXP (op0, 0), 0), 1);
11911 code = (code == NE ? EQ : NE);
11912 continue;
11915 /* Convert (ne (and (lshiftrt (not X)) 1) 0) to
11916 (eq (and (lshiftrt X) 1) 0).
11917 Also handle the case where (not X) is expressed using xor. */
11918 if (const_op == 0 && equality_comparison_p
11919 && XEXP (op0, 1) == const1_rtx
11920 && GET_CODE (XEXP (op0, 0)) == LSHIFTRT)
11922 rtx shift_op = XEXP (XEXP (op0, 0), 0);
11923 rtx shift_count = XEXP (XEXP (op0, 0), 1);
11925 if (GET_CODE (shift_op) == NOT
11926 || (GET_CODE (shift_op) == XOR
11927 && CONST_INT_P (XEXP (shift_op, 1))
11928 && CONST_INT_P (shift_count)
11929 && HWI_COMPUTABLE_MODE_P (mode)
11930 && (UINTVAL (XEXP (shift_op, 1))
11931 == (unsigned HOST_WIDE_INT) 1
11932 << INTVAL (shift_count))))
11935 op0 = gen_rtx_LSHIFTRT (mode, XEXP (shift_op, 0), shift_count);
11936 op0 = simplify_and_const_int (NULL_RTX, mode, op0, 1);
11937 code = (code == NE ? EQ : NE);
11938 continue;
11941 break;
11943 case ASHIFT:
11944 /* If we have (compare (ashift FOO N) (const_int C)) and
11945 the high order N bits of FOO (N+1 if an inequality comparison)
11946 are known to be zero, we can do this by comparing FOO with C
11947 shifted right N bits so long as the low-order N bits of C are
11948 zero. */
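/* E.g., if the high four bits of FOO are known to be zero, then
(eq (ashift FOO 4) 0x50) can become (eq FOO 5), since the low four
bits of 0x50 are zero. */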
11949 if (CONST_INT_P (XEXP (op0, 1))
11950 && INTVAL (XEXP (op0, 1)) >= 0
11951 && ((INTVAL (XEXP (op0, 1)) + ! equality_comparison_p)
11952 < HOST_BITS_PER_WIDE_INT)
11953 && (((unsigned HOST_WIDE_INT) const_op
11954 & (((unsigned HOST_WIDE_INT) 1 << INTVAL (XEXP (op0, 1)))
11955 - 1)) == 0)
11956 && mode_width <= HOST_BITS_PER_WIDE_INT
11957 && (nonzero_bits (XEXP (op0, 0), mode)
11958 & ~(mask >> (INTVAL (XEXP (op0, 1))
11959 + ! equality_comparison_p))) == 0)
11961 /* We must perform a logical shift, not an arithmetic one,
11962 as we want the top N bits of C to be zero. */
11963 unsigned HOST_WIDE_INT temp = const_op & GET_MODE_MASK (mode);
11965 temp >>= INTVAL (XEXP (op0, 1));
11966 op1 = gen_int_mode (temp, mode);
11967 op0 = XEXP (op0, 0);
11968 continue;
11971 /* If we are doing a sign bit comparison, it means we are testing
11972 a particular bit. Convert it to the appropriate AND. */
11973 if (sign_bit_comparison_p && CONST_INT_P (XEXP (op0, 1))
11974 && mode_width <= HOST_BITS_PER_WIDE_INT)
11976 op0 = simplify_and_const_int (NULL_RTX, mode, XEXP (op0, 0),
11977 ((unsigned HOST_WIDE_INT) 1
11978 << (mode_width - 1
11979 - INTVAL (XEXP (op0, 1)))));
11980 code = (code == LT ? NE : EQ);
11981 continue;
11984 /* If this is an equality comparison with zero and we are shifting
11985 the low bit to the sign bit, we can convert this to an AND of the
11986 low-order bit. */
11987 if (const_op == 0 && equality_comparison_p
11988 && CONST_INT_P (XEXP (op0, 1))
11989 && UINTVAL (XEXP (op0, 1)) == mode_width - 1)
11991 op0 = simplify_and_const_int (NULL_RTX, mode, XEXP (op0, 0), 1);
11992 continue;
11994 break;
11996 case ASHIFTRT:
11997 /* If this is an equality comparison with zero, we can do this
11998 as a logical shift, which might be much simpler. */
11999 if (equality_comparison_p && const_op == 0
12000 && CONST_INT_P (XEXP (op0, 1)))
12002 op0 = simplify_shift_const (NULL_RTX, LSHIFTRT, mode,
12003 XEXP (op0, 0),
12004 INTVAL (XEXP (op0, 1)));
12005 continue;
12008 /* If OP0 is a sign extension and CODE is not an unsigned comparison,
12009 do the comparison in a narrower mode. */
12010 if (! unsigned_comparison_p
12011 && CONST_INT_P (XEXP (op0, 1))
12012 && GET_CODE (XEXP (op0, 0)) == ASHIFT
12013 && XEXP (op0, 1) == XEXP (XEXP (op0, 0), 1)
12014 && (tmode = mode_for_size (mode_width - INTVAL (XEXP (op0, 1)),
12015 MODE_INT, 1)) != BLKmode
12016 && (((unsigned HOST_WIDE_INT) const_op
12017 + (GET_MODE_MASK (tmode) >> 1) + 1)
12018 <= GET_MODE_MASK (tmode)))
12020 op0 = gen_lowpart (tmode, XEXP (XEXP (op0, 0), 0));
12021 continue;
12024 /* Likewise if OP0 is a PLUS of a sign extension with a
12025 constant, which is usually represented with the PLUS
12026 between the shifts. */
12027 if (! unsigned_comparison_p
12028 && CONST_INT_P (XEXP (op0, 1))
12029 && GET_CODE (XEXP (op0, 0)) == PLUS
12030 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
12031 && GET_CODE (XEXP (XEXP (op0, 0), 0)) == ASHIFT
12032 && XEXP (op0, 1) == XEXP (XEXP (XEXP (op0, 0), 0), 1)
12033 && (tmode = mode_for_size (mode_width - INTVAL (XEXP (op0, 1)),
12034 MODE_INT, 1)) != BLKmode
12035 && (((unsigned HOST_WIDE_INT) const_op
12036 + (GET_MODE_MASK (tmode) >> 1) + 1)
12037 <= GET_MODE_MASK (tmode)))
12039 rtx inner = XEXP (XEXP (XEXP (op0, 0), 0), 0);
12040 rtx add_const = XEXP (XEXP (op0, 0), 1);
12041 rtx new_const = simplify_gen_binary (ASHIFTRT, GET_MODE (op0),
12042 add_const, XEXP (op0, 1));
12044 op0 = simplify_gen_binary (PLUS, tmode,
12045 gen_lowpart (tmode, inner),
12046 new_const);
12047 continue;
12050 /* ... fall through ... */
12051 case LSHIFTRT:
12052 /* If we have (compare (xshiftrt FOO N) (const_int C)) and
12053 the low order N bits of FOO are known to be zero, we can do this
12054 by comparing FOO with C shifted left N bits so long as no
12055 overflow occurs. Even if the low-order N bits of FOO aren't known
12056 to be zero, we can still use the same optimization when the
12057 comparison is >= or <, and for > or <= we can do so after setting
12058 all the low-order N bits in the comparison constant. */
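/* For instance, (geu (lshiftrt X 2) 5) becomes (geu X 20), and
(gtu (lshiftrt X 2) 5) becomes (gtu X 23) once the low two bits of the
constant are also set. */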
12059 if (CONST_INT_P (XEXP (op0, 1))
12060 && INTVAL (XEXP (op0, 1)) > 0
12061 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT
12062 && mode_width <= HOST_BITS_PER_WIDE_INT
12063 && (((unsigned HOST_WIDE_INT) const_op
12064 + (GET_CODE (op0) != LSHIFTRT
12065 ? ((GET_MODE_MASK (mode) >> INTVAL (XEXP (op0, 1)) >> 1)
12066 + 1)
12067 : 0))
12068 <= GET_MODE_MASK (mode) >> INTVAL (XEXP (op0, 1))))
12070 unsigned HOST_WIDE_INT low_bits
12071 = (nonzero_bits (XEXP (op0, 0), mode)
12072 & (((unsigned HOST_WIDE_INT) 1
12073 << INTVAL (XEXP (op0, 1))) - 1));
12074 if (low_bits == 0 || !equality_comparison_p)
12076 /* If the shift was logical, then we must make the condition
12077 unsigned. */
12078 if (GET_CODE (op0) == LSHIFTRT)
12079 code = unsigned_condition (code);
12081 const_op <<= INTVAL (XEXP (op0, 1));
12082 if (low_bits != 0
12083 && (code == GT || code == GTU
12084 || code == LE || code == LEU))
12085 const_op
12086 |= (((HOST_WIDE_INT) 1 << INTVAL (XEXP (op0, 1))) - 1);
12087 op1 = GEN_INT (const_op);
12088 op0 = XEXP (op0, 0);
12089 continue;
12093 /* If we are using this shift to extract just the sign bit, we
12094 can replace this with an LT or GE comparison. */
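/* For instance, in a 32-bit mode (ne (lshiftrt X 31) 0) is exactly the
sign-bit test, so it becomes (lt X 0). */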
12095 if (const_op == 0
12096 && (equality_comparison_p || sign_bit_comparison_p)
12097 && CONST_INT_P (XEXP (op0, 1))
12098 && UINTVAL (XEXP (op0, 1)) == mode_width - 1)
12100 op0 = XEXP (op0, 0);
12101 code = (code == NE || code == GT ? LT : GE);
12102 continue;
12104 break;
12106 default:
12107 break;
12110 break;
12113 /* Now make any compound operations involved in this comparison. Then,
12114 check for an outermost SUBREG on OP0 that is not doing anything or is
12115 paradoxical. The latter transformation must only be performed when
12116 it is known that the "extra" bits will be the same in op0 and op1 or
12117 that they don't matter. There are three cases to consider:
12119 1. SUBREG_REG (op0) is a register. In this case the bits are don't
12120 care bits and we can assume they have any convenient value. So
12121 making the transformation is safe.
12123 2. SUBREG_REG (op0) is a memory and LOAD_EXTEND_OP is not defined.
12124 In this case the upper bits of op0 are undefined. We should not make
12125 the simplification in that case as we do not know the contents of
12126 those bits.
12128 3. SUBREG_REG (op0) is a memory and LOAD_EXTEND_OP is defined and not
12129 UNKNOWN. In that case we know those bits are zeros or ones. We must
12130 also be sure that they are the same as the upper bits of op1.
12132 We can never remove a SUBREG for a non-equality comparison because
12133 the sign bit is in a different place in the underlying object. */
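/* For instance, an EQ test of a lowpart (subreg:QI (reg:SI R)) against 17
can be widened to an SImode test of R against 17 when the bits of R above
the low byte are known to be zero. */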
12135 op0 = make_compound_operation (op0, op1 == const0_rtx ? COMPARE : SET);
12136 op1 = make_compound_operation (op1, SET);
12138 if (GET_CODE (op0) == SUBREG && subreg_lowpart_p (op0)
12139 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT
12140 && GET_MODE_CLASS (GET_MODE (SUBREG_REG (op0))) == MODE_INT
12141 && (code == NE || code == EQ))
12143 if (paradoxical_subreg_p (op0))
12145 /* For paradoxical subregs, allow case 1 as above. Case 3 isn't
12146 implemented. */
12147 if (REG_P (SUBREG_REG (op0)))
12149 op0 = SUBREG_REG (op0);
12150 op1 = gen_lowpart (GET_MODE (op0), op1);
12153 else if ((GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op0)))
12154 <= HOST_BITS_PER_WIDE_INT)
12155 && (nonzero_bits (SUBREG_REG (op0),
12156 GET_MODE (SUBREG_REG (op0)))
12157 & ~GET_MODE_MASK (GET_MODE (op0))) == 0)
12159 tem = gen_lowpart (GET_MODE (SUBREG_REG (op0)), op1);
12161 if ((nonzero_bits (tem, GET_MODE (SUBREG_REG (op0)))
12162 & ~GET_MODE_MASK (GET_MODE (op0))) == 0)
12163 op0 = SUBREG_REG (op0), op1 = tem;
12167 /* We now do the opposite procedure: Some machines don't have compare
12168 insns in all modes. If OP0's mode is an integer mode smaller than a
12169 word and we can't do a compare in that mode, see if there is a larger
12170 mode for which we can do the compare. There are a number of cases in
12171 which we can use the wider mode. */
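/* For instance, on a target with no HImode compare instruction, an EQ or NE
test of two HImode values whose nonzero bits already fit in HImode can be
done in SImode after zero-extending both operands. */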
12173 mode = GET_MODE (op0);
12174 if (mode != VOIDmode && GET_MODE_CLASS (mode) == MODE_INT
12175 && GET_MODE_SIZE (mode) < UNITS_PER_WORD
12176 && ! have_insn_for (COMPARE, mode))
12177 for (tmode = GET_MODE_WIDER_MODE (mode);
12178 (tmode != VOIDmode && HWI_COMPUTABLE_MODE_P (tmode));
12179 tmode = GET_MODE_WIDER_MODE (tmode))
12180 if (have_insn_for (COMPARE, tmode))
12182 int zero_extended;
12184 /* If this is a test for negative, we can make an explicit
12185 test of the sign bit. Test this first so we can use
12186 a paradoxical subreg to extend OP0. */
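/* For instance, a QImode test (lt X 0) can be done in SImode as
(ne (and (subreg:SI X) 0x80) 0), i.e. as an explicit test of X's sign bit. */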
12188 if (op1 == const0_rtx && (code == LT || code == GE)
12189 && HWI_COMPUTABLE_MODE_P (mode))
12191 unsigned HOST_WIDE_INT sign
12192 = (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1);
12193 op0 = simplify_gen_binary (AND, tmode,
12194 gen_lowpart (tmode, op0),
12195 gen_int_mode (sign, tmode));
12196 code = (code == LT) ? NE : EQ;
12197 break;
12200 /* If the only nonzero bits in OP0 and OP1 are those in the
12201 narrower mode and this is an equality or unsigned comparison,
12202 we can use the wider mode. Similarly for sign-extended
12203 values, in which case it is true for all comparisons. */
12204 zero_extended = ((code == EQ || code == NE
12205 || code == GEU || code == GTU
12206 || code == LEU || code == LTU)
12207 && (nonzero_bits (op0, tmode)
12208 & ~GET_MODE_MASK (mode)) == 0
12209 && ((CONST_INT_P (op1)
12210 || (nonzero_bits (op1, tmode)
12211 & ~GET_MODE_MASK (mode)) == 0)));
12213 if (zero_extended
12214 || ((num_sign_bit_copies (op0, tmode)
12215 > (unsigned int) (GET_MODE_PRECISION (tmode)
12216 - GET_MODE_PRECISION (mode)))
12217 && (num_sign_bit_copies (op1, tmode)
12218 > (unsigned int) (GET_MODE_PRECISION (tmode)
12219 - GET_MODE_PRECISION (mode)))))
12221 /* If OP0 is an AND and we don't have an AND in MODE either,
12222 make a new AND in the proper mode. */
12223 if (GET_CODE (op0) == AND
12224 && !have_insn_for (AND, mode))
12225 op0 = simplify_gen_binary (AND, tmode,
12226 gen_lowpart (tmode,
12227 XEXP (op0, 0)),
12228 gen_lowpart (tmode,
12229 XEXP (op0, 1)));
12230 else
12232 if (zero_extended)
12234 op0 = simplify_gen_unary (ZERO_EXTEND, tmode, op0, mode);
12235 op1 = simplify_gen_unary (ZERO_EXTEND, tmode, op1, mode);
12237 else
12239 op0 = simplify_gen_unary (SIGN_EXTEND, tmode, op0, mode);
12240 op1 = simplify_gen_unary (SIGN_EXTEND, tmode, op1, mode);
12242 break;
12247 /* We may have changed the comparison operands. Re-canonicalize. */
12248 if (swap_commutative_operands_p (op0, op1))
12250 tem = op0, op0 = op1, op1 = tem;
12251 code = swap_condition (code);
12254 /* If this machine only supports a subset of valid comparisons, see if we
12255 can convert an unsupported one into a supported one. */
12256 target_canonicalize_comparison (&code, &op0, &op1, 0);
12258 *pop0 = op0;
12259 *pop1 = op1;
12261 return code;
12264 /* Utility function for record_value_for_reg. Count number of
12265 rtxs in X. */
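/* For example, for (plus X X) this returns 1 + 2 * count_rtxs (X): the
shared operand is traversed only once but still counted twice. */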
12266 static int
12267 count_rtxs (rtx x)
12269 enum rtx_code code = GET_CODE (x);
12270 const char *fmt;
12271 int i, j, ret = 1;
12273 if (GET_RTX_CLASS (code) == RTX_BIN_ARITH
12274 || GET_RTX_CLASS (code) == RTX_COMM_ARITH)
12276 rtx x0 = XEXP (x, 0);
12277 rtx x1 = XEXP (x, 1);
12279 if (x0 == x1)
12280 return 1 + 2 * count_rtxs (x0);
12282 if ((GET_RTX_CLASS (GET_CODE (x1)) == RTX_BIN_ARITH
12283 || GET_RTX_CLASS (GET_CODE (x1)) == RTX_COMM_ARITH)
12284 && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
12285 return 2 + 2 * count_rtxs (x0)
12286 + count_rtxs (x0 == XEXP (x1, 0)
12287 ? XEXP (x1, 1) : XEXP (x1, 0));
12289 if ((GET_RTX_CLASS (GET_CODE (x0)) == RTX_BIN_ARITH
12290 || GET_RTX_CLASS (GET_CODE (x0)) == RTX_COMM_ARITH)
12291 && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
12292 return 2 + 2 * count_rtxs (x1)
12293 + count_rtxs (x1 == XEXP (x0, 0)
12294 ? XEXP (x0, 1) : XEXP (x0, 0));
12297 fmt = GET_RTX_FORMAT (code);
12298 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
12299 if (fmt[i] == 'e')
12300 ret += count_rtxs (XEXP (x, i));
12301 else if (fmt[i] == 'E')
12302 for (j = 0; j < XVECLEN (x, i); j++)
12303 ret += count_rtxs (XVECEXP (x, i, j));
12305 return ret;
12308 /* Utility function for the following routine. Called when X is part of a value
12309 being stored into last_set_value. Sets last_set_table_tick
12310 for each register mentioned. Similar to mention_regs in cse.c */
12312 static void
12313 update_table_tick (rtx x)
12315 enum rtx_code code = GET_CODE (x);
12316 const char *fmt = GET_RTX_FORMAT (code);
12317 int i, j;
12319 if (code == REG)
12321 unsigned int regno = REGNO (x);
12322 unsigned int endregno = END_REGNO (x);
12323 unsigned int r;
12325 for (r = regno; r < endregno; r++)
12327 reg_stat_type *rsp = &reg_stat[r];
12328 rsp->last_set_table_tick = label_tick;
12331 return;
12334 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
12335 if (fmt[i] == 'e')
12337 /* Check for identical subexpressions. If x contains
12338 identical subexpressions we only have to traverse one of
12339 them. */
12340 if (i == 0 && ARITHMETIC_P (x))
12342 /* Note that at this point x1 has already been
12343 processed. */
12344 rtx x0 = XEXP (x, 0);
12345 rtx x1 = XEXP (x, 1);
12347 /* If x0 and x1 are identical then there is no need to
12348 process x0. */
12349 if (x0 == x1)
12350 break;
12352 /* If x0 is identical to a subexpression of x1 then while
12353 processing x1, x0 has already been processed. Thus we
12354 are done with x. */
12355 if (ARITHMETIC_P (x1)
12356 && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
12357 break;
12359 /* If x1 is identical to a subexpression of x0 then we
12360 still have to process the rest of x0. */
12361 if (ARITHMETIC_P (x0)
12362 && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
12364 update_table_tick (XEXP (x0, x1 == XEXP (x0, 0) ? 1 : 0));
12365 break;
12369 update_table_tick (XEXP (x, i));
12371 else if (fmt[i] == 'E')
12372 for (j = 0; j < XVECLEN (x, i); j++)
12373 update_table_tick (XVECEXP (x, i, j));
12376 /* Record that REG is set to VALUE in insn INSN. If VALUE is zero, we
12377 are saying that the register is clobbered and we no longer know its
12378 value. If INSN is zero, don't update reg_stat[].last_set; this is
12379 only permitted with VALUE also zero and is used to invalidate the
12380 register. */
12382 static void
12383 record_value_for_reg (rtx reg, rtx_insn *insn, rtx value)
12385 unsigned int regno = REGNO (reg);
12386 unsigned int endregno = END_REGNO (reg);
12387 unsigned int i;
12388 reg_stat_type *rsp;
12390 /* If VALUE contains REG and we have a previous value for REG, substitute
12391 the previous value. */
12392 if (value && insn && reg_overlap_mentioned_p (reg, value))
12394 rtx tem;
12396 /* Set things up so get_last_value is allowed to see anything set up to
12397 our insn. */
12398 subst_low_luid = DF_INSN_LUID (insn);
12399 tem = get_last_value (reg);
12401 /* If TEM is simply a binary operation with two CLOBBERs as operands,
12402 it isn't going to be useful and will take a lot of time to process,
12403 so just use the CLOBBER. */
12405 if (tem)
12407 if (ARITHMETIC_P (tem)
12408 && GET_CODE (XEXP (tem, 0)) == CLOBBER
12409 && GET_CODE (XEXP (tem, 1)) == CLOBBER)
12410 tem = XEXP (tem, 0);
12411 else if (count_occurrences (value, reg, 1) >= 2)
12413 /* If there are two or more occurrences of REG in VALUE,
12414 prevent the value from growing too much. */
12415 if (count_rtxs (tem) > MAX_LAST_VALUE_RTL)
12416 tem = gen_rtx_CLOBBER (GET_MODE (tem), const0_rtx);
12419 value = replace_rtx (copy_rtx (value), reg, tem);
12423 /* For each register modified, show we don't know its value, that
12424 we don't know about its bitwise content, that its value has been
12425 updated, and that we don't know the location of the death of the
12426 register. */
12427 for (i = regno; i < endregno; i++)
12429 rsp = &reg_stat[i];
12431 if (insn)
12432 rsp->last_set = insn;
12434 rsp->last_set_value = 0;
12435 rsp->last_set_mode = VOIDmode;
12436 rsp->last_set_nonzero_bits = 0;
12437 rsp->last_set_sign_bit_copies = 0;
12438 rsp->last_death = 0;
12439 rsp->truncated_to_mode = VOIDmode;
12442 /* Mark registers that are being referenced in this value. */
12443 if (value)
12444 update_table_tick (value);
12446 /* Now update the status of each register being set.
12447 If someone is using this register in this block, set this register
12448 to invalid since we will get confused between the two lives in this
12449 basic block. This makes using this register always invalid. In cse, we
12450 scan the table to invalidate all entries using this register, but this
12451 is too much work for us. */
12453 for (i = regno; i < endregno; i++)
12455 rsp = &reg_stat[i];
12456 rsp->last_set_label = label_tick;
12457 if (!insn
12458 || (value && rsp->last_set_table_tick >= label_tick_ebb_start))
12459 rsp->last_set_invalid = 1;
12460 else
12461 rsp->last_set_invalid = 0;
12464 /* The value being assigned might refer to X (like in "x++;"). In that
12465 case, we must replace it with (clobber (const_int 0)) to prevent
12466 infinite loops. */
12467 rsp = &reg_stat[regno];
12468 if (value && !get_last_value_validate (&value, insn, label_tick, 0))
12470 value = copy_rtx (value);
12471 if (!get_last_value_validate (&value, insn, label_tick, 1))
12472 value = 0;
12475 /* For the main register being modified, update the value, the mode, the
12476 nonzero bits, and the number of sign bit copies. */
12478 rsp->last_set_value = value;
12480 if (value)
12482 machine_mode mode = GET_MODE (reg);
12483 subst_low_luid = DF_INSN_LUID (insn);
12484 rsp->last_set_mode = mode;
12485 if (GET_MODE_CLASS (mode) == MODE_INT
12486 && HWI_COMPUTABLE_MODE_P (mode))
12487 mode = nonzero_bits_mode;
12488 rsp->last_set_nonzero_bits = nonzero_bits (value, mode);
12489 rsp->last_set_sign_bit_copies
12490 = num_sign_bit_copies (value, GET_MODE (reg));
12494 /* Called via note_stores from record_dead_and_set_regs to handle one
12495 SET or CLOBBER in an insn. DATA is the instruction in which the
12496 set is occurring. */
12498 static void
12499 record_dead_and_set_regs_1 (rtx dest, const_rtx setter, void *data)
12501 rtx_insn *record_dead_insn = (rtx_insn *) data;
12503 if (GET_CODE (dest) == SUBREG)
12504 dest = SUBREG_REG (dest);
12506 if (!record_dead_insn)
12508 if (REG_P (dest))
12509 record_value_for_reg (dest, NULL, NULL_RTX);
12510 return;
12513 if (REG_P (dest))
12515 /* If we are setting the whole register, we know its value. Otherwise
12516 show that we don't know the value. We can handle SUBREG in
12517 some cases. */
12518 if (GET_CODE (setter) == SET && dest == SET_DEST (setter))
12519 record_value_for_reg (dest, record_dead_insn, SET_SRC (setter));
12520 else if (GET_CODE (setter) == SET
12521 && GET_CODE (SET_DEST (setter)) == SUBREG
12522 && SUBREG_REG (SET_DEST (setter)) == dest
12523 && GET_MODE_PRECISION (GET_MODE (dest)) <= BITS_PER_WORD
12524 && subreg_lowpart_p (SET_DEST (setter)))
12525 record_value_for_reg (dest, record_dead_insn,
12526 gen_lowpart (GET_MODE (dest),
12527 SET_SRC (setter)));
12528 else
12529 record_value_for_reg (dest, record_dead_insn, NULL_RTX);
12531 else if (MEM_P (dest)
12532 /* Ignore pushes, they clobber nothing. */
12533 && ! push_operand (dest, GET_MODE (dest)))
12534 mem_last_set = DF_INSN_LUID (record_dead_insn);
12537 /* Update the records of when each REG was most recently set or killed
12538 for the things done by INSN. This is the last thing done in processing
12539 INSN in the combiner loop.
12541 We update reg_stat[], in particular fields last_set, last_set_value,
12542 last_set_mode, last_set_nonzero_bits, last_set_sign_bit_copies,
12543 last_death, and also the similar information mem_last_set (which insn
12544 most recently modified memory) and last_call_luid (which insn was the
12545 most recent subroutine call). */
12547 static void
12548 record_dead_and_set_regs (rtx_insn *insn)
12550 rtx link;
12551 unsigned int i;
12553 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
12555 if (REG_NOTE_KIND (link) == REG_DEAD
12556 && REG_P (XEXP (link, 0)))
12558 unsigned int regno = REGNO (XEXP (link, 0));
12559 unsigned int endregno = END_REGNO (XEXP (link, 0));
12561 for (i = regno; i < endregno; i++)
12563 reg_stat_type *rsp;
12565 rsp = &reg_stat[i];
12566 rsp->last_death = insn;
12569 else if (REG_NOTE_KIND (link) == REG_INC)
12570 record_value_for_reg (XEXP (link, 0), insn, NULL_RTX);
12573 if (CALL_P (insn))
12575 hard_reg_set_iterator hrsi;
12576 EXECUTE_IF_SET_IN_HARD_REG_SET (regs_invalidated_by_call, 0, i, hrsi)
12578 reg_stat_type *rsp;
12580 rsp = &reg_stat[i];
12581 rsp->last_set_invalid = 1;
12582 rsp->last_set = insn;
12583 rsp->last_set_value = 0;
12584 rsp->last_set_mode = VOIDmode;
12585 rsp->last_set_nonzero_bits = 0;
12586 rsp->last_set_sign_bit_copies = 0;
12587 rsp->last_death = 0;
12588 rsp->truncated_to_mode = VOIDmode;
12591 last_call_luid = mem_last_set = DF_INSN_LUID (insn);
12593 /* We can't combine into a call pattern. Remember, though, that
12594 the return value register is set at this LUID. We could
12595 still replace a register with the return value from the
12596 wrong subroutine call! */
12597 note_stores (PATTERN (insn), record_dead_and_set_regs_1, NULL_RTX);
12599 else
12600 note_stores (PATTERN (insn), record_dead_and_set_regs_1, insn);
12603 /* If a SUBREG has the promoted bit set, it is in fact a property of the
12604 register present in the SUBREG, so for each such SUBREG go back and
12605 adjust nonzero and sign bit information of the registers that are
12606 known to have some zero/sign bits set.
12608 This is needed because when combine blows the SUBREGs away, the
12609 information on zero/sign bits is lost and further combines can be
12610 missed because of that. */
12612 static void
12613 record_promoted_value (rtx_insn *insn, rtx subreg)
12615 struct insn_link *links;
12616 rtx set;
12617 unsigned int regno = REGNO (SUBREG_REG (subreg));
12618 machine_mode mode = GET_MODE (subreg);
12620 if (GET_MODE_PRECISION (mode) > HOST_BITS_PER_WIDE_INT)
12621 return;
12623 for (links = LOG_LINKS (insn); links;)
12625 reg_stat_type *rsp;
12627 insn = links->insn;
12628 set = single_set (insn);
12630 if (! set || !REG_P (SET_DEST (set))
12631 || REGNO (SET_DEST (set)) != regno
12632 || GET_MODE (SET_DEST (set)) != GET_MODE (SUBREG_REG (subreg)))
12634 links = links->next;
12635 continue;
12638 rsp = &reg_stat[regno];
12639 if (rsp->last_set == insn)
12641 if (SUBREG_PROMOTED_UNSIGNED_P (subreg))
12642 rsp->last_set_nonzero_bits &= GET_MODE_MASK (mode);
12645 if (REG_P (SET_SRC (set)))
12647 regno = REGNO (SET_SRC (set));
12648 links = LOG_LINKS (insn);
12650 else
12651 break;
12655 /* Check if X, a register, is known to contain a value already
12656 truncated to MODE. In this case we can use a subreg to refer to
12657 the truncated value even though in the generic case we would need
12658 an explicit truncation. */
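/* For instance, if X is known to already hold a value truncated to HImode,
a later truncation of X to HImode can be expressed as a simple subreg even
on targets where that truncation is not a no-op. */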
12660 static bool
12661 reg_truncated_to_mode (machine_mode mode, const_rtx x)
12663 reg_stat_type *rsp = &reg_stat[REGNO (x)];
12664 machine_mode truncated = rsp->truncated_to_mode;
12666 if (truncated == 0
12667 || rsp->truncation_label < label_tick_ebb_start)
12668 return false;
12669 if (GET_MODE_SIZE (truncated) <= GET_MODE_SIZE (mode))
12670 return true;
12671 if (TRULY_NOOP_TRUNCATION_MODES_P (mode, truncated))
12672 return true;
12673 return false;
12676 /* If X is a hard reg or a subreg, record the mode that the register is
12677 accessed in. For non-TRULY_NOOP_TRUNCATION targets we might be able
12678 to turn a truncate into a subreg using this information. Return true
12679 if traversing X is complete. */
12681 static bool
12682 record_truncated_value (rtx x)
12684 machine_mode truncated_mode;
12685 reg_stat_type *rsp;
12687 if (GET_CODE (x) == SUBREG && REG_P (SUBREG_REG (x)))
12689 machine_mode original_mode = GET_MODE (SUBREG_REG (x));
12690 truncated_mode = GET_MODE (x);
12692 if (GET_MODE_SIZE (original_mode) <= GET_MODE_SIZE (truncated_mode))
12693 return true;
12695 if (TRULY_NOOP_TRUNCATION_MODES_P (truncated_mode, original_mode))
12696 return true;
12698 x = SUBREG_REG (x);
12700 /* ??? For hard-regs we now record everything. We might be able to
12701 optimize this using last_set_mode. */
12702 else if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER)
12703 truncated_mode = GET_MODE (x);
12704 else
12705 return false;
12707 rsp = &reg_stat[REGNO (x)];
12708 if (rsp->truncated_to_mode == 0
12709 || rsp->truncation_label < label_tick_ebb_start
12710 || (GET_MODE_SIZE (truncated_mode)
12711 < GET_MODE_SIZE (rsp->truncated_to_mode)))
12713 rsp->truncated_to_mode = truncated_mode;
12714 rsp->truncation_label = label_tick;
12717 return true;
12720 /* Callback for note_uses. Find hardregs and subregs of pseudos and
12721 the modes they are used in. This can help turn TRUNCATEs into
12722 SUBREGs. */
12724 static void
12725 record_truncated_values (rtx *loc, void *data ATTRIBUTE_UNUSED)
12727 subrtx_var_iterator::array_type array;
12728 FOR_EACH_SUBRTX_VAR (iter, array, *loc, NONCONST)
12729 if (record_truncated_value (*iter))
12730 iter.skip_subrtxes ();
12733 /* Scan X for promoted SUBREGs. For each one found,
12734 note what it implies to the registers used in it. */
12736 static void
12737 check_promoted_subreg (rtx_insn *insn, rtx x)
12739 if (GET_CODE (x) == SUBREG
12740 && SUBREG_PROMOTED_VAR_P (x)
12741 && REG_P (SUBREG_REG (x)))
12742 record_promoted_value (insn, x);
12743 else
12745 const char *format = GET_RTX_FORMAT (GET_CODE (x));
12746 int i, j;
12748 for (i = 0; i < GET_RTX_LENGTH (GET_CODE (x)); i++)
12749 switch (format[i])
12751 case 'e':
12752 check_promoted_subreg (insn, XEXP (x, i));
12753 break;
12754 case 'V':
12755 case 'E':
12756 if (XVEC (x, i) != 0)
12757 for (j = 0; j < XVECLEN (x, i); j++)
12758 check_promoted_subreg (insn, XVECEXP (x, i, j));
12759 break;
12764 /* Verify that all the registers and memory references mentioned in *LOC are
12765 still valid. *LOC was part of a value set in INSN when label_tick was
12766 equal to TICK. Return 0 if some are not. If REPLACE is nonzero, replace
12767 the invalid references with (clobber (const_int 0)) and return 1. This
12768 replacement is useful because we often can get useful information about
12769 the form of a value (e.g., if it was produced by a shift that always
12770 produces -1 or 0) even though we don't know exactly what registers it
12771 was produced from. */
12773 static int
12774 get_last_value_validate (rtx *loc, rtx_insn *insn, int tick, int replace)
12776 rtx x = *loc;
12777 const char *fmt = GET_RTX_FORMAT (GET_CODE (x));
12778 int len = GET_RTX_LENGTH (GET_CODE (x));
12779 int i, j;
12781 if (REG_P (x))
12783 unsigned int regno = REGNO (x);
12784 unsigned int endregno = END_REGNO (x);
12785 unsigned int j;
12787 for (j = regno; j < endregno; j++)
12789 reg_stat_type *rsp = &reg_stat[j];
12790 if (rsp->last_set_invalid
12791 /* If this is a pseudo-register that was only set once and not
12792 live at the beginning of the function, it is always valid. */
12793 || (! (regno >= FIRST_PSEUDO_REGISTER
12794 && REG_N_SETS (regno) == 1
12795 && (!REGNO_REG_SET_P
12796 (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb),
12797 regno)))
12798 && rsp->last_set_label > tick))
12800 if (replace)
12801 *loc = gen_rtx_CLOBBER (GET_MODE (x), const0_rtx);
12802 return replace;
12806 return 1;
12808 /* If this is a memory reference, make sure that there were no stores after
12809 it that might have clobbered the value. We don't have alias info, so we
12810 assume any store invalidates it. Moreover, we only have local UIDs, so
12811 we also assume that there were stores in the intervening basic blocks. */
12812 else if (MEM_P (x) && !MEM_READONLY_P (x)
12813 && (tick != label_tick || DF_INSN_LUID (insn) <= mem_last_set))
12815 if (replace)
12816 *loc = gen_rtx_CLOBBER (GET_MODE (x), const0_rtx);
12817 return replace;
12820 for (i = 0; i < len; i++)
12822 if (fmt[i] == 'e')
12824 /* Check for identical subexpressions. If x contains
12825 identical subexpressions we only have to traverse one of
12826 them. */
12827 if (i == 1 && ARITHMETIC_P (x))
12829 /* Note that at this point x0 has already been checked
12830 and found valid. */
12831 rtx x0 = XEXP (x, 0);
12832 rtx x1 = XEXP (x, 1);
12834 /* If x0 and x1 are identical then x is also valid. */
12835 if (x0 == x1)
12836 return 1;
12838 /* If x1 is identical to a subexpression of x0 then
12839 while checking x0, x1 has already been checked. Thus
12840 it is valid and so is x. */
12841 if (ARITHMETIC_P (x0)
12842 && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
12843 return 1;
12845 /* If x0 is identical to a subexpression of x1 then x is
12846 valid iff the rest of x1 is valid. */
12847 if (ARITHMETIC_P (x1)
12848 && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
12849 return
12850 get_last_value_validate (&XEXP (x1,
12851 x0 == XEXP (x1, 0) ? 1 : 0),
12852 insn, tick, replace);
12855 if (get_last_value_validate (&XEXP (x, i), insn, tick,
12856 replace) == 0)
12857 return 0;
12859 else if (fmt[i] == 'E')
12860 for (j = 0; j < XVECLEN (x, i); j++)
12861 if (get_last_value_validate (&XVECEXP (x, i, j),
12862 insn, tick, replace) == 0)
12863 return 0;
12866 /* If we haven't found a reason for it to be invalid, it is valid. */
12867 return 1;
12870 /* Get the last value assigned to X, if known. Some registers
12871 in the value may be replaced with (clobber (const_int 0)) if their value
12872 is no longer known reliably. */
12874 static rtx
12875 get_last_value (const_rtx x)
12877 unsigned int regno;
12878 rtx value;
12879 reg_stat_type *rsp;
12881 /* If this is a non-paradoxical SUBREG, get the value of its operand and
12882 then convert it to the desired mode. If this is a paradoxical SUBREG,
12883 we cannot predict what values the "extra" bits might have. */
12884 if (GET_CODE (x) == SUBREG
12885 && subreg_lowpart_p (x)
12886 && !paradoxical_subreg_p (x)
12887 && (value = get_last_value (SUBREG_REG (x))) != 0)
12888 return gen_lowpart (GET_MODE (x), value);
12890 if (!REG_P (x))
12891 return 0;
12893 regno = REGNO (x);
12894 rsp = &reg_stat[regno];
12895 value = rsp->last_set_value;
12897 /* If we don't have a value, or if it isn't for this basic block and
12898 it's either a hard register, set more than once, or it's live
12899 at the beginning of the function, return 0.
12901 Because if it's not live at the beginning of the function then the reg
12902 is always set before being used (is never used without being set).
12903 And, if it's set only once, and it's always set before use, then all
12904 uses must have the same last value, even if it's not from this basic
12905 block. */
12907 if (value == 0
12908 || (rsp->last_set_label < label_tick_ebb_start
12909 && (regno < FIRST_PSEUDO_REGISTER
12910 || REG_N_SETS (regno) != 1
12911 || REGNO_REG_SET_P
12912 (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb), regno))))
12913 return 0;
12915 /* If the value was set in a later insn than the ones we are processing,
12916 we can't use it even if the register was only set once. */
12917 if (rsp->last_set_label == label_tick
12918 && DF_INSN_LUID (rsp->last_set) >= subst_low_luid)
12919 return 0;
12921 /* If the value has all its registers valid, return it. */
12922 if (get_last_value_validate (&value, rsp->last_set, rsp->last_set_label, 0))
12923 return value;
12925 /* Otherwise, make a copy and replace any invalid register with
12926 (clobber (const_int 0)). If that fails for some reason, return 0. */
12928 value = copy_rtx (value);
12929 if (get_last_value_validate (&value, rsp->last_set, rsp->last_set_label, 1))
12930 return value;
12932 return 0;
12935 /* Return nonzero if expression X refers to a REG or to memory
12936 that is set in an instruction more recent than FROM_LUID. */
12938 static int
12939 use_crosses_set_p (const_rtx x, int from_luid)
12941 const char *fmt;
12942 int i;
12943 enum rtx_code code = GET_CODE (x);
12945 if (code == REG)
12947 unsigned int regno = REGNO (x);
12948 unsigned endreg = END_REGNO (x);
12950 #ifdef PUSH_ROUNDING
12951 /* Don't allow uses of the stack pointer to be moved,
12952 because we don't know whether the move crosses a push insn. */
12953 if (regno == STACK_POINTER_REGNUM && PUSH_ARGS)
12954 return 1;
12955 #endif
12956 for (; regno < endreg; regno++)
12958 reg_stat_type *rsp = &reg_stat[regno];
12959 if (rsp->last_set
12960 && rsp->last_set_label == label_tick
12961 && DF_INSN_LUID (rsp->last_set) > from_luid)
12962 return 1;
12964 return 0;
12967 if (code == MEM && mem_last_set > from_luid)
12968 return 1;
12970 fmt = GET_RTX_FORMAT (code);
12972 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
12974 if (fmt[i] == 'E')
12976 int j;
12977 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
12978 if (use_crosses_set_p (XVECEXP (x, i, j), from_luid))
12979 return 1;
12981 else if (fmt[i] == 'e'
12982 && use_crosses_set_p (XEXP (x, i), from_luid))
12983 return 1;
12985 return 0;
12988 /* Define three variables used for communication between the following
12989 routines. */
12991 static unsigned int reg_dead_regno, reg_dead_endregno;
12992 static int reg_dead_flag;
12994 /* Function called via note_stores from reg_dead_at_p.
12996 If DEST is within [reg_dead_regno, reg_dead_endregno), set
12997 reg_dead_flag to 1 if X is a CLOBBER and to -1 if it is a SET. */
12999 static void
13000 reg_dead_at_p_1 (rtx dest, const_rtx x, void *data ATTRIBUTE_UNUSED)
13002 unsigned int regno, endregno;
13004 if (!REG_P (dest))
13005 return;
13007 regno = REGNO (dest);
13008 endregno = END_REGNO (dest);
13009 if (reg_dead_endregno > regno && reg_dead_regno < endregno)
13010 reg_dead_flag = (GET_CODE (x) == CLOBBER) ? 1 : -1;
13013 /* Return nonzero if REG is known to be dead at INSN.
13015 We scan backwards from INSN. If we hit a REG_DEAD note or a CLOBBER
13016 referencing REG, it is dead. If we hit a SET referencing REG, it is
13017 live. Otherwise, see if it is live or dead at the start of the basic
13018 block we are in. Hard regs marked as being live in NEWPAT_USED_REGS
13019 must be assumed to be always live. */
13021 static int
13022 reg_dead_at_p (rtx reg, rtx_insn *insn)
13024 basic_block block;
13025 unsigned int i;
13027 /* Set variables for reg_dead_at_p_1. */
13028 reg_dead_regno = REGNO (reg);
13029 reg_dead_endregno = END_REGNO (reg);
13031 reg_dead_flag = 0;
13033 /* Check that reg isn't mentioned in NEWPAT_USED_REGS. For fixed registers
13034 we allow the machine description to decide whether use-and-clobber
13035 patterns are OK. */
13036 if (reg_dead_regno < FIRST_PSEUDO_REGISTER)
13038 for (i = reg_dead_regno; i < reg_dead_endregno; i++)
13039 if (!fixed_regs[i] && TEST_HARD_REG_BIT (newpat_used_regs, i))
13040 return 0;
13043 /* Scan backwards until we find a REG_DEAD note, SET, CLOBBER, or
13044 beginning of basic block. */
13045 block = BLOCK_FOR_INSN (insn);
13046 for (;;)
13048 if (INSN_P (insn))
13050 if (find_regno_note (insn, REG_UNUSED, reg_dead_regno))
13051 return 1;
13053 note_stores (PATTERN (insn), reg_dead_at_p_1, NULL);
13054 if (reg_dead_flag)
13055 return reg_dead_flag == 1 ? 1 : 0;
13057 if (find_regno_note (insn, REG_DEAD, reg_dead_regno))
13058 return 1;
13061 if (insn == BB_HEAD (block))
13062 break;
13064 insn = PREV_INSN (insn);
13067 /* Look at live-in sets for the basic block that we were in. */
13068 for (i = reg_dead_regno; i < reg_dead_endregno; i++)
13069 if (REGNO_REG_SET_P (df_get_live_in (block), i))
13070 return 0;
13072 return 1;
13075 /* Note hard registers in X that are used. */
13077 static void
13078 mark_used_regs_combine (rtx x)
13080 RTX_CODE code = GET_CODE (x);
13081 unsigned int regno;
13082 int i;
13084 switch (code)
13086 case LABEL_REF:
13087 case SYMBOL_REF:
13088 case CONST:
13089 CASE_CONST_ANY:
13090 case PC:
13091 case ADDR_VEC:
13092 case ADDR_DIFF_VEC:
13093 case ASM_INPUT:
13094 #ifdef HAVE_cc0
13095 /* CC0 must die in the insn after it is set, so we don't need to take
13096 special note of it here. */
13097 case CC0:
13098 #endif
13099 return;
13101 case CLOBBER:
13102 /* If we are clobbering a MEM, mark any hard registers inside the
13103 address as used. */
13104 if (MEM_P (XEXP (x, 0)))
13105 mark_used_regs_combine (XEXP (XEXP (x, 0), 0));
13106 return;
13108 case REG:
13109 regno = REGNO (x);
13110 /* A hard reg in a wide mode may really be multiple registers.
13111 If so, mark all of them just like the first. */
13112 if (regno < FIRST_PSEUDO_REGISTER)
13114 /* None of this applies to the stack, frame or arg pointers. */
13115 if (regno == STACK_POINTER_REGNUM
13116 #if !HARD_FRAME_POINTER_IS_FRAME_POINTER
13117 || regno == HARD_FRAME_POINTER_REGNUM
13118 #endif
13119 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
13120 || (regno == ARG_POINTER_REGNUM && fixed_regs[regno])
13121 #endif
13122 || regno == FRAME_POINTER_REGNUM)
13123 return;
13125 add_to_hard_reg_set (&newpat_used_regs, GET_MODE (x), regno);
13127 return;
13129 case SET:
13131 /* If setting a MEM, or a SUBREG of a MEM, then note any hard regs in
13132 the address. */
13133 rtx testreg = SET_DEST (x);
13135 while (GET_CODE (testreg) == SUBREG
13136 || GET_CODE (testreg) == ZERO_EXTRACT
13137 || GET_CODE (testreg) == STRICT_LOW_PART)
13138 testreg = XEXP (testreg, 0);
13140 if (MEM_P (testreg))
13141 mark_used_regs_combine (XEXP (testreg, 0));
13143 mark_used_regs_combine (SET_SRC (x));
13145 return;
13147 default:
13148 break;
13151 /* Recursively scan the operands of this expression. */
13154 const char *fmt = GET_RTX_FORMAT (code);
13156 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
13158 if (fmt[i] == 'e')
13159 mark_used_regs_combine (XEXP (x, i));
13160 else if (fmt[i] == 'E')
13162 int j;
13164 for (j = 0; j < XVECLEN (x, i); j++)
13165 mark_used_regs_combine (XVECEXP (x, i, j));
13171 /* Remove register number REGNO from the dead registers list of INSN.
13173 Return the note used to record the death, if there was one. */
13176 remove_death (unsigned int regno, rtx_insn *insn)
13178 rtx note = find_regno_note (insn, REG_DEAD, regno);
13180 if (note)
13181 remove_note (insn, note);
13183 return note;
13186 /* For each register (hardware or pseudo) used within expression X, if its
13187 death is in an instruction with luid between FROM_LUID (inclusive) and
13188 TO_INSN (exclusive), put a REG_DEAD note for that register in the
13189 list headed by PNOTES.
13191 That said, don't move registers killed by maybe_kill_insn.
13193 This is done when X is being merged by combination into TO_INSN. These
13194 notes will then be distributed as needed. */
13196 static void
13197 move_deaths (rtx x, rtx maybe_kill_insn, int from_luid, rtx_insn *to_insn,
13198 rtx *pnotes)
13200 const char *fmt;
13201 int len, i;
13202 enum rtx_code code = GET_CODE (x);
13204 if (code == REG)
13206 unsigned int regno = REGNO (x);
13207 rtx_insn *where_dead = reg_stat[regno].last_death;
13209 /* Don't move the register if it gets killed in between from and to. */
13210 if (maybe_kill_insn && reg_set_p (x, maybe_kill_insn)
13211 && ! reg_referenced_p (x, maybe_kill_insn))
13212 return;
13214 if (where_dead
13215 && BLOCK_FOR_INSN (where_dead) == BLOCK_FOR_INSN (to_insn)
13216 && DF_INSN_LUID (where_dead) >= from_luid
13217 && DF_INSN_LUID (where_dead) < DF_INSN_LUID (to_insn))
13219 rtx note = remove_death (regno, where_dead);
13221 /* It is possible for the call above to return 0. This can occur
13222 when last_death points to I2 or I1 that we combined with.
13223 In that case make a new note.
13225 We must also check for the case where X is a hard register
13226 and NOTE is a death note for a range of hard registers
13227 including X. In that case, we must put REG_DEAD notes for
13228 the remaining registers in place of NOTE. */
13230 if (note != 0 && regno < FIRST_PSEUDO_REGISTER
13231 && (GET_MODE_SIZE (GET_MODE (XEXP (note, 0)))
13232 > GET_MODE_SIZE (GET_MODE (x))))
13234 unsigned int deadregno = REGNO (XEXP (note, 0));
13235 unsigned int deadend = END_HARD_REGNO (XEXP (note, 0));
13236 unsigned int ourend = END_HARD_REGNO (x);
13237 unsigned int i;
13239 for (i = deadregno; i < deadend; i++)
13240 if (i < regno || i >= ourend)
13241 add_reg_note (where_dead, REG_DEAD, regno_reg_rtx[i]);
13244 /* If we didn't find any note, or if we found a REG_DEAD note that
13245 covers only part of the given reg, and we have a multi-reg hard
13246 register, then to be safe we must check for REG_DEAD notes
13247 for each register other than the first. They could have
13248 their own REG_DEAD notes lying around. */
13249 else if ((note == 0
13250 || (note != 0
13251 && (GET_MODE_SIZE (GET_MODE (XEXP (note, 0)))
13252 < GET_MODE_SIZE (GET_MODE (x)))))
13253 && regno < FIRST_PSEUDO_REGISTER
13254 && hard_regno_nregs[regno][GET_MODE (x)] > 1)
13256 unsigned int ourend = END_HARD_REGNO (x);
13257 unsigned int i, offset;
13258 rtx oldnotes = 0;
13260 if (note)
13261 offset = hard_regno_nregs[regno][GET_MODE (XEXP (note, 0))];
13262 else
13263 offset = 1;
13265 for (i = regno + offset; i < ourend; i++)
13266 move_deaths (regno_reg_rtx[i],
13267 maybe_kill_insn, from_luid, to_insn, &oldnotes);
13270 if (note != 0 && GET_MODE (XEXP (note, 0)) == GET_MODE (x))
13272 XEXP (note, 1) = *pnotes;
13273 *pnotes = note;
13275 else
13276 *pnotes = alloc_reg_note (REG_DEAD, x, *pnotes);
13279 return;
13282 else if (GET_CODE (x) == SET)
13284 rtx dest = SET_DEST (x);
13286 move_deaths (SET_SRC (x), maybe_kill_insn, from_luid, to_insn, pnotes);
13288 /* In the case of a ZERO_EXTRACT, a STRICT_LOW_PART, or a SUBREG
13289 that accesses one word of a multi-word item, some
13290 piece of every register in the expression is used by
13291 this insn, so remove any old death. */
13292 /* ??? So why do we test for equality of the sizes? */
13294 if (GET_CODE (dest) == ZERO_EXTRACT
13295 || GET_CODE (dest) == STRICT_LOW_PART
13296 || (GET_CODE (dest) == SUBREG
13297 && (((GET_MODE_SIZE (GET_MODE (dest))
13298 + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
13299 == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest)))
13300 + UNITS_PER_WORD - 1) / UNITS_PER_WORD))))
13302 move_deaths (dest, maybe_kill_insn, from_luid, to_insn, pnotes);
13303 return;
13306 /* If this is some other SUBREG, we know it replaces the entire
13307 value, so use that as the destination. */
13308 if (GET_CODE (dest) == SUBREG)
13309 dest = SUBREG_REG (dest);
13311 /* If this is a MEM, adjust deaths of anything used in the address.
13312 For a REG (the only other possibility), the entire value is
13313 being replaced so the old value is not used in this insn. */
13315 if (MEM_P (dest))
13316 move_deaths (XEXP (dest, 0), maybe_kill_insn, from_luid,
13317 to_insn, pnotes);
13318 return;
13321 else if (GET_CODE (x) == CLOBBER)
13322 return;
13324 len = GET_RTX_LENGTH (code);
13325 fmt = GET_RTX_FORMAT (code);
13327 for (i = 0; i < len; i++)
13329 if (fmt[i] == 'E')
13331 int j;
13332 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
13333 move_deaths (XVECEXP (x, i, j), maybe_kill_insn, from_luid,
13334 to_insn, pnotes);
13336 else if (fmt[i] == 'e')
13337 move_deaths (XEXP (x, i), maybe_kill_insn, from_luid, to_insn, pnotes);
13341 /* Return 1 if X is the target of a bit-field assignment in BODY, the
13342 pattern of an insn. X must be a REG. */
13344 static int
13345 reg_bitfield_target_p (rtx x, rtx body)
13347 int i;
13349 if (GET_CODE (body) == SET)
13351 rtx dest = SET_DEST (body);
13352 rtx target;
13353 unsigned int regno, tregno, endregno, endtregno;
13355 if (GET_CODE (dest) == ZERO_EXTRACT)
13356 target = XEXP (dest, 0);
13357 else if (GET_CODE (dest) == STRICT_LOW_PART)
13358 target = SUBREG_REG (XEXP (dest, 0));
13359 else
13360 return 0;
13362 if (GET_CODE (target) == SUBREG)
13363 target = SUBREG_REG (target);
13365 if (!REG_P (target))
13366 return 0;
13368 tregno = REGNO (target), regno = REGNO (x);
13369 if (tregno >= FIRST_PSEUDO_REGISTER || regno >= FIRST_PSEUDO_REGISTER)
13370 return target == x;
13372 endtregno = end_hard_regno (GET_MODE (target), tregno);
13373 endregno = end_hard_regno (GET_MODE (x), regno);
13375 return endregno > tregno && regno < endtregno;
13378 else if (GET_CODE (body) == PARALLEL)
13379 for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
13380 if (reg_bitfield_target_p (x, XVECEXP (body, 0, i)))
13381 return 1;
13383 return 0;
13386 /* Given a chain of REG_NOTES originally from FROM_INSN, try to place them
13387 as appropriate. I3 and I2 are the insns resulting from combining the
13388 insns that included FROM (I2 may be zero).
13390 ELIM_I2 and ELIM_I1 are either zero or registers that we know will
13391 not need REG_DEAD notes because they are being substituted for. This
13392 saves searching in the most common cases.
13394 Each note in the list is either ignored or placed on some insns, depending
13395 on the type of note. */
13397 static void
13398 distribute_notes (rtx notes, rtx_insn *from_insn, rtx_insn *i3, rtx_insn *i2,
13399 rtx elim_i2, rtx elim_i1, rtx elim_i0)
13401 rtx note, next_note;
13402 rtx tem_note;
13403 rtx_insn *tem_insn;
13405 for (note = notes; note; note = next_note)
13407 rtx_insn *place = 0, *place2 = 0;
13409 next_note = XEXP (note, 1);
13410 switch (REG_NOTE_KIND (note))
13412 case REG_BR_PROB:
13413 case REG_BR_PRED:
13414 /* Doesn't matter much where we put this, as long as it's somewhere.
13415 It is preferable to keep these notes on branches, which is most
13416 likely to be i3. */
13417 place = i3;
13418 break;
13420 case REG_NON_LOCAL_GOTO:
13421 if (JUMP_P (i3))
13422 place = i3;
13423 else
13425 gcc_assert (i2 && JUMP_P (i2));
13426 place = i2;
13428 break;
13430 case REG_EH_REGION:
13431 /* These notes must remain with the call or trapping instruction. */
13432 if (CALL_P (i3))
13433 place = i3;
13434 else if (i2 && CALL_P (i2))
13435 place = i2;
13436 else
13438 gcc_assert (cfun->can_throw_non_call_exceptions);
13439 if (may_trap_p (i3))
13440 place = i3;
13441 else if (i2 && may_trap_p (i2))
13442 place = i2;
13443 /* ??? Otherwise assume we've combined things such that we
13444 can now prove that the instructions can't trap. Drop the
13445 note in this case. */
13447 break;
13449 case REG_ARGS_SIZE:
13450 /* ??? How to distribute between i3-i1. Assume i3 contains the
13451 entire adjustment. Assert i3 contains at least some adjust. */
13452 if (!noop_move_p (i3))
13454 int old_size, args_size = INTVAL (XEXP (note, 0));
13455 /* fixup_args_size_notes looks at REG_NORETURN note,
13456 so ensure the note is placed there first. */
13457 if (CALL_P (i3))
13459 rtx *np;
13460 for (np = &next_note; *np; np = &XEXP (*np, 1))
13461 if (REG_NOTE_KIND (*np) == REG_NORETURN)
13463 rtx n = *np;
13464 *np = XEXP (n, 1);
13465 XEXP (n, 1) = REG_NOTES (i3);
13466 REG_NOTES (i3) = n;
13467 break;
13470 old_size = fixup_args_size_notes (PREV_INSN (i3), i3, args_size);
13471 /* For !ACCUMULATE_OUTGOING_ARGS, emit_call_1 adds a
13472 REG_ARGS_SIZE note to all noreturn calls; allow that here. */
13473 gcc_assert (old_size != args_size
13474 || (CALL_P (i3)
13475 && !ACCUMULATE_OUTGOING_ARGS
13476 && find_reg_note (i3, REG_NORETURN, NULL_RTX)));
13478 break;
13480 case REG_NORETURN:
13481 case REG_SETJMP:
13482 case REG_TM:
13483 case REG_CALL_DECL:
13484 /* These notes must remain with the call. It should not be
13485 possible for both I2 and I3 to be a call. */
13486 if (CALL_P (i3))
13487 place = i3;
13488 else
13490 gcc_assert (i2 && CALL_P (i2));
13491 place = i2;
13493 break;
13495 case REG_UNUSED:
13496 /* Any clobbers for i3 may still exist, and so we must process
13497 REG_UNUSED notes from that insn.
13499 Any clobbers from i2 or i1 can only exist if they were added by
13500 recog_for_combine. In that case, recog_for_combine created the
13501 necessary REG_UNUSED notes. Trying to keep any original
13502 REG_UNUSED notes from these insns can cause incorrect output
13503 if it is for the same register as the original i3 dest.
13504 In that case, we will notice that the register is set in i3,
13505 and then add a REG_UNUSED note for the destination of i3, which
13506 is wrong. However, it is possible to have REG_UNUSED notes from
13507 i2 or i1 for registers which were both used and clobbered, so
13508 we keep notes from i2 or i1 if they will turn into REG_DEAD
13509 notes. */
13511 /* If this register is set or clobbered in I3, put the note there
13512 unless there is one already. */
13513 if (reg_set_p (XEXP (note, 0), PATTERN (i3)))
13515 if (from_insn != i3)
13516 break;
13518 if (! (REG_P (XEXP (note, 0))
13519 ? find_regno_note (i3, REG_UNUSED, REGNO (XEXP (note, 0)))
13520 : find_reg_note (i3, REG_UNUSED, XEXP (note, 0))))
13521 place = i3;
13523 /* Otherwise, if this register is used by I3, then this register
13524 now dies here, so we must put a REG_DEAD note here unless there
13525 is one already. */
13526 else if (reg_referenced_p (XEXP (note, 0), PATTERN (i3))
13527 && ! (REG_P (XEXP (note, 0))
13528 ? find_regno_note (i3, REG_DEAD,
13529 REGNO (XEXP (note, 0)))
13530 : find_reg_note (i3, REG_DEAD, XEXP (note, 0))))
13532 PUT_REG_NOTE_KIND (note, REG_DEAD);
13533 place = i3;
13535 break;
13537 case REG_EQUAL:
13538 case REG_EQUIV:
13539 case REG_NOALIAS:
13540 /* These notes say something about results of an insn. We can
13541 only support them if they used to be on I3 in which case they
13542 remain on I3. Otherwise they are ignored.
13544 If the note refers to an expression that is not a constant, we
13545 must also ignore the note since we cannot tell whether the
13546 equivalence is still true. It might be possible to do
13547 slightly better than this (we only have a problem if I2DEST
13548 or I1DEST is present in the expression), but it doesn't
13549 seem worth the trouble. */
13551 if (from_insn == i3
13552 && (XEXP (note, 0) == 0 || CONSTANT_P (XEXP (note, 0))))
13553 place = i3;
13554 break;
13556 case REG_INC:
13557 /* These notes say something about how a register is used. They must
13558 be present on any use of the register in I2 or I3. */
13559 if (reg_mentioned_p (XEXP (note, 0), PATTERN (i3)))
13560 place = i3;
13562 if (i2 && reg_mentioned_p (XEXP (note, 0), PATTERN (i2)))
13564 if (place)
13565 place2 = i2;
13566 else
13567 place = i2;
13569 break;
13571 case REG_LABEL_TARGET:
13572 case REG_LABEL_OPERAND:
13573 /* This can show up in several ways -- either directly in the
13574 pattern, or hidden off in the constant pool with (or without?)
13575 a REG_EQUAL note. */
13576 /* ??? Ignore the without-reg_equal-note problem for now. */
13577 if (reg_mentioned_p (XEXP (note, 0), PATTERN (i3))
13578 || ((tem_note = find_reg_note (i3, REG_EQUAL, NULL_RTX))
13579 && GET_CODE (XEXP (tem_note, 0)) == LABEL_REF
13580 && LABEL_REF_LABEL (XEXP (tem_note, 0)) == XEXP (note, 0)))
13581 place = i3;
13583 if (i2
13584 && (reg_mentioned_p (XEXP (note, 0), PATTERN (i2))
13585 || ((tem_note = find_reg_note (i2, REG_EQUAL, NULL_RTX))
13586 && GET_CODE (XEXP (tem_note, 0)) == LABEL_REF
13587 && LABEL_REF_LABEL (XEXP (tem_note, 0)) == XEXP (note, 0))))
13589 if (place)
13590 place2 = i2;
13591 else
13592 place = i2;
13595 /* For REG_LABEL_TARGET on a JUMP_P, we prefer to put the note
13596 as a JUMP_LABEL or decrement LABEL_NUSES if it's already
13597 there. */
13598 if (place && JUMP_P (place)
13599 && REG_NOTE_KIND (note) == REG_LABEL_TARGET
13600 && (JUMP_LABEL (place) == NULL
13601 || JUMP_LABEL (place) == XEXP (note, 0)))
13603 rtx label = JUMP_LABEL (place);
13605 if (!label)
13606 JUMP_LABEL (place) = XEXP (note, 0);
13607 else if (LABEL_P (label))
13608 LABEL_NUSES (label)--;
13611 if (place2 && JUMP_P (place2)
13612 && REG_NOTE_KIND (note) == REG_LABEL_TARGET
13613 && (JUMP_LABEL (place2) == NULL
13614 || JUMP_LABEL (place2) == XEXP (note, 0)))
13616 rtx label = JUMP_LABEL (place2);
13618 if (!label)
13619 JUMP_LABEL (place2) = XEXP (note, 0);
13620 else if (LABEL_P (label))
13621 LABEL_NUSES (label)--;
13622 place2 = 0;
13624 break;
13626 case REG_NONNEG:
13627 /* This note says something about the value of a register prior
13628 to the execution of an insn. It is too much trouble to see
13629 if the note is still correct in all situations. It is better
13630 to simply delete it. */
13631 break;
13633 case REG_DEAD:
13634 /* If we replaced the right hand side of FROM_INSN with a
13635 REG_EQUAL note, the original use of the dying register
13636 will not have been combined into I3 and I2. In such cases,
13637 FROM_INSN is guaranteed to be the first of the combined
13638 instructions, so we simply need to search back before
13639 FROM_INSN for the previous use or set of this register,
13640 then alter the notes there appropriately.
13642 If the register is used as an input in I3, it dies there.
13643 Similarly for I2, if it is nonzero and adjacent to I3.
13645 If the register is not used as an input in either I3 or I2
13646 and it is not one of the registers we were supposed to eliminate,
13647 there are two possibilities. We might have a non-adjacent I2
13648 or we might have somehow eliminated an additional register
13649 from a computation. For example, we might have had A & B where
13650 we discover that B will always be zero. In this case we will
13651 eliminate the reference to A.
13653 In both cases, we must search to see if we can find a previous
13654 use of A and put the death note there. */
13656 if (from_insn
13657 && from_insn == i2mod
13658 && !reg_overlap_mentioned_p (XEXP (note, 0), i2mod_new_rhs))
13659 tem_insn = from_insn;
13660 else
13662 if (from_insn
13663 && CALL_P (from_insn)
13664 && find_reg_fusage (from_insn, USE, XEXP (note, 0)))
13665 place = from_insn;
13666 else if (reg_referenced_p (XEXP (note, 0), PATTERN (i3)))
13667 place = i3;
13668 else if (i2 != 0 && next_nonnote_nondebug_insn (i2) == i3
13669 && reg_referenced_p (XEXP (note, 0), PATTERN (i2)))
13670 place = i2;
13671 else if ((rtx_equal_p (XEXP (note, 0), elim_i2)
13672 && !(i2mod
13673 && reg_overlap_mentioned_p (XEXP (note, 0),
13674 i2mod_old_rhs)))
13675 || rtx_equal_p (XEXP (note, 0), elim_i1)
13676 || rtx_equal_p (XEXP (note, 0), elim_i0))
13677 break;
13678 tem_insn = i3;
13681 if (place == 0)
13683 basic_block bb = this_basic_block;
13685 for (tem_insn = PREV_INSN (tem_insn); place == 0; tem_insn = PREV_INSN (tem_insn))
13687 if (!NONDEBUG_INSN_P (tem_insn))
13689 if (tem_insn == BB_HEAD (bb))
13690 break;
13691 continue;
13694 /* If the register is being set at TEM_INSN, see if that is all
13695 TEM_INSN is doing. If so, delete TEM_INSN. Otherwise, make this
13696 into a REG_UNUSED note instead. Don't delete sets to
13697 global register vars. */
13698 if ((REGNO (XEXP (note, 0)) >= FIRST_PSEUDO_REGISTER
13699 || !global_regs[REGNO (XEXP (note, 0))])
13700 && reg_set_p (XEXP (note, 0), PATTERN (tem_insn)))
13702 rtx set = single_set (tem_insn);
13703 rtx inner_dest = 0;
13704 #ifdef HAVE_cc0
13705 rtx_insn *cc0_setter = NULL;
13706 #endif
13708 if (set != 0)
13709 for (inner_dest = SET_DEST (set);
13710 (GET_CODE (inner_dest) == STRICT_LOW_PART
13711 || GET_CODE (inner_dest) == SUBREG
13712 || GET_CODE (inner_dest) == ZERO_EXTRACT);
13713 inner_dest = XEXP (inner_dest, 0))
13716 /* Verify that it was the set, and not a clobber, that
13717 modified the register.
13719 CC0 targets must be careful to maintain setter/user
13720 pairs. If we cannot delete the setter due to side
13721 effects, mark the user with an UNUSED note instead
13722 of deleting it. */
13724 if (set != 0 && ! side_effects_p (SET_SRC (set))
13725 && rtx_equal_p (XEXP (note, 0), inner_dest)
13726 #ifdef HAVE_cc0
13727 && (! reg_mentioned_p (cc0_rtx, SET_SRC (set))
13728 || ((cc0_setter = prev_cc0_setter (tem_insn)) != NULL
13729 && sets_cc0_p (PATTERN (cc0_setter)) > 0))
13730 #endif
13733 /* Move the notes and links of TEM_INSN elsewhere.
13734 This might delete other dead insns recursively.
13735 First set the pattern to something that won't use
13736 any register. */
13737 rtx old_notes = REG_NOTES (tem_insn);
13739 PATTERN (tem_insn) = pc_rtx;
13740 REG_NOTES (tem_insn) = NULL;
13742 distribute_notes (old_notes, tem_insn, tem_insn, NULL,
13743 NULL_RTX, NULL_RTX, NULL_RTX);
13744 distribute_links (LOG_LINKS (tem_insn));
13746 SET_INSN_DELETED (tem_insn);
13747 if (tem_insn == i2)
13748 i2 = NULL;
13750 #ifdef HAVE_cc0
13751 /* Delete the setter too. */
13752 if (cc0_setter)
13754 PATTERN (cc0_setter) = pc_rtx;
13755 old_notes = REG_NOTES (cc0_setter);
13756 REG_NOTES (cc0_setter) = NULL;
13758 distribute_notes (old_notes, cc0_setter,
13759 cc0_setter, NULL,
13760 NULL_RTX, NULL_RTX, NULL_RTX);
13761 distribute_links (LOG_LINKS (cc0_setter));
13763 SET_INSN_DELETED (cc0_setter);
13764 if (cc0_setter == i2)
13765 i2 = NULL;
13767 #endif
13769 else
13771 PUT_REG_NOTE_KIND (note, REG_UNUSED);
13773 /* If there isn't already a REG_UNUSED note, put one
13774 here. Do not place a REG_DEAD note, even if
13775 the register is also used here; that would not
13776 match the algorithm used in lifetime analysis
13777 and can cause the consistency check in the
13778 scheduler to fail. */
13779 if (! find_regno_note (tem_insn, REG_UNUSED,
13780 REGNO (XEXP (note, 0))))
13781 place = tem_insn;
13782 break;
13785 else if (reg_referenced_p (XEXP (note, 0), PATTERN (tem_insn))
13786 || (CALL_P (tem_insn)
13787 && find_reg_fusage (tem_insn, USE, XEXP (note, 0))))
13789 place = tem_insn;
13791 /* If we are doing a 3->2 combination, and we have a
13792 register which formerly died in i3 and was not used
13793 by i2, which now no longer dies in i3 and is used in
13794 i2 but does not die in i2, and place is between i2
13795 and i3, then we may need to move a link from place to
13796 i2. */
13797 if (i2 && DF_INSN_LUID (place) > DF_INSN_LUID (i2)
13798 && from_insn
13799 && DF_INSN_LUID (from_insn) > DF_INSN_LUID (i2)
13800 && reg_referenced_p (XEXP (note, 0), PATTERN (i2)))
13802 struct insn_link *links = LOG_LINKS (place);
13803 LOG_LINKS (place) = NULL;
13804 distribute_links (links);
13806 break;
13809 if (tem_insn == BB_HEAD (bb))
13810 break;
13815 /* If the register is set or already dead at PLACE, we needn't do
13816 anything with this note if it is still a REG_DEAD note.
13817 We check here if it is set at all, not if it is totally replaced,
13818 which is what `dead_or_set_p' checks, so also check for it being
13819 set partially. */
13821 if (place && REG_NOTE_KIND (note) == REG_DEAD)
13823 unsigned int regno = REGNO (XEXP (note, 0));
13824 reg_stat_type *rsp = &reg_stat[regno];
13826 if (dead_or_set_p (place, XEXP (note, 0))
13827 || reg_bitfield_target_p (XEXP (note, 0), PATTERN (place)))
13829 /* Unless the register previously died in PLACE, clear
13830 last_death. [I no longer understand why this is
13831 being done.] */
13832 if (rsp->last_death != place)
13833 rsp->last_death = 0;
13834 place = 0;
13836 else
13837 rsp->last_death = place;
13839 /* If this is a death note for a hard reg that is occupying
13840 multiple registers, ensure that we are still using all
13841 parts of the object. If we find a piece of the object
13842 that is unused, we must arrange for an appropriate REG_DEAD
13843 note to be added for it. However, we can't just emit a USE
13844 and tag the note to it, since the register might actually
13845 be dead; so we recurse, and the recursive call then finds
13846 the previous insn that used this register. */
13848 if (place && regno < FIRST_PSEUDO_REGISTER
13849 && hard_regno_nregs[regno][GET_MODE (XEXP (note, 0))] > 1)
13851 unsigned int endregno = END_HARD_REGNO (XEXP (note, 0));
13852 bool all_used = true;
13853 unsigned int i;
13855 for (i = regno; i < endregno; i++)
13856 if ((! refers_to_regno_p (i, PATTERN (place))
13857 && ! find_regno_fusage (place, USE, i))
13858 || dead_or_set_regno_p (place, i))
13860 all_used = false;
13861 break;
13864 if (! all_used)
13866 /* Put only REG_DEAD notes for pieces that are
13867 not already dead or set. */
13869 for (i = regno; i < endregno;
13870 i += hard_regno_nregs[i][reg_raw_mode[i]])
13872 rtx piece = regno_reg_rtx[i];
13873 basic_block bb = this_basic_block;
13875 if (! dead_or_set_p (place, piece)
13876 && ! reg_bitfield_target_p (piece,
13877 PATTERN (place)))
13879 rtx new_note = alloc_reg_note (REG_DEAD, piece,
13880 NULL_RTX);
13882 distribute_notes (new_note, place, place,
13883 NULL, NULL_RTX, NULL_RTX,
13884 NULL_RTX);
13886 else if (! refers_to_regno_p (i, PATTERN (place))
13887 && ! find_regno_fusage (place, USE, i))
13888 for (tem_insn = PREV_INSN (place); ;
13889 tem_insn = PREV_INSN (tem_insn))
13891 if (!NONDEBUG_INSN_P (tem_insn))
13893 if (tem_insn == BB_HEAD (bb))
13894 break;
13895 continue;
13897 if (dead_or_set_p (tem_insn, piece)
13898 || reg_bitfield_target_p (piece,
13899 PATTERN (tem_insn)))
13901 add_reg_note (tem_insn, REG_UNUSED, piece);
13902 break;
13907 place = 0;
13911 break;
13913 default:
13914 /* Any other notes should not be present at this point in the
13915 compilation. */
13916 gcc_unreachable ();
13919 if (place)
13921 XEXP (note, 1) = REG_NOTES (place);
13922 REG_NOTES (place) = note;
13925 if (place2)
13926 add_shallow_copy_of_reg_note (place2, note);
13930 /* Similarly to above, distribute the LOG_LINKS that used to be present on
13931 I3, I2, and I1 to new locations. This is also called to add a link
13932 pointing at I3 when I3's destination is changed. */
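/* Editor's illustrative example (insn and register numbers are made up):
   if insn 10 sets (reg 65) and insn 12 was its first user, LOG_LINKS of
   insn 12 holds a link {insn 10, regno 65}.  Should a combination rewrite
   insn 12 so that it no longer uses (reg 65), that link is passed to
   distribute_links, which searches forward from the insn after insn 10
   and re-attaches the link to the next insn in the block that uses
   (reg 65), or drops it if (reg 65) is set first or the block ends.  */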
13934 static void
13935 distribute_links (struct insn_link *links)
13937 struct insn_link *link, *next_link;
13939 for (link = links; link; link = next_link)
13941 rtx_insn *place = 0;
13942 rtx_insn *insn;
13943 rtx set, reg;
13945 next_link = link->next;
13947 /* If the insn that this link points to is a NOTE, ignore it. */
13948 if (NOTE_P (link->insn))
13949 continue;
13951 set = 0;
13952 rtx pat = PATTERN (link->insn);
13953 if (GET_CODE (pat) == SET)
13954 set = pat;
13955 else if (GET_CODE (pat) == PARALLEL)
13957 int i;
13958 for (i = 0; i < XVECLEN (pat, 0); i++)
13960 set = XVECEXP (pat, 0, i);
13961 if (GET_CODE (set) != SET)
13962 continue;
13964 reg = SET_DEST (set);
13965 while (GET_CODE (reg) == ZERO_EXTRACT
13966 || GET_CODE (reg) == STRICT_LOW_PART
13967 || GET_CODE (reg) == SUBREG)
13968 reg = XEXP (reg, 0);
13970 if (!REG_P (reg))
13971 continue;
13973 if (REGNO (reg) == link->regno)
13974 break;
13976 if (i == XVECLEN (pat, 0))
13977 continue;
13979 else
13980 continue;
13982 reg = SET_DEST (set);
13984 while (GET_CODE (reg) == ZERO_EXTRACT
13985 || GET_CODE (reg) == STRICT_LOW_PART
13986 || GET_CODE (reg) == SUBREG)
13987 reg = XEXP (reg, 0);
13989 /* A LOG_LINK is placed on the first insn that uses a register
13990 and points to the insn that sets that register. Start
13991 searching at the next insn after the target of the link and stop
13992 when we reach a set of the register or the end of the basic block.
13994 Note that this correctly handles the link that used to point from
13995 I3 to I2. Also note that not much searching is typically done here
13996 since most links don't point very far away. */
13998 for (insn = NEXT_INSN (link->insn);
13999 (insn && (this_basic_block->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
14000 || BB_HEAD (this_basic_block->next_bb) != insn));
14001 insn = NEXT_INSN (insn))
14002 if (DEBUG_INSN_P (insn))
14003 continue;
14004 else if (INSN_P (insn) && reg_overlap_mentioned_p (reg, PATTERN (insn)))
14006 if (reg_referenced_p (reg, PATTERN (insn)))
14007 place = insn;
14008 break;
14010 else if (CALL_P (insn)
14011 && find_reg_fusage (insn, USE, reg))
14013 place = insn;
14014 break;
14016 else if (INSN_P (insn) && reg_set_p (reg, insn))
14017 break;
14019 /* If we found a place to put the link, place it there unless there
14020 is already a link to the same insn as LINK at that point. */
14022 if (place)
14024 struct insn_link *link2;
14026 FOR_EACH_LOG_LINK (link2, place)
14027 if (link2->insn == link->insn && link2->regno == link->regno)
14028 break;
14030 if (link2 == NULL)
14032 link->next = LOG_LINKS (place);
14033 LOG_LINKS (place) = link;
14035 /* Set added_links_insn to the earliest insn we added a
14036 link to. */
14037 if (added_links_insn == 0
14038 || DF_INSN_LUID (added_links_insn) > DF_INSN_LUID (place))
14039 added_links_insn = place;
14045 /* Check for any register or memory mentioned in EQUIV that is not
14046 mentioned in EXPR. This is used to restrict EQUIV to "specializations"
14047 of EXPR where some registers may have been replaced by constants. */
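/* Editor's illustrative example (hypothetical pseudo-register numbers):
   with EXPR = (plus:SI (reg:SI 65) (reg:SI 66)), the specialization
   EQUIV = (plus:SI (reg:SI 65) (const_int 4)) mentions no register or
   memory location that is absent from EXPR, so unmentioned_reg_p
   returns false; an EQUIV that used (reg:SI 67) instead would make it
   return true.  */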
14049 static bool
14050 unmentioned_reg_p (rtx equiv, rtx expr)
14052 subrtx_iterator::array_type array;
14053 FOR_EACH_SUBRTX (iter, array, equiv, NONCONST)
14055 const_rtx x = *iter;
14056 if ((REG_P (x) || MEM_P (x))
14057 && !reg_mentioned_p (x, expr))
14058 return true;
14060 return false;
14063 DEBUG_FUNCTION void
14064 dump_combine_stats (FILE *file)
14066 fprintf
14067 (file,
14068 ";; Combiner statistics: %d attempts, %d substitutions (%d requiring new space),\n;; %d successes.\n\n",
14069 combine_attempts, combine_merges, combine_extras, combine_successes);
14072 void
14073 dump_combine_total_stats (FILE *file)
14075 fprintf
14076 (file,
14077 "\n;; Combiner totals: %d attempts, %d substitutions (%d requiring new space),\n;; %d successes.\n",
14078 total_attempts, total_merges, total_extras, total_successes);
14081 /* Try combining insns through substitution. */
14082 static unsigned int
14083 rest_of_handle_combine (void)
14085 int rebuild_jump_labels_after_combine;
14087 df_set_flags (DF_LR_RUN_DCE + DF_DEFER_INSN_RESCAN);
14088 df_note_add_problem ();
14089 df_analyze ();
14091 regstat_init_n_sets_and_refs ();
14093 rebuild_jump_labels_after_combine
14094 = combine_instructions (get_insns (), max_reg_num ());
14096 /* Combining insns may have turned an indirect jump into a
14097 direct jump. Rebuild the JUMP_LABEL fields of jumping
14098 instructions. */
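/* Editor's hypothetical example: if an earlier insn loaded (reg 70) with
   (label_ref L) and combination substituted that value into
   (set (pc) (reg 70)), the result is a direct jump to L, whose
   JUMP_LABEL field (and possibly the CFG) must be recomputed below.  */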
14099 if (rebuild_jump_labels_after_combine)
14101 timevar_push (TV_JUMP);
14102 rebuild_jump_labels (get_insns ());
14103 cleanup_cfg (0);
14104 timevar_pop (TV_JUMP);
14107 regstat_free_n_sets_and_refs ();
14108 return 0;
14111 namespace {
14113 const pass_data pass_data_combine =
14115 RTL_PASS, /* type */
14116 "combine", /* name */
14117 OPTGROUP_NONE, /* optinfo_flags */
14118 TV_COMBINE, /* tv_id */
14119 PROP_cfglayout, /* properties_required */
14120 0, /* properties_provided */
14121 0, /* properties_destroyed */
14122 0, /* todo_flags_start */
14123 TODO_df_finish, /* todo_flags_finish */
14126 class pass_combine : public rtl_opt_pass
14128 public:
14129 pass_combine (gcc::context *ctxt)
14130 : rtl_opt_pass (pass_data_combine, ctxt)
14133 /* opt_pass methods: */
14134 virtual bool gate (function *) { return (optimize > 0); }
14135 virtual unsigned int execute (function *)
14137 return rest_of_handle_combine ();
14140 }; // class pass_combine
14142 } // anon namespace
14144 rtl_opt_pass *
14145 make_pass_combine (gcc::context *ctxt)
14147 return new pass_combine (ctxt);