PR rtl-optimization/82913
[official-gcc.git] / gcc / combine.c
blobeab10c599a61bffdbbecd1c37a38da0f30768cf8
1 /* Optimize by combining instructions for GNU compiler.
2 Copyright (C) 1987-2017 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
20 /* This module is essentially the "combiner" phase of the U. of Arizona
21 Portable Optimizer, but redone to work on our list-structured
22 representation for RTL instead of their string representation.
24 The LOG_LINKS of each insn identify the most recent assignment
25 to each REG used in the insn. It is a list of previous insns,
26 each of which contains a SET for a REG that is used in this insn
27 and not used or set in between. LOG_LINKs never cross basic blocks.
28 They were set up by the preceding pass (lifetime analysis).
30 We try to combine each pair of insns joined by a logical link.
31 We also try to combine triplets of insns A, B and C when C has
32 a link back to B and B has a link back to A. Likewise for a
33 small number of quadruplets of insns A, B, C and D for which
34 there's high likelihood of success.
36 LOG_LINKS does not have links for use of the CC0. They don't
37 need to, because the insn that sets the CC0 is always immediately
38 before the insn that tests it. So we always regard a branch
39 insn as having a logical link to the preceding insn. The same is true
40 for an insn explicitly using CC0.
42 We check (with use_crosses_set_p) to avoid combining in such a way
43 as to move a computation to a place where its value would be different.
45 Combination is done by mathematically substituting the previous
46 insn(s) values for the regs they set into the expressions in
47 the later insns that refer to these regs. If the result is a valid insn
48 for our target machine, according to the machine description,
49 we install it, delete the earlier insns, and update the data flow
50 information (LOG_LINKS and REG_NOTES) for what we did.
52 There are a few exceptions where the dataflow information isn't
53 completely updated (however this is only a local issue since it is
54 regenerated before the next pass that uses it):
56 - reg_live_length is not updated
57 - reg_n_refs is not adjusted in the rare case when a register is
58 no longer required in a computation
59 - there are extremely rare cases (see distribute_notes) when a
60 REG_DEAD note is lost
61 - a LOG_LINKS entry that refers to an insn with multiple SETs may be
62 removed because there is no way to know which register it was
63 linking
65 To simplify substitution, we combine only when the earlier insn(s)
66 consist of only a single assignment. To simplify updating afterward,
67 we never combine when a subroutine call appears in the middle.
69 Since we do not represent assignments to CC0 explicitly except when that
70 is all an insn does, there is no LOG_LINKS entry in an insn that uses
71 the condition code for the insn that set the condition code.
72 Fortunately, these two insns must be consecutive.
73 Therefore, every JUMP_INSN is taken to have an implicit logical link
74 to the preceding insn. This is not quite right, since non-jumps can
75 also use the condition code; but in practice such insns would not
76 combine anyway. */
78 #include "config.h"
79 #include "system.h"
80 #include "coretypes.h"
81 #include "backend.h"
82 #include "target.h"
83 #include "rtl.h"
84 #include "tree.h"
85 #include "cfghooks.h"
86 #include "predict.h"
87 #include "df.h"
88 #include "memmodel.h"
89 #include "tm_p.h"
90 #include "optabs.h"
91 #include "regs.h"
92 #include "emit-rtl.h"
93 #include "recog.h"
94 #include "cgraph.h"
95 #include "stor-layout.h"
96 #include "cfgrtl.h"
97 #include "cfgcleanup.h"
98 /* Include expr.h after insn-config.h so we get HAVE_conditional_move. */
99 #include "explow.h"
100 #include "insn-attr.h"
101 #include "rtlhooks-def.h"
102 #include "params.h"
103 #include "tree-pass.h"
104 #include "valtrack.h"
105 #include "rtl-iter.h"
106 #include "print-rtl.h"
108 /* Number of attempts to combine instructions in this function. */
110 static int combine_attempts;
112 /* Number of attempts that got as far as substitution in this function. */
114 static int combine_merges;
116 /* Number of instructions combined with added SETs in this function. */
118 static int combine_extras;
120 /* Number of instructions combined in this function. */
122 static int combine_successes;
124 /* Totals over entire compilation. */
126 static int total_attempts, total_merges, total_extras, total_successes;
128 /* combine_instructions may try to replace the right hand side of the
129 second instruction with the value of an associated REG_EQUAL note
130 before throwing it at try_combine. That is problematic when there
131 is a REG_DEAD note for a register used in the old right hand side
132 and can cause distribute_notes to do wrong things. This is the
133 second instruction if it has been so modified, null otherwise. */
135 static rtx_insn *i2mod;
137 /* When I2MOD is nonnull, this is a copy of the old right hand side. */
139 static rtx i2mod_old_rhs;
141 /* When I2MOD is nonnull, this is a copy of the new right hand side. */
143 static rtx i2mod_new_rhs;
145 struct reg_stat_type {
146 /* Record last point of death of (hard or pseudo) register n. */
147 rtx_insn *last_death;
149 /* Record last point of modification of (hard or pseudo) register n. */
150 rtx_insn *last_set;
152 /* The next group of fields allows the recording of the last value assigned
153 to (hard or pseudo) register n. We use this information to see if an
154 operation being processed is redundant given a prior operation performed
155 on the register. For example, an `and' with a constant is redundant if
156 all the zero bits are already known to be turned off.
158 We use an approach similar to that used by cse, but change it in the
159 following ways:
161 (1) We do not want to reinitialize at each label.
162 (2) It is useful, but not critical, to know the actual value assigned
163 to a register. Often just its form is helpful.
165 Therefore, we maintain the following fields:
167 last_set_value the last value assigned
168 last_set_label records the value of label_tick when the
169 register was assigned
170 last_set_table_tick records the value of label_tick when a
171 value using the register is assigned
172 last_set_invalid set to nonzero when it is not valid
173 to use the value of this register in some
174 register's value
176 To understand the usage of these tables, it is important to understand
177 the distinction between the value in last_set_value being valid and
178 the register being validly contained in some other expression in the
179 table.
181 (The next two parameters are out of date).
183 reg_stat[i].last_set_value is valid if it is nonzero, and either
184 reg_n_sets[i] is 1 or reg_stat[i].last_set_label == label_tick.
186 Register I may validly appear in any expression returned for the value
187 of another register if reg_n_sets[i] is 1. It may also appear in the
188 value for register J if reg_stat[j].last_set_invalid is zero, or
189 reg_stat[i].last_set_label < reg_stat[j].last_set_label.
191 If an expression is found in the table containing a register which may
192 not validly appear in an expression, the register is replaced by
193 something that won't match, (clobber (const_int 0)). */
195 /* Record last value assigned to (hard or pseudo) register n. */
197 rtx last_set_value;
199 /* Record the value of label_tick when an expression involving register n
200 is placed in last_set_value. */
202 int last_set_table_tick;
204 /* Record the value of label_tick when the value for register n is placed in
205 last_set_value. */
207 int last_set_label;
209 /* These fields are maintained in parallel with last_set_value and are
210 used to store the mode in which the register was last set, the bits
211 that were known to be zero when it was last set, and the number of
212 sign bits copies it was known to have when it was last set. */
214 unsigned HOST_WIDE_INT last_set_nonzero_bits;
215 char last_set_sign_bit_copies;
216 ENUM_BITFIELD(machine_mode) last_set_mode : 8;
218 /* Set nonzero if references to register n in expressions should not be
219 used. last_set_invalid is set nonzero when this register is being
220 assigned to and last_set_table_tick == label_tick. */
222 char last_set_invalid;
224 /* Some registers that are set more than once and used in more than one
225 basic block are nevertheless always set in similar ways. For example,
226 a QImode register may be loaded from memory in two places on a machine
227 where byte loads zero extend.
229 We record in the following fields if a register has some leading bits
230 that are always equal to the sign bit, and what we know about the
231 nonzero bits of a register, specifically which bits are known to be
232 zero.
234 If an entry is zero, it means that we don't know anything special. */
236 unsigned char sign_bit_copies;
238 unsigned HOST_WIDE_INT nonzero_bits;
240 /* Record the value of the label_tick when the last truncation
241 happened. The field truncated_to_mode is only valid if
242 truncation_label == label_tick. */
244 int truncation_label;
246 /* Record the last truncation seen for this register. If truncation
247 is not a nop to this mode we might be able to save an explicit
248 truncation if we know that value already contains a truncated
249 value. */
251 ENUM_BITFIELD(machine_mode) truncated_to_mode : 8;
255 static vec<reg_stat_type> reg_stat;
257 /* One plus the highest pseudo for which we track REG_N_SETS.
258 regstat_init_n_sets_and_refs allocates the array for REG_N_SETS just once,
259 but during combine_split_insns new pseudos can be created. As we don't have
260 updated DF information in that case, it is hard to initialize the array
261 after growing. The combiner only cares about REG_N_SETS (regno) == 1,
262 so instead of growing the arrays, just assume all newly created pseudos
263 during combine might be set multiple times. */
265 static unsigned int reg_n_sets_max;
267 /* Record the luid of the last insn that invalidated memory
268 (anything that writes memory, and subroutine calls, but not pushes). */
270 static int mem_last_set;
272 /* Record the luid of the last CALL_INSN
273 so we can tell whether a potential combination crosses any calls. */
275 static int last_call_luid;
277 /* When `subst' is called, this is the insn that is being modified
278 (by combining in a previous insn). The PATTERN of this insn
279 is still the old pattern partially modified and it should not be
280 looked at, but this may be used to examine the successors of the insn
281 to judge whether a simplification is valid. */
283 static rtx_insn *subst_insn;
285 /* This is the lowest LUID that `subst' is currently dealing with.
286 get_last_value will not return a value if the register was set at or
287 after this LUID. If not for this mechanism, we could get confused if
288 I2 or I1 in try_combine were an insn that used the old value of a register
289 to obtain a new value. In that case, we might erroneously get the
290 new value of the register when we wanted the old one. */
292 static int subst_low_luid;
294 /* This contains any hard registers that are used in newpat; reg_dead_at_p
295 must consider all these registers to be always live. */
297 static HARD_REG_SET newpat_used_regs;
299 /* This is an insn to which a LOG_LINKS entry has been added. If this
300 insn is the earlier than I2 or I3, combine should rescan starting at
301 that location. */
303 static rtx_insn *added_links_insn;
305 /* Basic block in which we are performing combines. */
306 static basic_block this_basic_block;
307 static bool optimize_this_for_speed_p;
310 /* Length of the currently allocated uid_insn_cost array. */
312 static int max_uid_known;
314 /* The following array records the insn_cost for every insn
315 in the instruction stream. */
317 static int *uid_insn_cost;
319 /* The following array records the LOG_LINKS for every insn in the
320 instruction stream as struct insn_link pointers. */
322 struct insn_link {
323 rtx_insn *insn;
324 unsigned int regno;
325 struct insn_link *next;
328 static struct insn_link **uid_log_links;
330 static inline int
331 insn_uid_check (const_rtx insn)
333 int uid = INSN_UID (insn);
334 gcc_checking_assert (uid <= max_uid_known);
335 return uid;
338 #define INSN_COST(INSN) (uid_insn_cost[insn_uid_check (INSN)])
339 #define LOG_LINKS(INSN) (uid_log_links[insn_uid_check (INSN)])
341 #define FOR_EACH_LOG_LINK(L, INSN) \
342 for ((L) = LOG_LINKS (INSN); (L); (L) = (L)->next)
344 /* Links for LOG_LINKS are allocated from this obstack. */
346 static struct obstack insn_link_obstack;
348 /* Allocate a link. */
350 static inline struct insn_link *
351 alloc_insn_link (rtx_insn *insn, unsigned int regno, struct insn_link *next)
353 struct insn_link *l
354 = (struct insn_link *) obstack_alloc (&insn_link_obstack,
355 sizeof (struct insn_link));
356 l->insn = insn;
357 l->regno = regno;
358 l->next = next;
359 return l;
362 /* Incremented for each basic block. */
364 static int label_tick;
366 /* Reset to label_tick for each extended basic block in scanning order. */
368 static int label_tick_ebb_start;
370 /* Mode used to compute significance in reg_stat[].nonzero_bits. It is the
371 largest integer mode that can fit in HOST_BITS_PER_WIDE_INT. */
373 static scalar_int_mode nonzero_bits_mode;
375 /* Nonzero when reg_stat[].nonzero_bits and reg_stat[].sign_bit_copies can
376 be safely used. It is zero while computing them and after combine has
377 completed. This former test prevents propagating values based on
378 previously set values, which can be incorrect if a variable is modified
379 in a loop. */
381 static int nonzero_sign_valid;
384 /* Record one modification to rtl structure
385 to be undone by storing old_contents into *where. */
387 enum undo_kind { UNDO_RTX, UNDO_INT, UNDO_MODE, UNDO_LINKS };
389 struct undo
391 struct undo *next;
392 enum undo_kind kind;
393 union { rtx r; int i; machine_mode m; struct insn_link *l; } old_contents;
394 union { rtx *r; int *i; struct insn_link **l; } where;
397 /* Record a bunch of changes to be undone, up to MAX_UNDO of them.
398 num_undo says how many are currently recorded.
400 other_insn is nonzero if we have modified some other insn in the process
401 of working on subst_insn. It must be verified too. */
403 struct undobuf
405 struct undo *undos;
406 struct undo *frees;
407 rtx_insn *other_insn;
410 static struct undobuf undobuf;
412 /* Number of times the pseudo being substituted for
413 was found and replaced. */
415 static int n_occurrences;
417 static rtx reg_nonzero_bits_for_combine (const_rtx, scalar_int_mode,
418 scalar_int_mode,
419 unsigned HOST_WIDE_INT *);
420 static rtx reg_num_sign_bit_copies_for_combine (const_rtx, scalar_int_mode,
421 scalar_int_mode,
422 unsigned int *);
423 static void do_SUBST (rtx *, rtx);
424 static void do_SUBST_INT (int *, int);
425 static void init_reg_last (void);
426 static void setup_incoming_promotions (rtx_insn *);
427 static void set_nonzero_bits_and_sign_copies (rtx, const_rtx, void *);
428 static int cant_combine_insn_p (rtx_insn *);
429 static int can_combine_p (rtx_insn *, rtx_insn *, rtx_insn *, rtx_insn *,
430 rtx_insn *, rtx_insn *, rtx *, rtx *);
431 static int combinable_i3pat (rtx_insn *, rtx *, rtx, rtx, rtx, int, int, rtx *);
432 static int contains_muldiv (rtx);
433 static rtx_insn *try_combine (rtx_insn *, rtx_insn *, rtx_insn *, rtx_insn *,
434 int *, rtx_insn *);
435 static void undo_all (void);
436 static void undo_commit (void);
437 static rtx *find_split_point (rtx *, rtx_insn *, bool);
438 static rtx subst (rtx, rtx, rtx, int, int, int);
439 static rtx combine_simplify_rtx (rtx, machine_mode, int, int);
440 static rtx simplify_if_then_else (rtx);
441 static rtx simplify_set (rtx);
442 static rtx simplify_logical (rtx);
443 static rtx expand_compound_operation (rtx);
444 static const_rtx expand_field_assignment (const_rtx);
445 static rtx make_extraction (machine_mode, rtx, HOST_WIDE_INT,
446 rtx, unsigned HOST_WIDE_INT, int, int, int);
447 static int get_pos_from_mask (unsigned HOST_WIDE_INT,
448 unsigned HOST_WIDE_INT *);
449 static rtx canon_reg_for_combine (rtx, rtx);
450 static rtx force_int_to_mode (rtx, scalar_int_mode, scalar_int_mode,
451 scalar_int_mode, unsigned HOST_WIDE_INT, int);
452 static rtx force_to_mode (rtx, machine_mode,
453 unsigned HOST_WIDE_INT, int);
454 static rtx if_then_else_cond (rtx, rtx *, rtx *);
455 static rtx known_cond (rtx, enum rtx_code, rtx, rtx);
456 static int rtx_equal_for_field_assignment_p (rtx, rtx, bool = false);
457 static rtx make_field_assignment (rtx);
458 static rtx apply_distributive_law (rtx);
459 static rtx distribute_and_simplify_rtx (rtx, int);
460 static rtx simplify_and_const_int_1 (scalar_int_mode, rtx,
461 unsigned HOST_WIDE_INT);
462 static rtx simplify_and_const_int (rtx, scalar_int_mode, rtx,
463 unsigned HOST_WIDE_INT);
464 static int merge_outer_ops (enum rtx_code *, HOST_WIDE_INT *, enum rtx_code,
465 HOST_WIDE_INT, machine_mode, int *);
466 static rtx simplify_shift_const_1 (enum rtx_code, machine_mode, rtx, int);
467 static rtx simplify_shift_const (rtx, enum rtx_code, machine_mode, rtx,
468 int);
469 static int recog_for_combine (rtx *, rtx_insn *, rtx *);
470 static rtx gen_lowpart_for_combine (machine_mode, rtx);
471 static enum rtx_code simplify_compare_const (enum rtx_code, machine_mode,
472 rtx, rtx *);
473 static enum rtx_code simplify_comparison (enum rtx_code, rtx *, rtx *);
474 static void update_table_tick (rtx);
475 static void record_value_for_reg (rtx, rtx_insn *, rtx);
476 static void check_promoted_subreg (rtx_insn *, rtx);
477 static void record_dead_and_set_regs_1 (rtx, const_rtx, void *);
478 static void record_dead_and_set_regs (rtx_insn *);
479 static int get_last_value_validate (rtx *, rtx_insn *, int, int);
480 static rtx get_last_value (const_rtx);
481 static int use_crosses_set_p (const_rtx, int);
482 static void reg_dead_at_p_1 (rtx, const_rtx, void *);
483 static int reg_dead_at_p (rtx, rtx_insn *);
484 static void move_deaths (rtx, rtx, int, rtx_insn *, rtx *);
485 static int reg_bitfield_target_p (rtx, rtx);
486 static void distribute_notes (rtx, rtx_insn *, rtx_insn *, rtx_insn *, rtx, rtx, rtx);
487 static void distribute_links (struct insn_link *);
488 static void mark_used_regs_combine (rtx);
489 static void record_promoted_value (rtx_insn *, rtx);
490 static bool unmentioned_reg_p (rtx, rtx);
491 static void record_truncated_values (rtx *, void *);
492 static bool reg_truncated_to_mode (machine_mode, const_rtx);
493 static rtx gen_lowpart_or_truncate (machine_mode, rtx);
496 /* It is not safe to use ordinary gen_lowpart in combine.
497 See comments in gen_lowpart_for_combine. */
498 #undef RTL_HOOKS_GEN_LOWPART
499 #define RTL_HOOKS_GEN_LOWPART gen_lowpart_for_combine
501 /* Our implementation of gen_lowpart never emits a new pseudo. */
502 #undef RTL_HOOKS_GEN_LOWPART_NO_EMIT
503 #define RTL_HOOKS_GEN_LOWPART_NO_EMIT gen_lowpart_for_combine
505 #undef RTL_HOOKS_REG_NONZERO_REG_BITS
506 #define RTL_HOOKS_REG_NONZERO_REG_BITS reg_nonzero_bits_for_combine
508 #undef RTL_HOOKS_REG_NUM_SIGN_BIT_COPIES
509 #define RTL_HOOKS_REG_NUM_SIGN_BIT_COPIES reg_num_sign_bit_copies_for_combine
511 #undef RTL_HOOKS_REG_TRUNCATED_TO_MODE
512 #define RTL_HOOKS_REG_TRUNCATED_TO_MODE reg_truncated_to_mode
514 static const struct rtl_hooks combine_rtl_hooks = RTL_HOOKS_INITIALIZER;
517 /* Convenience wrapper for the canonicalize_comparison target hook.
518 Target hooks cannot use enum rtx_code. */
519 static inline void
520 target_canonicalize_comparison (enum rtx_code *code, rtx *op0, rtx *op1,
521 bool op0_preserve_value)
523 int code_int = (int)*code;
524 targetm.canonicalize_comparison (&code_int, op0, op1, op0_preserve_value);
525 *code = (enum rtx_code)code_int;
528 /* Try to split PATTERN found in INSN. This returns NULL_RTX if
529 PATTERN can not be split. Otherwise, it returns an insn sequence.
530 This is a wrapper around split_insns which ensures that the
531 reg_stat vector is made larger if the splitter creates a new
532 register. */
534 static rtx_insn *
535 combine_split_insns (rtx pattern, rtx_insn *insn)
537 rtx_insn *ret;
538 unsigned int nregs;
540 ret = split_insns (pattern, insn);
541 nregs = max_reg_num ();
542 if (nregs > reg_stat.length ())
543 reg_stat.safe_grow_cleared (nregs);
544 return ret;
547 /* This is used by find_single_use to locate an rtx in LOC that
548 contains exactly one use of DEST, which is typically either a REG
549 or CC0. It returns a pointer to the innermost rtx expression
550 containing DEST. Appearances of DEST that are being used to
551 totally replace it are not counted. */
553 static rtx *
554 find_single_use_1 (rtx dest, rtx *loc)
556 rtx x = *loc;
557 enum rtx_code code = GET_CODE (x);
558 rtx *result = NULL;
559 rtx *this_result;
560 int i;
561 const char *fmt;
563 switch (code)
565 case CONST:
566 case LABEL_REF:
567 case SYMBOL_REF:
568 CASE_CONST_ANY:
569 case CLOBBER:
570 return 0;
572 case SET:
573 /* If the destination is anything other than CC0, PC, a REG or a SUBREG
574 of a REG that occupies all of the REG, the insn uses DEST if
575 it is mentioned in the destination or the source. Otherwise, we
576 need just check the source. */
577 if (GET_CODE (SET_DEST (x)) != CC0
578 && GET_CODE (SET_DEST (x)) != PC
579 && !REG_P (SET_DEST (x))
580 && ! (GET_CODE (SET_DEST (x)) == SUBREG
581 && REG_P (SUBREG_REG (SET_DEST (x)))
582 && !read_modify_subreg_p (SET_DEST (x))))
583 break;
585 return find_single_use_1 (dest, &SET_SRC (x));
587 case MEM:
588 case SUBREG:
589 return find_single_use_1 (dest, &XEXP (x, 0));
591 default:
592 break;
595 /* If it wasn't one of the common cases above, check each expression and
596 vector of this code. Look for a unique usage of DEST. */
598 fmt = GET_RTX_FORMAT (code);
599 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
601 if (fmt[i] == 'e')
603 if (dest == XEXP (x, i)
604 || (REG_P (dest) && REG_P (XEXP (x, i))
605 && REGNO (dest) == REGNO (XEXP (x, i))))
606 this_result = loc;
607 else
608 this_result = find_single_use_1 (dest, &XEXP (x, i));
610 if (result == NULL)
611 result = this_result;
612 else if (this_result)
613 /* Duplicate usage. */
614 return NULL;
616 else if (fmt[i] == 'E')
618 int j;
620 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
622 if (XVECEXP (x, i, j) == dest
623 || (REG_P (dest)
624 && REG_P (XVECEXP (x, i, j))
625 && REGNO (XVECEXP (x, i, j)) == REGNO (dest)))
626 this_result = loc;
627 else
628 this_result = find_single_use_1 (dest, &XVECEXP (x, i, j));
630 if (result == NULL)
631 result = this_result;
632 else if (this_result)
633 return NULL;
638 return result;
642 /* See if DEST, produced in INSN, is used only a single time in the
643 sequel. If so, return a pointer to the innermost rtx expression in which
644 it is used.
646 If PLOC is nonzero, *PLOC is set to the insn containing the single use.
648 If DEST is cc0_rtx, we look only at the next insn. In that case, we don't
649 care about REG_DEAD notes or LOG_LINKS.
651 Otherwise, we find the single use by finding an insn that has a
652 LOG_LINKS pointing at INSN and has a REG_DEAD note for DEST. If DEST is
653 only referenced once in that insn, we know that it must be the first
654 and last insn referencing DEST. */
656 static rtx *
657 find_single_use (rtx dest, rtx_insn *insn, rtx_insn **ploc)
659 basic_block bb;
660 rtx_insn *next;
661 rtx *result;
662 struct insn_link *link;
664 if (dest == cc0_rtx)
666 next = NEXT_INSN (insn);
667 if (next == 0
668 || (!NONJUMP_INSN_P (next) && !JUMP_P (next)))
669 return 0;
671 result = find_single_use_1 (dest, &PATTERN (next));
672 if (result && ploc)
673 *ploc = next;
674 return result;
677 if (!REG_P (dest))
678 return 0;
680 bb = BLOCK_FOR_INSN (insn);
681 for (next = NEXT_INSN (insn);
682 next && BLOCK_FOR_INSN (next) == bb;
683 next = NEXT_INSN (next))
684 if (NONDEBUG_INSN_P (next) && dead_or_set_p (next, dest))
686 FOR_EACH_LOG_LINK (link, next)
687 if (link->insn == insn && link->regno == REGNO (dest))
688 break;
690 if (link)
692 result = find_single_use_1 (dest, &PATTERN (next));
693 if (ploc)
694 *ploc = next;
695 return result;
699 return 0;
702 /* Substitute NEWVAL, an rtx expression, into INTO, a place in some
703 insn. The substitution can be undone by undo_all. If INTO is already
704 set to NEWVAL, do not record this change. Because computing NEWVAL might
705 also call SUBST, we have to compute it before we put anything into
706 the undo table. */
708 static void
709 do_SUBST (rtx *into, rtx newval)
711 struct undo *buf;
712 rtx oldval = *into;
714 if (oldval == newval)
715 return;
717 /* We'd like to catch as many invalid transformations here as
718 possible. Unfortunately, there are way too many mode changes
719 that are perfectly valid, so we'd waste too much effort for
720 little gain doing the checks here. Focus on catching invalid
721 transformations involving integer constants. */
722 if (GET_MODE_CLASS (GET_MODE (oldval)) == MODE_INT
723 && CONST_INT_P (newval))
725 /* Sanity check that we're replacing oldval with a CONST_INT
726 that is a valid sign-extension for the original mode. */
727 gcc_assert (INTVAL (newval)
728 == trunc_int_for_mode (INTVAL (newval), GET_MODE (oldval)));
730 /* Replacing the operand of a SUBREG or a ZERO_EXTEND with a
731 CONST_INT is not valid, because after the replacement, the
732 original mode would be gone. Unfortunately, we can't tell
733 when do_SUBST is called to replace the operand thereof, so we
734 perform this test on oldval instead, checking whether an
735 invalid replacement took place before we got here. */
736 gcc_assert (!(GET_CODE (oldval) == SUBREG
737 && CONST_INT_P (SUBREG_REG (oldval))));
738 gcc_assert (!(GET_CODE (oldval) == ZERO_EXTEND
739 && CONST_INT_P (XEXP (oldval, 0))));
742 if (undobuf.frees)
743 buf = undobuf.frees, undobuf.frees = buf->next;
744 else
745 buf = XNEW (struct undo);
747 buf->kind = UNDO_RTX;
748 buf->where.r = into;
749 buf->old_contents.r = oldval;
750 *into = newval;
752 buf->next = undobuf.undos, undobuf.undos = buf;
755 #define SUBST(INTO, NEWVAL) do_SUBST (&(INTO), (NEWVAL))
757 /* Similar to SUBST, but NEWVAL is an int expression. Note that substitution
758 for the value of a HOST_WIDE_INT value (including CONST_INT) is
759 not safe. */
761 static void
762 do_SUBST_INT (int *into, int newval)
764 struct undo *buf;
765 int oldval = *into;
767 if (oldval == newval)
768 return;
770 if (undobuf.frees)
771 buf = undobuf.frees, undobuf.frees = buf->next;
772 else
773 buf = XNEW (struct undo);
775 buf->kind = UNDO_INT;
776 buf->where.i = into;
777 buf->old_contents.i = oldval;
778 *into = newval;
780 buf->next = undobuf.undos, undobuf.undos = buf;
783 #define SUBST_INT(INTO, NEWVAL) do_SUBST_INT (&(INTO), (NEWVAL))
785 /* Similar to SUBST, but just substitute the mode. This is used when
786 changing the mode of a pseudo-register, so that any other
787 references to the entry in the regno_reg_rtx array will change as
788 well. */
790 static void
791 do_SUBST_MODE (rtx *into, machine_mode newval)
793 struct undo *buf;
794 machine_mode oldval = GET_MODE (*into);
796 if (oldval == newval)
797 return;
799 if (undobuf.frees)
800 buf = undobuf.frees, undobuf.frees = buf->next;
801 else
802 buf = XNEW (struct undo);
804 buf->kind = UNDO_MODE;
805 buf->where.r = into;
806 buf->old_contents.m = oldval;
807 adjust_reg_mode (*into, newval);
809 buf->next = undobuf.undos, undobuf.undos = buf;
812 #define SUBST_MODE(INTO, NEWVAL) do_SUBST_MODE (&(INTO), (NEWVAL))
814 /* Similar to SUBST, but NEWVAL is a LOG_LINKS expression. */
816 static void
817 do_SUBST_LINK (struct insn_link **into, struct insn_link *newval)
819 struct undo *buf;
820 struct insn_link * oldval = *into;
822 if (oldval == newval)
823 return;
825 if (undobuf.frees)
826 buf = undobuf.frees, undobuf.frees = buf->next;
827 else
828 buf = XNEW (struct undo);
830 buf->kind = UNDO_LINKS;
831 buf->where.l = into;
832 buf->old_contents.l = oldval;
833 *into = newval;
835 buf->next = undobuf.undos, undobuf.undos = buf;
838 #define SUBST_LINK(oldval, newval) do_SUBST_LINK (&oldval, newval)
840 /* Subroutine of try_combine. Determine whether the replacement patterns
841 NEWPAT, NEWI2PAT and NEWOTHERPAT are cheaper according to insn_cost
842 than the original sequence I0, I1, I2, I3 and undobuf.other_insn. Note
843 that I0, I1 and/or NEWI2PAT may be NULL_RTX. Similarly, NEWOTHERPAT and
844 undobuf.other_insn may also both be NULL_RTX. Return false if the cost
845 of all the instructions can be estimated and the replacements are more
846 expensive than the original sequence. */
848 static bool
849 combine_validate_cost (rtx_insn *i0, rtx_insn *i1, rtx_insn *i2, rtx_insn *i3,
850 rtx newpat, rtx newi2pat, rtx newotherpat)
852 int i0_cost, i1_cost, i2_cost, i3_cost;
853 int new_i2_cost, new_i3_cost;
854 int old_cost, new_cost;
856 /* Lookup the original insn_costs. */
857 i2_cost = INSN_COST (i2);
858 i3_cost = INSN_COST (i3);
860 if (i1)
862 i1_cost = INSN_COST (i1);
863 if (i0)
865 i0_cost = INSN_COST (i0);
866 old_cost = (i0_cost > 0 && i1_cost > 0 && i2_cost > 0 && i3_cost > 0
867 ? i0_cost + i1_cost + i2_cost + i3_cost : 0);
869 else
871 old_cost = (i1_cost > 0 && i2_cost > 0 && i3_cost > 0
872 ? i1_cost + i2_cost + i3_cost : 0);
873 i0_cost = 0;
876 else
878 old_cost = (i2_cost > 0 && i3_cost > 0) ? i2_cost + i3_cost : 0;
879 i1_cost = i0_cost = 0;
882 /* If we have split a PARALLEL I2 to I1,I2, we have counted its cost twice;
883 correct that. */
884 if (old_cost && i1 && INSN_UID (i1) == INSN_UID (i2))
885 old_cost -= i1_cost;
888 /* Calculate the replacement insn_costs. */
889 rtx tmp = PATTERN (i3);
890 PATTERN (i3) = newpat;
891 int tmpi = INSN_CODE (i3);
892 INSN_CODE (i3) = -1;
893 new_i3_cost = insn_cost (i3, optimize_this_for_speed_p);
894 PATTERN (i3) = tmp;
895 INSN_CODE (i3) = tmpi;
896 if (newi2pat)
898 tmp = PATTERN (i2);
899 PATTERN (i2) = newi2pat;
900 tmpi = INSN_CODE (i2);
901 INSN_CODE (i2) = -1;
902 new_i2_cost = insn_cost (i2, optimize_this_for_speed_p);
903 PATTERN (i2) = tmp;
904 INSN_CODE (i2) = tmpi;
905 new_cost = (new_i2_cost > 0 && new_i3_cost > 0)
906 ? new_i2_cost + new_i3_cost : 0;
908 else
910 new_cost = new_i3_cost;
911 new_i2_cost = 0;
914 if (undobuf.other_insn)
916 int old_other_cost, new_other_cost;
918 old_other_cost = INSN_COST (undobuf.other_insn);
919 tmp = PATTERN (undobuf.other_insn);
920 PATTERN (undobuf.other_insn) = newotherpat;
921 tmpi = INSN_CODE (undobuf.other_insn);
922 INSN_CODE (undobuf.other_insn) = -1;
923 new_other_cost = insn_cost (undobuf.other_insn,
924 optimize_this_for_speed_p);
925 PATTERN (undobuf.other_insn) = tmp;
926 INSN_CODE (undobuf.other_insn) = tmpi;
927 if (old_other_cost > 0 && new_other_cost > 0)
929 old_cost += old_other_cost;
930 new_cost += new_other_cost;
932 else
933 old_cost = 0;
936 /* Disallow this combination if both new_cost and old_cost are greater than
937 zero, and new_cost is greater than old cost. */
938 int reject = old_cost > 0 && new_cost > old_cost;
940 if (dump_file)
942 fprintf (dump_file, "%s combination of insns ",
943 reject ? "rejecting" : "allowing");
944 if (i0)
945 fprintf (dump_file, "%d, ", INSN_UID (i0));
946 if (i1 && INSN_UID (i1) != INSN_UID (i2))
947 fprintf (dump_file, "%d, ", INSN_UID (i1));
948 fprintf (dump_file, "%d and %d\n", INSN_UID (i2), INSN_UID (i3));
950 fprintf (dump_file, "original costs ");
951 if (i0)
952 fprintf (dump_file, "%d + ", i0_cost);
953 if (i1 && INSN_UID (i1) != INSN_UID (i2))
954 fprintf (dump_file, "%d + ", i1_cost);
955 fprintf (dump_file, "%d + %d = %d\n", i2_cost, i3_cost, old_cost);
957 if (newi2pat)
958 fprintf (dump_file, "replacement costs %d + %d = %d\n",
959 new_i2_cost, new_i3_cost, new_cost);
960 else
961 fprintf (dump_file, "replacement cost %d\n", new_cost);
964 if (reject)
965 return false;
967 /* Update the uid_insn_cost array with the replacement costs. */
968 INSN_COST (i2) = new_i2_cost;
969 INSN_COST (i3) = new_i3_cost;
970 if (i1)
972 INSN_COST (i1) = 0;
973 if (i0)
974 INSN_COST (i0) = 0;
977 return true;
981 /* Delete any insns that copy a register to itself. */
983 static void
984 delete_noop_moves (void)
986 rtx_insn *insn, *next;
987 basic_block bb;
989 FOR_EACH_BB_FN (bb, cfun)
991 for (insn = BB_HEAD (bb); insn != NEXT_INSN (BB_END (bb)); insn = next)
993 next = NEXT_INSN (insn);
994 if (INSN_P (insn) && noop_move_p (insn))
996 if (dump_file)
997 fprintf (dump_file, "deleting noop move %d\n", INSN_UID (insn));
999 delete_insn_and_edges (insn);
1006 /* Return false if we do not want to (or cannot) combine DEF. */
1007 static bool
1008 can_combine_def_p (df_ref def)
1010 /* Do not consider if it is pre/post modification in MEM. */
1011 if (DF_REF_FLAGS (def) & DF_REF_PRE_POST_MODIFY)
1012 return false;
1014 unsigned int regno = DF_REF_REGNO (def);
1016 /* Do not combine frame pointer adjustments. */
1017 if ((regno == FRAME_POINTER_REGNUM
1018 && (!reload_completed || frame_pointer_needed))
1019 || (!HARD_FRAME_POINTER_IS_FRAME_POINTER
1020 && regno == HARD_FRAME_POINTER_REGNUM
1021 && (!reload_completed || frame_pointer_needed))
1022 || (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
1023 && regno == ARG_POINTER_REGNUM && fixed_regs[regno]))
1024 return false;
1026 return true;
1029 /* Return false if we do not want to (or cannot) combine USE. */
1030 static bool
1031 can_combine_use_p (df_ref use)
1033 /* Do not consider the usage of the stack pointer by function call. */
1034 if (DF_REF_FLAGS (use) & DF_REF_CALL_STACK_USAGE)
1035 return false;
1037 return true;
1040 /* Fill in log links field for all insns. */
1042 static void
1043 create_log_links (void)
1045 basic_block bb;
1046 rtx_insn **next_use;
1047 rtx_insn *insn;
1048 df_ref def, use;
1050 next_use = XCNEWVEC (rtx_insn *, max_reg_num ());
1052 /* Pass through each block from the end, recording the uses of each
1053 register and establishing log links when def is encountered.
1054 Note that we do not clear next_use array in order to save time,
1055 so we have to test whether the use is in the same basic block as def.
1057 There are a few cases below when we do not consider the definition or
1058 usage -- these are taken from original flow.c did. Don't ask me why it is
1059 done this way; I don't know and if it works, I don't want to know. */
1061 FOR_EACH_BB_FN (bb, cfun)
1063 FOR_BB_INSNS_REVERSE (bb, insn)
1065 if (!NONDEBUG_INSN_P (insn))
1066 continue;
1068 /* Log links are created only once. */
1069 gcc_assert (!LOG_LINKS (insn));
1071 FOR_EACH_INSN_DEF (def, insn)
1073 unsigned int regno = DF_REF_REGNO (def);
1074 rtx_insn *use_insn;
1076 if (!next_use[regno])
1077 continue;
1079 if (!can_combine_def_p (def))
1080 continue;
1082 use_insn = next_use[regno];
1083 next_use[regno] = NULL;
1085 if (BLOCK_FOR_INSN (use_insn) != bb)
1086 continue;
1088 /* flow.c claimed:
1090 We don't build a LOG_LINK for hard registers contained
1091 in ASM_OPERANDs. If these registers get replaced,
1092 we might wind up changing the semantics of the insn,
1093 even if reload can make what appear to be valid
1094 assignments later. */
1095 if (regno < FIRST_PSEUDO_REGISTER
1096 && asm_noperands (PATTERN (use_insn)) >= 0)
1097 continue;
1099 /* Don't add duplicate links between instructions. */
1100 struct insn_link *links;
1101 FOR_EACH_LOG_LINK (links, use_insn)
1102 if (insn == links->insn && regno == links->regno)
1103 break;
1105 if (!links)
1106 LOG_LINKS (use_insn)
1107 = alloc_insn_link (insn, regno, LOG_LINKS (use_insn));
1110 FOR_EACH_INSN_USE (use, insn)
1111 if (can_combine_use_p (use))
1112 next_use[DF_REF_REGNO (use)] = insn;
1116 free (next_use);
1119 /* Walk the LOG_LINKS of insn B to see if we find a reference to A. Return
1120 true if we found a LOG_LINK that proves that A feeds B. This only works
1121 if there are no instructions between A and B which could have a link
1122 depending on A, since in that case we would not record a link for B.
1123 We also check the implicit dependency created by a cc0 setter/user
1124 pair. */
1126 static bool
1127 insn_a_feeds_b (rtx_insn *a, rtx_insn *b)
1129 struct insn_link *links;
1130 FOR_EACH_LOG_LINK (links, b)
1131 if (links->insn == a)
1132 return true;
1133 if (HAVE_cc0 && sets_cc0_p (a))
1134 return true;
1135 return false;
1138 /* Main entry point for combiner. F is the first insn of the function.
1139 NREGS is the first unused pseudo-reg number.
1141 Return nonzero if the combiner has turned an indirect jump
1142 instruction into a direct jump. */
1143 static int
1144 combine_instructions (rtx_insn *f, unsigned int nregs)
1146 rtx_insn *insn, *next;
1147 rtx_insn *prev;
1148 struct insn_link *links, *nextlinks;
1149 rtx_insn *first;
1150 basic_block last_bb;
1152 int new_direct_jump_p = 0;
1154 for (first = f; first && !NONDEBUG_INSN_P (first); )
1155 first = NEXT_INSN (first);
1156 if (!first)
1157 return 0;
1159 combine_attempts = 0;
1160 combine_merges = 0;
1161 combine_extras = 0;
1162 combine_successes = 0;
1164 rtl_hooks = combine_rtl_hooks;
1166 reg_stat.safe_grow_cleared (nregs);
1168 init_recog_no_volatile ();
1170 /* Allocate array for insn info. */
1171 max_uid_known = get_max_uid ();
1172 uid_log_links = XCNEWVEC (struct insn_link *, max_uid_known + 1);
1173 uid_insn_cost = XCNEWVEC (int, max_uid_known + 1);
1174 gcc_obstack_init (&insn_link_obstack);
1176 nonzero_bits_mode = int_mode_for_size (HOST_BITS_PER_WIDE_INT, 0).require ();
1178 /* Don't use reg_stat[].nonzero_bits when computing it. This can cause
1179 problems when, for example, we have j <<= 1 in a loop. */
1181 nonzero_sign_valid = 0;
1182 label_tick = label_tick_ebb_start = 1;
1184 /* Scan all SETs and see if we can deduce anything about what
1185 bits are known to be zero for some registers and how many copies
1186 of the sign bit are known to exist for those registers.
1188 Also set any known values so that we can use it while searching
1189 for what bits are known to be set. */
1191 setup_incoming_promotions (first);
1192 /* Allow the entry block and the first block to fall into the same EBB.
1193 Conceptually the incoming promotions are assigned to the entry block. */
1194 last_bb = ENTRY_BLOCK_PTR_FOR_FN (cfun);
1196 create_log_links ();
1197 FOR_EACH_BB_FN (this_basic_block, cfun)
1199 optimize_this_for_speed_p = optimize_bb_for_speed_p (this_basic_block);
1200 last_call_luid = 0;
1201 mem_last_set = -1;
1203 label_tick++;
1204 if (!single_pred_p (this_basic_block)
1205 || single_pred (this_basic_block) != last_bb)
1206 label_tick_ebb_start = label_tick;
1207 last_bb = this_basic_block;
1209 FOR_BB_INSNS (this_basic_block, insn)
1210 if (INSN_P (insn) && BLOCK_FOR_INSN (insn))
1212 rtx links;
1214 subst_low_luid = DF_INSN_LUID (insn);
1215 subst_insn = insn;
1217 note_stores (PATTERN (insn), set_nonzero_bits_and_sign_copies,
1218 insn);
1219 record_dead_and_set_regs (insn);
1221 if (AUTO_INC_DEC)
1222 for (links = REG_NOTES (insn); links; links = XEXP (links, 1))
1223 if (REG_NOTE_KIND (links) == REG_INC)
1224 set_nonzero_bits_and_sign_copies (XEXP (links, 0), NULL_RTX,
1225 insn);
1227 /* Record the current insn_cost of this instruction. */
1228 if (NONJUMP_INSN_P (insn))
1229 INSN_COST (insn) = insn_cost (insn, optimize_this_for_speed_p);
1230 if (dump_file)
1232 fprintf (dump_file, "insn_cost %d for ", INSN_COST (insn));
1233 dump_insn_slim (dump_file, insn);
1238 nonzero_sign_valid = 1;
1240 /* Now scan all the insns in forward order. */
1241 label_tick = label_tick_ebb_start = 1;
1242 init_reg_last ();
1243 setup_incoming_promotions (first);
1244 last_bb = ENTRY_BLOCK_PTR_FOR_FN (cfun);
1245 int max_combine = PARAM_VALUE (PARAM_MAX_COMBINE_INSNS);
1247 FOR_EACH_BB_FN (this_basic_block, cfun)
1249 rtx_insn *last_combined_insn = NULL;
1251 /* Ignore instruction combination in basic blocks that are going to
1252 be removed as unreachable anyway. See PR82386. */
1253 if (EDGE_COUNT (this_basic_block->preds) == 0)
1254 continue;
1256 optimize_this_for_speed_p = optimize_bb_for_speed_p (this_basic_block);
1257 last_call_luid = 0;
1258 mem_last_set = -1;
1260 label_tick++;
1261 if (!single_pred_p (this_basic_block)
1262 || single_pred (this_basic_block) != last_bb)
1263 label_tick_ebb_start = label_tick;
1264 last_bb = this_basic_block;
1266 rtl_profile_for_bb (this_basic_block);
1267 for (insn = BB_HEAD (this_basic_block);
1268 insn != NEXT_INSN (BB_END (this_basic_block));
1269 insn = next ? next : NEXT_INSN (insn))
1271 next = 0;
1272 if (!NONDEBUG_INSN_P (insn))
1273 continue;
1275 while (last_combined_insn
1276 && (!NONDEBUG_INSN_P (last_combined_insn)
1277 || last_combined_insn->deleted ()))
1278 last_combined_insn = PREV_INSN (last_combined_insn);
1279 if (last_combined_insn == NULL_RTX
1280 || BLOCK_FOR_INSN (last_combined_insn) != this_basic_block
1281 || DF_INSN_LUID (last_combined_insn) <= DF_INSN_LUID (insn))
1282 last_combined_insn = insn;
1284 /* See if we know about function return values before this
1285 insn based upon SUBREG flags. */
1286 check_promoted_subreg (insn, PATTERN (insn));
1288 /* See if we can find hardregs and subreg of pseudos in
1289 narrower modes. This could help turning TRUNCATEs
1290 into SUBREGs. */
1291 note_uses (&PATTERN (insn), record_truncated_values, NULL);
1293 /* Try this insn with each insn it links back to. */
1295 FOR_EACH_LOG_LINK (links, insn)
1296 if ((next = try_combine (insn, links->insn, NULL,
1297 NULL, &new_direct_jump_p,
1298 last_combined_insn)) != 0)
1300 statistics_counter_event (cfun, "two-insn combine", 1);
1301 goto retry;
1304 /* Try each sequence of three linked insns ending with this one. */
1306 if (max_combine >= 3)
1307 FOR_EACH_LOG_LINK (links, insn)
1309 rtx_insn *link = links->insn;
1311 /* If the linked insn has been replaced by a note, then there
1312 is no point in pursuing this chain any further. */
1313 if (NOTE_P (link))
1314 continue;
1316 FOR_EACH_LOG_LINK (nextlinks, link)
1317 if ((next = try_combine (insn, link, nextlinks->insn,
1318 NULL, &new_direct_jump_p,
1319 last_combined_insn)) != 0)
1321 statistics_counter_event (cfun, "three-insn combine", 1);
1322 goto retry;
1326 /* Try to combine a jump insn that uses CC0
1327 with a preceding insn that sets CC0, and maybe with its
1328 logical predecessor as well.
1329 This is how we make decrement-and-branch insns.
1330 We need this special code because data flow connections
1331 via CC0 do not get entered in LOG_LINKS. */
1333 if (HAVE_cc0
1334 && JUMP_P (insn)
1335 && (prev = prev_nonnote_insn (insn)) != 0
1336 && NONJUMP_INSN_P (prev)
1337 && sets_cc0_p (PATTERN (prev)))
1339 if ((next = try_combine (insn, prev, NULL, NULL,
1340 &new_direct_jump_p,
1341 last_combined_insn)) != 0)
1342 goto retry;
1344 FOR_EACH_LOG_LINK (nextlinks, prev)
1345 if ((next = try_combine (insn, prev, nextlinks->insn,
1346 NULL, &new_direct_jump_p,
1347 last_combined_insn)) != 0)
1348 goto retry;
1351 /* Do the same for an insn that explicitly references CC0. */
1352 if (HAVE_cc0 && NONJUMP_INSN_P (insn)
1353 && (prev = prev_nonnote_insn (insn)) != 0
1354 && NONJUMP_INSN_P (prev)
1355 && sets_cc0_p (PATTERN (prev))
1356 && GET_CODE (PATTERN (insn)) == SET
1357 && reg_mentioned_p (cc0_rtx, SET_SRC (PATTERN (insn))))
1359 if ((next = try_combine (insn, prev, NULL, NULL,
1360 &new_direct_jump_p,
1361 last_combined_insn)) != 0)
1362 goto retry;
1364 FOR_EACH_LOG_LINK (nextlinks, prev)
1365 if ((next = try_combine (insn, prev, nextlinks->insn,
1366 NULL, &new_direct_jump_p,
1367 last_combined_insn)) != 0)
1368 goto retry;
1371 /* Finally, see if any of the insns that this insn links to
1372 explicitly references CC0. If so, try this insn, that insn,
1373 and its predecessor if it sets CC0. */
1374 if (HAVE_cc0)
1376 FOR_EACH_LOG_LINK (links, insn)
1377 if (NONJUMP_INSN_P (links->insn)
1378 && GET_CODE (PATTERN (links->insn)) == SET
1379 && reg_mentioned_p (cc0_rtx, SET_SRC (PATTERN (links->insn)))
1380 && (prev = prev_nonnote_insn (links->insn)) != 0
1381 && NONJUMP_INSN_P (prev)
1382 && sets_cc0_p (PATTERN (prev))
1383 && (next = try_combine (insn, links->insn,
1384 prev, NULL, &new_direct_jump_p,
1385 last_combined_insn)) != 0)
1386 goto retry;
1389 /* Try combining an insn with two different insns whose results it
1390 uses. */
1391 if (max_combine >= 3)
1392 FOR_EACH_LOG_LINK (links, insn)
1393 for (nextlinks = links->next; nextlinks;
1394 nextlinks = nextlinks->next)
1395 if ((next = try_combine (insn, links->insn,
1396 nextlinks->insn, NULL,
1397 &new_direct_jump_p,
1398 last_combined_insn)) != 0)
1401 statistics_counter_event (cfun, "three-insn combine", 1);
1402 goto retry;
1405 /* Try four-instruction combinations. */
1406 if (max_combine >= 4)
1407 FOR_EACH_LOG_LINK (links, insn)
1409 struct insn_link *next1;
1410 rtx_insn *link = links->insn;
1412 /* If the linked insn has been replaced by a note, then there
1413 is no point in pursuing this chain any further. */
1414 if (NOTE_P (link))
1415 continue;
1417 FOR_EACH_LOG_LINK (next1, link)
1419 rtx_insn *link1 = next1->insn;
1420 if (NOTE_P (link1))
1421 continue;
1422 /* I0 -> I1 -> I2 -> I3. */
1423 FOR_EACH_LOG_LINK (nextlinks, link1)
1424 if ((next = try_combine (insn, link, link1,
1425 nextlinks->insn,
1426 &new_direct_jump_p,
1427 last_combined_insn)) != 0)
1429 statistics_counter_event (cfun, "four-insn combine", 1);
1430 goto retry;
1432 /* I0, I1 -> I2, I2 -> I3. */
1433 for (nextlinks = next1->next; nextlinks;
1434 nextlinks = nextlinks->next)
1435 if ((next = try_combine (insn, link, link1,
1436 nextlinks->insn,
1437 &new_direct_jump_p,
1438 last_combined_insn)) != 0)
1440 statistics_counter_event (cfun, "four-insn combine", 1);
1441 goto retry;
1445 for (next1 = links->next; next1; next1 = next1->next)
1447 rtx_insn *link1 = next1->insn;
1448 if (NOTE_P (link1))
1449 continue;
1450 /* I0 -> I2; I1, I2 -> I3. */
1451 FOR_EACH_LOG_LINK (nextlinks, link)
1452 if ((next = try_combine (insn, link, link1,
1453 nextlinks->insn,
1454 &new_direct_jump_p,
1455 last_combined_insn)) != 0)
1457 statistics_counter_event (cfun, "four-insn combine", 1);
1458 goto retry;
1460 /* I0 -> I1; I1, I2 -> I3. */
1461 FOR_EACH_LOG_LINK (nextlinks, link1)
1462 if ((next = try_combine (insn, link, link1,
1463 nextlinks->insn,
1464 &new_direct_jump_p,
1465 last_combined_insn)) != 0)
1467 statistics_counter_event (cfun, "four-insn combine", 1);
1468 goto retry;
1473 /* Try this insn with each REG_EQUAL note it links back to. */
1474 FOR_EACH_LOG_LINK (links, insn)
1476 rtx set, note;
1477 rtx_insn *temp = links->insn;
1478 if ((set = single_set (temp)) != 0
1479 && (note = find_reg_equal_equiv_note (temp)) != 0
1480 && (note = XEXP (note, 0), GET_CODE (note)) != EXPR_LIST
1481 /* Avoid using a register that may already been marked
1482 dead by an earlier instruction. */
1483 && ! unmentioned_reg_p (note, SET_SRC (set))
1484 && (GET_MODE (note) == VOIDmode
1485 ? SCALAR_INT_MODE_P (GET_MODE (SET_DEST (set)))
1486 : (GET_MODE (SET_DEST (set)) == GET_MODE (note)
1487 && (GET_CODE (SET_DEST (set)) != ZERO_EXTRACT
1488 || (GET_MODE (XEXP (SET_DEST (set), 0))
1489 == GET_MODE (note))))))
1491 /* Temporarily replace the set's source with the
1492 contents of the REG_EQUAL note. The insn will
1493 be deleted or recognized by try_combine. */
1494 rtx orig_src = SET_SRC (set);
1495 rtx orig_dest = SET_DEST (set);
1496 if (GET_CODE (SET_DEST (set)) == ZERO_EXTRACT)
1497 SET_DEST (set) = XEXP (SET_DEST (set), 0);
1498 SET_SRC (set) = note;
1499 i2mod = temp;
1500 i2mod_old_rhs = copy_rtx (orig_src);
1501 i2mod_new_rhs = copy_rtx (note);
1502 next = try_combine (insn, i2mod, NULL, NULL,
1503 &new_direct_jump_p,
1504 last_combined_insn);
1505 i2mod = NULL;
1506 if (next)
1508 statistics_counter_event (cfun, "insn-with-note combine", 1);
1509 goto retry;
1511 SET_SRC (set) = orig_src;
1512 SET_DEST (set) = orig_dest;
1516 if (!NOTE_P (insn))
1517 record_dead_and_set_regs (insn);
1519 retry:
1524 default_rtl_profile ();
1525 clear_bb_flags ();
1526 new_direct_jump_p |= purge_all_dead_edges ();
1527 delete_noop_moves ();
1529 /* Clean up. */
1530 obstack_free (&insn_link_obstack, NULL);
1531 free (uid_log_links);
1532 free (uid_insn_cost);
1533 reg_stat.release ();
1536 struct undo *undo, *next;
1537 for (undo = undobuf.frees; undo; undo = next)
1539 next = undo->next;
1540 free (undo);
1542 undobuf.frees = 0;
1545 total_attempts += combine_attempts;
1546 total_merges += combine_merges;
1547 total_extras += combine_extras;
1548 total_successes += combine_successes;
1550 nonzero_sign_valid = 0;
1551 rtl_hooks = general_rtl_hooks;
1553 /* Make recognizer allow volatile MEMs again. */
1554 init_recog ();
1556 return new_direct_jump_p;
1559 /* Wipe the last_xxx fields of reg_stat in preparation for another pass. */
1561 static void
1562 init_reg_last (void)
1564 unsigned int i;
1565 reg_stat_type *p;
1567 FOR_EACH_VEC_ELT (reg_stat, i, p)
1568 memset (p, 0, offsetof (reg_stat_type, sign_bit_copies));
1571 /* Set up any promoted values for incoming argument registers. */
1573 static void
1574 setup_incoming_promotions (rtx_insn *first)
1576 tree arg;
1577 bool strictly_local = false;
1579 for (arg = DECL_ARGUMENTS (current_function_decl); arg;
1580 arg = DECL_CHAIN (arg))
1582 rtx x, reg = DECL_INCOMING_RTL (arg);
1583 int uns1, uns3;
1584 machine_mode mode1, mode2, mode3, mode4;
1586 /* Only continue if the incoming argument is in a register. */
1587 if (!REG_P (reg))
1588 continue;
1590 /* Determine, if possible, whether all call sites of the current
1591 function lie within the current compilation unit. (This does
1592 take into account the exporting of a function via taking its
1593 address, and so forth.) */
1594 strictly_local = cgraph_node::local_info (current_function_decl)->local;
1596 /* The mode and signedness of the argument before any promotions happen
1597 (equal to the mode of the pseudo holding it at that stage). */
1598 mode1 = TYPE_MODE (TREE_TYPE (arg));
1599 uns1 = TYPE_UNSIGNED (TREE_TYPE (arg));
1601 /* The mode and signedness of the argument after any source language and
1602 TARGET_PROMOTE_PROTOTYPES-driven promotions. */
1603 mode2 = TYPE_MODE (DECL_ARG_TYPE (arg));
1604 uns3 = TYPE_UNSIGNED (DECL_ARG_TYPE (arg));
1606 /* The mode and signedness of the argument as it is actually passed,
1607 see assign_parm_setup_reg in function.c. */
1608 mode3 = promote_function_mode (TREE_TYPE (arg), mode1, &uns3,
1609 TREE_TYPE (cfun->decl), 0);
1611 /* The mode of the register in which the argument is being passed. */
1612 mode4 = GET_MODE (reg);
1614 /* Eliminate sign extensions in the callee when:
1615 (a) A mode promotion has occurred; */
1616 if (mode1 == mode3)
1617 continue;
1618 /* (b) The mode of the register is the same as the mode of
1619 the argument as it is passed; */
1620 if (mode3 != mode4)
1621 continue;
1622 /* (c) There's no language level extension; */
1623 if (mode1 == mode2)
1625 /* (c.1) All callers are from the current compilation unit. If that's
1626 the case we don't have to rely on an ABI, we only have to know
1627 what we're generating right now, and we know that we will do the
1628 mode1 to mode2 promotion with the given sign. */
1629 else if (!strictly_local)
1630 continue;
1631 /* (c.2) The combination of the two promotions is useful. This is
1632 true when the signs match, or if the first promotion is unsigned.
1633 In the later case, (sign_extend (zero_extend x)) is the same as
1634 (zero_extend (zero_extend x)), so make sure to force UNS3 true. */
1635 else if (uns1)
1636 uns3 = true;
1637 else if (uns3)
1638 continue;
1640 /* Record that the value was promoted from mode1 to mode3,
1641 so that any sign extension at the head of the current
1642 function may be eliminated. */
1643 x = gen_rtx_CLOBBER (mode1, const0_rtx);
1644 x = gen_rtx_fmt_e ((uns3 ? ZERO_EXTEND : SIGN_EXTEND), mode3, x);
1645 record_value_for_reg (reg, first, x);
1649 /* If MODE has a precision lower than PREC and SRC is a non-negative constant
1650 that would appear negative in MODE, sign-extend SRC for use in nonzero_bits
1651 because some machines (maybe most) will actually do the sign-extension and
1652 this is the conservative approach.
1654 ??? For 2.5, try to tighten up the MD files in this regard instead of this
1655 kludge. */
1657 static rtx
1658 sign_extend_short_imm (rtx src, machine_mode mode, unsigned int prec)
1660 scalar_int_mode int_mode;
1661 if (CONST_INT_P (src)
1662 && is_a <scalar_int_mode> (mode, &int_mode)
1663 && GET_MODE_PRECISION (int_mode) < prec
1664 && INTVAL (src) > 0
1665 && val_signbit_known_set_p (int_mode, INTVAL (src)))
1666 src = GEN_INT (INTVAL (src) | ~GET_MODE_MASK (int_mode));
1668 return src;
1671 /* Update RSP for pseudo-register X from INSN's REG_EQUAL note (if one exists)
1672 and SET. */
1674 static void
1675 update_rsp_from_reg_equal (reg_stat_type *rsp, rtx_insn *insn, const_rtx set,
1676 rtx x)
1678 rtx reg_equal_note = insn ? find_reg_equal_equiv_note (insn) : NULL_RTX;
1679 unsigned HOST_WIDE_INT bits = 0;
1680 rtx reg_equal = NULL, src = SET_SRC (set);
1681 unsigned int num = 0;
1683 if (reg_equal_note)
1684 reg_equal = XEXP (reg_equal_note, 0);
1686 if (SHORT_IMMEDIATES_SIGN_EXTEND)
1688 src = sign_extend_short_imm (src, GET_MODE (x), BITS_PER_WORD);
1689 if (reg_equal)
1690 reg_equal = sign_extend_short_imm (reg_equal, GET_MODE (x), BITS_PER_WORD);
1693 /* Don't call nonzero_bits if it cannot change anything. */
1694 if (rsp->nonzero_bits != HOST_WIDE_INT_M1U)
1696 bits = nonzero_bits (src, nonzero_bits_mode);
1697 if (reg_equal && bits)
1698 bits &= nonzero_bits (reg_equal, nonzero_bits_mode);
1699 rsp->nonzero_bits |= bits;
1702 /* Don't call num_sign_bit_copies if it cannot change anything. */
1703 if (rsp->sign_bit_copies != 1)
1705 num = num_sign_bit_copies (SET_SRC (set), GET_MODE (x));
1706 if (reg_equal && num != GET_MODE_PRECISION (GET_MODE (x)))
1708 unsigned int numeq = num_sign_bit_copies (reg_equal, GET_MODE (x));
1709 if (num == 0 || numeq > num)
1710 num = numeq;
1712 if (rsp->sign_bit_copies == 0 || num < rsp->sign_bit_copies)
1713 rsp->sign_bit_copies = num;
1717 /* Called via note_stores. If X is a pseudo that is narrower than
1718 HOST_BITS_PER_WIDE_INT and is being set, record what bits are known zero.
1720 If we are setting only a portion of X and we can't figure out what
1721 portion, assume all bits will be used since we don't know what will
1722 be happening.
1724 Similarly, set how many bits of X are known to be copies of the sign bit
1725 at all locations in the function. This is the smallest number implied
1726 by any set of X. */
1728 static void
1729 set_nonzero_bits_and_sign_copies (rtx x, const_rtx set, void *data)
1731 rtx_insn *insn = (rtx_insn *) data;
1732 scalar_int_mode mode;
1734 if (REG_P (x)
1735 && REGNO (x) >= FIRST_PSEUDO_REGISTER
1736 /* If this register is undefined at the start of the file, we can't
1737 say what its contents were. */
1738 && ! REGNO_REG_SET_P
1739 (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb), REGNO (x))
1740 && is_a <scalar_int_mode> (GET_MODE (x), &mode)
1741 && HWI_COMPUTABLE_MODE_P (mode))
1743 reg_stat_type *rsp = &reg_stat[REGNO (x)];
1745 if (set == 0 || GET_CODE (set) == CLOBBER)
1747 rsp->nonzero_bits = GET_MODE_MASK (mode);
1748 rsp->sign_bit_copies = 1;
1749 return;
1752 /* If this register is being initialized using itself, and the
1753 register is uninitialized in this basic block, and there are
1754 no LOG_LINKS which set the register, then part of the
1755 register is uninitialized. In that case we can't assume
1756 anything about the number of nonzero bits.
1758 ??? We could do better if we checked this in
1759 reg_{nonzero_bits,num_sign_bit_copies}_for_combine. Then we
1760 could avoid making assumptions about the insn which initially
1761 sets the register, while still using the information in other
1762 insns. We would have to be careful to check every insn
1763 involved in the combination. */
1765 if (insn
1766 && reg_referenced_p (x, PATTERN (insn))
1767 && !REGNO_REG_SET_P (DF_LR_IN (BLOCK_FOR_INSN (insn)),
1768 REGNO (x)))
1770 struct insn_link *link;
1772 FOR_EACH_LOG_LINK (link, insn)
1773 if (dead_or_set_p (link->insn, x))
1774 break;
1775 if (!link)
1777 rsp->nonzero_bits = GET_MODE_MASK (mode);
1778 rsp->sign_bit_copies = 1;
1779 return;
1783 /* If this is a complex assignment, see if we can convert it into a
1784 simple assignment. */
1785 set = expand_field_assignment (set);
1787 /* If this is a simple assignment, or we have a paradoxical SUBREG,
1788 set what we know about X. */
1790 if (SET_DEST (set) == x
1791 || (paradoxical_subreg_p (SET_DEST (set))
1792 && SUBREG_REG (SET_DEST (set)) == x))
1793 update_rsp_from_reg_equal (rsp, insn, set, x);
1794 else
1796 rsp->nonzero_bits = GET_MODE_MASK (mode);
1797 rsp->sign_bit_copies = 1;
1802 /* See if INSN can be combined into I3. PRED, PRED2, SUCC and SUCC2 are
1803 optionally insns that were previously combined into I3 or that will be
1804 combined into the merger of INSN and I3. The order is PRED, PRED2,
1805 INSN, SUCC, SUCC2, I3.
1807 Return 0 if the combination is not allowed for any reason.
1809 If the combination is allowed, *PDEST will be set to the single
1810 destination of INSN and *PSRC to the single source, and this function
1811 will return 1. */
1813 static int
1814 can_combine_p (rtx_insn *insn, rtx_insn *i3, rtx_insn *pred ATTRIBUTE_UNUSED,
1815 rtx_insn *pred2 ATTRIBUTE_UNUSED, rtx_insn *succ, rtx_insn *succ2,
1816 rtx *pdest, rtx *psrc)
1818 int i;
1819 const_rtx set = 0;
1820 rtx src, dest;
1821 rtx_insn *p;
1822 rtx link;
1823 bool all_adjacent = true;
1824 int (*is_volatile_p) (const_rtx);
1826 if (succ)
1828 if (succ2)
1830 if (next_active_insn (succ2) != i3)
1831 all_adjacent = false;
1832 if (next_active_insn (succ) != succ2)
1833 all_adjacent = false;
1835 else if (next_active_insn (succ) != i3)
1836 all_adjacent = false;
1837 if (next_active_insn (insn) != succ)
1838 all_adjacent = false;
1840 else if (next_active_insn (insn) != i3)
1841 all_adjacent = false;
1843 /* Can combine only if previous insn is a SET of a REG, a SUBREG or CC0.
1844 or a PARALLEL consisting of such a SET and CLOBBERs.
1846 If INSN has CLOBBER parallel parts, ignore them for our processing.
1847 By definition, these happen during the execution of the insn. When it
1848 is merged with another insn, all bets are off. If they are, in fact,
1849 needed and aren't also supplied in I3, they may be added by
1850 recog_for_combine. Otherwise, it won't match.
1852 We can also ignore a SET whose SET_DEST is mentioned in a REG_UNUSED
1853 note.
1855 Get the source and destination of INSN. If more than one, can't
1856 combine. */
1858 if (GET_CODE (PATTERN (insn)) == SET)
1859 set = PATTERN (insn);
1860 else if (GET_CODE (PATTERN (insn)) == PARALLEL
1861 && GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == SET)
1863 for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
1865 rtx elt = XVECEXP (PATTERN (insn), 0, i);
1867 switch (GET_CODE (elt))
1869 /* This is important to combine floating point insns
1870 for the SH4 port. */
1871 case USE:
1872 /* Combining an isolated USE doesn't make sense.
1873 We depend here on combinable_i3pat to reject them. */
1874 /* The code below this loop only verifies that the inputs of
1875 the SET in INSN do not change. We call reg_set_between_p
1876 to verify that the REG in the USE does not change between
1877 I3 and INSN.
1878 If the USE in INSN was for a pseudo register, the matching
1879 insn pattern will likely match any register; combining this
1880 with any other USE would only be safe if we knew that the
1881 used registers have identical values, or if there was
1882 something to tell them apart, e.g. different modes. For
1883 now, we forgo such complicated tests and simply disallow
1884 combining of USES of pseudo registers with any other USE. */
1885 if (REG_P (XEXP (elt, 0))
1886 && GET_CODE (PATTERN (i3)) == PARALLEL)
1888 rtx i3pat = PATTERN (i3);
1889 int i = XVECLEN (i3pat, 0) - 1;
1890 unsigned int regno = REGNO (XEXP (elt, 0));
1894 rtx i3elt = XVECEXP (i3pat, 0, i);
1896 if (GET_CODE (i3elt) == USE
1897 && REG_P (XEXP (i3elt, 0))
1898 && (REGNO (XEXP (i3elt, 0)) == regno
1899 ? reg_set_between_p (XEXP (elt, 0),
1900 PREV_INSN (insn), i3)
1901 : regno >= FIRST_PSEUDO_REGISTER))
1902 return 0;
1904 while (--i >= 0);
1906 break;
1908 /* We can ignore CLOBBERs. */
1909 case CLOBBER:
1910 break;
1912 case SET:
1913 /* Ignore SETs whose result isn't used but not those that
1914 have side-effects. */
1915 if (find_reg_note (insn, REG_UNUSED, SET_DEST (elt))
1916 && insn_nothrow_p (insn)
1917 && !side_effects_p (elt))
1918 break;
1920 /* If we have already found a SET, this is a second one and
1921 so we cannot combine with this insn. */
1922 if (set)
1923 return 0;
1925 set = elt;
1926 break;
1928 default:
1929 /* Anything else means we can't combine. */
1930 return 0;
1934 if (set == 0
1935 /* If SET_SRC is an ASM_OPERANDS we can't throw away these CLOBBERs,
1936 so don't do anything with it. */
1937 || GET_CODE (SET_SRC (set)) == ASM_OPERANDS)
1938 return 0;
1940 else
1941 return 0;
1943 if (set == 0)
1944 return 0;
1946 /* The simplification in expand_field_assignment may call back to
1947 get_last_value, so set safe guard here. */
1948 subst_low_luid = DF_INSN_LUID (insn);
1950 set = expand_field_assignment (set);
1951 src = SET_SRC (set), dest = SET_DEST (set);
1953 /* Do not eliminate user-specified register if it is in an
1954 asm input because we may break the register asm usage defined
1955 in GCC manual if allow to do so.
1956 Be aware that this may cover more cases than we expect but this
1957 should be harmless. */
1958 if (REG_P (dest) && REG_USERVAR_P (dest) && HARD_REGISTER_P (dest)
1959 && extract_asm_operands (PATTERN (i3)))
1960 return 0;
1962 /* Don't eliminate a store in the stack pointer. */
1963 if (dest == stack_pointer_rtx
1964 /* Don't combine with an insn that sets a register to itself if it has
1965 a REG_EQUAL note. This may be part of a LIBCALL sequence. */
1966 || (rtx_equal_p (src, dest) && find_reg_note (insn, REG_EQUAL, NULL_RTX))
1967 /* Can't merge an ASM_OPERANDS. */
1968 || GET_CODE (src) == ASM_OPERANDS
1969 /* Can't merge a function call. */
1970 || GET_CODE (src) == CALL
1971 /* Don't eliminate a function call argument. */
1972 || (CALL_P (i3)
1973 && (find_reg_fusage (i3, USE, dest)
1974 || (REG_P (dest)
1975 && REGNO (dest) < FIRST_PSEUDO_REGISTER
1976 && global_regs[REGNO (dest)])))
1977 /* Don't substitute into an incremented register. */
1978 || FIND_REG_INC_NOTE (i3, dest)
1979 || (succ && FIND_REG_INC_NOTE (succ, dest))
1980 || (succ2 && FIND_REG_INC_NOTE (succ2, dest))
1981 /* Don't substitute into a non-local goto, this confuses CFG. */
1982 || (JUMP_P (i3) && find_reg_note (i3, REG_NON_LOCAL_GOTO, NULL_RTX))
1983 /* Make sure that DEST is not used after INSN but before SUCC, or
1984 after SUCC and before SUCC2, or after SUCC2 but before I3. */
1985 || (!all_adjacent
1986 && ((succ2
1987 && (reg_used_between_p (dest, succ2, i3)
1988 || reg_used_between_p (dest, succ, succ2)))
1989 || (!succ2 && succ && reg_used_between_p (dest, succ, i3))
1990 || (succ
1991 /* SUCC and SUCC2 can be split halves from a PARALLEL; in
1992 that case SUCC is not in the insn stream, so use SUCC2
1993 instead for this test. */
1994 && reg_used_between_p (dest, insn,
1995 succ2
1996 && INSN_UID (succ) == INSN_UID (succ2)
1997 ? succ2 : succ))))
1998 /* Make sure that the value that is to be substituted for the register
1999 does not use any registers whose values alter in between. However,
2000 If the insns are adjacent, a use can't cross a set even though we
2001 think it might (this can happen for a sequence of insns each setting
2002 the same destination; last_set of that register might point to
2003 a NOTE). If INSN has a REG_EQUIV note, the register is always
2004 equivalent to the memory so the substitution is valid even if there
2005 are intervening stores. Also, don't move a volatile asm or
2006 UNSPEC_VOLATILE across any other insns. */
2007 || (! all_adjacent
2008 && (((!MEM_P (src)
2009 || ! find_reg_note (insn, REG_EQUIV, src))
2010 && use_crosses_set_p (src, DF_INSN_LUID (insn)))
2011 || (GET_CODE (src) == ASM_OPERANDS && MEM_VOLATILE_P (src))
2012 || GET_CODE (src) == UNSPEC_VOLATILE))
2013 /* Don't combine across a CALL_INSN, because that would possibly
2014 change whether the life span of some REGs crosses calls or not,
2015 and it is a pain to update that information.
2016 Exception: if source is a constant, moving it later can't hurt.
2017 Accept that as a special case. */
2018 || (DF_INSN_LUID (insn) < last_call_luid && ! CONSTANT_P (src)))
2019 return 0;
2021 /* DEST must either be a REG or CC0. */
2022 if (REG_P (dest))
2024 /* If register alignment is being enforced for multi-word items in all
2025 cases except for parameters, it is possible to have a register copy
2026 insn referencing a hard register that is not allowed to contain the
2027 mode being copied and which would not be valid as an operand of most
2028 insns. Eliminate this problem by not combining with such an insn.
2030 Also, on some machines we don't want to extend the life of a hard
2031 register. */
2033 if (REG_P (src)
2034 && ((REGNO (dest) < FIRST_PSEUDO_REGISTER
2035 && !targetm.hard_regno_mode_ok (REGNO (dest), GET_MODE (dest)))
2036 /* Don't extend the life of a hard register unless it is
2037 user variable (if we have few registers) or it can't
2038 fit into the desired register (meaning something special
2039 is going on).
2040 Also avoid substituting a return register into I3, because
2041 reload can't handle a conflict with constraints of other
2042 inputs. */
2043 || (REGNO (src) < FIRST_PSEUDO_REGISTER
2044 && !targetm.hard_regno_mode_ok (REGNO (src),
2045 GET_MODE (src)))))
2046 return 0;
2048 else if (GET_CODE (dest) != CC0)
2049 return 0;
2052 if (GET_CODE (PATTERN (i3)) == PARALLEL)
2053 for (i = XVECLEN (PATTERN (i3), 0) - 1; i >= 0; i--)
2054 if (GET_CODE (XVECEXP (PATTERN (i3), 0, i)) == CLOBBER)
2056 rtx reg = XEXP (XVECEXP (PATTERN (i3), 0, i), 0);
2058 /* If the clobber represents an earlyclobber operand, we must not
2059 substitute an expression containing the clobbered register.
2060 As we do not analyze the constraint strings here, we have to
2061 make the conservative assumption. However, if the register is
2062 a fixed hard reg, the clobber cannot represent any operand;
2063 we leave it up to the machine description to either accept or
2064 reject use-and-clobber patterns. */
2065 if (!REG_P (reg)
2066 || REGNO (reg) >= FIRST_PSEUDO_REGISTER
2067 || !fixed_regs[REGNO (reg)])
2068 if (reg_overlap_mentioned_p (reg, src))
2069 return 0;
2072 /* If INSN contains anything volatile, or is an `asm' (whether volatile
2073 or not), reject, unless nothing volatile comes between it and I3 */
2075 if (GET_CODE (src) == ASM_OPERANDS || volatile_refs_p (src))
2077 /* Make sure neither succ nor succ2 contains a volatile reference. */
2078 if (succ2 != 0 && volatile_refs_p (PATTERN (succ2)))
2079 return 0;
2080 if (succ != 0 && volatile_refs_p (PATTERN (succ)))
2081 return 0;
2082 /* We'll check insns between INSN and I3 below. */
2085 /* If INSN is an asm, and DEST is a hard register, reject, since it has
2086 to be an explicit register variable, and was chosen for a reason. */
2088 if (GET_CODE (src) == ASM_OPERANDS
2089 && REG_P (dest) && REGNO (dest) < FIRST_PSEUDO_REGISTER)
2090 return 0;
2092 /* If INSN contains volatile references (specifically volatile MEMs),
2093 we cannot combine across any other volatile references.
2094 Even if INSN doesn't contain volatile references, any intervening
2095 volatile insn might affect machine state. */
2097 is_volatile_p = volatile_refs_p (PATTERN (insn))
2098 ? volatile_refs_p
2099 : volatile_insn_p;
2101 for (p = NEXT_INSN (insn); p != i3; p = NEXT_INSN (p))
2102 if (INSN_P (p) && p != succ && p != succ2 && is_volatile_p (PATTERN (p)))
2103 return 0;
2105 /* If INSN contains an autoincrement or autodecrement, make sure that
2106 register is not used between there and I3, and not already used in
2107 I3 either. Neither must it be used in PRED or SUCC, if they exist.
2108 Also insist that I3 not be a jump; if it were one
2109 and the incremented register were spilled, we would lose. */
2111 if (AUTO_INC_DEC)
2112 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2113 if (REG_NOTE_KIND (link) == REG_INC
2114 && (JUMP_P (i3)
2115 || reg_used_between_p (XEXP (link, 0), insn, i3)
2116 || (pred != NULL_RTX
2117 && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (pred)))
2118 || (pred2 != NULL_RTX
2119 && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (pred2)))
2120 || (succ != NULL_RTX
2121 && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (succ)))
2122 || (succ2 != NULL_RTX
2123 && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (succ2)))
2124 || reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (i3))))
2125 return 0;
2127 /* Don't combine an insn that follows a CC0-setting insn.
2128 An insn that uses CC0 must not be separated from the one that sets it.
2129 We do, however, allow I2 to follow a CC0-setting insn if that insn
2130 is passed as I1; in that case it will be deleted also.
2131 We also allow combining in this case if all the insns are adjacent
2132 because that would leave the two CC0 insns adjacent as well.
2133 It would be more logical to test whether CC0 occurs inside I1 or I2,
2134 but that would be much slower, and this ought to be equivalent. */
2136 if (HAVE_cc0)
2138 p = prev_nonnote_insn (insn);
2139 if (p && p != pred && NONJUMP_INSN_P (p) && sets_cc0_p (PATTERN (p))
2140 && ! all_adjacent)
2141 return 0;
2144 /* If we get here, we have passed all the tests and the combination is
2145 to be allowed. */
2147 *pdest = dest;
2148 *psrc = src;
2150 return 1;
2153 /* LOC is the location within I3 that contains its pattern or the component
2154 of a PARALLEL of the pattern. We validate that it is valid for combining.
2156 One problem is if I3 modifies its output, as opposed to replacing it
2157 entirely, we can't allow the output to contain I2DEST, I1DEST or I0DEST as
2158 doing so would produce an insn that is not equivalent to the original insns.
2160 Consider:
2162 (set (reg:DI 101) (reg:DI 100))
2163 (set (subreg:SI (reg:DI 101) 0) <foo>)
2165 This is NOT equivalent to:
2167 (parallel [(set (subreg:SI (reg:DI 100) 0) <foo>)
2168 (set (reg:DI 101) (reg:DI 100))])
2170 Not only does this modify 100 (in which case it might still be valid
2171 if 100 were dead in I2), it sets 101 to the ORIGINAL value of 100.
2173 We can also run into a problem if I2 sets a register that I1
2174 uses and I1 gets directly substituted into I3 (not via I2). In that
2175 case, we would be getting the wrong value of I2DEST into I3, so we
2176 must reject the combination. This case occurs when I2 and I1 both
2177 feed into I3, rather than when I1 feeds into I2, which feeds into I3.
2178 If I1_NOT_IN_SRC is nonzero, it means that finding I1 in the source
2179 of a SET must prevent combination from occurring. The same situation
2180 can occur for I0, in which case I0_NOT_IN_SRC is set.
2182 Before doing the above check, we first try to expand a field assignment
2183 into a set of logical operations.
2185 If PI3_DEST_KILLED is nonzero, it is a pointer to a location in which
2186 we place a register that is both set and used within I3. If more than one
2187 such register is detected, we fail.
2189 Return 1 if the combination is valid, zero otherwise. */
2191 static int
2192 combinable_i3pat (rtx_insn *i3, rtx *loc, rtx i2dest, rtx i1dest, rtx i0dest,
2193 int i1_not_in_src, int i0_not_in_src, rtx *pi3dest_killed)
2195 rtx x = *loc;
2197 if (GET_CODE (x) == SET)
2199 rtx set = x ;
2200 rtx dest = SET_DEST (set);
2201 rtx src = SET_SRC (set);
2202 rtx inner_dest = dest;
2203 rtx subdest;
2205 while (GET_CODE (inner_dest) == STRICT_LOW_PART
2206 || GET_CODE (inner_dest) == SUBREG
2207 || GET_CODE (inner_dest) == ZERO_EXTRACT)
2208 inner_dest = XEXP (inner_dest, 0);
2210 /* Check for the case where I3 modifies its output, as discussed
2211 above. We don't want to prevent pseudos from being combined
2212 into the address of a MEM, so only prevent the combination if
2213 i1 or i2 set the same MEM. */
2214 if ((inner_dest != dest &&
2215 (!MEM_P (inner_dest)
2216 || rtx_equal_p (i2dest, inner_dest)
2217 || (i1dest && rtx_equal_p (i1dest, inner_dest))
2218 || (i0dest && rtx_equal_p (i0dest, inner_dest)))
2219 && (reg_overlap_mentioned_p (i2dest, inner_dest)
2220 || (i1dest && reg_overlap_mentioned_p (i1dest, inner_dest))
2221 || (i0dest && reg_overlap_mentioned_p (i0dest, inner_dest))))
2223 /* This is the same test done in can_combine_p except we can't test
2224 all_adjacent; we don't have to, since this instruction will stay
2225 in place, thus we are not considering increasing the lifetime of
2226 INNER_DEST.
2228 Also, if this insn sets a function argument, combining it with
2229 something that might need a spill could clobber a previous
2230 function argument; the all_adjacent test in can_combine_p also
2231 checks this; here, we do a more specific test for this case. */
2233 || (REG_P (inner_dest)
2234 && REGNO (inner_dest) < FIRST_PSEUDO_REGISTER
2235 && !targetm.hard_regno_mode_ok (REGNO (inner_dest),
2236 GET_MODE (inner_dest)))
2237 || (i1_not_in_src && reg_overlap_mentioned_p (i1dest, src))
2238 || (i0_not_in_src && reg_overlap_mentioned_p (i0dest, src)))
2239 return 0;
2241 /* If DEST is used in I3, it is being killed in this insn, so
2242 record that for later. We have to consider paradoxical
2243 subregs here, since they kill the whole register, but we
2244 ignore partial subregs, STRICT_LOW_PART, etc.
2245 Never add REG_DEAD notes for the FRAME_POINTER_REGNUM or the
2246 STACK_POINTER_REGNUM, since these are always considered to be
2247 live. Similarly for ARG_POINTER_REGNUM if it is fixed. */
2248 subdest = dest;
2249 if (GET_CODE (subdest) == SUBREG && !partial_subreg_p (subdest))
2250 subdest = SUBREG_REG (subdest);
2251 if (pi3dest_killed
2252 && REG_P (subdest)
2253 && reg_referenced_p (subdest, PATTERN (i3))
2254 && REGNO (subdest) != FRAME_POINTER_REGNUM
2255 && (HARD_FRAME_POINTER_IS_FRAME_POINTER
2256 || REGNO (subdest) != HARD_FRAME_POINTER_REGNUM)
2257 && (FRAME_POINTER_REGNUM == ARG_POINTER_REGNUM
2258 || (REGNO (subdest) != ARG_POINTER_REGNUM
2259 || ! fixed_regs [REGNO (subdest)]))
2260 && REGNO (subdest) != STACK_POINTER_REGNUM)
2262 if (*pi3dest_killed)
2263 return 0;
2265 *pi3dest_killed = subdest;
2269 else if (GET_CODE (x) == PARALLEL)
2271 int i;
2273 for (i = 0; i < XVECLEN (x, 0); i++)
2274 if (! combinable_i3pat (i3, &XVECEXP (x, 0, i), i2dest, i1dest, i0dest,
2275 i1_not_in_src, i0_not_in_src, pi3dest_killed))
2276 return 0;
2279 return 1;
2282 /* Return 1 if X is an arithmetic expression that contains a multiplication
2283 and division. We don't count multiplications by powers of two here. */
2285 static int
2286 contains_muldiv (rtx x)
2288 switch (GET_CODE (x))
2290 case MOD: case DIV: case UMOD: case UDIV:
2291 return 1;
2293 case MULT:
2294 return ! (CONST_INT_P (XEXP (x, 1))
2295 && pow2p_hwi (UINTVAL (XEXP (x, 1))));
2296 default:
2297 if (BINARY_P (x))
2298 return contains_muldiv (XEXP (x, 0))
2299 || contains_muldiv (XEXP (x, 1));
2301 if (UNARY_P (x))
2302 return contains_muldiv (XEXP (x, 0));
2304 return 0;
2308 /* Determine whether INSN can be used in a combination. Return nonzero if
2309 not. This is used in try_combine to detect early some cases where we
2310 can't perform combinations. */
2312 static int
2313 cant_combine_insn_p (rtx_insn *insn)
2315 rtx set;
2316 rtx src, dest;
2318 /* If this isn't really an insn, we can't do anything.
2319 This can occur when flow deletes an insn that it has merged into an
2320 auto-increment address. */
2321 if (!NONDEBUG_INSN_P (insn))
2322 return 1;
2324 /* Never combine loads and stores involving hard regs that are likely
2325 to be spilled. The register allocator can usually handle such
2326 reg-reg moves by tying. If we allow the combiner to make
2327 substitutions of likely-spilled regs, reload might die.
2328 As an exception, we allow combinations involving fixed regs; these are
2329 not available to the register allocator so there's no risk involved. */
2331 set = single_set (insn);
2332 if (! set)
2333 return 0;
2334 src = SET_SRC (set);
2335 dest = SET_DEST (set);
2336 if (GET_CODE (src) == SUBREG)
2337 src = SUBREG_REG (src);
2338 if (GET_CODE (dest) == SUBREG)
2339 dest = SUBREG_REG (dest);
2340 if (REG_P (src) && REG_P (dest)
2341 && ((HARD_REGISTER_P (src)
2342 && ! TEST_HARD_REG_BIT (fixed_reg_set, REGNO (src))
2343 && targetm.class_likely_spilled_p (REGNO_REG_CLASS (REGNO (src))))
2344 || (HARD_REGISTER_P (dest)
2345 && ! TEST_HARD_REG_BIT (fixed_reg_set, REGNO (dest))
2346 && targetm.class_likely_spilled_p (REGNO_REG_CLASS (REGNO (dest))))))
2347 return 1;
2349 return 0;
2352 struct likely_spilled_retval_info
2354 unsigned regno, nregs;
2355 unsigned mask;
2358 /* Called via note_stores by likely_spilled_retval_p. Remove from info->mask
2359 hard registers that are known to be written to / clobbered in full. */
2360 static void
2361 likely_spilled_retval_1 (rtx x, const_rtx set, void *data)
2363 struct likely_spilled_retval_info *const info =
2364 (struct likely_spilled_retval_info *) data;
2365 unsigned regno, nregs;
2366 unsigned new_mask;
2368 if (!REG_P (XEXP (set, 0)))
2369 return;
2370 regno = REGNO (x);
2371 if (regno >= info->regno + info->nregs)
2372 return;
2373 nregs = REG_NREGS (x);
2374 if (regno + nregs <= info->regno)
2375 return;
2376 new_mask = (2U << (nregs - 1)) - 1;
2377 if (regno < info->regno)
2378 new_mask >>= info->regno - regno;
2379 else
2380 new_mask <<= regno - info->regno;
2381 info->mask &= ~new_mask;
2384 /* Return nonzero iff part of the return value is live during INSN, and
2385 it is likely spilled. This can happen when more than one insn is needed
2386 to copy the return value, e.g. when we consider to combine into the
2387 second copy insn for a complex value. */
2389 static int
2390 likely_spilled_retval_p (rtx_insn *insn)
2392 rtx_insn *use = BB_END (this_basic_block);
2393 rtx reg;
2394 rtx_insn *p;
2395 unsigned regno, nregs;
2396 /* We assume here that no machine mode needs more than
2397 32 hard registers when the value overlaps with a register
2398 for which TARGET_FUNCTION_VALUE_REGNO_P is true. */
2399 unsigned mask;
2400 struct likely_spilled_retval_info info;
2402 if (!NONJUMP_INSN_P (use) || GET_CODE (PATTERN (use)) != USE || insn == use)
2403 return 0;
2404 reg = XEXP (PATTERN (use), 0);
2405 if (!REG_P (reg) || !targetm.calls.function_value_regno_p (REGNO (reg)))
2406 return 0;
2407 regno = REGNO (reg);
2408 nregs = REG_NREGS (reg);
2409 if (nregs == 1)
2410 return 0;
2411 mask = (2U << (nregs - 1)) - 1;
2413 /* Disregard parts of the return value that are set later. */
2414 info.regno = regno;
2415 info.nregs = nregs;
2416 info.mask = mask;
2417 for (p = PREV_INSN (use); info.mask && p != insn; p = PREV_INSN (p))
2418 if (INSN_P (p))
2419 note_stores (PATTERN (p), likely_spilled_retval_1, &info);
2420 mask = info.mask;
2422 /* Check if any of the (probably) live return value registers is
2423 likely spilled. */
2424 nregs --;
2427 if ((mask & 1 << nregs)
2428 && targetm.class_likely_spilled_p (REGNO_REG_CLASS (regno + nregs)))
2429 return 1;
2430 } while (nregs--);
2431 return 0;
2434 /* Adjust INSN after we made a change to its destination.
2436 Changing the destination can invalidate notes that say something about
2437 the results of the insn and a LOG_LINK pointing to the insn. */
2439 static void
2440 adjust_for_new_dest (rtx_insn *insn)
2442 /* For notes, be conservative and simply remove them. */
2443 remove_reg_equal_equiv_notes (insn);
2445 /* The new insn will have a destination that was previously the destination
2446 of an insn just above it. Call distribute_links to make a LOG_LINK from
2447 the next use of that destination. */
2449 rtx set = single_set (insn);
2450 gcc_assert (set);
2452 rtx reg = SET_DEST (set);
2454 while (GET_CODE (reg) == ZERO_EXTRACT
2455 || GET_CODE (reg) == STRICT_LOW_PART
2456 || GET_CODE (reg) == SUBREG)
2457 reg = XEXP (reg, 0);
2458 gcc_assert (REG_P (reg));
2460 distribute_links (alloc_insn_link (insn, REGNO (reg), NULL));
2462 df_insn_rescan (insn);
2465 /* Return TRUE if combine can reuse reg X in mode MODE.
2466 ADDED_SETS is nonzero if the original set is still required. */
2467 static bool
2468 can_change_dest_mode (rtx x, int added_sets, machine_mode mode)
2470 unsigned int regno;
2472 if (!REG_P (x))
2473 return false;
2475 /* Don't change between modes with different underlying register sizes,
2476 since this could lead to invalid subregs. */
2477 if (REGMODE_NATURAL_SIZE (mode)
2478 != REGMODE_NATURAL_SIZE (GET_MODE (x)))
2479 return false;
2481 regno = REGNO (x);
2482 /* Allow hard registers if the new mode is legal, and occupies no more
2483 registers than the old mode. */
2484 if (regno < FIRST_PSEUDO_REGISTER)
2485 return (targetm.hard_regno_mode_ok (regno, mode)
2486 && REG_NREGS (x) >= hard_regno_nregs (regno, mode));
2488 /* Or a pseudo that is only used once. */
2489 return (regno < reg_n_sets_max
2490 && REG_N_SETS (regno) == 1
2491 && !added_sets
2492 && !REG_USERVAR_P (x));
2496 /* Check whether X, the destination of a set, refers to part of
2497 the register specified by REG. */
2499 static bool
2500 reg_subword_p (rtx x, rtx reg)
2502 /* Check that reg is an integer mode register. */
2503 if (!REG_P (reg) || GET_MODE_CLASS (GET_MODE (reg)) != MODE_INT)
2504 return false;
2506 if (GET_CODE (x) == STRICT_LOW_PART
2507 || GET_CODE (x) == ZERO_EXTRACT)
2508 x = XEXP (x, 0);
2510 return GET_CODE (x) == SUBREG
2511 && SUBREG_REG (x) == reg
2512 && GET_MODE_CLASS (GET_MODE (x)) == MODE_INT;
2515 /* Delete the unconditional jump INSN and adjust the CFG correspondingly.
2516 Note that the INSN should be deleted *after* removing dead edges, so
2517 that the kept edge is the fallthrough edge for a (set (pc) (pc))
2518 but not for a (set (pc) (label_ref FOO)). */
2520 static void
2521 update_cfg_for_uncondjump (rtx_insn *insn)
2523 basic_block bb = BLOCK_FOR_INSN (insn);
2524 gcc_assert (BB_END (bb) == insn);
2526 purge_dead_edges (bb);
2528 delete_insn (insn);
2529 if (EDGE_COUNT (bb->succs) == 1)
2531 rtx_insn *insn;
2533 single_succ_edge (bb)->flags |= EDGE_FALLTHRU;
2535 /* Remove barriers from the footer if there are any. */
2536 for (insn = BB_FOOTER (bb); insn; insn = NEXT_INSN (insn))
2537 if (BARRIER_P (insn))
2539 if (PREV_INSN (insn))
2540 SET_NEXT_INSN (PREV_INSN (insn)) = NEXT_INSN (insn);
2541 else
2542 BB_FOOTER (bb) = NEXT_INSN (insn);
2543 if (NEXT_INSN (insn))
2544 SET_PREV_INSN (NEXT_INSN (insn)) = PREV_INSN (insn);
2546 else if (LABEL_P (insn))
2547 break;
2551 /* Return whether PAT is a PARALLEL of exactly N register SETs followed
2552 by an arbitrary number of CLOBBERs. */
2553 static bool
2554 is_parallel_of_n_reg_sets (rtx pat, int n)
2556 if (GET_CODE (pat) != PARALLEL)
2557 return false;
2559 int len = XVECLEN (pat, 0);
2560 if (len < n)
2561 return false;
2563 int i;
2564 for (i = 0; i < n; i++)
2565 if (GET_CODE (XVECEXP (pat, 0, i)) != SET
2566 || !REG_P (SET_DEST (XVECEXP (pat, 0, i))))
2567 return false;
2568 for ( ; i < len; i++)
2569 if (GET_CODE (XVECEXP (pat, 0, i)) != CLOBBER
2570 || XEXP (XVECEXP (pat, 0, i), 0) == const0_rtx)
2571 return false;
2573 return true;
2576 /* Return whether INSN, a PARALLEL of N register SETs (and maybe some
2577 CLOBBERs), can be split into individual SETs in that order, without
2578 changing semantics. */
2579 static bool
2580 can_split_parallel_of_n_reg_sets (rtx_insn *insn, int n)
2582 if (!insn_nothrow_p (insn))
2583 return false;
2585 rtx pat = PATTERN (insn);
2587 int i, j;
2588 for (i = 0; i < n; i++)
2590 if (side_effects_p (SET_SRC (XVECEXP (pat, 0, i))))
2591 return false;
2593 rtx reg = SET_DEST (XVECEXP (pat, 0, i));
2595 for (j = i + 1; j < n; j++)
2596 if (reg_referenced_p (reg, XVECEXP (pat, 0, j)))
2597 return false;
2600 return true;
2603 /* Try to combine the insns I0, I1 and I2 into I3.
2604 Here I0, I1 and I2 appear earlier than I3.
2605 I0 and I1 can be zero; then we combine just I2 into I3, or I1 and I2 into
2608 If we are combining more than two insns and the resulting insn is not
2609 recognized, try splitting it into two insns. If that happens, I2 and I3
2610 are retained and I1/I0 are pseudo-deleted by turning them into a NOTE.
2611 Otherwise, I0, I1 and I2 are pseudo-deleted.
2613 Return 0 if the combination does not work. Then nothing is changed.
2614 If we did the combination, return the insn at which combine should
2615 resume scanning.
2617 Set NEW_DIRECT_JUMP_P to a nonzero value if try_combine creates a
2618 new direct jump instruction.
2620 LAST_COMBINED_INSN is either I3, or some insn after I3 that has
2621 been I3 passed to an earlier try_combine within the same basic
2622 block. */
2624 static rtx_insn *
2625 try_combine (rtx_insn *i3, rtx_insn *i2, rtx_insn *i1, rtx_insn *i0,
2626 int *new_direct_jump_p, rtx_insn *last_combined_insn)
2628 /* New patterns for I3 and I2, respectively. */
2629 rtx newpat, newi2pat = 0;
2630 rtvec newpat_vec_with_clobbers = 0;
2631 int substed_i2 = 0, substed_i1 = 0, substed_i0 = 0;
2632 /* Indicates need to preserve SET in I0, I1 or I2 in I3 if it is not
2633 dead. */
2634 int added_sets_0, added_sets_1, added_sets_2;
2635 /* Total number of SETs to put into I3. */
2636 int total_sets;
2637 /* Nonzero if I2's or I1's body now appears in I3. */
2638 int i2_is_used = 0, i1_is_used = 0;
2639 /* INSN_CODEs for new I3, new I2, and user of condition code. */
2640 int insn_code_number, i2_code_number = 0, other_code_number = 0;
2641 /* Contains I3 if the destination of I3 is used in its source, which means
2642 that the old life of I3 is being killed. If that usage is placed into
2643 I2 and not in I3, a REG_DEAD note must be made. */
2644 rtx i3dest_killed = 0;
2645 /* SET_DEST and SET_SRC of I2, I1 and I0. */
2646 rtx i2dest = 0, i2src = 0, i1dest = 0, i1src = 0, i0dest = 0, i0src = 0;
2647 /* Copy of SET_SRC of I1 and I0, if needed. */
2648 rtx i1src_copy = 0, i0src_copy = 0, i0src_copy2 = 0;
2649 /* Set if I2DEST was reused as a scratch register. */
2650 bool i2scratch = false;
2651 /* The PATTERNs of I0, I1, and I2, or a copy of them in certain cases. */
2652 rtx i0pat = 0, i1pat = 0, i2pat = 0;
2653 /* Indicates if I2DEST or I1DEST is in I2SRC or I1_SRC. */
2654 int i2dest_in_i2src = 0, i1dest_in_i1src = 0, i2dest_in_i1src = 0;
2655 int i0dest_in_i0src = 0, i1dest_in_i0src = 0, i2dest_in_i0src = 0;
2656 int i2dest_killed = 0, i1dest_killed = 0, i0dest_killed = 0;
2657 int i1_feeds_i2_n = 0, i0_feeds_i2_n = 0, i0_feeds_i1_n = 0;
2658 /* Notes that must be added to REG_NOTES in I3 and I2. */
2659 rtx new_i3_notes, new_i2_notes;
2660 /* Notes that we substituted I3 into I2 instead of the normal case. */
2661 int i3_subst_into_i2 = 0;
2662 /* Notes that I1, I2 or I3 is a MULT operation. */
2663 int have_mult = 0;
2664 int swap_i2i3 = 0;
2665 int changed_i3_dest = 0;
2667 int maxreg;
2668 rtx_insn *temp_insn;
2669 rtx temp_expr;
2670 struct insn_link *link;
2671 rtx other_pat = 0;
2672 rtx new_other_notes;
2673 int i;
2674 scalar_int_mode dest_mode, temp_mode;
2676 /* Immediately return if any of I0,I1,I2 are the same insn (I3 can
2677 never be). */
2678 if (i1 == i2 || i0 == i2 || (i0 && i0 == i1))
2679 return 0;
2681 /* Only try four-insn combinations when there's high likelihood of
2682 success. Look for simple insns, such as loads of constants or
2683 binary operations involving a constant. */
2684 if (i0)
2686 int i;
2687 int ngood = 0;
2688 int nshift = 0;
2689 rtx set0, set3;
2691 if (!flag_expensive_optimizations)
2692 return 0;
2694 for (i = 0; i < 4; i++)
2696 rtx_insn *insn = i == 0 ? i0 : i == 1 ? i1 : i == 2 ? i2 : i3;
2697 rtx set = single_set (insn);
2698 rtx src;
2699 if (!set)
2700 continue;
2701 src = SET_SRC (set);
2702 if (CONSTANT_P (src))
2704 ngood += 2;
2705 break;
2707 else if (BINARY_P (src) && CONSTANT_P (XEXP (src, 1)))
2708 ngood++;
2709 else if (GET_CODE (src) == ASHIFT || GET_CODE (src) == ASHIFTRT
2710 || GET_CODE (src) == LSHIFTRT)
2711 nshift++;
2714 /* If I0 loads a memory and I3 sets the same memory, then I1 and I2
2715 are likely manipulating its value. Ideally we'll be able to combine
2716 all four insns into a bitfield insertion of some kind.
2718 Note the source in I0 might be inside a sign/zero extension and the
2719 memory modes in I0 and I3 might be different. So extract the address
2720 from the destination of I3 and search for it in the source of I0.
2722 In the event that there's a match but the source/dest do not actually
2723 refer to the same memory, the worst that happens is we try some
2724 combinations that we wouldn't have otherwise. */
2725 if ((set0 = single_set (i0))
2726 /* Ensure the source of SET0 is a MEM, possibly buried inside
2727 an extension. */
2728 && (GET_CODE (SET_SRC (set0)) == MEM
2729 || ((GET_CODE (SET_SRC (set0)) == ZERO_EXTEND
2730 || GET_CODE (SET_SRC (set0)) == SIGN_EXTEND)
2731 && GET_CODE (XEXP (SET_SRC (set0), 0)) == MEM))
2732 && (set3 = single_set (i3))
2733 /* Ensure the destination of SET3 is a MEM. */
2734 && GET_CODE (SET_DEST (set3)) == MEM
2735 /* Would it be better to extract the base address for the MEM
2736 in SET3 and look for that? I don't have cases where it matters
2737 but I could envision such cases. */
2738 && rtx_referenced_p (XEXP (SET_DEST (set3), 0), SET_SRC (set0)))
2739 ngood += 2;
2741 if (ngood < 2 && nshift < 2)
2742 return 0;
2745 /* Exit early if one of the insns involved can't be used for
2746 combinations. */
2747 if (CALL_P (i2)
2748 || (i1 && CALL_P (i1))
2749 || (i0 && CALL_P (i0))
2750 || cant_combine_insn_p (i3)
2751 || cant_combine_insn_p (i2)
2752 || (i1 && cant_combine_insn_p (i1))
2753 || (i0 && cant_combine_insn_p (i0))
2754 || likely_spilled_retval_p (i3))
2755 return 0;
2757 combine_attempts++;
2758 undobuf.other_insn = 0;
2760 /* Reset the hard register usage information. */
2761 CLEAR_HARD_REG_SET (newpat_used_regs);
2763 if (dump_file && (dump_flags & TDF_DETAILS))
2765 if (i0)
2766 fprintf (dump_file, "\nTrying %d, %d, %d -> %d:\n",
2767 INSN_UID (i0), INSN_UID (i1), INSN_UID (i2), INSN_UID (i3));
2768 else if (i1)
2769 fprintf (dump_file, "\nTrying %d, %d -> %d:\n",
2770 INSN_UID (i1), INSN_UID (i2), INSN_UID (i3));
2771 else
2772 fprintf (dump_file, "\nTrying %d -> %d:\n",
2773 INSN_UID (i2), INSN_UID (i3));
2775 if (i0)
2776 dump_insn_slim (dump_file, i0);
2777 if (i1)
2778 dump_insn_slim (dump_file, i1);
2779 dump_insn_slim (dump_file, i2);
2780 dump_insn_slim (dump_file, i3);
2783 /* If multiple insns feed into one of I2 or I3, they can be in any
2784 order. To simplify the code below, reorder them in sequence. */
2785 if (i0 && DF_INSN_LUID (i0) > DF_INSN_LUID (i2))
2786 std::swap (i0, i2);
2787 if (i0 && DF_INSN_LUID (i0) > DF_INSN_LUID (i1))
2788 std::swap (i0, i1);
2789 if (i1 && DF_INSN_LUID (i1) > DF_INSN_LUID (i2))
2790 std::swap (i1, i2);
2792 added_links_insn = 0;
2794 /* First check for one important special case that the code below will
2795 not handle. Namely, the case where I1 is zero, I2 is a PARALLEL
2796 and I3 is a SET whose SET_SRC is a SET_DEST in I2. In that case,
2797 we may be able to replace that destination with the destination of I3.
2798 This occurs in the common code where we compute both a quotient and
2799 remainder into a structure, in which case we want to do the computation
2800 directly into the structure to avoid register-register copies.
2802 Note that this case handles both multiple sets in I2 and also cases
2803 where I2 has a number of CLOBBERs inside the PARALLEL.
2805 We make very conservative checks below and only try to handle the
2806 most common cases of this. For example, we only handle the case
2807 where I2 and I3 are adjacent to avoid making difficult register
2808 usage tests. */
2810 if (i1 == 0 && NONJUMP_INSN_P (i3) && GET_CODE (PATTERN (i3)) == SET
2811 && REG_P (SET_SRC (PATTERN (i3)))
2812 && REGNO (SET_SRC (PATTERN (i3))) >= FIRST_PSEUDO_REGISTER
2813 && find_reg_note (i3, REG_DEAD, SET_SRC (PATTERN (i3)))
2814 && GET_CODE (PATTERN (i2)) == PARALLEL
2815 && ! side_effects_p (SET_DEST (PATTERN (i3)))
2816 /* If the dest of I3 is a ZERO_EXTRACT or STRICT_LOW_PART, the code
2817 below would need to check what is inside (and reg_overlap_mentioned_p
2818 doesn't support those codes anyway). Don't allow those destinations;
2819 the resulting insn isn't likely to be recognized anyway. */
2820 && GET_CODE (SET_DEST (PATTERN (i3))) != ZERO_EXTRACT
2821 && GET_CODE (SET_DEST (PATTERN (i3))) != STRICT_LOW_PART
2822 && ! reg_overlap_mentioned_p (SET_SRC (PATTERN (i3)),
2823 SET_DEST (PATTERN (i3)))
2824 && next_active_insn (i2) == i3)
2826 rtx p2 = PATTERN (i2);
2828 /* Make sure that the destination of I3,
2829 which we are going to substitute into one output of I2,
2830 is not used within another output of I2. We must avoid making this:
2831 (parallel [(set (mem (reg 69)) ...)
2832 (set (reg 69) ...)])
2833 which is not well-defined as to order of actions.
2834 (Besides, reload can't handle output reloads for this.)
2836 The problem can also happen if the dest of I3 is a memory ref,
2837 if another dest in I2 is an indirect memory ref.
2839 Neither can this PARALLEL be an asm. We do not allow combining
2840 that usually (see can_combine_p), so do not here either. */
2841 bool ok = true;
2842 for (i = 0; ok && i < XVECLEN (p2, 0); i++)
2844 if ((GET_CODE (XVECEXP (p2, 0, i)) == SET
2845 || GET_CODE (XVECEXP (p2, 0, i)) == CLOBBER)
2846 && reg_overlap_mentioned_p (SET_DEST (PATTERN (i3)),
2847 SET_DEST (XVECEXP (p2, 0, i))))
2848 ok = false;
2849 else if (GET_CODE (XVECEXP (p2, 0, i)) == SET
2850 && GET_CODE (SET_SRC (XVECEXP (p2, 0, i))) == ASM_OPERANDS)
2851 ok = false;
2854 if (ok)
2855 for (i = 0; i < XVECLEN (p2, 0); i++)
2856 if (GET_CODE (XVECEXP (p2, 0, i)) == SET
2857 && SET_DEST (XVECEXP (p2, 0, i)) == SET_SRC (PATTERN (i3)))
2859 combine_merges++;
2861 subst_insn = i3;
2862 subst_low_luid = DF_INSN_LUID (i2);
2864 added_sets_2 = added_sets_1 = added_sets_0 = 0;
2865 i2src = SET_SRC (XVECEXP (p2, 0, i));
2866 i2dest = SET_DEST (XVECEXP (p2, 0, i));
2867 i2dest_killed = dead_or_set_p (i2, i2dest);
2869 /* Replace the dest in I2 with our dest and make the resulting
2870 insn the new pattern for I3. Then skip to where we validate
2871 the pattern. Everything was set up above. */
2872 SUBST (SET_DEST (XVECEXP (p2, 0, i)), SET_DEST (PATTERN (i3)));
2873 newpat = p2;
2874 i3_subst_into_i2 = 1;
2875 goto validate_replacement;
2879 /* If I2 is setting a pseudo to a constant and I3 is setting some
2880 sub-part of it to another constant, merge them by making a new
2881 constant. */
2882 if (i1 == 0
2883 && (temp_expr = single_set (i2)) != 0
2884 && is_a <scalar_int_mode> (GET_MODE (SET_DEST (temp_expr)), &temp_mode)
2885 && CONST_SCALAR_INT_P (SET_SRC (temp_expr))
2886 && GET_CODE (PATTERN (i3)) == SET
2887 && CONST_SCALAR_INT_P (SET_SRC (PATTERN (i3)))
2888 && reg_subword_p (SET_DEST (PATTERN (i3)), SET_DEST (temp_expr)))
2890 rtx dest = SET_DEST (PATTERN (i3));
2891 rtx temp_dest = SET_DEST (temp_expr);
2892 int offset = -1;
2893 int width = 0;
2895 if (GET_CODE (dest) == ZERO_EXTRACT)
2897 if (CONST_INT_P (XEXP (dest, 1))
2898 && CONST_INT_P (XEXP (dest, 2))
2899 && is_a <scalar_int_mode> (GET_MODE (XEXP (dest, 0)),
2900 &dest_mode))
2902 width = INTVAL (XEXP (dest, 1));
2903 offset = INTVAL (XEXP (dest, 2));
2904 dest = XEXP (dest, 0);
2905 if (BITS_BIG_ENDIAN)
2906 offset = GET_MODE_PRECISION (dest_mode) - width - offset;
2909 else
2911 if (GET_CODE (dest) == STRICT_LOW_PART)
2912 dest = XEXP (dest, 0);
2913 if (is_a <scalar_int_mode> (GET_MODE (dest), &dest_mode))
2915 width = GET_MODE_PRECISION (dest_mode);
2916 offset = 0;
2920 if (offset >= 0)
2922 /* If this is the low part, we're done. */
2923 if (subreg_lowpart_p (dest))
2925 /* Handle the case where inner is twice the size of outer. */
2926 else if (GET_MODE_PRECISION (temp_mode)
2927 == 2 * GET_MODE_PRECISION (dest_mode))
2928 offset += GET_MODE_PRECISION (dest_mode);
2929 /* Otherwise give up for now. */
2930 else
2931 offset = -1;
2934 if (offset >= 0)
2936 rtx inner = SET_SRC (PATTERN (i3));
2937 rtx outer = SET_SRC (temp_expr);
2939 wide_int o = wi::insert (rtx_mode_t (outer, temp_mode),
2940 rtx_mode_t (inner, dest_mode),
2941 offset, width);
2943 combine_merges++;
2944 subst_insn = i3;
2945 subst_low_luid = DF_INSN_LUID (i2);
2946 added_sets_2 = added_sets_1 = added_sets_0 = 0;
2947 i2dest = temp_dest;
2948 i2dest_killed = dead_or_set_p (i2, i2dest);
2950 /* Replace the source in I2 with the new constant and make the
2951 resulting insn the new pattern for I3. Then skip to where we
2952 validate the pattern. Everything was set up above. */
2953 SUBST (SET_SRC (temp_expr),
2954 immed_wide_int_const (o, temp_mode));
2956 newpat = PATTERN (i2);
2958 /* The dest of I3 has been replaced with the dest of I2. */
2959 changed_i3_dest = 1;
2960 goto validate_replacement;
2964 /* If we have no I1 and I2 looks like:
2965 (parallel [(set (reg:CC X) (compare:CC OP (const_int 0)))
2966 (set Y OP)])
2967 make up a dummy I1 that is
2968 (set Y OP)
2969 and change I2 to be
2970 (set (reg:CC X) (compare:CC Y (const_int 0)))
2972 (We can ignore any trailing CLOBBERs.)
2974 This undoes a previous combination and allows us to match a branch-and-
2975 decrement insn. */
2977 if (!HAVE_cc0 && i1 == 0
2978 && is_parallel_of_n_reg_sets (PATTERN (i2), 2)
2979 && (GET_MODE_CLASS (GET_MODE (SET_DEST (XVECEXP (PATTERN (i2), 0, 0))))
2980 == MODE_CC)
2981 && GET_CODE (SET_SRC (XVECEXP (PATTERN (i2), 0, 0))) == COMPARE
2982 && XEXP (SET_SRC (XVECEXP (PATTERN (i2), 0, 0)), 1) == const0_rtx
2983 && rtx_equal_p (XEXP (SET_SRC (XVECEXP (PATTERN (i2), 0, 0)), 0),
2984 SET_SRC (XVECEXP (PATTERN (i2), 0, 1)))
2985 && !reg_used_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 0)), i2, i3)
2986 && !reg_used_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 1)), i2, i3))
2988 /* We make I1 with the same INSN_UID as I2. This gives it
2989 the same DF_INSN_LUID for value tracking. Our fake I1 will
2990 never appear in the insn stream so giving it the same INSN_UID
2991 as I2 will not cause a problem. */
2993 i1 = gen_rtx_INSN (VOIDmode, NULL, i2, BLOCK_FOR_INSN (i2),
2994 XVECEXP (PATTERN (i2), 0, 1), INSN_LOCATION (i2),
2995 -1, NULL_RTX);
2996 INSN_UID (i1) = INSN_UID (i2);
2998 SUBST (PATTERN (i2), XVECEXP (PATTERN (i2), 0, 0));
2999 SUBST (XEXP (SET_SRC (PATTERN (i2)), 0),
3000 SET_DEST (PATTERN (i1)));
3001 unsigned int regno = REGNO (SET_DEST (PATTERN (i1)));
3002 SUBST_LINK (LOG_LINKS (i2),
3003 alloc_insn_link (i1, regno, LOG_LINKS (i2)));
3006 /* If I2 is a PARALLEL of two SETs of REGs (and perhaps some CLOBBERs),
3007 make those two SETs separate I1 and I2 insns, and make an I0 that is
3008 the original I1. */
3009 if (!HAVE_cc0 && i0 == 0
3010 && is_parallel_of_n_reg_sets (PATTERN (i2), 2)
3011 && can_split_parallel_of_n_reg_sets (i2, 2)
3012 && !reg_used_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 0)), i2, i3)
3013 && !reg_used_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 1)), i2, i3))
3015 /* If there is no I1, there is no I0 either. */
3016 i0 = i1;
3018 /* We make I1 with the same INSN_UID as I2. This gives it
3019 the same DF_INSN_LUID for value tracking. Our fake I1 will
3020 never appear in the insn stream so giving it the same INSN_UID
3021 as I2 will not cause a problem. */
3023 i1 = gen_rtx_INSN (VOIDmode, NULL, i2, BLOCK_FOR_INSN (i2),
3024 XVECEXP (PATTERN (i2), 0, 0), INSN_LOCATION (i2),
3025 -1, NULL_RTX);
3026 INSN_UID (i1) = INSN_UID (i2);
3028 SUBST (PATTERN (i2), XVECEXP (PATTERN (i2), 0, 1));
3031 /* Verify that I2 and I1 are valid for combining. */
3032 if (! can_combine_p (i2, i3, i0, i1, NULL, NULL, &i2dest, &i2src)
3033 || (i1 && ! can_combine_p (i1, i3, i0, NULL, i2, NULL,
3034 &i1dest, &i1src))
3035 || (i0 && ! can_combine_p (i0, i3, NULL, NULL, i1, i2,
3036 &i0dest, &i0src)))
3038 undo_all ();
3039 return 0;
3042 /* Record whether I2DEST is used in I2SRC and similarly for the other
3043 cases. Knowing this will help in register status updating below. */
3044 i2dest_in_i2src = reg_overlap_mentioned_p (i2dest, i2src);
3045 i1dest_in_i1src = i1 && reg_overlap_mentioned_p (i1dest, i1src);
3046 i2dest_in_i1src = i1 && reg_overlap_mentioned_p (i2dest, i1src);
3047 i0dest_in_i0src = i0 && reg_overlap_mentioned_p (i0dest, i0src);
3048 i1dest_in_i0src = i0 && reg_overlap_mentioned_p (i1dest, i0src);
3049 i2dest_in_i0src = i0 && reg_overlap_mentioned_p (i2dest, i0src);
3050 i2dest_killed = dead_or_set_p (i2, i2dest);
3051 i1dest_killed = i1 && dead_or_set_p (i1, i1dest);
3052 i0dest_killed = i0 && dead_or_set_p (i0, i0dest);
3054 /* For the earlier insns, determine which of the subsequent ones they
3055 feed. */
3056 i1_feeds_i2_n = i1 && insn_a_feeds_b (i1, i2);
3057 i0_feeds_i1_n = i0 && insn_a_feeds_b (i0, i1);
3058 i0_feeds_i2_n = (i0 && (!i0_feeds_i1_n ? insn_a_feeds_b (i0, i2)
3059 : (!reg_overlap_mentioned_p (i1dest, i0dest)
3060 && reg_overlap_mentioned_p (i0dest, i2src))));
3062 /* Ensure that I3's pattern can be the destination of combines. */
3063 if (! combinable_i3pat (i3, &PATTERN (i3), i2dest, i1dest, i0dest,
3064 i1 && i2dest_in_i1src && !i1_feeds_i2_n,
3065 i0 && ((i2dest_in_i0src && !i0_feeds_i2_n)
3066 || (i1dest_in_i0src && !i0_feeds_i1_n)),
3067 &i3dest_killed))
3069 undo_all ();
3070 return 0;
3073 /* See if any of the insns is a MULT operation. Unless one is, we will
3074 reject a combination that is, since it must be slower. Be conservative
3075 here. */
3076 if (GET_CODE (i2src) == MULT
3077 || (i1 != 0 && GET_CODE (i1src) == MULT)
3078 || (i0 != 0 && GET_CODE (i0src) == MULT)
3079 || (GET_CODE (PATTERN (i3)) == SET
3080 && GET_CODE (SET_SRC (PATTERN (i3))) == MULT))
3081 have_mult = 1;
3083 /* If I3 has an inc, then give up if I1 or I2 uses the reg that is inc'd.
3084 We used to do this EXCEPT in one case: I3 has a post-inc in an
3085 output operand. However, that exception can give rise to insns like
3086 mov r3,(r3)+
3087 which is a famous insn on the PDP-11 where the value of r3 used as the
3088 source was model-dependent. Avoid this sort of thing. */
3090 #if 0
3091 if (!(GET_CODE (PATTERN (i3)) == SET
3092 && REG_P (SET_SRC (PATTERN (i3)))
3093 && MEM_P (SET_DEST (PATTERN (i3)))
3094 && (GET_CODE (XEXP (SET_DEST (PATTERN (i3)), 0)) == POST_INC
3095 || GET_CODE (XEXP (SET_DEST (PATTERN (i3)), 0)) == POST_DEC)))
3096 /* It's not the exception. */
3097 #endif
3098 if (AUTO_INC_DEC)
3100 rtx link;
3101 for (link = REG_NOTES (i3); link; link = XEXP (link, 1))
3102 if (REG_NOTE_KIND (link) == REG_INC
3103 && (reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (i2))
3104 || (i1 != 0
3105 && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (i1)))))
3107 undo_all ();
3108 return 0;
3112 /* See if the SETs in I1 or I2 need to be kept around in the merged
3113 instruction: whenever the value set there is still needed past I3.
3114 For the SET in I2, this is easy: we see if I2DEST dies or is set in I3.
3116 For the SET in I1, we have two cases: if I1 and I2 independently feed
3117 into I3, the set in I1 needs to be kept around unless I1DEST dies
3118 or is set in I3. Otherwise (if I1 feeds I2 which feeds I3), the set
3119 in I1 needs to be kept around unless I1DEST dies or is set in either
3120 I2 or I3. The same considerations apply to I0. */
3122 added_sets_2 = !dead_or_set_p (i3, i2dest);
3124 if (i1)
3125 added_sets_1 = !(dead_or_set_p (i3, i1dest)
3126 || (i1_feeds_i2_n && dead_or_set_p (i2, i1dest)));
3127 else
3128 added_sets_1 = 0;
3130 if (i0)
3131 added_sets_0 = !(dead_or_set_p (i3, i0dest)
3132 || (i0_feeds_i1_n && dead_or_set_p (i1, i0dest))
3133 || ((i0_feeds_i2_n || (i0_feeds_i1_n && i1_feeds_i2_n))
3134 && dead_or_set_p (i2, i0dest)));
3135 else
3136 added_sets_0 = 0;
3138 /* We are about to copy insns for the case where they need to be kept
3139 around. Check that they can be copied in the merged instruction. */
3141 if (targetm.cannot_copy_insn_p
3142 && ((added_sets_2 && targetm.cannot_copy_insn_p (i2))
3143 || (i1 && added_sets_1 && targetm.cannot_copy_insn_p (i1))
3144 || (i0 && added_sets_0 && targetm.cannot_copy_insn_p (i0))))
3146 undo_all ();
3147 return 0;
3150 /* If the set in I2 needs to be kept around, we must make a copy of
3151 PATTERN (I2), so that when we substitute I1SRC for I1DEST in
3152 PATTERN (I2), we are only substituting for the original I1DEST, not into
3153 an already-substituted copy. This also prevents making self-referential
3154 rtx. If I2 is a PARALLEL, we just need the piece that assigns I2SRC to
3155 I2DEST. */
3157 if (added_sets_2)
3159 if (GET_CODE (PATTERN (i2)) == PARALLEL)
3160 i2pat = gen_rtx_SET (i2dest, copy_rtx (i2src));
3161 else
3162 i2pat = copy_rtx (PATTERN (i2));
3165 if (added_sets_1)
3167 if (GET_CODE (PATTERN (i1)) == PARALLEL)
3168 i1pat = gen_rtx_SET (i1dest, copy_rtx (i1src));
3169 else
3170 i1pat = copy_rtx (PATTERN (i1));
3173 if (added_sets_0)
3175 if (GET_CODE (PATTERN (i0)) == PARALLEL)
3176 i0pat = gen_rtx_SET (i0dest, copy_rtx (i0src));
3177 else
3178 i0pat = copy_rtx (PATTERN (i0));
3181 combine_merges++;
3183 /* Substitute in the latest insn for the regs set by the earlier ones. */
3185 maxreg = max_reg_num ();
3187 subst_insn = i3;
3189 /* Many machines that don't use CC0 have insns that can both perform an
3190 arithmetic operation and set the condition code. These operations will
3191 be represented as a PARALLEL with the first element of the vector
3192 being a COMPARE of an arithmetic operation with the constant zero.
3193 The second element of the vector will set some pseudo to the result
3194 of the same arithmetic operation. If we simplify the COMPARE, we won't
3195 match such a pattern and so will generate an extra insn. Here we test
3196 for this case, where both the comparison and the operation result are
3197 needed, and make the PARALLEL by just replacing I2DEST in I3SRC with
3198 I2SRC. Later we will make the PARALLEL that contains I2. */
3200 if (!HAVE_cc0 && i1 == 0 && added_sets_2 && GET_CODE (PATTERN (i3)) == SET
3201 && GET_CODE (SET_SRC (PATTERN (i3))) == COMPARE
3202 && CONST_INT_P (XEXP (SET_SRC (PATTERN (i3)), 1))
3203 && rtx_equal_p (XEXP (SET_SRC (PATTERN (i3)), 0), i2dest))
3205 rtx newpat_dest;
3206 rtx *cc_use_loc = NULL;
3207 rtx_insn *cc_use_insn = NULL;
3208 rtx op0 = i2src, op1 = XEXP (SET_SRC (PATTERN (i3)), 1);
3209 machine_mode compare_mode, orig_compare_mode;
3210 enum rtx_code compare_code = UNKNOWN, orig_compare_code = UNKNOWN;
3211 scalar_int_mode mode;
3213 newpat = PATTERN (i3);
3214 newpat_dest = SET_DEST (newpat);
3215 compare_mode = orig_compare_mode = GET_MODE (newpat_dest);
3217 if (undobuf.other_insn == 0
3218 && (cc_use_loc = find_single_use (SET_DEST (newpat), i3,
3219 &cc_use_insn)))
3221 compare_code = orig_compare_code = GET_CODE (*cc_use_loc);
3222 if (is_a <scalar_int_mode> (GET_MODE (i2dest), &mode))
3223 compare_code = simplify_compare_const (compare_code, mode,
3224 op0, &op1);
3225 target_canonicalize_comparison (&compare_code, &op0, &op1, 1);
3228 /* Do the rest only if op1 is const0_rtx, which may be the
3229 result of simplification. */
3230 if (op1 == const0_rtx)
3232 /* If a single use of the CC is found, prepare to modify it
3233 when SELECT_CC_MODE returns a new CC-class mode, or when
3234 the above simplify_compare_const() returned a new comparison
3235 operator. undobuf.other_insn is assigned the CC use insn
3236 when modifying it. */
3237 if (cc_use_loc)
3239 #ifdef SELECT_CC_MODE
3240 machine_mode new_mode
3241 = SELECT_CC_MODE (compare_code, op0, op1);
3242 if (new_mode != orig_compare_mode
3243 && can_change_dest_mode (SET_DEST (newpat),
3244 added_sets_2, new_mode))
3246 unsigned int regno = REGNO (newpat_dest);
3247 compare_mode = new_mode;
3248 if (regno < FIRST_PSEUDO_REGISTER)
3249 newpat_dest = gen_rtx_REG (compare_mode, regno);
3250 else
3252 SUBST_MODE (regno_reg_rtx[regno], compare_mode);
3253 newpat_dest = regno_reg_rtx[regno];
3256 #endif
3257 /* Cases for modifying the CC-using comparison. */
3258 if (compare_code != orig_compare_code
3259 /* ??? Do we need to verify the zero rtx? */
3260 && XEXP (*cc_use_loc, 1) == const0_rtx)
3262 /* Replace cc_use_loc with entire new RTX. */
3263 SUBST (*cc_use_loc,
3264 gen_rtx_fmt_ee (compare_code, compare_mode,
3265 newpat_dest, const0_rtx));
3266 undobuf.other_insn = cc_use_insn;
3268 else if (compare_mode != orig_compare_mode)
3270 /* Just replace the CC reg with a new mode. */
3271 SUBST (XEXP (*cc_use_loc, 0), newpat_dest);
3272 undobuf.other_insn = cc_use_insn;
3276 /* Now we modify the current newpat:
3277 First, SET_DEST(newpat) is updated if the CC mode has been
3278 altered. For targets without SELECT_CC_MODE, this should be
3279 optimized away. */
3280 if (compare_mode != orig_compare_mode)
3281 SUBST (SET_DEST (newpat), newpat_dest);
3282 /* This is always done to propagate i2src into newpat. */
3283 SUBST (SET_SRC (newpat),
3284 gen_rtx_COMPARE (compare_mode, op0, op1));
3285 /* Create new version of i2pat if needed; the below PARALLEL
3286 creation needs this to work correctly. */
3287 if (! rtx_equal_p (i2src, op0))
3288 i2pat = gen_rtx_SET (i2dest, op0);
3289 i2_is_used = 1;
3293 if (i2_is_used == 0)
3295 /* It is possible that the source of I2 or I1 may be performing
3296 an unneeded operation, such as a ZERO_EXTEND of something
3297 that is known to have the high part zero. Handle that case
3298 by letting subst look at the inner insns.
3300 Another way to do this would be to have a function that tries
3301 to simplify a single insn instead of merging two or more
3302 insns. We don't do this because of the potential of infinite
3303 loops and because of the potential extra memory required.
3304 However, doing it the way we are is a bit of a kludge and
3305 doesn't catch all cases.
3307 But only do this if -fexpensive-optimizations since it slows
3308 things down and doesn't usually win.
3310 This is not done in the COMPARE case above because the
3311 unmodified I2PAT is used in the PARALLEL and so a pattern
3312 with a modified I2SRC would not match. */
3314 if (flag_expensive_optimizations)
3316 /* Pass pc_rtx so no substitutions are done, just
3317 simplifications. */
3318 if (i1)
3320 subst_low_luid = DF_INSN_LUID (i1);
3321 i1src = subst (i1src, pc_rtx, pc_rtx, 0, 0, 0);
3324 subst_low_luid = DF_INSN_LUID (i2);
3325 i2src = subst (i2src, pc_rtx, pc_rtx, 0, 0, 0);
3328 n_occurrences = 0; /* `subst' counts here */
3329 subst_low_luid = DF_INSN_LUID (i2);
3331 /* If I1 feeds into I2 and I1DEST is in I1SRC, we need to make a unique
3332 copy of I2SRC each time we substitute it, in order to avoid creating
3333 self-referential RTL when we will be substituting I1SRC for I1DEST
3334 later. Likewise if I0 feeds into I2, either directly or indirectly
3335 through I1, and I0DEST is in I0SRC. */
3336 newpat = subst (PATTERN (i3), i2dest, i2src, 0, 0,
3337 (i1_feeds_i2_n && i1dest_in_i1src)
3338 || ((i0_feeds_i2_n || (i0_feeds_i1_n && i1_feeds_i2_n))
3339 && i0dest_in_i0src));
3340 substed_i2 = 1;
3342 /* Record whether I2's body now appears within I3's body. */
3343 i2_is_used = n_occurrences;
3346 /* If we already got a failure, don't try to do more. Otherwise, try to
3347 substitute I1 if we have it. */
3349 if (i1 && GET_CODE (newpat) != CLOBBER)
3351 /* Check that an autoincrement side-effect on I1 has not been lost.
3352 This happens if I1DEST is mentioned in I2 and dies there, and
3353 has disappeared from the new pattern. */
3354 if ((FIND_REG_INC_NOTE (i1, NULL_RTX) != 0
3355 && i1_feeds_i2_n
3356 && dead_or_set_p (i2, i1dest)
3357 && !reg_overlap_mentioned_p (i1dest, newpat))
3358 /* Before we can do this substitution, we must redo the test done
3359 above (see detailed comments there) that ensures I1DEST isn't
3360 mentioned in any SETs in NEWPAT that are field assignments. */
3361 || !combinable_i3pat (NULL, &newpat, i1dest, NULL_RTX, NULL_RTX,
3362 0, 0, 0))
3364 undo_all ();
3365 return 0;
3368 n_occurrences = 0;
3369 subst_low_luid = DF_INSN_LUID (i1);
3371 /* If the following substitution will modify I1SRC, make a copy of it
3372 for the case where it is substituted for I1DEST in I2PAT later. */
3373 if (added_sets_2 && i1_feeds_i2_n)
3374 i1src_copy = copy_rtx (i1src);
3376 /* If I0 feeds into I1 and I0DEST is in I0SRC, we need to make a unique
3377 copy of I1SRC each time we substitute it, in order to avoid creating
3378 self-referential RTL when we will be substituting I0SRC for I0DEST
3379 later. */
3380 newpat = subst (newpat, i1dest, i1src, 0, 0,
3381 i0_feeds_i1_n && i0dest_in_i0src);
3382 substed_i1 = 1;
3384 /* Record whether I1's body now appears within I3's body. */
3385 i1_is_used = n_occurrences;
3388 /* Likewise for I0 if we have it. */
3390 if (i0 && GET_CODE (newpat) != CLOBBER)
3392 if ((FIND_REG_INC_NOTE (i0, NULL_RTX) != 0
3393 && ((i0_feeds_i2_n && dead_or_set_p (i2, i0dest))
3394 || (i0_feeds_i1_n && dead_or_set_p (i1, i0dest)))
3395 && !reg_overlap_mentioned_p (i0dest, newpat))
3396 || !combinable_i3pat (NULL, &newpat, i0dest, NULL_RTX, NULL_RTX,
3397 0, 0, 0))
3399 undo_all ();
3400 return 0;
3403 /* If the following substitution will modify I0SRC, make a copy of it
3404 for the case where it is substituted for I0DEST in I1PAT later. */
3405 if (added_sets_1 && i0_feeds_i1_n)
3406 i0src_copy = copy_rtx (i0src);
3407 /* And a copy for I0DEST in I2PAT substitution. */
3408 if (added_sets_2 && ((i0_feeds_i1_n && i1_feeds_i2_n)
3409 || (i0_feeds_i2_n)))
3410 i0src_copy2 = copy_rtx (i0src);
3412 n_occurrences = 0;
3413 subst_low_luid = DF_INSN_LUID (i0);
3414 newpat = subst (newpat, i0dest, i0src, 0, 0, 0);
3415 substed_i0 = 1;
3418 /* Fail if an autoincrement side-effect has been duplicated. Be careful
3419 to count all the ways that I2SRC and I1SRC can be used. */
3420 if ((FIND_REG_INC_NOTE (i2, NULL_RTX) != 0
3421 && i2_is_used + added_sets_2 > 1)
3422 || (i1 != 0 && FIND_REG_INC_NOTE (i1, NULL_RTX) != 0
3423 && (i1_is_used + added_sets_1 + (added_sets_2 && i1_feeds_i2_n)
3424 > 1))
3425 || (i0 != 0 && FIND_REG_INC_NOTE (i0, NULL_RTX) != 0
3426 && (n_occurrences + added_sets_0
3427 + (added_sets_1 && i0_feeds_i1_n)
3428 + (added_sets_2 && i0_feeds_i2_n)
3429 > 1))
3430 /* Fail if we tried to make a new register. */
3431 || max_reg_num () != maxreg
3432 /* Fail if we couldn't do something and have a CLOBBER. */
3433 || GET_CODE (newpat) == CLOBBER
3434 /* Fail if this new pattern is a MULT and we didn't have one before
3435 at the outer level. */
3436 || (GET_CODE (newpat) == SET && GET_CODE (SET_SRC (newpat)) == MULT
3437 && ! have_mult))
3439 undo_all ();
3440 return 0;
3443 /* If the actions of the earlier insns must be kept
3444 in addition to substituting them into the latest one,
3445 we must make a new PARALLEL for the latest insn
3446 to hold additional the SETs. */
3448 if (added_sets_0 || added_sets_1 || added_sets_2)
3450 int extra_sets = added_sets_0 + added_sets_1 + added_sets_2;
3451 combine_extras++;
3453 if (GET_CODE (newpat) == PARALLEL)
3455 rtvec old = XVEC (newpat, 0);
3456 total_sets = XVECLEN (newpat, 0) + extra_sets;
3457 newpat = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (total_sets));
3458 memcpy (XVEC (newpat, 0)->elem, &old->elem[0],
3459 sizeof (old->elem[0]) * old->num_elem);
3461 else
3463 rtx old = newpat;
3464 total_sets = 1 + extra_sets;
3465 newpat = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (total_sets));
3466 XVECEXP (newpat, 0, 0) = old;
3469 if (added_sets_0)
3470 XVECEXP (newpat, 0, --total_sets) = i0pat;
3472 if (added_sets_1)
3474 rtx t = i1pat;
3475 if (i0_feeds_i1_n)
3476 t = subst (t, i0dest, i0src_copy ? i0src_copy : i0src, 0, 0, 0);
3478 XVECEXP (newpat, 0, --total_sets) = t;
3480 if (added_sets_2)
3482 rtx t = i2pat;
3483 if (i1_feeds_i2_n)
3484 t = subst (t, i1dest, i1src_copy ? i1src_copy : i1src, 0, 0,
3485 i0_feeds_i1_n && i0dest_in_i0src);
3486 if ((i0_feeds_i1_n && i1_feeds_i2_n) || i0_feeds_i2_n)
3487 t = subst (t, i0dest, i0src_copy2 ? i0src_copy2 : i0src, 0, 0, 0);
3489 XVECEXP (newpat, 0, --total_sets) = t;
3493 validate_replacement:
3495 /* Note which hard regs this insn has as inputs. */
3496 mark_used_regs_combine (newpat);
3498 /* If recog_for_combine fails, it strips existing clobbers. If we'll
3499 consider splitting this pattern, we might need these clobbers. */
3500 if (i1 && GET_CODE (newpat) == PARALLEL
3501 && GET_CODE (XVECEXP (newpat, 0, XVECLEN (newpat, 0) - 1)) == CLOBBER)
3503 int len = XVECLEN (newpat, 0);
3505 newpat_vec_with_clobbers = rtvec_alloc (len);
3506 for (i = 0; i < len; i++)
3507 RTVEC_ELT (newpat_vec_with_clobbers, i) = XVECEXP (newpat, 0, i);
3510 /* We have recognized nothing yet. */
3511 insn_code_number = -1;
3513 /* See if this is a PARALLEL of two SETs where one SET's destination is
3514 a register that is unused and this isn't marked as an instruction that
3515 might trap in an EH region. In that case, we just need the other SET.
3516 We prefer this over the PARALLEL.
3518 This can occur when simplifying a divmod insn. We *must* test for this
3519 case here because the code below that splits two independent SETs doesn't
3520 handle this case correctly when it updates the register status.
3522 It's pointless doing this if we originally had two sets, one from
3523 i3, and one from i2. Combining then splitting the parallel results
3524 in the original i2 again plus an invalid insn (which we delete).
3525 The net effect is only to move instructions around, which makes
3526 debug info less accurate.
3528 If the remaining SET came from I2 its destination should not be used
3529 between I2 and I3. See PR82024. */
3531 if (!(added_sets_2 && i1 == 0)
3532 && is_parallel_of_n_reg_sets (newpat, 2)
3533 && asm_noperands (newpat) < 0)
3535 rtx set0 = XVECEXP (newpat, 0, 0);
3536 rtx set1 = XVECEXP (newpat, 0, 1);
3537 rtx oldpat = newpat;
3539 if (((REG_P (SET_DEST (set1))
3540 && find_reg_note (i3, REG_UNUSED, SET_DEST (set1)))
3541 || (GET_CODE (SET_DEST (set1)) == SUBREG
3542 && find_reg_note (i3, REG_UNUSED, SUBREG_REG (SET_DEST (set1)))))
3543 && insn_nothrow_p (i3)
3544 && !side_effects_p (SET_SRC (set1)))
3546 newpat = set0;
3547 insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
3550 else if (((REG_P (SET_DEST (set0))
3551 && find_reg_note (i3, REG_UNUSED, SET_DEST (set0)))
3552 || (GET_CODE (SET_DEST (set0)) == SUBREG
3553 && find_reg_note (i3, REG_UNUSED,
3554 SUBREG_REG (SET_DEST (set0)))))
3555 && insn_nothrow_p (i3)
3556 && !side_effects_p (SET_SRC (set0)))
3558 rtx dest = SET_DEST (set1);
3559 if (GET_CODE (dest) == SUBREG)
3560 dest = SUBREG_REG (dest);
3561 if (!reg_used_between_p (dest, i2, i3))
3563 newpat = set1;
3564 insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
3566 if (insn_code_number >= 0)
3567 changed_i3_dest = 1;
3571 if (insn_code_number < 0)
3572 newpat = oldpat;
3575 /* Is the result of combination a valid instruction? */
3576 if (insn_code_number < 0)
3577 insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
3579 /* If we were combining three insns and the result is a simple SET
3580 with no ASM_OPERANDS that wasn't recognized, try to split it into two
3581 insns. There are two ways to do this. It can be split using a
3582 machine-specific method (like when you have an addition of a large
3583 constant) or by combine in the function find_split_point. */
3585 if (i1 && insn_code_number < 0 && GET_CODE (newpat) == SET
3586 && asm_noperands (newpat) < 0)
3588 rtx parallel, *split;
3589 rtx_insn *m_split_insn;
3591 /* See if the MD file can split NEWPAT. If it can't, see if letting it
3592 use I2DEST as a scratch register will help. In the latter case,
3593 convert I2DEST to the mode of the source of NEWPAT if we can. */
3595 m_split_insn = combine_split_insns (newpat, i3);
3597 /* We can only use I2DEST as a scratch reg if it doesn't overlap any
3598 inputs of NEWPAT. */
3600 /* ??? If I2DEST is not safe, and I1DEST exists, then it would be
3601 possible to try that as a scratch reg. This would require adding
3602 more code to make it work though. */
3604 if (m_split_insn == 0 && ! reg_overlap_mentioned_p (i2dest, newpat))
3606 machine_mode new_mode = GET_MODE (SET_DEST (newpat));
3608 /* ??? Reusing i2dest without resetting the reg_stat entry for it
3609 (temporarily, until we are committed to this instruction
3610 combination) does not work: for example, any call to nonzero_bits
3611 on the register (from a splitter in the MD file, for example)
3612 will get the old information, which is invalid.
3614 Since nowadays we can create registers during combine just fine,
3615 we should just create a new one here, not reuse i2dest. */
3617 /* First try to split using the original register as a
3618 scratch register. */
3619 parallel = gen_rtx_PARALLEL (VOIDmode,
3620 gen_rtvec (2, newpat,
3621 gen_rtx_CLOBBER (VOIDmode,
3622 i2dest)));
3623 m_split_insn = combine_split_insns (parallel, i3);
3625 /* If that didn't work, try changing the mode of I2DEST if
3626 we can. */
3627 if (m_split_insn == 0
3628 && new_mode != GET_MODE (i2dest)
3629 && new_mode != VOIDmode
3630 && can_change_dest_mode (i2dest, added_sets_2, new_mode))
3632 machine_mode old_mode = GET_MODE (i2dest);
3633 rtx ni2dest;
3635 if (REGNO (i2dest) < FIRST_PSEUDO_REGISTER)
3636 ni2dest = gen_rtx_REG (new_mode, REGNO (i2dest));
3637 else
3639 SUBST_MODE (regno_reg_rtx[REGNO (i2dest)], new_mode);
3640 ni2dest = regno_reg_rtx[REGNO (i2dest)];
3643 parallel = (gen_rtx_PARALLEL
3644 (VOIDmode,
3645 gen_rtvec (2, newpat,
3646 gen_rtx_CLOBBER (VOIDmode,
3647 ni2dest))));
3648 m_split_insn = combine_split_insns (parallel, i3);
3650 if (m_split_insn == 0
3651 && REGNO (i2dest) >= FIRST_PSEUDO_REGISTER)
3653 struct undo *buf;
3655 adjust_reg_mode (regno_reg_rtx[REGNO (i2dest)], old_mode);
3656 buf = undobuf.undos;
3657 undobuf.undos = buf->next;
3658 buf->next = undobuf.frees;
3659 undobuf.frees = buf;
3663 i2scratch = m_split_insn != 0;
3666 /* If recog_for_combine has discarded clobbers, try to use them
3667 again for the split. */
3668 if (m_split_insn == 0 && newpat_vec_with_clobbers)
3670 parallel = gen_rtx_PARALLEL (VOIDmode, newpat_vec_with_clobbers);
3671 m_split_insn = combine_split_insns (parallel, i3);
3674 if (m_split_insn && NEXT_INSN (m_split_insn) == NULL_RTX)
3676 rtx m_split_pat = PATTERN (m_split_insn);
3677 insn_code_number = recog_for_combine (&m_split_pat, i3, &new_i3_notes);
3678 if (insn_code_number >= 0)
3679 newpat = m_split_pat;
3681 else if (m_split_insn && NEXT_INSN (NEXT_INSN (m_split_insn)) == NULL_RTX
3682 && (next_nonnote_nondebug_insn (i2) == i3
3683 || ! use_crosses_set_p (PATTERN (m_split_insn), DF_INSN_LUID (i2))))
3685 rtx i2set, i3set;
3686 rtx newi3pat = PATTERN (NEXT_INSN (m_split_insn));
3687 newi2pat = PATTERN (m_split_insn);
3689 i3set = single_set (NEXT_INSN (m_split_insn));
3690 i2set = single_set (m_split_insn);
3692 i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);
3694 /* If I2 or I3 has multiple SETs, we won't know how to track
3695 register status, so don't use these insns. If I2's destination
3696 is used between I2 and I3, we also can't use these insns. */
3698 if (i2_code_number >= 0 && i2set && i3set
3699 && (next_nonnote_nondebug_insn (i2) == i3
3700 || ! reg_used_between_p (SET_DEST (i2set), i2, i3)))
3701 insn_code_number = recog_for_combine (&newi3pat, i3,
3702 &new_i3_notes);
3703 if (insn_code_number >= 0)
3704 newpat = newi3pat;
3706 /* It is possible that both insns now set the destination of I3.
3707 If so, we must show an extra use of it. */
3709 if (insn_code_number >= 0)
3711 rtx new_i3_dest = SET_DEST (i3set);
3712 rtx new_i2_dest = SET_DEST (i2set);
3714 while (GET_CODE (new_i3_dest) == ZERO_EXTRACT
3715 || GET_CODE (new_i3_dest) == STRICT_LOW_PART
3716 || GET_CODE (new_i3_dest) == SUBREG)
3717 new_i3_dest = XEXP (new_i3_dest, 0);
3719 while (GET_CODE (new_i2_dest) == ZERO_EXTRACT
3720 || GET_CODE (new_i2_dest) == STRICT_LOW_PART
3721 || GET_CODE (new_i2_dest) == SUBREG)
3722 new_i2_dest = XEXP (new_i2_dest, 0);
3724 if (REG_P (new_i3_dest)
3725 && REG_P (new_i2_dest)
3726 && REGNO (new_i3_dest) == REGNO (new_i2_dest)
3727 && REGNO (new_i2_dest) < reg_n_sets_max)
3728 INC_REG_N_SETS (REGNO (new_i2_dest), 1);
3732 /* If we can split it and use I2DEST, go ahead and see if that
3733 helps things be recognized. Verify that none of the registers
3734 are set between I2 and I3. */
3735 if (insn_code_number < 0
3736 && (split = find_split_point (&newpat, i3, false)) != 0
3737 && (!HAVE_cc0 || REG_P (i2dest))
3738 /* We need I2DEST in the proper mode. If it is a hard register
3739 or the only use of a pseudo, we can change its mode.
3740 Make sure we don't change a hard register to have a mode that
3741 isn't valid for it, or change the number of registers. */
3742 && (GET_MODE (*split) == GET_MODE (i2dest)
3743 || GET_MODE (*split) == VOIDmode
3744 || can_change_dest_mode (i2dest, added_sets_2,
3745 GET_MODE (*split)))
3746 && (next_nonnote_nondebug_insn (i2) == i3
3747 || ! use_crosses_set_p (*split, DF_INSN_LUID (i2)))
3748 /* We can't overwrite I2DEST if its value is still used by
3749 NEWPAT. */
3750 && ! reg_referenced_p (i2dest, newpat))
3752 rtx newdest = i2dest;
3753 enum rtx_code split_code = GET_CODE (*split);
3754 machine_mode split_mode = GET_MODE (*split);
3755 bool subst_done = false;
3756 newi2pat = NULL_RTX;
3758 i2scratch = true;
3760 /* *SPLIT may be part of I2SRC, so make sure we have the
3761 original expression around for later debug processing.
3762 We should not need I2SRC any more in other cases. */
3763 if (MAY_HAVE_DEBUG_INSNS)
3764 i2src = copy_rtx (i2src);
3765 else
3766 i2src = NULL;
3768 /* Get NEWDEST as a register in the proper mode. We have already
3769 validated that we can do this. */
3770 if (GET_MODE (i2dest) != split_mode && split_mode != VOIDmode)
3772 if (REGNO (i2dest) < FIRST_PSEUDO_REGISTER)
3773 newdest = gen_rtx_REG (split_mode, REGNO (i2dest));
3774 else
3776 SUBST_MODE (regno_reg_rtx[REGNO (i2dest)], split_mode);
3777 newdest = regno_reg_rtx[REGNO (i2dest)];
3781 /* If *SPLIT is a (mult FOO (const_int pow2)), convert it to
3782 an ASHIFT. This can occur if it was inside a PLUS and hence
3783 appeared to be a memory address. This is a kludge. */
3784 if (split_code == MULT
3785 && CONST_INT_P (XEXP (*split, 1))
3786 && INTVAL (XEXP (*split, 1)) > 0
3787 && (i = exact_log2 (UINTVAL (XEXP (*split, 1)))) >= 0)
3789 SUBST (*split, gen_rtx_ASHIFT (split_mode,
3790 XEXP (*split, 0), GEN_INT (i)));
3791 /* Update split_code because we may not have a multiply
3792 anymore. */
3793 split_code = GET_CODE (*split);
3796 /* Similarly for (plus (mult FOO (const_int pow2))). */
3797 if (split_code == PLUS
3798 && GET_CODE (XEXP (*split, 0)) == MULT
3799 && CONST_INT_P (XEXP (XEXP (*split, 0), 1))
3800 && INTVAL (XEXP (XEXP (*split, 0), 1)) > 0
3801 && (i = exact_log2 (UINTVAL (XEXP (XEXP (*split, 0), 1)))) >= 0)
3803 rtx nsplit = XEXP (*split, 0);
3804 SUBST (XEXP (*split, 0), gen_rtx_ASHIFT (GET_MODE (nsplit),
3805 XEXP (nsplit, 0), GEN_INT (i)));
3806 /* Update split_code because we may not have a multiply
3807 anymore. */
3808 split_code = GET_CODE (*split);
3811 #ifdef INSN_SCHEDULING
3812 /* If *SPLIT is a paradoxical SUBREG, when we split it, it should
3813 be written as a ZERO_EXTEND. */
3814 if (split_code == SUBREG && MEM_P (SUBREG_REG (*split)))
3816 /* Or as a SIGN_EXTEND if LOAD_EXTEND_OP says that that's
3817 what it really is. */
3818 if (load_extend_op (GET_MODE (SUBREG_REG (*split)))
3819 == SIGN_EXTEND)
3820 SUBST (*split, gen_rtx_SIGN_EXTEND (split_mode,
3821 SUBREG_REG (*split)));
3822 else
3823 SUBST (*split, gen_rtx_ZERO_EXTEND (split_mode,
3824 SUBREG_REG (*split)));
3826 #endif
3828 /* Attempt to split binary operators using arithmetic identities. */
3829 if (BINARY_P (SET_SRC (newpat))
3830 && split_mode == GET_MODE (SET_SRC (newpat))
3831 && ! side_effects_p (SET_SRC (newpat)))
3833 rtx setsrc = SET_SRC (newpat);
3834 machine_mode mode = GET_MODE (setsrc);
3835 enum rtx_code code = GET_CODE (setsrc);
3836 rtx src_op0 = XEXP (setsrc, 0);
3837 rtx src_op1 = XEXP (setsrc, 1);
3839 /* Split "X = Y op Y" as "Z = Y; X = Z op Z". */
3840 if (rtx_equal_p (src_op0, src_op1))
3842 newi2pat = gen_rtx_SET (newdest, src_op0);
3843 SUBST (XEXP (setsrc, 0), newdest);
3844 SUBST (XEXP (setsrc, 1), newdest);
3845 subst_done = true;
3847 /* Split "((P op Q) op R) op S" where op is PLUS or MULT. */
3848 else if ((code == PLUS || code == MULT)
3849 && GET_CODE (src_op0) == code
3850 && GET_CODE (XEXP (src_op0, 0)) == code
3851 && (INTEGRAL_MODE_P (mode)
3852 || (FLOAT_MODE_P (mode)
3853 && flag_unsafe_math_optimizations)))
3855 rtx p = XEXP (XEXP (src_op0, 0), 0);
3856 rtx q = XEXP (XEXP (src_op0, 0), 1);
3857 rtx r = XEXP (src_op0, 1);
3858 rtx s = src_op1;
3860 /* Split both "((X op Y) op X) op Y" and
3861 "((X op Y) op Y) op X" as "T op T" where T is
3862 "X op Y". */
3863 if ((rtx_equal_p (p,r) && rtx_equal_p (q,s))
3864 || (rtx_equal_p (p,s) && rtx_equal_p (q,r)))
3866 newi2pat = gen_rtx_SET (newdest, XEXP (src_op0, 0));
3867 SUBST (XEXP (setsrc, 0), newdest);
3868 SUBST (XEXP (setsrc, 1), newdest);
3869 subst_done = true;
3871 /* Split "((X op X) op Y) op Y)" as "T op T" where
3872 T is "X op Y". */
3873 else if (rtx_equal_p (p,q) && rtx_equal_p (r,s))
3875 rtx tmp = simplify_gen_binary (code, mode, p, r);
3876 newi2pat = gen_rtx_SET (newdest, tmp);
3877 SUBST (XEXP (setsrc, 0), newdest);
3878 SUBST (XEXP (setsrc, 1), newdest);
3879 subst_done = true;
3884 if (!subst_done)
3886 newi2pat = gen_rtx_SET (newdest, *split);
3887 SUBST (*split, newdest);
3890 i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);
3892 /* recog_for_combine might have added CLOBBERs to newi2pat.
3893 Make sure NEWPAT does not depend on the clobbered regs. */
3894 if (GET_CODE (newi2pat) == PARALLEL)
3895 for (i = XVECLEN (newi2pat, 0) - 1; i >= 0; i--)
3896 if (GET_CODE (XVECEXP (newi2pat, 0, i)) == CLOBBER)
3898 rtx reg = XEXP (XVECEXP (newi2pat, 0, i), 0);
3899 if (reg_overlap_mentioned_p (reg, newpat))
3901 undo_all ();
3902 return 0;
3906 /* If the split point was a MULT and we didn't have one before,
3907 don't use one now. */
3908 if (i2_code_number >= 0 && ! (split_code == MULT && ! have_mult))
3909 insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
3913 /* Check for a case where we loaded from memory in a narrow mode and
3914 then sign extended it, but we need both registers. In that case,
3915 we have a PARALLEL with both loads from the same memory location.
3916 We can split this into a load from memory followed by a register-register
3917 copy. This saves at least one insn, more if register allocation can
3918 eliminate the copy.
3920 We cannot do this if the destination of the first assignment is a
3921 condition code register or cc0. We eliminate this case by making sure
3922 the SET_DEST and SET_SRC have the same mode.
3924 We cannot do this if the destination of the second assignment is
3925 a register that we have already assumed is zero-extended. Similarly
3926 for a SUBREG of such a register. */
3928 else if (i1 && insn_code_number < 0 && asm_noperands (newpat) < 0
3929 && GET_CODE (newpat) == PARALLEL
3930 && XVECLEN (newpat, 0) == 2
3931 && GET_CODE (XVECEXP (newpat, 0, 0)) == SET
3932 && GET_CODE (SET_SRC (XVECEXP (newpat, 0, 0))) == SIGN_EXTEND
3933 && (GET_MODE (SET_DEST (XVECEXP (newpat, 0, 0)))
3934 == GET_MODE (SET_SRC (XVECEXP (newpat, 0, 0))))
3935 && GET_CODE (XVECEXP (newpat, 0, 1)) == SET
3936 && rtx_equal_p (SET_SRC (XVECEXP (newpat, 0, 1)),
3937 XEXP (SET_SRC (XVECEXP (newpat, 0, 0)), 0))
3938 && ! use_crosses_set_p (SET_SRC (XVECEXP (newpat, 0, 1)),
3939 DF_INSN_LUID (i2))
3940 && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != ZERO_EXTRACT
3941 && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != STRICT_LOW_PART
3942 && ! (temp_expr = SET_DEST (XVECEXP (newpat, 0, 1)),
3943 (REG_P (temp_expr)
3944 && reg_stat[REGNO (temp_expr)].nonzero_bits != 0
3945 && GET_MODE_PRECISION (GET_MODE (temp_expr)) < BITS_PER_WORD
3946 && GET_MODE_PRECISION (GET_MODE (temp_expr)) < HOST_BITS_PER_INT
3947 && (reg_stat[REGNO (temp_expr)].nonzero_bits
3948 != GET_MODE_MASK (word_mode))))
3949 && ! (GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) == SUBREG
3950 && (temp_expr = SUBREG_REG (SET_DEST (XVECEXP (newpat, 0, 1))),
3951 (REG_P (temp_expr)
3952 && reg_stat[REGNO (temp_expr)].nonzero_bits != 0
3953 && GET_MODE_PRECISION (GET_MODE (temp_expr)) < BITS_PER_WORD
3954 && GET_MODE_PRECISION (GET_MODE (temp_expr)) < HOST_BITS_PER_INT
3955 && (reg_stat[REGNO (temp_expr)].nonzero_bits
3956 != GET_MODE_MASK (word_mode)))))
3957 && ! reg_overlap_mentioned_p (SET_DEST (XVECEXP (newpat, 0, 1)),
3958 SET_SRC (XVECEXP (newpat, 0, 1)))
3959 && ! find_reg_note (i3, REG_UNUSED,
3960 SET_DEST (XVECEXP (newpat, 0, 0))))
3962 rtx ni2dest;
3964 newi2pat = XVECEXP (newpat, 0, 0);
3965 ni2dest = SET_DEST (XVECEXP (newpat, 0, 0));
3966 newpat = XVECEXP (newpat, 0, 1);
3967 SUBST (SET_SRC (newpat),
3968 gen_lowpart (GET_MODE (SET_SRC (newpat)), ni2dest));
3969 i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);
3971 if (i2_code_number >= 0)
3972 insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
3974 if (insn_code_number >= 0)
3975 swap_i2i3 = 1;
3978 /* Similarly, check for a case where we have a PARALLEL of two independent
3979 SETs but we started with three insns. In this case, we can do the sets
3980 as two separate insns. This case occurs when some SET allows two
3981 other insns to combine, but the destination of that SET is still live.
3983 Also do this if we started with two insns and (at least) one of the
3984 resulting sets is a noop; this noop will be deleted later. */
3986 else if (insn_code_number < 0 && asm_noperands (newpat) < 0
3987 && GET_CODE (newpat) == PARALLEL
3988 && XVECLEN (newpat, 0) == 2
3989 && GET_CODE (XVECEXP (newpat, 0, 0)) == SET
3990 && GET_CODE (XVECEXP (newpat, 0, 1)) == SET
3991 && (i1 || set_noop_p (XVECEXP (newpat, 0, 0))
3992 || set_noop_p (XVECEXP (newpat, 0, 1)))
3993 && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 0))) != ZERO_EXTRACT
3994 && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 0))) != STRICT_LOW_PART
3995 && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != ZERO_EXTRACT
3996 && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != STRICT_LOW_PART
3997 && ! reg_referenced_p (SET_DEST (XVECEXP (newpat, 0, 1)),
3998 XVECEXP (newpat, 0, 0))
3999 && ! reg_referenced_p (SET_DEST (XVECEXP (newpat, 0, 0)),
4000 XVECEXP (newpat, 0, 1))
4001 && ! (contains_muldiv (SET_SRC (XVECEXP (newpat, 0, 0)))
4002 && contains_muldiv (SET_SRC (XVECEXP (newpat, 0, 1)))))
4004 rtx set0 = XVECEXP (newpat, 0, 0);
4005 rtx set1 = XVECEXP (newpat, 0, 1);
4007 /* Normally, it doesn't matter which of the two is done first,
4008 but the one that references cc0 can't be the second, and
4009 one which uses any regs/memory set in between i2 and i3 can't
4010 be first. The PARALLEL might also have been pre-existing in i3,
4011 so we need to make sure that we won't wrongly hoist a SET to i2
4012 that would conflict with a death note present in there. */
4013 if (!use_crosses_set_p (SET_SRC (set1), DF_INSN_LUID (i2))
4014 && !(REG_P (SET_DEST (set1))
4015 && find_reg_note (i2, REG_DEAD, SET_DEST (set1)))
4016 && !(GET_CODE (SET_DEST (set1)) == SUBREG
4017 && find_reg_note (i2, REG_DEAD,
4018 SUBREG_REG (SET_DEST (set1))))
4019 && (!HAVE_cc0 || !reg_referenced_p (cc0_rtx, set0))
4020 /* If I3 is a jump, ensure that set0 is a jump so that
4021 we do not create invalid RTL. */
4022 && (!JUMP_P (i3) || SET_DEST (set0) == pc_rtx)
4025 newi2pat = set1;
4026 newpat = set0;
4028 else if (!use_crosses_set_p (SET_SRC (set0), DF_INSN_LUID (i2))
4029 && !(REG_P (SET_DEST (set0))
4030 && find_reg_note (i2, REG_DEAD, SET_DEST (set0)))
4031 && !(GET_CODE (SET_DEST (set0)) == SUBREG
4032 && find_reg_note (i2, REG_DEAD,
4033 SUBREG_REG (SET_DEST (set0))))
4034 && (!HAVE_cc0 || !reg_referenced_p (cc0_rtx, set1))
4035 /* If I3 is a jump, ensure that set1 is a jump so that
4036 we do not create invalid RTL. */
4037 && (!JUMP_P (i3) || SET_DEST (set1) == pc_rtx)
4040 newi2pat = set0;
4041 newpat = set1;
4043 else
4045 undo_all ();
4046 return 0;
4049 i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);
4051 if (i2_code_number >= 0)
4053 /* recog_for_combine might have added CLOBBERs to newi2pat.
4054 Make sure NEWPAT does not depend on the clobbered regs. */
4055 if (GET_CODE (newi2pat) == PARALLEL)
4057 for (i = XVECLEN (newi2pat, 0) - 1; i >= 0; i--)
4058 if (GET_CODE (XVECEXP (newi2pat, 0, i)) == CLOBBER)
4060 rtx reg = XEXP (XVECEXP (newi2pat, 0, i), 0);
4061 if (reg_overlap_mentioned_p (reg, newpat))
4063 undo_all ();
4064 return 0;
4069 insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
4073 /* If it still isn't recognized, fail and change things back the way they
4074 were. */
4075 if ((insn_code_number < 0
4076 /* Is the result a reasonable ASM_OPERANDS? */
4077 && (! check_asm_operands (newpat) || added_sets_1 || added_sets_2)))
4079 undo_all ();
4080 return 0;
4083 /* If we had to change another insn, make sure it is valid also. */
4084 if (undobuf.other_insn)
4086 CLEAR_HARD_REG_SET (newpat_used_regs);
4088 other_pat = PATTERN (undobuf.other_insn);
4089 other_code_number = recog_for_combine (&other_pat, undobuf.other_insn,
4090 &new_other_notes);
4092 if (other_code_number < 0 && ! check_asm_operands (other_pat))
4094 undo_all ();
4095 return 0;
4099 /* If I2 is the CC0 setter and I3 is the CC0 user then check whether
4100 they are adjacent to each other or not. */
4101 if (HAVE_cc0)
4103 rtx_insn *p = prev_nonnote_insn (i3);
4104 if (p && p != i2 && NONJUMP_INSN_P (p) && newi2pat
4105 && sets_cc0_p (newi2pat))
4107 undo_all ();
4108 return 0;
4112 /* Only allow this combination if insn_cost reports that the
4113 replacement instructions are cheaper than the originals. */
4114 if (!combine_validate_cost (i0, i1, i2, i3, newpat, newi2pat, other_pat))
4116 undo_all ();
4117 return 0;
4120 if (MAY_HAVE_DEBUG_INSNS)
4122 struct undo *undo;
4124 for (undo = undobuf.undos; undo; undo = undo->next)
4125 if (undo->kind == UNDO_MODE)
4127 rtx reg = *undo->where.r;
4128 machine_mode new_mode = GET_MODE (reg);
4129 machine_mode old_mode = undo->old_contents.m;
4131 /* Temporarily revert mode back. */
4132 adjust_reg_mode (reg, old_mode);
4134 if (reg == i2dest && i2scratch)
4136 /* If we used i2dest as a scratch register with a
4137 different mode, substitute it for the original
4138 i2src while its original mode is temporarily
4139 restored, and then clear i2scratch so that we don't
4140 do it again later. */
4141 propagate_for_debug (i2, last_combined_insn, reg, i2src,
4142 this_basic_block);
4143 i2scratch = false;
4144 /* Put back the new mode. */
4145 adjust_reg_mode (reg, new_mode);
4147 else
4149 rtx tempreg = gen_raw_REG (old_mode, REGNO (reg));
4150 rtx_insn *first, *last;
4152 if (reg == i2dest)
4154 first = i2;
4155 last = last_combined_insn;
4157 else
4159 first = i3;
4160 last = undobuf.other_insn;
4161 gcc_assert (last);
4162 if (DF_INSN_LUID (last)
4163 < DF_INSN_LUID (last_combined_insn))
4164 last = last_combined_insn;
4167 /* We're dealing with a reg that changed mode but not
4168 meaning, so we want to turn it into a subreg for
4169 the new mode. However, because of REG sharing and
4170 because its mode had already changed, we have to do
4171 it in two steps. First, replace any debug uses of
4172 reg, with its original mode temporarily restored,
4173 with this copy we have created; then, replace the
4174 copy with the SUBREG of the original shared reg,
4175 once again changed to the new mode. */
4176 propagate_for_debug (first, last, reg, tempreg,
4177 this_basic_block);
4178 adjust_reg_mode (reg, new_mode);
4179 propagate_for_debug (first, last, tempreg,
4180 lowpart_subreg (old_mode, reg, new_mode),
4181 this_basic_block);
4186 /* If we will be able to accept this, we have made a
4187 change to the destination of I3. This requires us to
4188 do a few adjustments. */
4190 if (changed_i3_dest)
4192 PATTERN (i3) = newpat;
4193 adjust_for_new_dest (i3);
4196 /* We now know that we can do this combination. Merge the insns and
4197 update the status of registers and LOG_LINKS. */
4199 if (undobuf.other_insn)
4201 rtx note, next;
4203 PATTERN (undobuf.other_insn) = other_pat;
4205 /* If any of the notes in OTHER_INSN were REG_DEAD or REG_UNUSED,
4206 ensure that they are still valid. Then add any non-duplicate
4207 notes added by recog_for_combine. */
4208 for (note = REG_NOTES (undobuf.other_insn); note; note = next)
4210 next = XEXP (note, 1);
4212 if ((REG_NOTE_KIND (note) == REG_DEAD
4213 && !reg_referenced_p (XEXP (note, 0),
4214 PATTERN (undobuf.other_insn)))
4215 ||(REG_NOTE_KIND (note) == REG_UNUSED
4216 && !reg_set_p (XEXP (note, 0),
4217 PATTERN (undobuf.other_insn)))
4218 /* Simply drop equal note since it may be no longer valid
4219 for other_insn. It may be possible to record that CC
4220 register is changed and only discard those notes, but
4221 in practice it's unnecessary complication and doesn't
4222 give any meaningful improvement.
4224 See PR78559. */
4225 || REG_NOTE_KIND (note) == REG_EQUAL
4226 || REG_NOTE_KIND (note) == REG_EQUIV)
4227 remove_note (undobuf.other_insn, note);
4230 distribute_notes (new_other_notes, undobuf.other_insn,
4231 undobuf.other_insn, NULL, NULL_RTX, NULL_RTX,
4232 NULL_RTX);
4235 if (swap_i2i3)
4237 rtx_insn *insn;
4238 struct insn_link *link;
4239 rtx ni2dest;
4241 /* I3 now uses what used to be its destination and which is now
4242 I2's destination. This requires us to do a few adjustments. */
4243 PATTERN (i3) = newpat;
4244 adjust_for_new_dest (i3);
4246 /* We need a LOG_LINK from I3 to I2. But we used to have one,
4247 so we still will.
4249 However, some later insn might be using I2's dest and have
4250 a LOG_LINK pointing at I3. We must remove this link.
4251 The simplest way to remove the link is to point it at I1,
4252 which we know will be a NOTE. */
4254 /* newi2pat is usually a SET here; however, recog_for_combine might
4255 have added some clobbers. */
4256 if (GET_CODE (newi2pat) == PARALLEL)
4257 ni2dest = SET_DEST (XVECEXP (newi2pat, 0, 0));
4258 else
4259 ni2dest = SET_DEST (newi2pat);
4261 for (insn = NEXT_INSN (i3);
4262 insn && (this_basic_block->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
4263 || insn != BB_HEAD (this_basic_block->next_bb));
4264 insn = NEXT_INSN (insn))
4266 if (NONDEBUG_INSN_P (insn)
4267 && reg_referenced_p (ni2dest, PATTERN (insn)))
4269 FOR_EACH_LOG_LINK (link, insn)
4270 if (link->insn == i3)
4271 link->insn = i1;
4273 break;
4279 rtx i3notes, i2notes, i1notes = 0, i0notes = 0;
4280 struct insn_link *i3links, *i2links, *i1links = 0, *i0links = 0;
4281 rtx midnotes = 0;
4282 int from_luid;
4283 /* Compute which registers we expect to eliminate. newi2pat may be setting
4284 either i3dest or i2dest, so we must check it. */
4285 rtx elim_i2 = ((newi2pat && reg_set_p (i2dest, newi2pat))
4286 || i2dest_in_i2src || i2dest_in_i1src || i2dest_in_i0src
4287 || !i2dest_killed
4288 ? 0 : i2dest);
4289 /* For i1, we need to compute both local elimination and global
4290 elimination information with respect to newi2pat because i1dest
4291 may be the same as i3dest, in which case newi2pat may be setting
4292 i1dest. Global information is used when distributing REG_DEAD
4293 note for i2 and i3, in which case it does matter if newi2pat sets
4294 i1dest or not.
4296 Local information is used when distributing REG_DEAD note for i1,
4297 in which case it doesn't matter if newi2pat sets i1dest or not.
4298 See PR62151, if we have four insns combination:
4299 i0: r0 <- i0src
4300 i1: r1 <- i1src (using r0)
4301 REG_DEAD (r0)
4302 i2: r0 <- i2src (using r1)
4303 i3: r3 <- i3src (using r0)
4304 ix: using r0
4305 From i1's point of view, r0 is eliminated, no matter if it is set
4306 by newi2pat or not. In other words, REG_DEAD info for r0 in i1
4307 should be discarded.
4309 Note local information only affects cases in forms like "I1->I2->I3",
4310 "I0->I1->I2->I3" or "I0&I1->I2, I2->I3". For other cases like
4311 "I0->I1, I1&I2->I3" or "I1&I2->I3", newi2pat won't set i1dest or
4312 i0dest anyway. */
4313 rtx local_elim_i1 = (i1 == 0 || i1dest_in_i1src || i1dest_in_i0src
4314 || !i1dest_killed
4315 ? 0 : i1dest);
4316 rtx elim_i1 = (local_elim_i1 == 0
4317 || (newi2pat && reg_set_p (i1dest, newi2pat))
4318 ? 0 : i1dest);
4319 /* Same case as i1. */
4320 rtx local_elim_i0 = (i0 == 0 || i0dest_in_i0src || !i0dest_killed
4321 ? 0 : i0dest);
4322 rtx elim_i0 = (local_elim_i0 == 0
4323 || (newi2pat && reg_set_p (i0dest, newi2pat))
4324 ? 0 : i0dest);
4326 /* Get the old REG_NOTES and LOG_LINKS from all our insns and
4327 clear them. */
4328 i3notes = REG_NOTES (i3), i3links = LOG_LINKS (i3);
4329 i2notes = REG_NOTES (i2), i2links = LOG_LINKS (i2);
4330 if (i1)
4331 i1notes = REG_NOTES (i1), i1links = LOG_LINKS (i1);
4332 if (i0)
4333 i0notes = REG_NOTES (i0), i0links = LOG_LINKS (i0);
4335 /* Ensure that we do not have something that should not be shared but
4336 occurs multiple times in the new insns. Check this by first
4337 resetting all the `used' flags and then copying anything is shared. */
4339 reset_used_flags (i3notes);
4340 reset_used_flags (i2notes);
4341 reset_used_flags (i1notes);
4342 reset_used_flags (i0notes);
4343 reset_used_flags (newpat);
4344 reset_used_flags (newi2pat);
4345 if (undobuf.other_insn)
4346 reset_used_flags (PATTERN (undobuf.other_insn));
4348 i3notes = copy_rtx_if_shared (i3notes);
4349 i2notes = copy_rtx_if_shared (i2notes);
4350 i1notes = copy_rtx_if_shared (i1notes);
4351 i0notes = copy_rtx_if_shared (i0notes);
4352 newpat = copy_rtx_if_shared (newpat);
4353 newi2pat = copy_rtx_if_shared (newi2pat);
4354 if (undobuf.other_insn)
4355 reset_used_flags (PATTERN (undobuf.other_insn));
4357 INSN_CODE (i3) = insn_code_number;
4358 PATTERN (i3) = newpat;
4360 if (CALL_P (i3) && CALL_INSN_FUNCTION_USAGE (i3))
4362 for (rtx link = CALL_INSN_FUNCTION_USAGE (i3); link;
4363 link = XEXP (link, 1))
4365 if (substed_i2)
4367 /* I2SRC must still be meaningful at this point. Some
4368 splitting operations can invalidate I2SRC, but those
4369 operations do not apply to calls. */
4370 gcc_assert (i2src);
4371 XEXP (link, 0) = simplify_replace_rtx (XEXP (link, 0),
4372 i2dest, i2src);
4374 if (substed_i1)
4375 XEXP (link, 0) = simplify_replace_rtx (XEXP (link, 0),
4376 i1dest, i1src);
4377 if (substed_i0)
4378 XEXP (link, 0) = simplify_replace_rtx (XEXP (link, 0),
4379 i0dest, i0src);
4383 if (undobuf.other_insn)
4384 INSN_CODE (undobuf.other_insn) = other_code_number;
4386 /* We had one special case above where I2 had more than one set and
4387 we replaced a destination of one of those sets with the destination
4388 of I3. In that case, we have to update LOG_LINKS of insns later
4389 in this basic block. Note that this (expensive) case is rare.
4391 Also, in this case, we must pretend that all REG_NOTEs for I2
4392 actually came from I3, so that REG_UNUSED notes from I2 will be
4393 properly handled. */
4395 if (i3_subst_into_i2)
4397 for (i = 0; i < XVECLEN (PATTERN (i2), 0); i++)
4398 if ((GET_CODE (XVECEXP (PATTERN (i2), 0, i)) == SET
4399 || GET_CODE (XVECEXP (PATTERN (i2), 0, i)) == CLOBBER)
4400 && REG_P (SET_DEST (XVECEXP (PATTERN (i2), 0, i)))
4401 && SET_DEST (XVECEXP (PATTERN (i2), 0, i)) != i2dest
4402 && ! find_reg_note (i2, REG_UNUSED,
4403 SET_DEST (XVECEXP (PATTERN (i2), 0, i))))
4404 for (temp_insn = NEXT_INSN (i2);
4405 temp_insn
4406 && (this_basic_block->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
4407 || BB_HEAD (this_basic_block) != temp_insn);
4408 temp_insn = NEXT_INSN (temp_insn))
4409 if (temp_insn != i3 && NONDEBUG_INSN_P (temp_insn))
4410 FOR_EACH_LOG_LINK (link, temp_insn)
4411 if (link->insn == i2)
4412 link->insn = i3;
4414 if (i3notes)
4416 rtx link = i3notes;
4417 while (XEXP (link, 1))
4418 link = XEXP (link, 1);
4419 XEXP (link, 1) = i2notes;
4421 else
4422 i3notes = i2notes;
4423 i2notes = 0;
4426 LOG_LINKS (i3) = NULL;
4427 REG_NOTES (i3) = 0;
4428 LOG_LINKS (i2) = NULL;
4429 REG_NOTES (i2) = 0;
4431 if (newi2pat)
4433 if (MAY_HAVE_DEBUG_INSNS && i2scratch)
4434 propagate_for_debug (i2, last_combined_insn, i2dest, i2src,
4435 this_basic_block);
4436 INSN_CODE (i2) = i2_code_number;
4437 PATTERN (i2) = newi2pat;
4439 else
4441 if (MAY_HAVE_DEBUG_INSNS && i2src)
4442 propagate_for_debug (i2, last_combined_insn, i2dest, i2src,
4443 this_basic_block);
4444 SET_INSN_DELETED (i2);
4447 if (i1)
4449 LOG_LINKS (i1) = NULL;
4450 REG_NOTES (i1) = 0;
4451 if (MAY_HAVE_DEBUG_INSNS)
4452 propagate_for_debug (i1, last_combined_insn, i1dest, i1src,
4453 this_basic_block);
4454 SET_INSN_DELETED (i1);
4457 if (i0)
4459 LOG_LINKS (i0) = NULL;
4460 REG_NOTES (i0) = 0;
4461 if (MAY_HAVE_DEBUG_INSNS)
4462 propagate_for_debug (i0, last_combined_insn, i0dest, i0src,
4463 this_basic_block);
4464 SET_INSN_DELETED (i0);
4467 /* Get death notes for everything that is now used in either I3 or
4468 I2 and used to die in a previous insn. If we built two new
4469 patterns, move from I1 to I2 then I2 to I3 so that we get the
4470 proper movement on registers that I2 modifies. */
4472 if (i0)
4473 from_luid = DF_INSN_LUID (i0);
4474 else if (i1)
4475 from_luid = DF_INSN_LUID (i1);
4476 else
4477 from_luid = DF_INSN_LUID (i2);
4478 if (newi2pat)
4479 move_deaths (newi2pat, NULL_RTX, from_luid, i2, &midnotes);
4480 move_deaths (newpat, newi2pat, from_luid, i3, &midnotes);
4482 /* Distribute all the LOG_LINKS and REG_NOTES from I1, I2, and I3. */
4483 if (i3notes)
4484 distribute_notes (i3notes, i3, i3, newi2pat ? i2 : NULL,
4485 elim_i2, elim_i1, elim_i0);
4486 if (i2notes)
4487 distribute_notes (i2notes, i2, i3, newi2pat ? i2 : NULL,
4488 elim_i2, elim_i1, elim_i0);
4489 if (i1notes)
4490 distribute_notes (i1notes, i1, i3, newi2pat ? i2 : NULL,
4491 elim_i2, local_elim_i1, local_elim_i0);
4492 if (i0notes)
4493 distribute_notes (i0notes, i0, i3, newi2pat ? i2 : NULL,
4494 elim_i2, elim_i1, local_elim_i0);
4495 if (midnotes)
4496 distribute_notes (midnotes, NULL, i3, newi2pat ? i2 : NULL,
4497 elim_i2, elim_i1, elim_i0);
4499 /* Distribute any notes added to I2 or I3 by recog_for_combine. We
4500 know these are REG_UNUSED and want them to go to the desired insn,
4501 so we always pass it as i3. */
4503 if (newi2pat && new_i2_notes)
4504 distribute_notes (new_i2_notes, i2, i2, NULL, NULL_RTX, NULL_RTX,
4505 NULL_RTX);
4507 if (new_i3_notes)
4508 distribute_notes (new_i3_notes, i3, i3, NULL, NULL_RTX, NULL_RTX,
4509 NULL_RTX);
4511 /* If I3DEST was used in I3SRC, it really died in I3. We may need to
4512 put a REG_DEAD note for it somewhere. If NEWI2PAT exists and sets
4513 I3DEST, the death must be somewhere before I2, not I3. If we passed I3
4514 in that case, it might delete I2. Similarly for I2 and I1.
4515 Show an additional death due to the REG_DEAD note we make here. If
4516 we discard it in distribute_notes, we will decrement it again. */
4518 if (i3dest_killed)
4520 rtx new_note = alloc_reg_note (REG_DEAD, i3dest_killed, NULL_RTX);
4521 if (newi2pat && reg_set_p (i3dest_killed, newi2pat))
4522 distribute_notes (new_note, NULL, i2, NULL, elim_i2,
4523 elim_i1, elim_i0);
4524 else
4525 distribute_notes (new_note, NULL, i3, newi2pat ? i2 : NULL,
4526 elim_i2, elim_i1, elim_i0);
4529 if (i2dest_in_i2src)
4531 rtx new_note = alloc_reg_note (REG_DEAD, i2dest, NULL_RTX);
4532 if (newi2pat && reg_set_p (i2dest, newi2pat))
4533 distribute_notes (new_note, NULL, i2, NULL, NULL_RTX,
4534 NULL_RTX, NULL_RTX);
4535 else
4536 distribute_notes (new_note, NULL, i3, newi2pat ? i2 : NULL,
4537 NULL_RTX, NULL_RTX, NULL_RTX);
4540 if (i1dest_in_i1src)
4542 rtx new_note = alloc_reg_note (REG_DEAD, i1dest, NULL_RTX);
4543 if (newi2pat && reg_set_p (i1dest, newi2pat))
4544 distribute_notes (new_note, NULL, i2, NULL, NULL_RTX,
4545 NULL_RTX, NULL_RTX);
4546 else
4547 distribute_notes (new_note, NULL, i3, newi2pat ? i2 : NULL,
4548 NULL_RTX, NULL_RTX, NULL_RTX);
4551 if (i0dest_in_i0src)
4553 rtx new_note = alloc_reg_note (REG_DEAD, i0dest, NULL_RTX);
4554 if (newi2pat && reg_set_p (i0dest, newi2pat))
4555 distribute_notes (new_note, NULL, i2, NULL, NULL_RTX,
4556 NULL_RTX, NULL_RTX);
4557 else
4558 distribute_notes (new_note, NULL, i3, newi2pat ? i2 : NULL,
4559 NULL_RTX, NULL_RTX, NULL_RTX);
4562 distribute_links (i3links);
4563 distribute_links (i2links);
4564 distribute_links (i1links);
4565 distribute_links (i0links);
4567 if (REG_P (i2dest))
4569 struct insn_link *link;
4570 rtx_insn *i2_insn = 0;
4571 rtx i2_val = 0, set;
4573 /* The insn that used to set this register doesn't exist, and
4574 this life of the register may not exist either. See if one of
4575 I3's links points to an insn that sets I2DEST. If it does,
4576 that is now the last known value for I2DEST. If we don't update
4577 this and I2 set the register to a value that depended on its old
4578 contents, we will get confused. If this insn is used, thing
4579 will be set correctly in combine_instructions. */
4580 FOR_EACH_LOG_LINK (link, i3)
4581 if ((set = single_set (link->insn)) != 0
4582 && rtx_equal_p (i2dest, SET_DEST (set)))
4583 i2_insn = link->insn, i2_val = SET_SRC (set);
4585 record_value_for_reg (i2dest, i2_insn, i2_val);
4587 /* If the reg formerly set in I2 died only once and that was in I3,
4588 zero its use count so it won't make `reload' do any work. */
4589 if (! added_sets_2
4590 && (newi2pat == 0 || ! reg_mentioned_p (i2dest, newi2pat))
4591 && ! i2dest_in_i2src
4592 && REGNO (i2dest) < reg_n_sets_max)
4593 INC_REG_N_SETS (REGNO (i2dest), -1);
4596 if (i1 && REG_P (i1dest))
4598 struct insn_link *link;
4599 rtx_insn *i1_insn = 0;
4600 rtx i1_val = 0, set;
4602 FOR_EACH_LOG_LINK (link, i3)
4603 if ((set = single_set (link->insn)) != 0
4604 && rtx_equal_p (i1dest, SET_DEST (set)))
4605 i1_insn = link->insn, i1_val = SET_SRC (set);
4607 record_value_for_reg (i1dest, i1_insn, i1_val);
4609 if (! added_sets_1
4610 && ! i1dest_in_i1src
4611 && REGNO (i1dest) < reg_n_sets_max)
4612 INC_REG_N_SETS (REGNO (i1dest), -1);
4615 if (i0 && REG_P (i0dest))
4617 struct insn_link *link;
4618 rtx_insn *i0_insn = 0;
4619 rtx i0_val = 0, set;
4621 FOR_EACH_LOG_LINK (link, i3)
4622 if ((set = single_set (link->insn)) != 0
4623 && rtx_equal_p (i0dest, SET_DEST (set)))
4624 i0_insn = link->insn, i0_val = SET_SRC (set);
4626 record_value_for_reg (i0dest, i0_insn, i0_val);
4628 if (! added_sets_0
4629 && ! i0dest_in_i0src
4630 && REGNO (i0dest) < reg_n_sets_max)
4631 INC_REG_N_SETS (REGNO (i0dest), -1);
4634 /* Update reg_stat[].nonzero_bits et al for any changes that may have
4635 been made to this insn. The order is important, because newi2pat
4636 can affect nonzero_bits of newpat. */
4637 if (newi2pat)
4638 note_stores (newi2pat, set_nonzero_bits_and_sign_copies, NULL);
4639 note_stores (newpat, set_nonzero_bits_and_sign_copies, NULL);
4642 if (undobuf.other_insn != NULL_RTX)
4644 if (dump_file)
4646 fprintf (dump_file, "modifying other_insn ");
4647 dump_insn_slim (dump_file, undobuf.other_insn);
4649 df_insn_rescan (undobuf.other_insn);
4652 if (i0 && !(NOTE_P (i0) && (NOTE_KIND (i0) == NOTE_INSN_DELETED)))
4654 if (dump_file)
4656 fprintf (dump_file, "modifying insn i0 ");
4657 dump_insn_slim (dump_file, i0);
4659 df_insn_rescan (i0);
4662 if (i1 && !(NOTE_P (i1) && (NOTE_KIND (i1) == NOTE_INSN_DELETED)))
4664 if (dump_file)
4666 fprintf (dump_file, "modifying insn i1 ");
4667 dump_insn_slim (dump_file, i1);
4669 df_insn_rescan (i1);
4672 if (i2 && !(NOTE_P (i2) && (NOTE_KIND (i2) == NOTE_INSN_DELETED)))
4674 if (dump_file)
4676 fprintf (dump_file, "modifying insn i2 ");
4677 dump_insn_slim (dump_file, i2);
4679 df_insn_rescan (i2);
4682 if (i3 && !(NOTE_P (i3) && (NOTE_KIND (i3) == NOTE_INSN_DELETED)))
4684 if (dump_file)
4686 fprintf (dump_file, "modifying insn i3 ");
4687 dump_insn_slim (dump_file, i3);
4689 df_insn_rescan (i3);
4692 /* Set new_direct_jump_p if a new return or simple jump instruction
4693 has been created. Adjust the CFG accordingly. */
4694 if (returnjump_p (i3) || any_uncondjump_p (i3))
4696 *new_direct_jump_p = 1;
4697 mark_jump_label (PATTERN (i3), i3, 0);
4698 update_cfg_for_uncondjump (i3);
4701 if (undobuf.other_insn != NULL_RTX
4702 && (returnjump_p (undobuf.other_insn)
4703 || any_uncondjump_p (undobuf.other_insn)))
4705 *new_direct_jump_p = 1;
4706 update_cfg_for_uncondjump (undobuf.other_insn);
4709 if (GET_CODE (PATTERN (i3)) == TRAP_IF
4710 && XEXP (PATTERN (i3), 0) == const1_rtx)
4712 basic_block bb = BLOCK_FOR_INSN (i3);
4713 gcc_assert (bb);
4714 remove_edge (split_block (bb, i3));
4715 emit_barrier_after_bb (bb);
4716 *new_direct_jump_p = 1;
4719 if (undobuf.other_insn
4720 && GET_CODE (PATTERN (undobuf.other_insn)) == TRAP_IF
4721 && XEXP (PATTERN (undobuf.other_insn), 0) == const1_rtx)
4723 basic_block bb = BLOCK_FOR_INSN (undobuf.other_insn);
4724 gcc_assert (bb);
4725 remove_edge (split_block (bb, undobuf.other_insn));
4726 emit_barrier_after_bb (bb);
4727 *new_direct_jump_p = 1;
4730 /* A noop might also need cleaning up of CFG, if it comes from the
4731 simplification of a jump. */
4732 if (JUMP_P (i3)
4733 && GET_CODE (newpat) == SET
4734 && SET_SRC (newpat) == pc_rtx
4735 && SET_DEST (newpat) == pc_rtx)
4737 *new_direct_jump_p = 1;
4738 update_cfg_for_uncondjump (i3);
4741 if (undobuf.other_insn != NULL_RTX
4742 && JUMP_P (undobuf.other_insn)
4743 && GET_CODE (PATTERN (undobuf.other_insn)) == SET
4744 && SET_SRC (PATTERN (undobuf.other_insn)) == pc_rtx
4745 && SET_DEST (PATTERN (undobuf.other_insn)) == pc_rtx)
4747 *new_direct_jump_p = 1;
4748 update_cfg_for_uncondjump (undobuf.other_insn);
4751 combine_successes++;
4752 undo_commit ();
4754 if (added_links_insn
4755 && (newi2pat == 0 || DF_INSN_LUID (added_links_insn) < DF_INSN_LUID (i2))
4756 && DF_INSN_LUID (added_links_insn) < DF_INSN_LUID (i3))
4757 return added_links_insn;
4758 else
4759 return newi2pat ? i2 : i3;
4762 /* Get a marker for undoing to the current state. */
4764 static void *
4765 get_undo_marker (void)
4767 return undobuf.undos;
4770 /* Undo the modifications up to the marker. */
4772 static void
4773 undo_to_marker (void *marker)
4775 struct undo *undo, *next;
4777 for (undo = undobuf.undos; undo != marker; undo = next)
4779 gcc_assert (undo);
4781 next = undo->next;
4782 switch (undo->kind)
4784 case UNDO_RTX:
4785 *undo->where.r = undo->old_contents.r;
4786 break;
4787 case UNDO_INT:
4788 *undo->where.i = undo->old_contents.i;
4789 break;
4790 case UNDO_MODE:
4791 adjust_reg_mode (*undo->where.r, undo->old_contents.m);
4792 break;
4793 case UNDO_LINKS:
4794 *undo->where.l = undo->old_contents.l;
4795 break;
4796 default:
4797 gcc_unreachable ();
4800 undo->next = undobuf.frees;
4801 undobuf.frees = undo;
4804 undobuf.undos = (struct undo *) marker;
4807 /* Undo all the modifications recorded in undobuf. */
4809 static void
4810 undo_all (void)
4812 undo_to_marker (0);
4815 /* We've committed to accepting the changes we made. Move all
4816 of the undos to the free list. */
4818 static void
4819 undo_commit (void)
4821 struct undo *undo, *next;
4823 for (undo = undobuf.undos; undo; undo = next)
4825 next = undo->next;
4826 undo->next = undobuf.frees;
4827 undobuf.frees = undo;
4829 undobuf.undos = 0;
4832 /* Find the innermost point within the rtx at LOC, possibly LOC itself,
4833 where we have an arithmetic expression and return that point. LOC will
4834 be inside INSN.
4836 try_combine will call this function to see if an insn can be split into
4837 two insns. */
4839 static rtx *
4840 find_split_point (rtx *loc, rtx_insn *insn, bool set_src)
4842 rtx x = *loc;
4843 enum rtx_code code = GET_CODE (x);
4844 rtx *split;
4845 unsigned HOST_WIDE_INT len = 0;
4846 HOST_WIDE_INT pos = 0;
4847 int unsignedp = 0;
4848 rtx inner = NULL_RTX;
4849 scalar_int_mode mode, inner_mode;
4851 /* First special-case some codes. */
4852 switch (code)
4854 case SUBREG:
4855 #ifdef INSN_SCHEDULING
4856 /* If we are making a paradoxical SUBREG invalid, it becomes a split
4857 point. */
4858 if (MEM_P (SUBREG_REG (x)))
4859 return loc;
4860 #endif
4861 return find_split_point (&SUBREG_REG (x), insn, false);
4863 case MEM:
4864 /* If we have (mem (const ..)) or (mem (symbol_ref ...)), split it
4865 using LO_SUM and HIGH. */
4866 if (HAVE_lo_sum && (GET_CODE (XEXP (x, 0)) == CONST
4867 || GET_CODE (XEXP (x, 0)) == SYMBOL_REF))
4869 machine_mode address_mode = get_address_mode (x);
4871 SUBST (XEXP (x, 0),
4872 gen_rtx_LO_SUM (address_mode,
4873 gen_rtx_HIGH (address_mode, XEXP (x, 0)),
4874 XEXP (x, 0)));
4875 return &XEXP (XEXP (x, 0), 0);
4878 /* If we have a PLUS whose second operand is a constant and the
4879 address is not valid, perhaps will can split it up using
4880 the machine-specific way to split large constants. We use
4881 the first pseudo-reg (one of the virtual regs) as a placeholder;
4882 it will not remain in the result. */
4883 if (GET_CODE (XEXP (x, 0)) == PLUS
4884 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
4885 && ! memory_address_addr_space_p (GET_MODE (x), XEXP (x, 0),
4886 MEM_ADDR_SPACE (x)))
4888 rtx reg = regno_reg_rtx[FIRST_PSEUDO_REGISTER];
4889 rtx_insn *seq = combine_split_insns (gen_rtx_SET (reg, XEXP (x, 0)),
4890 subst_insn);
4892 /* This should have produced two insns, each of which sets our
4893 placeholder. If the source of the second is a valid address,
4894 we can make put both sources together and make a split point
4895 in the middle. */
4897 if (seq
4898 && NEXT_INSN (seq) != NULL_RTX
4899 && NEXT_INSN (NEXT_INSN (seq)) == NULL_RTX
4900 && NONJUMP_INSN_P (seq)
4901 && GET_CODE (PATTERN (seq)) == SET
4902 && SET_DEST (PATTERN (seq)) == reg
4903 && ! reg_mentioned_p (reg,
4904 SET_SRC (PATTERN (seq)))
4905 && NONJUMP_INSN_P (NEXT_INSN (seq))
4906 && GET_CODE (PATTERN (NEXT_INSN (seq))) == SET
4907 && SET_DEST (PATTERN (NEXT_INSN (seq))) == reg
4908 && memory_address_addr_space_p
4909 (GET_MODE (x), SET_SRC (PATTERN (NEXT_INSN (seq))),
4910 MEM_ADDR_SPACE (x)))
4912 rtx src1 = SET_SRC (PATTERN (seq));
4913 rtx src2 = SET_SRC (PATTERN (NEXT_INSN (seq)));
4915 /* Replace the placeholder in SRC2 with SRC1. If we can
4916 find where in SRC2 it was placed, that can become our
4917 split point and we can replace this address with SRC2.
4918 Just try two obvious places. */
4920 src2 = replace_rtx (src2, reg, src1);
4921 split = 0;
4922 if (XEXP (src2, 0) == src1)
4923 split = &XEXP (src2, 0);
4924 else if (GET_RTX_FORMAT (GET_CODE (XEXP (src2, 0)))[0] == 'e'
4925 && XEXP (XEXP (src2, 0), 0) == src1)
4926 split = &XEXP (XEXP (src2, 0), 0);
4928 if (split)
4930 SUBST (XEXP (x, 0), src2);
4931 return split;
4935 /* If that didn't work, perhaps the first operand is complex and
4936 needs to be computed separately, so make a split point there.
4937 This will occur on machines that just support REG + CONST
4938 and have a constant moved through some previous computation. */
4940 else if (!OBJECT_P (XEXP (XEXP (x, 0), 0))
4941 && ! (GET_CODE (XEXP (XEXP (x, 0), 0)) == SUBREG
4942 && OBJECT_P (SUBREG_REG (XEXP (XEXP (x, 0), 0)))))
4943 return &XEXP (XEXP (x, 0), 0);
4946 /* If we have a PLUS whose first operand is complex, try computing it
4947 separately by making a split there. */
4948 if (GET_CODE (XEXP (x, 0)) == PLUS
4949 && ! memory_address_addr_space_p (GET_MODE (x), XEXP (x, 0),
4950 MEM_ADDR_SPACE (x))
4951 && ! OBJECT_P (XEXP (XEXP (x, 0), 0))
4952 && ! (GET_CODE (XEXP (XEXP (x, 0), 0)) == SUBREG
4953 && OBJECT_P (SUBREG_REG (XEXP (XEXP (x, 0), 0)))))
4954 return &XEXP (XEXP (x, 0), 0);
4955 break;
4957 case SET:
4958 /* If SET_DEST is CC0 and SET_SRC is not an operand, a COMPARE, or a
4959 ZERO_EXTRACT, the most likely reason why this doesn't match is that
4960 we need to put the operand into a register. So split at that
4961 point. */
4963 if (SET_DEST (x) == cc0_rtx
4964 && GET_CODE (SET_SRC (x)) != COMPARE
4965 && GET_CODE (SET_SRC (x)) != ZERO_EXTRACT
4966 && !OBJECT_P (SET_SRC (x))
4967 && ! (GET_CODE (SET_SRC (x)) == SUBREG
4968 && OBJECT_P (SUBREG_REG (SET_SRC (x)))))
4969 return &SET_SRC (x);
4971 /* See if we can split SET_SRC as it stands. */
4972 split = find_split_point (&SET_SRC (x), insn, true);
4973 if (split && split != &SET_SRC (x))
4974 return split;
4976 /* See if we can split SET_DEST as it stands. */
4977 split = find_split_point (&SET_DEST (x), insn, false);
4978 if (split && split != &SET_DEST (x))
4979 return split;
4981 /* See if this is a bitfield assignment with everything constant. If
4982 so, this is an IOR of an AND, so split it into that. */
4983 if (GET_CODE (SET_DEST (x)) == ZERO_EXTRACT
4984 && is_a <scalar_int_mode> (GET_MODE (XEXP (SET_DEST (x), 0)),
4985 &inner_mode)
4986 && HWI_COMPUTABLE_MODE_P (inner_mode)
4987 && CONST_INT_P (XEXP (SET_DEST (x), 1))
4988 && CONST_INT_P (XEXP (SET_DEST (x), 2))
4989 && CONST_INT_P (SET_SRC (x))
4990 && ((INTVAL (XEXP (SET_DEST (x), 1))
4991 + INTVAL (XEXP (SET_DEST (x), 2)))
4992 <= GET_MODE_PRECISION (inner_mode))
4993 && ! side_effects_p (XEXP (SET_DEST (x), 0)))
4995 HOST_WIDE_INT pos = INTVAL (XEXP (SET_DEST (x), 2));
4996 unsigned HOST_WIDE_INT len = INTVAL (XEXP (SET_DEST (x), 1));
4997 unsigned HOST_WIDE_INT src = INTVAL (SET_SRC (x));
4998 rtx dest = XEXP (SET_DEST (x), 0);
4999 unsigned HOST_WIDE_INT mask
5000 = (HOST_WIDE_INT_1U << len) - 1;
5001 rtx or_mask;
5003 if (BITS_BIG_ENDIAN)
5004 pos = GET_MODE_PRECISION (inner_mode) - len - pos;
5006 or_mask = gen_int_mode (src << pos, inner_mode);
5007 if (src == mask)
5008 SUBST (SET_SRC (x),
5009 simplify_gen_binary (IOR, inner_mode, dest, or_mask));
5010 else
5012 rtx negmask = gen_int_mode (~(mask << pos), inner_mode);
5013 SUBST (SET_SRC (x),
5014 simplify_gen_binary (IOR, inner_mode,
5015 simplify_gen_binary (AND, inner_mode,
5016 dest, negmask),
5017 or_mask));
5020 SUBST (SET_DEST (x), dest);
5022 split = find_split_point (&SET_SRC (x), insn, true);
5023 if (split && split != &SET_SRC (x))
5024 return split;
5027 /* Otherwise, see if this is an operation that we can split into two.
5028 If so, try to split that. */
5029 code = GET_CODE (SET_SRC (x));
5031 switch (code)
5033 case AND:
5034 /* If we are AND'ing with a large constant that is only a single
5035 bit and the result is only being used in a context where we
5036 need to know if it is zero or nonzero, replace it with a bit
5037 extraction. This will avoid the large constant, which might
5038 have taken more than one insn to make. If the constant were
5039 not a valid argument to the AND but took only one insn to make,
5040 this is no worse, but if it took more than one insn, it will
5041 be better. */
5043 if (CONST_INT_P (XEXP (SET_SRC (x), 1))
5044 && REG_P (XEXP (SET_SRC (x), 0))
5045 && (pos = exact_log2 (UINTVAL (XEXP (SET_SRC (x), 1)))) >= 7
5046 && REG_P (SET_DEST (x))
5047 && (split = find_single_use (SET_DEST (x), insn, NULL)) != 0
5048 && (GET_CODE (*split) == EQ || GET_CODE (*split) == NE)
5049 && XEXP (*split, 0) == SET_DEST (x)
5050 && XEXP (*split, 1) == const0_rtx)
5052 rtx extraction = make_extraction (GET_MODE (SET_DEST (x)),
5053 XEXP (SET_SRC (x), 0),
5054 pos, NULL_RTX, 1, 1, 0, 0);
5055 if (extraction != 0)
5057 SUBST (SET_SRC (x), extraction);
5058 return find_split_point (loc, insn, false);
5061 break;
5063 case NE:
5064 /* If STORE_FLAG_VALUE is -1, this is (NE X 0) and only one bit of X
5065 is known to be on, this can be converted into a NEG of a shift. */
5066 if (STORE_FLAG_VALUE == -1 && XEXP (SET_SRC (x), 1) == const0_rtx
5067 && GET_MODE (SET_SRC (x)) == GET_MODE (XEXP (SET_SRC (x), 0))
5068 && 1 <= (pos = exact_log2
5069 (nonzero_bits (XEXP (SET_SRC (x), 0),
5070 GET_MODE (XEXP (SET_SRC (x), 0))))))
5072 machine_mode mode = GET_MODE (XEXP (SET_SRC (x), 0));
5074 SUBST (SET_SRC (x),
5075 gen_rtx_NEG (mode,
5076 gen_rtx_LSHIFTRT (mode,
5077 XEXP (SET_SRC (x), 0),
5078 GEN_INT (pos))));
5080 split = find_split_point (&SET_SRC (x), insn, true);
5081 if (split && split != &SET_SRC (x))
5082 return split;
5084 break;
5086 case SIGN_EXTEND:
5087 inner = XEXP (SET_SRC (x), 0);
5089 /* We can't optimize if either mode is a partial integer
5090 mode as we don't know how many bits are significant
5091 in those modes. */
5092 if (!is_int_mode (GET_MODE (inner), &inner_mode)
5093 || GET_MODE_CLASS (GET_MODE (SET_SRC (x))) == MODE_PARTIAL_INT)
5094 break;
5096 pos = 0;
5097 len = GET_MODE_PRECISION (inner_mode);
5098 unsignedp = 0;
5099 break;
5101 case SIGN_EXTRACT:
5102 case ZERO_EXTRACT:
5103 if (is_a <scalar_int_mode> (GET_MODE (XEXP (SET_SRC (x), 0)),
5104 &inner_mode)
5105 && CONST_INT_P (XEXP (SET_SRC (x), 1))
5106 && CONST_INT_P (XEXP (SET_SRC (x), 2)))
5108 inner = XEXP (SET_SRC (x), 0);
5109 len = INTVAL (XEXP (SET_SRC (x), 1));
5110 pos = INTVAL (XEXP (SET_SRC (x), 2));
5112 if (BITS_BIG_ENDIAN)
5113 pos = GET_MODE_PRECISION (inner_mode) - len - pos;
5114 unsignedp = (code == ZERO_EXTRACT);
5116 break;
5118 default:
5119 break;
5122 if (len && pos >= 0
5123 && pos + len <= GET_MODE_PRECISION (GET_MODE (inner))
5124 && is_a <scalar_int_mode> (GET_MODE (SET_SRC (x)), &mode))
5126 /* For unsigned, we have a choice of a shift followed by an
5127 AND or two shifts. Use two shifts for field sizes where the
5128 constant might be too large. We assume here that we can
5129 always at least get 8-bit constants in an AND insn, which is
5130 true for every current RISC. */
5132 if (unsignedp && len <= 8)
5134 unsigned HOST_WIDE_INT mask
5135 = (HOST_WIDE_INT_1U << len) - 1;
5136 SUBST (SET_SRC (x),
5137 gen_rtx_AND (mode,
5138 gen_rtx_LSHIFTRT
5139 (mode, gen_lowpart (mode, inner),
5140 GEN_INT (pos)),
5141 gen_int_mode (mask, mode)));
5143 split = find_split_point (&SET_SRC (x), insn, true);
5144 if (split && split != &SET_SRC (x))
5145 return split;
5147 else
5149 SUBST (SET_SRC (x),
5150 gen_rtx_fmt_ee
5151 (unsignedp ? LSHIFTRT : ASHIFTRT, mode,
5152 gen_rtx_ASHIFT (mode,
5153 gen_lowpart (mode, inner),
5154 GEN_INT (GET_MODE_PRECISION (mode)
5155 - len - pos)),
5156 GEN_INT (GET_MODE_PRECISION (mode) - len)));
5158 split = find_split_point (&SET_SRC (x), insn, true);
5159 if (split && split != &SET_SRC (x))
5160 return split;
5164 /* See if this is a simple operation with a constant as the second
5165 operand. It might be that this constant is out of range and hence
5166 could be used as a split point. */
5167 if (BINARY_P (SET_SRC (x))
5168 && CONSTANT_P (XEXP (SET_SRC (x), 1))
5169 && (OBJECT_P (XEXP (SET_SRC (x), 0))
5170 || (GET_CODE (XEXP (SET_SRC (x), 0)) == SUBREG
5171 && OBJECT_P (SUBREG_REG (XEXP (SET_SRC (x), 0))))))
5172 return &XEXP (SET_SRC (x), 1);
5174 /* Finally, see if this is a simple operation with its first operand
5175 not in a register. The operation might require this operand in a
5176 register, so return it as a split point. We can always do this
5177 because if the first operand were another operation, we would have
5178 already found it as a split point. */
5179 if ((BINARY_P (SET_SRC (x)) || UNARY_P (SET_SRC (x)))
5180 && ! register_operand (XEXP (SET_SRC (x), 0), VOIDmode))
5181 return &XEXP (SET_SRC (x), 0);
5183 return 0;
5185 case AND:
5186 case IOR:
5187 /* We write NOR as (and (not A) (not B)), but if we don't have a NOR,
5188 it is better to write this as (not (ior A B)) so we can split it.
5189 Similarly for IOR. */
5190 if (GET_CODE (XEXP (x, 0)) == NOT && GET_CODE (XEXP (x, 1)) == NOT)
5192 SUBST (*loc,
5193 gen_rtx_NOT (GET_MODE (x),
5194 gen_rtx_fmt_ee (code == IOR ? AND : IOR,
5195 GET_MODE (x),
5196 XEXP (XEXP (x, 0), 0),
5197 XEXP (XEXP (x, 1), 0))));
5198 return find_split_point (loc, insn, set_src);
5201 /* Many RISC machines have a large set of logical insns. If the
5202 second operand is a NOT, put it first so we will try to split the
5203 other operand first. */
5204 if (GET_CODE (XEXP (x, 1)) == NOT)
5206 rtx tem = XEXP (x, 0);
5207 SUBST (XEXP (x, 0), XEXP (x, 1));
5208 SUBST (XEXP (x, 1), tem);
5210 break;
5212 case PLUS:
5213 case MINUS:
5214 /* Canonicalization can produce (minus A (mult B C)), where C is a
5215 constant. It may be better to try splitting (plus (mult B -C) A)
5216 instead if this isn't a multiply by a power of two. */
5217 if (set_src && code == MINUS && GET_CODE (XEXP (x, 1)) == MULT
5218 && GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT
5219 && !pow2p_hwi (INTVAL (XEXP (XEXP (x, 1), 1))))
5221 machine_mode mode = GET_MODE (x);
5222 unsigned HOST_WIDE_INT this_int = INTVAL (XEXP (XEXP (x, 1), 1));
5223 HOST_WIDE_INT other_int = trunc_int_for_mode (-this_int, mode);
5224 SUBST (*loc, gen_rtx_PLUS (mode,
5225 gen_rtx_MULT (mode,
5226 XEXP (XEXP (x, 1), 0),
5227 gen_int_mode (other_int,
5228 mode)),
5229 XEXP (x, 0)));
5230 return find_split_point (loc, insn, set_src);
5233 /* Split at a multiply-accumulate instruction. However if this is
5234 the SET_SRC, we likely do not have such an instruction and it's
5235 worthless to try this split. */
5236 if (!set_src
5237 && (GET_CODE (XEXP (x, 0)) == MULT
5238 || (GET_CODE (XEXP (x, 0)) == ASHIFT
5239 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)))
5240 return loc;
5242 default:
5243 break;
5246 /* Otherwise, select our actions depending on our rtx class. */
5247 switch (GET_RTX_CLASS (code))
5249 case RTX_BITFIELD_OPS: /* This is ZERO_EXTRACT and SIGN_EXTRACT. */
5250 case RTX_TERNARY:
5251 split = find_split_point (&XEXP (x, 2), insn, false);
5252 if (split)
5253 return split;
5254 /* fall through */
5255 case RTX_BIN_ARITH:
5256 case RTX_COMM_ARITH:
5257 case RTX_COMPARE:
5258 case RTX_COMM_COMPARE:
5259 split = find_split_point (&XEXP (x, 1), insn, false);
5260 if (split)
5261 return split;
5262 /* fall through */
5263 case RTX_UNARY:
5264 /* Some machines have (and (shift ...) ...) insns. If X is not
5265 an AND, but XEXP (X, 0) is, use it as our split point. */
5266 if (GET_CODE (x) != AND && GET_CODE (XEXP (x, 0)) == AND)
5267 return &XEXP (x, 0);
5269 split = find_split_point (&XEXP (x, 0), insn, false);
5270 if (split)
5271 return split;
5272 return loc;
5274 default:
5275 /* Otherwise, we don't have a split point. */
5276 return 0;
5280 /* Throughout X, replace FROM with TO, and return the result.
5281 The result is TO if X is FROM;
5282 otherwise the result is X, but its contents may have been modified.
5283 If they were modified, a record was made in undobuf so that
5284 undo_all will (among other things) return X to its original state.
5286 If the number of changes necessary is too much to record to undo,
5287 the excess changes are not made, so the result is invalid.
5288 The changes already made can still be undone.
5289 undobuf.num_undo is incremented for such changes, so by testing that
5290 the caller can tell whether the result is valid.
5292 `n_occurrences' is incremented each time FROM is replaced.
5294 IN_DEST is nonzero if we are processing the SET_DEST of a SET.
5296 IN_COND is nonzero if we are at the top level of a condition.
5298 UNIQUE_COPY is nonzero if each substitution must be unique. We do this
5299 by copying if `n_occurrences' is nonzero. */
5301 static rtx
5302 subst (rtx x, rtx from, rtx to, int in_dest, int in_cond, int unique_copy)
5304 enum rtx_code code = GET_CODE (x);
5305 machine_mode op0_mode = VOIDmode;
5306 const char *fmt;
5307 int len, i;
5308 rtx new_rtx;
5310 /* Two expressions are equal if they are identical copies of a shared
5311 RTX or if they are both registers with the same register number
5312 and mode. */
5314 #define COMBINE_RTX_EQUAL_P(X,Y) \
5315 ((X) == (Y) \
5316 || (REG_P (X) && REG_P (Y) \
5317 && REGNO (X) == REGNO (Y) && GET_MODE (X) == GET_MODE (Y)))
5319 /* Do not substitute into clobbers of regs -- this will never result in
5320 valid RTL. */
5321 if (GET_CODE (x) == CLOBBER && REG_P (XEXP (x, 0)))
5322 return x;
5324 if (! in_dest && COMBINE_RTX_EQUAL_P (x, from))
5326 n_occurrences++;
5327 return (unique_copy && n_occurrences > 1 ? copy_rtx (to) : to);
5330 /* If X and FROM are the same register but different modes, they
5331 will not have been seen as equal above. However, the log links code
5332 will make a LOG_LINKS entry for that case. If we do nothing, we
5333 will try to rerecognize our original insn and, when it succeeds,
5334 we will delete the feeding insn, which is incorrect.
5336 So force this insn not to match in this (rare) case. */
5337 if (! in_dest && code == REG && REG_P (from)
5338 && reg_overlap_mentioned_p (x, from))
5339 return gen_rtx_CLOBBER (GET_MODE (x), const0_rtx);
5341 /* If this is an object, we are done unless it is a MEM or LO_SUM, both
5342 of which may contain things that can be combined. */
5343 if (code != MEM && code != LO_SUM && OBJECT_P (x))
5344 return x;
5346 /* It is possible to have a subexpression appear twice in the insn.
5347 Suppose that FROM is a register that appears within TO.
5348 Then, after that subexpression has been scanned once by `subst',
5349 the second time it is scanned, TO may be found. If we were
5350 to scan TO here, we would find FROM within it and create a
5351 self-referent rtl structure which is completely wrong. */
5352 if (COMBINE_RTX_EQUAL_P (x, to))
5353 return to;
5355 /* Parallel asm_operands need special attention because all of the
5356 inputs are shared across the arms. Furthermore, unsharing the
5357 rtl results in recognition failures. Failure to handle this case
5358 specially can result in circular rtl.
5360 Solve this by doing a normal pass across the first entry of the
5361 parallel, and only processing the SET_DESTs of the subsequent
5362 entries. Ug. */
5364 if (code == PARALLEL
5365 && GET_CODE (XVECEXP (x, 0, 0)) == SET
5366 && GET_CODE (SET_SRC (XVECEXP (x, 0, 0))) == ASM_OPERANDS)
5368 new_rtx = subst (XVECEXP (x, 0, 0), from, to, 0, 0, unique_copy);
5370 /* If this substitution failed, this whole thing fails. */
5371 if (GET_CODE (new_rtx) == CLOBBER
5372 && XEXP (new_rtx, 0) == const0_rtx)
5373 return new_rtx;
5375 SUBST (XVECEXP (x, 0, 0), new_rtx);
5377 for (i = XVECLEN (x, 0) - 1; i >= 1; i--)
5379 rtx dest = SET_DEST (XVECEXP (x, 0, i));
5381 if (!REG_P (dest)
5382 && GET_CODE (dest) != CC0
5383 && GET_CODE (dest) != PC)
5385 new_rtx = subst (dest, from, to, 0, 0, unique_copy);
5387 /* If this substitution failed, this whole thing fails. */
5388 if (GET_CODE (new_rtx) == CLOBBER
5389 && XEXP (new_rtx, 0) == const0_rtx)
5390 return new_rtx;
5392 SUBST (SET_DEST (XVECEXP (x, 0, i)), new_rtx);
5396 else
5398 len = GET_RTX_LENGTH (code);
5399 fmt = GET_RTX_FORMAT (code);
5401 /* We don't need to process a SET_DEST that is a register, CC0,
5402 or PC, so set up to skip this common case. All other cases
5403 where we want to suppress replacing something inside a
5404 SET_SRC are handled via the IN_DEST operand. */
5405 if (code == SET
5406 && (REG_P (SET_DEST (x))
5407 || GET_CODE (SET_DEST (x)) == CC0
5408 || GET_CODE (SET_DEST (x)) == PC))
5409 fmt = "ie";
5411 /* Trying to simplify the operands of a widening MULT is not likely
5412 to create RTL matching a machine insn. */
5413 if (code == MULT
5414 && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
5415 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND)
5416 && (GET_CODE (XEXP (x, 1)) == ZERO_EXTEND
5417 || GET_CODE (XEXP (x, 1)) == SIGN_EXTEND)
5418 && REG_P (XEXP (XEXP (x, 0), 0))
5419 && REG_P (XEXP (XEXP (x, 1), 0))
5420 && from == to)
5421 return x;
5424 /* Get the mode of operand 0 in case X is now a SIGN_EXTEND of a
5425 constant. */
5426 if (fmt[0] == 'e')
5427 op0_mode = GET_MODE (XEXP (x, 0));
5429 for (i = 0; i < len; i++)
5431 if (fmt[i] == 'E')
5433 int j;
5434 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
5436 if (COMBINE_RTX_EQUAL_P (XVECEXP (x, i, j), from))
5438 new_rtx = (unique_copy && n_occurrences
5439 ? copy_rtx (to) : to);
5440 n_occurrences++;
5442 else
5444 new_rtx = subst (XVECEXP (x, i, j), from, to, 0, 0,
5445 unique_copy);
5447 /* If this substitution failed, this whole thing
5448 fails. */
5449 if (GET_CODE (new_rtx) == CLOBBER
5450 && XEXP (new_rtx, 0) == const0_rtx)
5451 return new_rtx;
5454 SUBST (XVECEXP (x, i, j), new_rtx);
5457 else if (fmt[i] == 'e')
5459 /* If this is a register being set, ignore it. */
5460 new_rtx = XEXP (x, i);
5461 if (in_dest
5462 && i == 0
5463 && (((code == SUBREG || code == ZERO_EXTRACT)
5464 && REG_P (new_rtx))
5465 || code == STRICT_LOW_PART))
5468 else if (COMBINE_RTX_EQUAL_P (XEXP (x, i), from))
5470 /* In general, don't install a subreg involving two
5471 modes not tieable. It can worsen register
5472 allocation, and can even make invalid reload
5473 insns, since the reg inside may need to be copied
5474 from in the outside mode, and that may be invalid
5475 if it is an fp reg copied in integer mode.
5477 We allow two exceptions to this: It is valid if
5478 it is inside another SUBREG and the mode of that
5479 SUBREG and the mode of the inside of TO is
5480 tieable and it is valid if X is a SET that copies
5481 FROM to CC0. */
5483 if (GET_CODE (to) == SUBREG
5484 && !targetm.modes_tieable_p (GET_MODE (to),
5485 GET_MODE (SUBREG_REG (to)))
5486 && ! (code == SUBREG
5487 && (targetm.modes_tieable_p
5488 (GET_MODE (x), GET_MODE (SUBREG_REG (to)))))
5489 && (!HAVE_cc0
5490 || (! (code == SET
5491 && i == 1
5492 && XEXP (x, 0) == cc0_rtx))))
5493 return gen_rtx_CLOBBER (VOIDmode, const0_rtx);
5495 if (code == SUBREG
5496 && REG_P (to)
5497 && REGNO (to) < FIRST_PSEUDO_REGISTER
5498 && simplify_subreg_regno (REGNO (to), GET_MODE (to),
5499 SUBREG_BYTE (x),
5500 GET_MODE (x)) < 0)
5501 return gen_rtx_CLOBBER (VOIDmode, const0_rtx);
5503 new_rtx = (unique_copy && n_occurrences ? copy_rtx (to) : to);
5504 n_occurrences++;
5506 else
5507 /* If we are in a SET_DEST, suppress most cases unless we
5508 have gone inside a MEM, in which case we want to
5509 simplify the address. We assume here that things that
5510 are actually part of the destination have their inner
5511 parts in the first expression. This is true for SUBREG,
5512 STRICT_LOW_PART, and ZERO_EXTRACT, which are the only
5513 things aside from REG and MEM that should appear in a
5514 SET_DEST. */
5515 new_rtx = subst (XEXP (x, i), from, to,
5516 (((in_dest
5517 && (code == SUBREG || code == STRICT_LOW_PART
5518 || code == ZERO_EXTRACT))
5519 || code == SET)
5520 && i == 0),
5521 code == IF_THEN_ELSE && i == 0,
5522 unique_copy);
5524 /* If we found that we will have to reject this combination,
5525 indicate that by returning the CLOBBER ourselves, rather than
5526 an expression containing it. This will speed things up as
5527 well as prevent accidents where two CLOBBERs are considered
5528 to be equal, thus producing an incorrect simplification. */
5530 if (GET_CODE (new_rtx) == CLOBBER && XEXP (new_rtx, 0) == const0_rtx)
5531 return new_rtx;
5533 if (GET_CODE (x) == SUBREG && CONST_SCALAR_INT_P (new_rtx))
5535 machine_mode mode = GET_MODE (x);
5537 x = simplify_subreg (GET_MODE (x), new_rtx,
5538 GET_MODE (SUBREG_REG (x)),
5539 SUBREG_BYTE (x));
5540 if (! x)
5541 x = gen_rtx_CLOBBER (mode, const0_rtx);
5543 else if (CONST_SCALAR_INT_P (new_rtx)
5544 && GET_CODE (x) == ZERO_EXTEND)
5546 x = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
5547 new_rtx, GET_MODE (XEXP (x, 0)));
5548 gcc_assert (x);
5550 else
5551 SUBST (XEXP (x, i), new_rtx);
5556 /* Check if we are loading something from the constant pool via float
5557 extension; in this case we would undo compress_float_constant
5558 optimization and degenerate constant load to an immediate value. */
5559 if (GET_CODE (x) == FLOAT_EXTEND
5560 && MEM_P (XEXP (x, 0))
5561 && MEM_READONLY_P (XEXP (x, 0)))
5563 rtx tmp = avoid_constant_pool_reference (x);
5564 if (x != tmp)
5565 return x;
5568 /* Try to simplify X. If the simplification changed the code, it is likely
5569 that further simplification will help, so loop, but limit the number
5570 of repetitions that will be performed. */
5572 for (i = 0; i < 4; i++)
5574 /* If X is sufficiently simple, don't bother trying to do anything
5575 with it. */
5576 if (code != CONST_INT && code != REG && code != CLOBBER)
5577 x = combine_simplify_rtx (x, op0_mode, in_dest, in_cond);
5579 if (GET_CODE (x) == code)
5580 break;
5582 code = GET_CODE (x);
5584 /* We no longer know the original mode of operand 0 since we
5585 have changed the form of X) */
5586 op0_mode = VOIDmode;
5589 return x;
5592 /* If X is a commutative operation whose operands are not in the canonical
5593 order, use substitutions to swap them. */
5595 static void
5596 maybe_swap_commutative_operands (rtx x)
5598 if (COMMUTATIVE_ARITH_P (x)
5599 && swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
5601 rtx temp = XEXP (x, 0);
5602 SUBST (XEXP (x, 0), XEXP (x, 1));
5603 SUBST (XEXP (x, 1), temp);
5607 /* Simplify X, a piece of RTL. We just operate on the expression at the
5608 outer level; call `subst' to simplify recursively. Return the new
5609 expression.
5611 OP0_MODE is the original mode of XEXP (x, 0). IN_DEST is nonzero
5612 if we are inside a SET_DEST. IN_COND is nonzero if we are at the top level
5613 of a condition. */
5615 static rtx
5616 combine_simplify_rtx (rtx x, machine_mode op0_mode, int in_dest,
5617 int in_cond)
5619 enum rtx_code code = GET_CODE (x);
5620 machine_mode mode = GET_MODE (x);
5621 scalar_int_mode int_mode;
5622 rtx temp;
5623 int i;
5625 /* If this is a commutative operation, put a constant last and a complex
5626 expression first. We don't need to do this for comparisons here. */
5627 maybe_swap_commutative_operands (x);
5629 /* Try to fold this expression in case we have constants that weren't
5630 present before. */
5631 temp = 0;
5632 switch (GET_RTX_CLASS (code))
5634 case RTX_UNARY:
5635 if (op0_mode == VOIDmode)
5636 op0_mode = GET_MODE (XEXP (x, 0));
5637 temp = simplify_unary_operation (code, mode, XEXP (x, 0), op0_mode);
5638 break;
5639 case RTX_COMPARE:
5640 case RTX_COMM_COMPARE:
5642 machine_mode cmp_mode = GET_MODE (XEXP (x, 0));
5643 if (cmp_mode == VOIDmode)
5645 cmp_mode = GET_MODE (XEXP (x, 1));
5646 if (cmp_mode == VOIDmode)
5647 cmp_mode = op0_mode;
5649 temp = simplify_relational_operation (code, mode, cmp_mode,
5650 XEXP (x, 0), XEXP (x, 1));
5652 break;
5653 case RTX_COMM_ARITH:
5654 case RTX_BIN_ARITH:
5655 temp = simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
5656 break;
5657 case RTX_BITFIELD_OPS:
5658 case RTX_TERNARY:
5659 temp = simplify_ternary_operation (code, mode, op0_mode, XEXP (x, 0),
5660 XEXP (x, 1), XEXP (x, 2));
5661 break;
5662 default:
5663 break;
5666 if (temp)
5668 x = temp;
5669 code = GET_CODE (temp);
5670 op0_mode = VOIDmode;
5671 mode = GET_MODE (temp);
5674 /* If this is a simple operation applied to an IF_THEN_ELSE, try
5675 applying it to the arms of the IF_THEN_ELSE. This often simplifies
5676 things. Check for cases where both arms are testing the same
5677 condition.
5679 Don't do anything if all operands are very simple. */
5681 if ((BINARY_P (x)
5682 && ((!OBJECT_P (XEXP (x, 0))
5683 && ! (GET_CODE (XEXP (x, 0)) == SUBREG
5684 && OBJECT_P (SUBREG_REG (XEXP (x, 0)))))
5685 || (!OBJECT_P (XEXP (x, 1))
5686 && ! (GET_CODE (XEXP (x, 1)) == SUBREG
5687 && OBJECT_P (SUBREG_REG (XEXP (x, 1)))))))
5688 || (UNARY_P (x)
5689 && (!OBJECT_P (XEXP (x, 0))
5690 && ! (GET_CODE (XEXP (x, 0)) == SUBREG
5691 && OBJECT_P (SUBREG_REG (XEXP (x, 0)))))))
5693 rtx cond, true_rtx, false_rtx;
5695 cond = if_then_else_cond (x, &true_rtx, &false_rtx);
5696 if (cond != 0
5697 /* If everything is a comparison, what we have is highly unlikely
5698 to be simpler, so don't use it. */
5699 && ! (COMPARISON_P (x)
5700 && (COMPARISON_P (true_rtx) || COMPARISON_P (false_rtx))))
5702 rtx cop1 = const0_rtx;
5703 enum rtx_code cond_code = simplify_comparison (NE, &cond, &cop1);
5705 if (cond_code == NE && COMPARISON_P (cond))
5706 return x;
5708 /* Simplify the alternative arms; this may collapse the true and
5709 false arms to store-flag values. Be careful to use copy_rtx
5710 here since true_rtx or false_rtx might share RTL with x as a
5711 result of the if_then_else_cond call above. */
5712 true_rtx = subst (copy_rtx (true_rtx), pc_rtx, pc_rtx, 0, 0, 0);
5713 false_rtx = subst (copy_rtx (false_rtx), pc_rtx, pc_rtx, 0, 0, 0);
5715 /* If true_rtx and false_rtx are not general_operands, an if_then_else
5716 is unlikely to be simpler. */
5717 if (general_operand (true_rtx, VOIDmode)
5718 && general_operand (false_rtx, VOIDmode))
5720 enum rtx_code reversed;
5722 /* Restarting if we generate a store-flag expression will cause
5723 us to loop. Just drop through in this case. */
5725 /* If the result values are STORE_FLAG_VALUE and zero, we can
5726 just make the comparison operation. */
5727 if (true_rtx == const_true_rtx && false_rtx == const0_rtx)
5728 x = simplify_gen_relational (cond_code, mode, VOIDmode,
5729 cond, cop1);
5730 else if (true_rtx == const0_rtx && false_rtx == const_true_rtx
5731 && ((reversed = reversed_comparison_code_parts
5732 (cond_code, cond, cop1, NULL))
5733 != UNKNOWN))
5734 x = simplify_gen_relational (reversed, mode, VOIDmode,
5735 cond, cop1);
5737 /* Likewise, we can make the negate of a comparison operation
5738 if the result values are - STORE_FLAG_VALUE and zero. */
5739 else if (CONST_INT_P (true_rtx)
5740 && INTVAL (true_rtx) == - STORE_FLAG_VALUE
5741 && false_rtx == const0_rtx)
5742 x = simplify_gen_unary (NEG, mode,
5743 simplify_gen_relational (cond_code,
5744 mode, VOIDmode,
5745 cond, cop1),
5746 mode);
5747 else if (CONST_INT_P (false_rtx)
5748 && INTVAL (false_rtx) == - STORE_FLAG_VALUE
5749 && true_rtx == const0_rtx
5750 && ((reversed = reversed_comparison_code_parts
5751 (cond_code, cond, cop1, NULL))
5752 != UNKNOWN))
5753 x = simplify_gen_unary (NEG, mode,
5754 simplify_gen_relational (reversed,
5755 mode, VOIDmode,
5756 cond, cop1),
5757 mode);
5758 else
5759 return gen_rtx_IF_THEN_ELSE (mode,
5760 simplify_gen_relational (cond_code,
5761 mode,
5762 VOIDmode,
5763 cond,
5764 cop1),
5765 true_rtx, false_rtx);
5767 code = GET_CODE (x);
5768 op0_mode = VOIDmode;
5773 /* First see if we can apply the inverse distributive law. */
5774 if (code == PLUS || code == MINUS
5775 || code == AND || code == IOR || code == XOR)
5777 x = apply_distributive_law (x);
5778 code = GET_CODE (x);
5779 op0_mode = VOIDmode;
5782 /* If CODE is an associative operation not otherwise handled, see if we
5783 can associate some operands. This can win if they are constants or
5784 if they are logically related (i.e. (a & b) & a). */
5785 if ((code == PLUS || code == MINUS || code == MULT || code == DIV
5786 || code == AND || code == IOR || code == XOR
5787 || code == SMAX || code == SMIN || code == UMAX || code == UMIN)
5788 && ((INTEGRAL_MODE_P (mode) && code != DIV)
5789 || (flag_associative_math && FLOAT_MODE_P (mode))))
5791 if (GET_CODE (XEXP (x, 0)) == code)
5793 rtx other = XEXP (XEXP (x, 0), 0);
5794 rtx inner_op0 = XEXP (XEXP (x, 0), 1);
5795 rtx inner_op1 = XEXP (x, 1);
5796 rtx inner;
5798 /* Make sure we pass the constant operand if any as the second
5799 one if this is a commutative operation. */
5800 if (CONSTANT_P (inner_op0) && COMMUTATIVE_ARITH_P (x))
5801 std::swap (inner_op0, inner_op1);
5802 inner = simplify_binary_operation (code == MINUS ? PLUS
5803 : code == DIV ? MULT
5804 : code,
5805 mode, inner_op0, inner_op1);
5807 /* For commutative operations, try the other pair if that one
5808 didn't simplify. */
5809 if (inner == 0 && COMMUTATIVE_ARITH_P (x))
5811 other = XEXP (XEXP (x, 0), 1);
5812 inner = simplify_binary_operation (code, mode,
5813 XEXP (XEXP (x, 0), 0),
5814 XEXP (x, 1));
5817 if (inner)
5818 return simplify_gen_binary (code, mode, other, inner);
5822 /* A little bit of algebraic simplification here. */
5823 switch (code)
5825 case MEM:
5826 /* Ensure that our address has any ASHIFTs converted to MULT in case
5827 address-recognizing predicates are called later. */
5828 temp = make_compound_operation (XEXP (x, 0), MEM);
5829 SUBST (XEXP (x, 0), temp);
5830 break;
5832 case SUBREG:
5833 if (op0_mode == VOIDmode)
5834 op0_mode = GET_MODE (SUBREG_REG (x));
5836 /* See if this can be moved to simplify_subreg. */
5837 if (CONSTANT_P (SUBREG_REG (x))
5838 && subreg_lowpart_offset (mode, op0_mode) == SUBREG_BYTE (x)
5839 /* Don't call gen_lowpart if the inner mode
5840 is VOIDmode and we cannot simplify it, as SUBREG without
5841 inner mode is invalid. */
5842 && (GET_MODE (SUBREG_REG (x)) != VOIDmode
5843 || gen_lowpart_common (mode, SUBREG_REG (x))))
5844 return gen_lowpart (mode, SUBREG_REG (x));
5846 if (GET_MODE_CLASS (GET_MODE (SUBREG_REG (x))) == MODE_CC)
5847 break;
5849 rtx temp;
5850 temp = simplify_subreg (mode, SUBREG_REG (x), op0_mode,
5851 SUBREG_BYTE (x));
5852 if (temp)
5853 return temp;
5855 /* If op is known to have all lower bits zero, the result is zero. */
5856 scalar_int_mode int_mode, int_op0_mode;
5857 if (!in_dest
5858 && is_a <scalar_int_mode> (mode, &int_mode)
5859 && is_a <scalar_int_mode> (op0_mode, &int_op0_mode)
5860 && (GET_MODE_PRECISION (int_mode)
5861 < GET_MODE_PRECISION (int_op0_mode))
5862 && (subreg_lowpart_offset (int_mode, int_op0_mode)
5863 == SUBREG_BYTE (x))
5864 && HWI_COMPUTABLE_MODE_P (int_op0_mode)
5865 && (nonzero_bits (SUBREG_REG (x), int_op0_mode)
5866 & GET_MODE_MASK (int_mode)) == 0)
5867 return CONST0_RTX (int_mode);
5870 /* Don't change the mode of the MEM if that would change the meaning
5871 of the address. */
5872 if (MEM_P (SUBREG_REG (x))
5873 && (MEM_VOLATILE_P (SUBREG_REG (x))
5874 || mode_dependent_address_p (XEXP (SUBREG_REG (x), 0),
5875 MEM_ADDR_SPACE (SUBREG_REG (x)))))
5876 return gen_rtx_CLOBBER (mode, const0_rtx);
5878 /* Note that we cannot do any narrowing for non-constants since
5879 we might have been counting on using the fact that some bits were
5880 zero. We now do this in the SET. */
5882 break;
5884 case NEG:
5885 temp = expand_compound_operation (XEXP (x, 0));
5887 /* For C equal to the width of MODE minus 1, (neg (ashiftrt X C)) can be
5888 replaced by (lshiftrt X C). This will convert
5889 (neg (sign_extract X 1 Y)) to (zero_extract X 1 Y). */
5891 if (GET_CODE (temp) == ASHIFTRT
5892 && CONST_INT_P (XEXP (temp, 1))
5893 && INTVAL (XEXP (temp, 1)) == GET_MODE_UNIT_PRECISION (mode) - 1)
5894 return simplify_shift_const (NULL_RTX, LSHIFTRT, mode, XEXP (temp, 0),
5895 INTVAL (XEXP (temp, 1)));
5897 /* If X has only a single bit that might be nonzero, say, bit I, convert
5898 (neg X) to (ashiftrt (ashift X C-I) C-I) where C is the bitsize of
5899 MODE minus 1. This will convert (neg (zero_extract X 1 Y)) to
5900 (sign_extract X 1 Y). But only do this if TEMP isn't a register
5901 or a SUBREG of one since we'd be making the expression more
5902 complex if it was just a register. */
5904 if (!REG_P (temp)
5905 && ! (GET_CODE (temp) == SUBREG
5906 && REG_P (SUBREG_REG (temp)))
5907 && is_a <scalar_int_mode> (mode, &int_mode)
5908 && (i = exact_log2 (nonzero_bits (temp, int_mode))) >= 0)
5910 rtx temp1 = simplify_shift_const
5911 (NULL_RTX, ASHIFTRT, int_mode,
5912 simplify_shift_const (NULL_RTX, ASHIFT, int_mode, temp,
5913 GET_MODE_PRECISION (int_mode) - 1 - i),
5914 GET_MODE_PRECISION (int_mode) - 1 - i);
5916 /* If all we did was surround TEMP with the two shifts, we
5917 haven't improved anything, so don't use it. Otherwise,
5918 we are better off with TEMP1. */
5919 if (GET_CODE (temp1) != ASHIFTRT
5920 || GET_CODE (XEXP (temp1, 0)) != ASHIFT
5921 || XEXP (XEXP (temp1, 0), 0) != temp)
5922 return temp1;
5924 break;
5926 case TRUNCATE:
5927 /* We can't handle truncation to a partial integer mode here
5928 because we don't know the real bitsize of the partial
5929 integer mode. */
5930 if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
5931 break;
5933 if (HWI_COMPUTABLE_MODE_P (mode))
5934 SUBST (XEXP (x, 0),
5935 force_to_mode (XEXP (x, 0), GET_MODE (XEXP (x, 0)),
5936 GET_MODE_MASK (mode), 0));
5938 /* We can truncate a constant value and return it. */
5939 if (CONST_INT_P (XEXP (x, 0)))
5940 return gen_int_mode (INTVAL (XEXP (x, 0)), mode);
5942 /* Similarly to what we do in simplify-rtx.c, a truncate of a register
5943 whose value is a comparison can be replaced with a subreg if
5944 STORE_FLAG_VALUE permits. */
5945 if (HWI_COMPUTABLE_MODE_P (mode)
5946 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0
5947 && (temp = get_last_value (XEXP (x, 0)))
5948 && COMPARISON_P (temp))
5949 return gen_lowpart (mode, XEXP (x, 0));
5950 break;
5952 case CONST:
5953 /* (const (const X)) can become (const X). Do it this way rather than
5954 returning the inner CONST since CONST can be shared with a
5955 REG_EQUAL note. */
5956 if (GET_CODE (XEXP (x, 0)) == CONST)
5957 SUBST (XEXP (x, 0), XEXP (XEXP (x, 0), 0));
5958 break;
5960 case LO_SUM:
5961 /* Convert (lo_sum (high FOO) FOO) to FOO. This is necessary so we
5962 can add in an offset. find_split_point will split this address up
5963 again if it doesn't match. */
5964 if (HAVE_lo_sum && GET_CODE (XEXP (x, 0)) == HIGH
5965 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
5966 return XEXP (x, 1);
5967 break;
5969 case PLUS:
5970 /* (plus (xor (and <foo> (const_int pow2 - 1)) <c>) <-c>)
5971 when c is (const_int (pow2 + 1) / 2) is a sign extension of a
5972 bit-field and can be replaced by either a sign_extend or a
5973 sign_extract. The `and' may be a zero_extend and the two
5974 <c>, -<c> constants may be reversed. */
5975 if (GET_CODE (XEXP (x, 0)) == XOR
5976 && is_a <scalar_int_mode> (mode, &int_mode)
5977 && CONST_INT_P (XEXP (x, 1))
5978 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
5979 && INTVAL (XEXP (x, 1)) == -INTVAL (XEXP (XEXP (x, 0), 1))
5980 && ((i = exact_log2 (UINTVAL (XEXP (XEXP (x, 0), 1)))) >= 0
5981 || (i = exact_log2 (UINTVAL (XEXP (x, 1)))) >= 0)
5982 && HWI_COMPUTABLE_MODE_P (int_mode)
5983 && ((GET_CODE (XEXP (XEXP (x, 0), 0)) == AND
5984 && CONST_INT_P (XEXP (XEXP (XEXP (x, 0), 0), 1))
5985 && (UINTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1))
5986 == (HOST_WIDE_INT_1U << (i + 1)) - 1))
5987 || (GET_CODE (XEXP (XEXP (x, 0), 0)) == ZERO_EXTEND
5988 && (GET_MODE_PRECISION (GET_MODE (XEXP (XEXP (XEXP (x, 0), 0), 0)))
5989 == (unsigned int) i + 1))))
5990 return simplify_shift_const
5991 (NULL_RTX, ASHIFTRT, int_mode,
5992 simplify_shift_const (NULL_RTX, ASHIFT, int_mode,
5993 XEXP (XEXP (XEXP (x, 0), 0), 0),
5994 GET_MODE_PRECISION (int_mode) - (i + 1)),
5995 GET_MODE_PRECISION (int_mode) - (i + 1));
5997 /* If only the low-order bit of X is possibly nonzero, (plus x -1)
5998 can become (ashiftrt (ashift (xor x 1) C) C) where C is
5999 the bitsize of the mode - 1. This allows simplification of
6000 "a = (b & 8) == 0;" */
6001 if (XEXP (x, 1) == constm1_rtx
6002 && !REG_P (XEXP (x, 0))
6003 && ! (GET_CODE (XEXP (x, 0)) == SUBREG
6004 && REG_P (SUBREG_REG (XEXP (x, 0))))
6005 && is_a <scalar_int_mode> (mode, &int_mode)
6006 && nonzero_bits (XEXP (x, 0), int_mode) == 1)
6007 return simplify_shift_const
6008 (NULL_RTX, ASHIFTRT, int_mode,
6009 simplify_shift_const (NULL_RTX, ASHIFT, int_mode,
6010 gen_rtx_XOR (int_mode, XEXP (x, 0),
6011 const1_rtx),
6012 GET_MODE_PRECISION (int_mode) - 1),
6013 GET_MODE_PRECISION (int_mode) - 1);
6015 /* If we are adding two things that have no bits in common, convert
6016 the addition into an IOR. This will often be further simplified,
6017 for example in cases like ((a & 1) + (a & 2)), which can
6018 become a & 3. */
6020 if (HWI_COMPUTABLE_MODE_P (mode)
6021 && (nonzero_bits (XEXP (x, 0), mode)
6022 & nonzero_bits (XEXP (x, 1), mode)) == 0)
6024 /* Try to simplify the expression further. */
6025 rtx tor = simplify_gen_binary (IOR, mode, XEXP (x, 0), XEXP (x, 1));
6026 temp = combine_simplify_rtx (tor, VOIDmode, in_dest, 0);
6028 /* If we could, great. If not, do not go ahead with the IOR
6029 replacement, since PLUS appears in many special purpose
6030 address arithmetic instructions. */
6031 if (GET_CODE (temp) != CLOBBER
6032 && (GET_CODE (temp) != IOR
6033 || ((XEXP (temp, 0) != XEXP (x, 0)
6034 || XEXP (temp, 1) != XEXP (x, 1))
6035 && (XEXP (temp, 0) != XEXP (x, 1)
6036 || XEXP (temp, 1) != XEXP (x, 0)))))
6037 return temp;
6040 /* Canonicalize x + x into x << 1. */
6041 if (GET_MODE_CLASS (mode) == MODE_INT
6042 && rtx_equal_p (XEXP (x, 0), XEXP (x, 1))
6043 && !side_effects_p (XEXP (x, 0)))
6044 return simplify_gen_binary (ASHIFT, mode, XEXP (x, 0), const1_rtx);
6046 break;
6048 case MINUS:
6049 /* (minus <foo> (and <foo> (const_int -pow2))) becomes
6050 (and <foo> (const_int pow2-1)) */
6051 if (is_a <scalar_int_mode> (mode, &int_mode)
6052 && GET_CODE (XEXP (x, 1)) == AND
6053 && CONST_INT_P (XEXP (XEXP (x, 1), 1))
6054 && pow2p_hwi (-UINTVAL (XEXP (XEXP (x, 1), 1)))
6055 && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
6056 return simplify_and_const_int (NULL_RTX, int_mode, XEXP (x, 0),
6057 -INTVAL (XEXP (XEXP (x, 1), 1)) - 1);
6058 break;
6060 case MULT:
6061 /* If we have (mult (plus A B) C), apply the distributive law and then
6062 the inverse distributive law to see if things simplify. This
6063 occurs mostly in addresses, often when unrolling loops. */
6065 if (GET_CODE (XEXP (x, 0)) == PLUS)
6067 rtx result = distribute_and_simplify_rtx (x, 0);
6068 if (result)
6069 return result;
6072 /* Try simplify a*(b/c) as (a*b)/c. */
6073 if (FLOAT_MODE_P (mode) && flag_associative_math
6074 && GET_CODE (XEXP (x, 0)) == DIV)
6076 rtx tem = simplify_binary_operation (MULT, mode,
6077 XEXP (XEXP (x, 0), 0),
6078 XEXP (x, 1));
6079 if (tem)
6080 return simplify_gen_binary (DIV, mode, tem, XEXP (XEXP (x, 0), 1));
6082 break;
6084 case UDIV:
6085 /* If this is a divide by a power of two, treat it as a shift if
6086 its first operand is a shift. */
6087 if (is_a <scalar_int_mode> (mode, &int_mode)
6088 && CONST_INT_P (XEXP (x, 1))
6089 && (i = exact_log2 (UINTVAL (XEXP (x, 1)))) >= 0
6090 && (GET_CODE (XEXP (x, 0)) == ASHIFT
6091 || GET_CODE (XEXP (x, 0)) == LSHIFTRT
6092 || GET_CODE (XEXP (x, 0)) == ASHIFTRT
6093 || GET_CODE (XEXP (x, 0)) == ROTATE
6094 || GET_CODE (XEXP (x, 0)) == ROTATERT))
6095 return simplify_shift_const (NULL_RTX, LSHIFTRT, int_mode,
6096 XEXP (x, 0), i);
6097 break;
6099 case EQ: case NE:
6100 case GT: case GTU: case GE: case GEU:
6101 case LT: case LTU: case LE: case LEU:
6102 case UNEQ: case LTGT:
6103 case UNGT: case UNGE:
6104 case UNLT: case UNLE:
6105 case UNORDERED: case ORDERED:
6106 /* If the first operand is a condition code, we can't do anything
6107 with it. */
6108 if (GET_CODE (XEXP (x, 0)) == COMPARE
6109 || (GET_MODE_CLASS (GET_MODE (XEXP (x, 0))) != MODE_CC
6110 && ! CC0_P (XEXP (x, 0))))
6112 rtx op0 = XEXP (x, 0);
6113 rtx op1 = XEXP (x, 1);
6114 enum rtx_code new_code;
6116 if (GET_CODE (op0) == COMPARE)
6117 op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);
6119 /* Simplify our comparison, if possible. */
6120 new_code = simplify_comparison (code, &op0, &op1);
6122 /* If STORE_FLAG_VALUE is 1, we can convert (ne x 0) to simply X
6123 if only the low-order bit is possibly nonzero in X (such as when
6124 X is a ZERO_EXTRACT of one bit). Similarly, we can convert EQ to
6125 (xor X 1) or (minus 1 X); we use the former. Finally, if X is
6126 known to be either 0 or -1, NE becomes a NEG and EQ becomes
6127 (plus X 1).
6129 Remove any ZERO_EXTRACT we made when thinking this was a
6130 comparison. It may now be simpler to use, e.g., an AND. If a
6131 ZERO_EXTRACT is indeed appropriate, it will be placed back by
6132 the call to make_compound_operation in the SET case.
6134 Don't apply these optimizations if the caller would
6135 prefer a comparison rather than a value.
6136 E.g., for the condition in an IF_THEN_ELSE most targets need
6137 an explicit comparison. */
6139 if (in_cond)
6142 else if (STORE_FLAG_VALUE == 1
6143 && new_code == NE
6144 && is_int_mode (mode, &int_mode)
6145 && op1 == const0_rtx
6146 && int_mode == GET_MODE (op0)
6147 && nonzero_bits (op0, int_mode) == 1)
6148 return gen_lowpart (int_mode,
6149 expand_compound_operation (op0));
6151 else if (STORE_FLAG_VALUE == 1
6152 && new_code == NE
6153 && is_int_mode (mode, &int_mode)
6154 && op1 == const0_rtx
6155 && int_mode == GET_MODE (op0)
6156 && (num_sign_bit_copies (op0, int_mode)
6157 == GET_MODE_PRECISION (int_mode)))
6159 op0 = expand_compound_operation (op0);
6160 return simplify_gen_unary (NEG, int_mode,
6161 gen_lowpart (int_mode, op0),
6162 int_mode);
6165 else if (STORE_FLAG_VALUE == 1
6166 && new_code == EQ
6167 && is_int_mode (mode, &int_mode)
6168 && op1 == const0_rtx
6169 && int_mode == GET_MODE (op0)
6170 && nonzero_bits (op0, int_mode) == 1)
6172 op0 = expand_compound_operation (op0);
6173 return simplify_gen_binary (XOR, int_mode,
6174 gen_lowpart (int_mode, op0),
6175 const1_rtx);
6178 else if (STORE_FLAG_VALUE == 1
6179 && new_code == EQ
6180 && is_int_mode (mode, &int_mode)
6181 && op1 == const0_rtx
6182 && int_mode == GET_MODE (op0)
6183 && (num_sign_bit_copies (op0, int_mode)
6184 == GET_MODE_PRECISION (int_mode)))
6186 op0 = expand_compound_operation (op0);
6187 return plus_constant (int_mode, gen_lowpart (int_mode, op0), 1);
6190 /* If STORE_FLAG_VALUE is -1, we have cases similar to
6191 those above. */
6192 if (in_cond)
6195 else if (STORE_FLAG_VALUE == -1
6196 && new_code == NE
6197 && is_int_mode (mode, &int_mode)
6198 && op1 == const0_rtx
6199 && int_mode == GET_MODE (op0)
6200 && (num_sign_bit_copies (op0, int_mode)
6201 == GET_MODE_PRECISION (int_mode)))
6202 return gen_lowpart (int_mode, expand_compound_operation (op0));
6204 else if (STORE_FLAG_VALUE == -1
6205 && new_code == NE
6206 && is_int_mode (mode, &int_mode)
6207 && op1 == const0_rtx
6208 && int_mode == GET_MODE (op0)
6209 && nonzero_bits (op0, int_mode) == 1)
6211 op0 = expand_compound_operation (op0);
6212 return simplify_gen_unary (NEG, int_mode,
6213 gen_lowpart (int_mode, op0),
6214 int_mode);
6217 else if (STORE_FLAG_VALUE == -1
6218 && new_code == EQ
6219 && is_int_mode (mode, &int_mode)
6220 && op1 == const0_rtx
6221 && int_mode == GET_MODE (op0)
6222 && (num_sign_bit_copies (op0, int_mode)
6223 == GET_MODE_PRECISION (int_mode)))
6225 op0 = expand_compound_operation (op0);
6226 return simplify_gen_unary (NOT, int_mode,
6227 gen_lowpart (int_mode, op0),
6228 int_mode);
6231 /* If X is 0/1, (eq X 0) is X-1. */
6232 else if (STORE_FLAG_VALUE == -1
6233 && new_code == EQ
6234 && is_int_mode (mode, &int_mode)
6235 && op1 == const0_rtx
6236 && int_mode == GET_MODE (op0)
6237 && nonzero_bits (op0, int_mode) == 1)
6239 op0 = expand_compound_operation (op0);
6240 return plus_constant (int_mode, gen_lowpart (int_mode, op0), -1);
6243 /* If STORE_FLAG_VALUE says to just test the sign bit and X has just
6244 one bit that might be nonzero, we can convert (ne x 0) to
6245 (ashift x c) where C puts the bit in the sign bit. Remove any
6246 AND with STORE_FLAG_VALUE when we are done, since we are only
6247 going to test the sign bit. */
6248 if (new_code == NE
6249 && is_int_mode (mode, &int_mode)
6250 && HWI_COMPUTABLE_MODE_P (int_mode)
6251 && val_signbit_p (int_mode, STORE_FLAG_VALUE)
6252 && op1 == const0_rtx
6253 && int_mode == GET_MODE (op0)
6254 && (i = exact_log2 (nonzero_bits (op0, int_mode))) >= 0)
6256 x = simplify_shift_const (NULL_RTX, ASHIFT, int_mode,
6257 expand_compound_operation (op0),
6258 GET_MODE_PRECISION (int_mode) - 1 - i);
6259 if (GET_CODE (x) == AND && XEXP (x, 1) == const_true_rtx)
6260 return XEXP (x, 0);
6261 else
6262 return x;
6265 /* If the code changed, return a whole new comparison.
6266 We also need to avoid using SUBST in cases where
6267 simplify_comparison has widened a comparison with a CONST_INT,
6268 since in that case the wider CONST_INT may fail the sanity
6269 checks in do_SUBST. */
6270 if (new_code != code
6271 || (CONST_INT_P (op1)
6272 && GET_MODE (op0) != GET_MODE (XEXP (x, 0))
6273 && GET_MODE (op0) != GET_MODE (XEXP (x, 1))))
6274 return gen_rtx_fmt_ee (new_code, mode, op0, op1);
6276 /* Otherwise, keep this operation, but maybe change its operands.
6277 This also converts (ne (compare FOO BAR) 0) to (ne FOO BAR). */
6278 SUBST (XEXP (x, 0), op0);
6279 SUBST (XEXP (x, 1), op1);
6281 break;
6283 case IF_THEN_ELSE:
6284 return simplify_if_then_else (x);
6286 case ZERO_EXTRACT:
6287 case SIGN_EXTRACT:
6288 case ZERO_EXTEND:
6289 case SIGN_EXTEND:
6290 /* If we are processing SET_DEST, we are done. */
6291 if (in_dest)
6292 return x;
6294 return expand_compound_operation (x);
6296 case SET:
6297 return simplify_set (x);
6299 case AND:
6300 case IOR:
6301 return simplify_logical (x);
6303 case ASHIFT:
6304 case LSHIFTRT:
6305 case ASHIFTRT:
6306 case ROTATE:
6307 case ROTATERT:
6308 /* If this is a shift by a constant amount, simplify it. */
6309 if (CONST_INT_P (XEXP (x, 1)))
6310 return simplify_shift_const (x, code, mode, XEXP (x, 0),
6311 INTVAL (XEXP (x, 1)));
6313 else if (SHIFT_COUNT_TRUNCATED && !REG_P (XEXP (x, 1)))
6314 SUBST (XEXP (x, 1),
6315 force_to_mode (XEXP (x, 1), GET_MODE (XEXP (x, 1)),
6316 (HOST_WIDE_INT_1U
6317 << exact_log2 (GET_MODE_UNIT_BITSIZE
6318 (GET_MODE (x))))
6319 - 1,
6320 0));
6321 break;
6323 default:
6324 break;
6327 return x;
6330 /* Simplify X, an IF_THEN_ELSE expression. Return the new expression. */
6332 static rtx
6333 simplify_if_then_else (rtx x)
6335 machine_mode mode = GET_MODE (x);
6336 rtx cond = XEXP (x, 0);
6337 rtx true_rtx = XEXP (x, 1);
6338 rtx false_rtx = XEXP (x, 2);
6339 enum rtx_code true_code = GET_CODE (cond);
6340 int comparison_p = COMPARISON_P (cond);
6341 rtx temp;
6342 int i;
6343 enum rtx_code false_code;
6344 rtx reversed;
6345 scalar_int_mode int_mode, inner_mode;
6347 /* Simplify storing of the truth value. */
6348 if (comparison_p && true_rtx == const_true_rtx && false_rtx == const0_rtx)
6349 return simplify_gen_relational (true_code, mode, VOIDmode,
6350 XEXP (cond, 0), XEXP (cond, 1));
6352 /* Also when the truth value has to be reversed. */
6353 if (comparison_p
6354 && true_rtx == const0_rtx && false_rtx == const_true_rtx
6355 && (reversed = reversed_comparison (cond, mode)))
6356 return reversed;
6358 /* Sometimes we can simplify the arm of an IF_THEN_ELSE if a register used
6359 in it is being compared against certain values. Get the true and false
6360 comparisons and see if that says anything about the value of each arm. */
6362 if (comparison_p
6363 && ((false_code = reversed_comparison_code (cond, NULL))
6364 != UNKNOWN)
6365 && REG_P (XEXP (cond, 0)))
6367 HOST_WIDE_INT nzb;
6368 rtx from = XEXP (cond, 0);
6369 rtx true_val = XEXP (cond, 1);
6370 rtx false_val = true_val;
6371 int swapped = 0;
6373 /* If FALSE_CODE is EQ, swap the codes and arms. */
6375 if (false_code == EQ)
6377 swapped = 1, true_code = EQ, false_code = NE;
6378 std::swap (true_rtx, false_rtx);
6381 scalar_int_mode from_mode;
6382 if (is_a <scalar_int_mode> (GET_MODE (from), &from_mode))
6384 /* If we are comparing against zero and the expression being
6385 tested has only a single bit that might be nonzero, that is
6386 its value when it is not equal to zero. Similarly if it is
6387 known to be -1 or 0. */
6388 if (true_code == EQ
6389 && true_val == const0_rtx
6390 && pow2p_hwi (nzb = nonzero_bits (from, from_mode)))
6392 false_code = EQ;
6393 false_val = gen_int_mode (nzb, from_mode);
6395 else if (true_code == EQ
6396 && true_val == const0_rtx
6397 && (num_sign_bit_copies (from, from_mode)
6398 == GET_MODE_PRECISION (from_mode)))
6400 false_code = EQ;
6401 false_val = constm1_rtx;
6405 /* Now simplify an arm if we know the value of the register in the
6406 branch and it is used in the arm. Be careful due to the potential
6407 of locally-shared RTL. */
6409 if (reg_mentioned_p (from, true_rtx))
6410 true_rtx = subst (known_cond (copy_rtx (true_rtx), true_code,
6411 from, true_val),
6412 pc_rtx, pc_rtx, 0, 0, 0);
6413 if (reg_mentioned_p (from, false_rtx))
6414 false_rtx = subst (known_cond (copy_rtx (false_rtx), false_code,
6415 from, false_val),
6416 pc_rtx, pc_rtx, 0, 0, 0);
6418 SUBST (XEXP (x, 1), swapped ? false_rtx : true_rtx);
6419 SUBST (XEXP (x, 2), swapped ? true_rtx : false_rtx);
6421 true_rtx = XEXP (x, 1);
6422 false_rtx = XEXP (x, 2);
6423 true_code = GET_CODE (cond);
6426 /* If we have (if_then_else FOO (pc) (label_ref BAR)) and FOO can be
6427 reversed, do so to avoid needing two sets of patterns for
6428 subtract-and-branch insns. Similarly if we have a constant in the true
6429 arm, the false arm is the same as the first operand of the comparison, or
6430 the false arm is more complicated than the true arm. */
6432 if (comparison_p
6433 && reversed_comparison_code (cond, NULL) != UNKNOWN
6434 && (true_rtx == pc_rtx
6435 || (CONSTANT_P (true_rtx)
6436 && !CONST_INT_P (false_rtx) && false_rtx != pc_rtx)
6437 || true_rtx == const0_rtx
6438 || (OBJECT_P (true_rtx) && !OBJECT_P (false_rtx))
6439 || (GET_CODE (true_rtx) == SUBREG && OBJECT_P (SUBREG_REG (true_rtx))
6440 && !OBJECT_P (false_rtx))
6441 || reg_mentioned_p (true_rtx, false_rtx)
6442 || rtx_equal_p (false_rtx, XEXP (cond, 0))))
6444 true_code = reversed_comparison_code (cond, NULL);
6445 SUBST (XEXP (x, 0), reversed_comparison (cond, GET_MODE (cond)));
6446 SUBST (XEXP (x, 1), false_rtx);
6447 SUBST (XEXP (x, 2), true_rtx);
6449 std::swap (true_rtx, false_rtx);
6450 cond = XEXP (x, 0);
6452 /* It is possible that the conditional has been simplified out. */
6453 true_code = GET_CODE (cond);
6454 comparison_p = COMPARISON_P (cond);
6457 /* If the two arms are identical, we don't need the comparison. */
6459 if (rtx_equal_p (true_rtx, false_rtx) && ! side_effects_p (cond))
6460 return true_rtx;
6462 /* Convert a == b ? b : a to "a". */
6463 if (true_code == EQ && ! side_effects_p (cond)
6464 && !HONOR_NANS (mode)
6465 && rtx_equal_p (XEXP (cond, 0), false_rtx)
6466 && rtx_equal_p (XEXP (cond, 1), true_rtx))
6467 return false_rtx;
6468 else if (true_code == NE && ! side_effects_p (cond)
6469 && !HONOR_NANS (mode)
6470 && rtx_equal_p (XEXP (cond, 0), true_rtx)
6471 && rtx_equal_p (XEXP (cond, 1), false_rtx))
6472 return true_rtx;
6474 /* Look for cases where we have (abs x) or (neg (abs X)). */
6476 if (GET_MODE_CLASS (mode) == MODE_INT
6477 && comparison_p
6478 && XEXP (cond, 1) == const0_rtx
6479 && GET_CODE (false_rtx) == NEG
6480 && rtx_equal_p (true_rtx, XEXP (false_rtx, 0))
6481 && rtx_equal_p (true_rtx, XEXP (cond, 0))
6482 && ! side_effects_p (true_rtx))
6483 switch (true_code)
6485 case GT:
6486 case GE:
6487 return simplify_gen_unary (ABS, mode, true_rtx, mode);
6488 case LT:
6489 case LE:
6490 return
6491 simplify_gen_unary (NEG, mode,
6492 simplify_gen_unary (ABS, mode, true_rtx, mode),
6493 mode);
6494 default:
6495 break;
6498 /* Look for MIN or MAX. */
6500 if ((! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
6501 && comparison_p
6502 && rtx_equal_p (XEXP (cond, 0), true_rtx)
6503 && rtx_equal_p (XEXP (cond, 1), false_rtx)
6504 && ! side_effects_p (cond))
6505 switch (true_code)
6507 case GE:
6508 case GT:
6509 return simplify_gen_binary (SMAX, mode, true_rtx, false_rtx);
6510 case LE:
6511 case LT:
6512 return simplify_gen_binary (SMIN, mode, true_rtx, false_rtx);
6513 case GEU:
6514 case GTU:
6515 return simplify_gen_binary (UMAX, mode, true_rtx, false_rtx);
6516 case LEU:
6517 case LTU:
6518 return simplify_gen_binary (UMIN, mode, true_rtx, false_rtx);
6519 default:
6520 break;
6523 /* If we have (if_then_else COND (OP Z C1) Z) and OP is an identity when its
6524 second operand is zero, this can be done as (OP Z (mult COND C2)) where
6525 C2 = C1 * STORE_FLAG_VALUE. Similarly if OP has an outer ZERO_EXTEND or
6526 SIGN_EXTEND as long as Z is already extended (so we don't destroy it).
6527 We can do this kind of thing in some cases when STORE_FLAG_VALUE is
6528 neither 1 or -1, but it isn't worth checking for. */
6530 if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
6531 && comparison_p
6532 && is_int_mode (mode, &int_mode)
6533 && ! side_effects_p (x))
6535 rtx t = make_compound_operation (true_rtx, SET);
6536 rtx f = make_compound_operation (false_rtx, SET);
6537 rtx cond_op0 = XEXP (cond, 0);
6538 rtx cond_op1 = XEXP (cond, 1);
6539 enum rtx_code op = UNKNOWN, extend_op = UNKNOWN;
6540 scalar_int_mode m = int_mode;
6541 rtx z = 0, c1 = NULL_RTX;
6543 if ((GET_CODE (t) == PLUS || GET_CODE (t) == MINUS
6544 || GET_CODE (t) == IOR || GET_CODE (t) == XOR
6545 || GET_CODE (t) == ASHIFT
6546 || GET_CODE (t) == LSHIFTRT || GET_CODE (t) == ASHIFTRT)
6547 && rtx_equal_p (XEXP (t, 0), f))
6548 c1 = XEXP (t, 1), op = GET_CODE (t), z = f;
6550 /* If an identity-zero op is commutative, check whether there
6551 would be a match if we swapped the operands. */
6552 else if ((GET_CODE (t) == PLUS || GET_CODE (t) == IOR
6553 || GET_CODE (t) == XOR)
6554 && rtx_equal_p (XEXP (t, 1), f))
6555 c1 = XEXP (t, 0), op = GET_CODE (t), z = f;
6556 else if (GET_CODE (t) == SIGN_EXTEND
6557 && is_a <scalar_int_mode> (GET_MODE (XEXP (t, 0)), &inner_mode)
6558 && (GET_CODE (XEXP (t, 0)) == PLUS
6559 || GET_CODE (XEXP (t, 0)) == MINUS
6560 || GET_CODE (XEXP (t, 0)) == IOR
6561 || GET_CODE (XEXP (t, 0)) == XOR
6562 || GET_CODE (XEXP (t, 0)) == ASHIFT
6563 || GET_CODE (XEXP (t, 0)) == LSHIFTRT
6564 || GET_CODE (XEXP (t, 0)) == ASHIFTRT)
6565 && GET_CODE (XEXP (XEXP (t, 0), 0)) == SUBREG
6566 && subreg_lowpart_p (XEXP (XEXP (t, 0), 0))
6567 && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 0)), f)
6568 && (num_sign_bit_copies (f, GET_MODE (f))
6569 > (unsigned int)
6570 (GET_MODE_PRECISION (int_mode)
6571 - GET_MODE_PRECISION (inner_mode))))
6573 c1 = XEXP (XEXP (t, 0), 1); z = f; op = GET_CODE (XEXP (t, 0));
6574 extend_op = SIGN_EXTEND;
6575 m = inner_mode;
6577 else if (GET_CODE (t) == SIGN_EXTEND
6578 && is_a <scalar_int_mode> (GET_MODE (XEXP (t, 0)), &inner_mode)
6579 && (GET_CODE (XEXP (t, 0)) == PLUS
6580 || GET_CODE (XEXP (t, 0)) == IOR
6581 || GET_CODE (XEXP (t, 0)) == XOR)
6582 && GET_CODE (XEXP (XEXP (t, 0), 1)) == SUBREG
6583 && subreg_lowpart_p (XEXP (XEXP (t, 0), 1))
6584 && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 1)), f)
6585 && (num_sign_bit_copies (f, GET_MODE (f))
6586 > (unsigned int)
6587 (GET_MODE_PRECISION (int_mode)
6588 - GET_MODE_PRECISION (inner_mode))))
6590 c1 = XEXP (XEXP (t, 0), 0); z = f; op = GET_CODE (XEXP (t, 0));
6591 extend_op = SIGN_EXTEND;
6592 m = inner_mode;
6594 else if (GET_CODE (t) == ZERO_EXTEND
6595 && is_a <scalar_int_mode> (GET_MODE (XEXP (t, 0)), &inner_mode)
6596 && (GET_CODE (XEXP (t, 0)) == PLUS
6597 || GET_CODE (XEXP (t, 0)) == MINUS
6598 || GET_CODE (XEXP (t, 0)) == IOR
6599 || GET_CODE (XEXP (t, 0)) == XOR
6600 || GET_CODE (XEXP (t, 0)) == ASHIFT
6601 || GET_CODE (XEXP (t, 0)) == LSHIFTRT
6602 || GET_CODE (XEXP (t, 0)) == ASHIFTRT)
6603 && GET_CODE (XEXP (XEXP (t, 0), 0)) == SUBREG
6604 && HWI_COMPUTABLE_MODE_P (int_mode)
6605 && subreg_lowpart_p (XEXP (XEXP (t, 0), 0))
6606 && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 0)), f)
6607 && ((nonzero_bits (f, GET_MODE (f))
6608 & ~GET_MODE_MASK (inner_mode))
6609 == 0))
6611 c1 = XEXP (XEXP (t, 0), 1); z = f; op = GET_CODE (XEXP (t, 0));
6612 extend_op = ZERO_EXTEND;
6613 m = inner_mode;
6615 else if (GET_CODE (t) == ZERO_EXTEND
6616 && is_a <scalar_int_mode> (GET_MODE (XEXP (t, 0)), &inner_mode)
6617 && (GET_CODE (XEXP (t, 0)) == PLUS
6618 || GET_CODE (XEXP (t, 0)) == IOR
6619 || GET_CODE (XEXP (t, 0)) == XOR)
6620 && GET_CODE (XEXP (XEXP (t, 0), 1)) == SUBREG
6621 && HWI_COMPUTABLE_MODE_P (int_mode)
6622 && subreg_lowpart_p (XEXP (XEXP (t, 0), 1))
6623 && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 1)), f)
6624 && ((nonzero_bits (f, GET_MODE (f))
6625 & ~GET_MODE_MASK (inner_mode))
6626 == 0))
6628 c1 = XEXP (XEXP (t, 0), 0); z = f; op = GET_CODE (XEXP (t, 0));
6629 extend_op = ZERO_EXTEND;
6630 m = inner_mode;
6633 if (z)
6635 temp = subst (simplify_gen_relational (true_code, m, VOIDmode,
6636 cond_op0, cond_op1),
6637 pc_rtx, pc_rtx, 0, 0, 0);
6638 temp = simplify_gen_binary (MULT, m, temp,
6639 simplify_gen_binary (MULT, m, c1,
6640 const_true_rtx));
6641 temp = subst (temp, pc_rtx, pc_rtx, 0, 0, 0);
6642 temp = simplify_gen_binary (op, m, gen_lowpart (m, z), temp);
6644 if (extend_op != UNKNOWN)
6645 temp = simplify_gen_unary (extend_op, int_mode, temp, m);
6647 return temp;
6651 /* If we have (if_then_else (ne A 0) C1 0) and either A is known to be 0 or
6652 1 and C1 is a single bit or A is known to be 0 or -1 and C1 is the
6653 negation of a single bit, we can convert this operation to a shift. We
6654 can actually do this more generally, but it doesn't seem worth it. */
6656 if (true_code == NE
6657 && is_a <scalar_int_mode> (mode, &int_mode)
6658 && XEXP (cond, 1) == const0_rtx
6659 && false_rtx == const0_rtx
6660 && CONST_INT_P (true_rtx)
6661 && ((1 == nonzero_bits (XEXP (cond, 0), int_mode)
6662 && (i = exact_log2 (UINTVAL (true_rtx))) >= 0)
6663 || ((num_sign_bit_copies (XEXP (cond, 0), int_mode)
6664 == GET_MODE_PRECISION (int_mode))
6665 && (i = exact_log2 (-UINTVAL (true_rtx))) >= 0)))
6666 return
6667 simplify_shift_const (NULL_RTX, ASHIFT, int_mode,
6668 gen_lowpart (int_mode, XEXP (cond, 0)), i);
6670 /* (IF_THEN_ELSE (NE A 0) C1 0) is A or a zero-extend of A if the only
6671 non-zero bit in A is C1. */
6672 if (true_code == NE && XEXP (cond, 1) == const0_rtx
6673 && false_rtx == const0_rtx && CONST_INT_P (true_rtx)
6674 && is_a <scalar_int_mode> (mode, &int_mode)
6675 && is_a <scalar_int_mode> (GET_MODE (XEXP (cond, 0)), &inner_mode)
6676 && (UINTVAL (true_rtx) & GET_MODE_MASK (int_mode))
6677 == nonzero_bits (XEXP (cond, 0), inner_mode)
6678 && (i = exact_log2 (UINTVAL (true_rtx) & GET_MODE_MASK (int_mode))) >= 0)
6680 rtx val = XEXP (cond, 0);
6681 if (inner_mode == int_mode)
6682 return val;
6683 else if (GET_MODE_PRECISION (inner_mode) < GET_MODE_PRECISION (int_mode))
6684 return simplify_gen_unary (ZERO_EXTEND, int_mode, val, inner_mode);
6687 return x;
6690 /* Simplify X, a SET expression. Return the new expression. */
6692 static rtx
6693 simplify_set (rtx x)
6695 rtx src = SET_SRC (x);
6696 rtx dest = SET_DEST (x);
6697 machine_mode mode
6698 = GET_MODE (src) != VOIDmode ? GET_MODE (src) : GET_MODE (dest);
6699 rtx_insn *other_insn;
6700 rtx *cc_use;
6701 scalar_int_mode int_mode;
6703 /* (set (pc) (return)) gets written as (return). */
6704 if (GET_CODE (dest) == PC && ANY_RETURN_P (src))
6705 return src;
6707 /* Now that we know for sure which bits of SRC we are using, see if we can
6708 simplify the expression for the object knowing that we only need the
6709 low-order bits. */
6711 if (GET_MODE_CLASS (mode) == MODE_INT && HWI_COMPUTABLE_MODE_P (mode))
6713 src = force_to_mode (src, mode, HOST_WIDE_INT_M1U, 0);
6714 SUBST (SET_SRC (x), src);
6717 /* If we are setting CC0 or if the source is a COMPARE, look for the use of
6718 the comparison result and try to simplify it unless we already have used
6719 undobuf.other_insn. */
6720 if ((GET_MODE_CLASS (mode) == MODE_CC
6721 || GET_CODE (src) == COMPARE
6722 || CC0_P (dest))
6723 && (cc_use = find_single_use (dest, subst_insn, &other_insn)) != 0
6724 && (undobuf.other_insn == 0 || other_insn == undobuf.other_insn)
6725 && COMPARISON_P (*cc_use)
6726 && rtx_equal_p (XEXP (*cc_use, 0), dest))
6728 enum rtx_code old_code = GET_CODE (*cc_use);
6729 enum rtx_code new_code;
6730 rtx op0, op1, tmp;
6731 int other_changed = 0;
6732 rtx inner_compare = NULL_RTX;
6733 machine_mode compare_mode = GET_MODE (dest);
6735 if (GET_CODE (src) == COMPARE)
6737 op0 = XEXP (src, 0), op1 = XEXP (src, 1);
6738 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
6740 inner_compare = op0;
6741 op0 = XEXP (inner_compare, 0), op1 = XEXP (inner_compare, 1);
6744 else
6745 op0 = src, op1 = CONST0_RTX (GET_MODE (src));
6747 tmp = simplify_relational_operation (old_code, compare_mode, VOIDmode,
6748 op0, op1);
6749 if (!tmp)
6750 new_code = old_code;
6751 else if (!CONSTANT_P (tmp))
6753 new_code = GET_CODE (tmp);
6754 op0 = XEXP (tmp, 0);
6755 op1 = XEXP (tmp, 1);
6757 else
6759 rtx pat = PATTERN (other_insn);
6760 undobuf.other_insn = other_insn;
6761 SUBST (*cc_use, tmp);
6763 /* Attempt to simplify CC user. */
6764 if (GET_CODE (pat) == SET)
6766 rtx new_rtx = simplify_rtx (SET_SRC (pat));
6767 if (new_rtx != NULL_RTX)
6768 SUBST (SET_SRC (pat), new_rtx);
6771 /* Convert X into a no-op move. */
6772 SUBST (SET_DEST (x), pc_rtx);
6773 SUBST (SET_SRC (x), pc_rtx);
6774 return x;
6777 /* Simplify our comparison, if possible. */
6778 new_code = simplify_comparison (new_code, &op0, &op1);
6780 #ifdef SELECT_CC_MODE
6781 /* If this machine has CC modes other than CCmode, check to see if we
6782 need to use a different CC mode here. */
6783 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
6784 compare_mode = GET_MODE (op0);
6785 else if (inner_compare
6786 && GET_MODE_CLASS (GET_MODE (inner_compare)) == MODE_CC
6787 && new_code == old_code
6788 && op0 == XEXP (inner_compare, 0)
6789 && op1 == XEXP (inner_compare, 1))
6790 compare_mode = GET_MODE (inner_compare);
6791 else
6792 compare_mode = SELECT_CC_MODE (new_code, op0, op1);
6794 /* If the mode changed, we have to change SET_DEST, the mode in the
6795 compare, and the mode in the place SET_DEST is used. If SET_DEST is
6796 a hard register, just build new versions with the proper mode. If it
6797 is a pseudo, we lose unless it is only time we set the pseudo, in
6798 which case we can safely change its mode. */
6799 if (!HAVE_cc0 && compare_mode != GET_MODE (dest))
6801 if (can_change_dest_mode (dest, 0, compare_mode))
6803 unsigned int regno = REGNO (dest);
6804 rtx new_dest;
6806 if (regno < FIRST_PSEUDO_REGISTER)
6807 new_dest = gen_rtx_REG (compare_mode, regno);
6808 else
6810 SUBST_MODE (regno_reg_rtx[regno], compare_mode);
6811 new_dest = regno_reg_rtx[regno];
6814 SUBST (SET_DEST (x), new_dest);
6815 SUBST (XEXP (*cc_use, 0), new_dest);
6816 other_changed = 1;
6818 dest = new_dest;
6821 #endif /* SELECT_CC_MODE */
6823 /* If the code changed, we have to build a new comparison in
6824 undobuf.other_insn. */
6825 if (new_code != old_code)
6827 int other_changed_previously = other_changed;
6828 unsigned HOST_WIDE_INT mask;
6829 rtx old_cc_use = *cc_use;
6831 SUBST (*cc_use, gen_rtx_fmt_ee (new_code, GET_MODE (*cc_use),
6832 dest, const0_rtx));
6833 other_changed = 1;
6835 /* If the only change we made was to change an EQ into an NE or
6836 vice versa, OP0 has only one bit that might be nonzero, and OP1
6837 is zero, check if changing the user of the condition code will
6838 produce a valid insn. If it won't, we can keep the original code
6839 in that insn by surrounding our operation with an XOR. */
6841 if (((old_code == NE && new_code == EQ)
6842 || (old_code == EQ && new_code == NE))
6843 && ! other_changed_previously && op1 == const0_rtx
6844 && HWI_COMPUTABLE_MODE_P (GET_MODE (op0))
6845 && pow2p_hwi (mask = nonzero_bits (op0, GET_MODE (op0))))
6847 rtx pat = PATTERN (other_insn), note = 0;
6849 if ((recog_for_combine (&pat, other_insn, &note) < 0
6850 && ! check_asm_operands (pat)))
6852 *cc_use = old_cc_use;
6853 other_changed = 0;
6855 op0 = simplify_gen_binary (XOR, GET_MODE (op0), op0,
6856 gen_int_mode (mask,
6857 GET_MODE (op0)));
6862 if (other_changed)
6863 undobuf.other_insn = other_insn;
6865 /* Don't generate a compare of a CC with 0, just use that CC. */
6866 if (GET_MODE (op0) == compare_mode && op1 == const0_rtx)
6868 SUBST (SET_SRC (x), op0);
6869 src = SET_SRC (x);
6871 /* Otherwise, if we didn't previously have the same COMPARE we
6872 want, create it from scratch. */
6873 else if (GET_CODE (src) != COMPARE || GET_MODE (src) != compare_mode
6874 || XEXP (src, 0) != op0 || XEXP (src, 1) != op1)
6876 SUBST (SET_SRC (x), gen_rtx_COMPARE (compare_mode, op0, op1));
6877 src = SET_SRC (x);
6880 else
6882 /* Get SET_SRC in a form where we have placed back any
6883 compound expressions. Then do the checks below. */
6884 src = make_compound_operation (src, SET);
6885 SUBST (SET_SRC (x), src);
6888 /* If we have (set x (subreg:m1 (op:m2 ...) 0)) with OP being some operation,
6889 and X being a REG or (subreg (reg)), we may be able to convert this to
6890 (set (subreg:m2 x) (op)).
6892 We can always do this if M1 is narrower than M2 because that means that
6893 we only care about the low bits of the result.
6895 However, on machines without WORD_REGISTER_OPERATIONS defined, we cannot
6896 perform a narrower operation than requested since the high-order bits will
6897 be undefined. On machine where it is defined, this transformation is safe
6898 as long as M1 and M2 have the same number of words. */
6900 if (GET_CODE (src) == SUBREG && subreg_lowpart_p (src)
6901 && !OBJECT_P (SUBREG_REG (src))
6902 && (((GET_MODE_SIZE (GET_MODE (src)) + (UNITS_PER_WORD - 1))
6903 / UNITS_PER_WORD)
6904 == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (src)))
6905 + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD))
6906 && (WORD_REGISTER_OPERATIONS || !paradoxical_subreg_p (src))
6907 && ! (REG_P (dest) && REGNO (dest) < FIRST_PSEUDO_REGISTER
6908 && !REG_CAN_CHANGE_MODE_P (REGNO (dest),
6909 GET_MODE (SUBREG_REG (src)),
6910 GET_MODE (src)))
6911 && (REG_P (dest)
6912 || (GET_CODE (dest) == SUBREG
6913 && REG_P (SUBREG_REG (dest)))))
6915 SUBST (SET_DEST (x),
6916 gen_lowpart (GET_MODE (SUBREG_REG (src)),
6917 dest));
6918 SUBST (SET_SRC (x), SUBREG_REG (src));
6920 src = SET_SRC (x), dest = SET_DEST (x);
6923 /* If we have (set (cc0) (subreg ...)), we try to remove the subreg
6924 in SRC. */
6925 if (dest == cc0_rtx
6926 && partial_subreg_p (src)
6927 && subreg_lowpart_p (src))
6929 rtx inner = SUBREG_REG (src);
6930 machine_mode inner_mode = GET_MODE (inner);
6932 /* Here we make sure that we don't have a sign bit on. */
6933 if (val_signbit_known_clear_p (GET_MODE (src),
6934 nonzero_bits (inner, inner_mode)))
6936 SUBST (SET_SRC (x), inner);
6937 src = SET_SRC (x);
6941 /* If we have (set FOO (subreg:M (mem:N BAR) 0)) with M wider than N, this
6942 would require a paradoxical subreg. Replace the subreg with a
6943 zero_extend to avoid the reload that would otherwise be required. */
6945 enum rtx_code extend_op;
6946 if (paradoxical_subreg_p (src)
6947 && MEM_P (SUBREG_REG (src))
6948 && (extend_op = load_extend_op (GET_MODE (SUBREG_REG (src)))) != UNKNOWN)
6950 SUBST (SET_SRC (x),
6951 gen_rtx_fmt_e (extend_op, GET_MODE (src), SUBREG_REG (src)));
6953 src = SET_SRC (x);
6956 /* If we don't have a conditional move, SET_SRC is an IF_THEN_ELSE, and we
6957 are comparing an item known to be 0 or -1 against 0, use a logical
6958 operation instead. Check for one of the arms being an IOR of the other
6959 arm with some value. We compute three terms to be IOR'ed together. In
6960 practice, at most two will be nonzero. Then we do the IOR's. */
6962 if (GET_CODE (dest) != PC
6963 && GET_CODE (src) == IF_THEN_ELSE
6964 && is_int_mode (GET_MODE (src), &int_mode)
6965 && (GET_CODE (XEXP (src, 0)) == EQ || GET_CODE (XEXP (src, 0)) == NE)
6966 && XEXP (XEXP (src, 0), 1) == const0_rtx
6967 && int_mode == GET_MODE (XEXP (XEXP (src, 0), 0))
6968 && (!HAVE_conditional_move
6969 || ! can_conditionally_move_p (int_mode))
6970 && (num_sign_bit_copies (XEXP (XEXP (src, 0), 0), int_mode)
6971 == GET_MODE_PRECISION (int_mode))
6972 && ! side_effects_p (src))
6974 rtx true_rtx = (GET_CODE (XEXP (src, 0)) == NE
6975 ? XEXP (src, 1) : XEXP (src, 2));
6976 rtx false_rtx = (GET_CODE (XEXP (src, 0)) == NE
6977 ? XEXP (src, 2) : XEXP (src, 1));
6978 rtx term1 = const0_rtx, term2, term3;
6980 if (GET_CODE (true_rtx) == IOR
6981 && rtx_equal_p (XEXP (true_rtx, 0), false_rtx))
6982 term1 = false_rtx, true_rtx = XEXP (true_rtx, 1), false_rtx = const0_rtx;
6983 else if (GET_CODE (true_rtx) == IOR
6984 && rtx_equal_p (XEXP (true_rtx, 1), false_rtx))
6985 term1 = false_rtx, true_rtx = XEXP (true_rtx, 0), false_rtx = const0_rtx;
6986 else if (GET_CODE (false_rtx) == IOR
6987 && rtx_equal_p (XEXP (false_rtx, 0), true_rtx))
6988 term1 = true_rtx, false_rtx = XEXP (false_rtx, 1), true_rtx = const0_rtx;
6989 else if (GET_CODE (false_rtx) == IOR
6990 && rtx_equal_p (XEXP (false_rtx, 1), true_rtx))
6991 term1 = true_rtx, false_rtx = XEXP (false_rtx, 0), true_rtx = const0_rtx;
6993 term2 = simplify_gen_binary (AND, int_mode,
6994 XEXP (XEXP (src, 0), 0), true_rtx);
6995 term3 = simplify_gen_binary (AND, int_mode,
6996 simplify_gen_unary (NOT, int_mode,
6997 XEXP (XEXP (src, 0), 0),
6998 int_mode),
6999 false_rtx);
7001 SUBST (SET_SRC (x),
7002 simplify_gen_binary (IOR, int_mode,
7003 simplify_gen_binary (IOR, int_mode,
7004 term1, term2),
7005 term3));
7007 src = SET_SRC (x);
7010 /* If either SRC or DEST is a CLOBBER of (const_int 0), make this
7011 whole thing fail. */
7012 if (GET_CODE (src) == CLOBBER && XEXP (src, 0) == const0_rtx)
7013 return src;
7014 else if (GET_CODE (dest) == CLOBBER && XEXP (dest, 0) == const0_rtx)
7015 return dest;
7016 else
7017 /* Convert this into a field assignment operation, if possible. */
7018 return make_field_assignment (x);
7021 /* Simplify, X, and AND, IOR, or XOR operation, and return the simplified
7022 result. */
7024 static rtx
7025 simplify_logical (rtx x)
7027 rtx op0 = XEXP (x, 0);
7028 rtx op1 = XEXP (x, 1);
7029 scalar_int_mode mode;
7031 switch (GET_CODE (x))
7033 case AND:
7034 /* We can call simplify_and_const_int only if we don't lose
7035 any (sign) bits when converting INTVAL (op1) to
7036 "unsigned HOST_WIDE_INT". */
7037 if (is_a <scalar_int_mode> (GET_MODE (x), &mode)
7038 && CONST_INT_P (op1)
7039 && (HWI_COMPUTABLE_MODE_P (mode)
7040 || INTVAL (op1) > 0))
7042 x = simplify_and_const_int (x, mode, op0, INTVAL (op1));
7043 if (GET_CODE (x) != AND)
7044 return x;
7046 op0 = XEXP (x, 0);
7047 op1 = XEXP (x, 1);
7050 /* If we have any of (and (ior A B) C) or (and (xor A B) C),
7051 apply the distributive law and then the inverse distributive
7052 law to see if things simplify. */
7053 if (GET_CODE (op0) == IOR || GET_CODE (op0) == XOR)
7055 rtx result = distribute_and_simplify_rtx (x, 0);
7056 if (result)
7057 return result;
7059 if (GET_CODE (op1) == IOR || GET_CODE (op1) == XOR)
7061 rtx result = distribute_and_simplify_rtx (x, 1);
7062 if (result)
7063 return result;
7065 break;
7067 case IOR:
7068 /* If we have (ior (and A B) C), apply the distributive law and then
7069 the inverse distributive law to see if things simplify. */
7071 if (GET_CODE (op0) == AND)
7073 rtx result = distribute_and_simplify_rtx (x, 0);
7074 if (result)
7075 return result;
7078 if (GET_CODE (op1) == AND)
7080 rtx result = distribute_and_simplify_rtx (x, 1);
7081 if (result)
7082 return result;
7084 break;
7086 default:
7087 gcc_unreachable ();
7090 return x;
7093 /* We consider ZERO_EXTRACT, SIGN_EXTRACT, and SIGN_EXTEND as "compound
7094 operations" because they can be replaced with two more basic operations.
7095 ZERO_EXTEND is also considered "compound" because it can be replaced with
7096 an AND operation, which is simpler, though only one operation.
7098 The function expand_compound_operation is called with an rtx expression
7099 and will convert it to the appropriate shifts and AND operations,
7100 simplifying at each stage.
7102 The function make_compound_operation is called to convert an expression
7103 consisting of shifts and ANDs into the equivalent compound expression.
7104 It is the inverse of this function, loosely speaking. */
7106 static rtx
7107 expand_compound_operation (rtx x)
7109 unsigned HOST_WIDE_INT pos = 0, len;
7110 int unsignedp = 0;
7111 unsigned int modewidth;
7112 rtx tem;
7113 scalar_int_mode inner_mode;
7115 switch (GET_CODE (x))
7117 case ZERO_EXTEND:
7118 unsignedp = 1;
7119 /* FALLTHRU */
7120 case SIGN_EXTEND:
7121 /* We can't necessarily use a const_int for a multiword mode;
7122 it depends on implicitly extending the value.
7123 Since we don't know the right way to extend it,
7124 we can't tell whether the implicit way is right.
7126 Even for a mode that is no wider than a const_int,
7127 we can't win, because we need to sign extend one of its bits through
7128 the rest of it, and we don't know which bit. */
7129 if (CONST_INT_P (XEXP (x, 0)))
7130 return x;
7132 /* Reject modes that aren't scalar integers because turning vector
7133 or complex modes into shifts causes problems. */
7134 if (!is_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)), &inner_mode))
7135 return x;
7137 /* Return if (subreg:MODE FROM 0) is not a safe replacement for
7138 (zero_extend:MODE FROM) or (sign_extend:MODE FROM). It is for any MEM
7139 because (SUBREG (MEM...)) is guaranteed to cause the MEM to be
7140 reloaded. If not for that, MEM's would very rarely be safe.
7142 Reject modes bigger than a word, because we might not be able
7143 to reference a two-register group starting with an arbitrary register
7144 (and currently gen_lowpart might crash for a SUBREG). */
7146 if (GET_MODE_SIZE (inner_mode) > UNITS_PER_WORD)
7147 return x;
7149 len = GET_MODE_PRECISION (inner_mode);
7150 /* If the inner object has VOIDmode (the only way this can happen
7151 is if it is an ASM_OPERANDS), we can't do anything since we don't
7152 know how much masking to do. */
7153 if (len == 0)
7154 return x;
7156 break;
7158 case ZERO_EXTRACT:
7159 unsignedp = 1;
7161 /* fall through */
7163 case SIGN_EXTRACT:
7164 /* If the operand is a CLOBBER, just return it. */
7165 if (GET_CODE (XEXP (x, 0)) == CLOBBER)
7166 return XEXP (x, 0);
7168 if (!CONST_INT_P (XEXP (x, 1))
7169 || !CONST_INT_P (XEXP (x, 2)))
7170 return x;
7172 /* Reject modes that aren't scalar integers because turning vector
7173 or complex modes into shifts causes problems. */
7174 if (!is_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)), &inner_mode))
7175 return x;
7177 len = INTVAL (XEXP (x, 1));
7178 pos = INTVAL (XEXP (x, 2));
7180 /* This should stay within the object being extracted, fail otherwise. */
7181 if (len + pos > GET_MODE_PRECISION (inner_mode))
7182 return x;
7184 if (BITS_BIG_ENDIAN)
7185 pos = GET_MODE_PRECISION (inner_mode) - len - pos;
7187 break;
7189 default:
7190 return x;
7193 /* We've rejected non-scalar operations by now. */
7194 scalar_int_mode mode = as_a <scalar_int_mode> (GET_MODE (x));
7196 /* Convert sign extension to zero extension, if we know that the high
7197 bit is not set, as this is easier to optimize. It will be converted
7198 back to cheaper alternative in make_extraction. */
7199 if (GET_CODE (x) == SIGN_EXTEND
7200 && HWI_COMPUTABLE_MODE_P (mode)
7201 && ((nonzero_bits (XEXP (x, 0), inner_mode)
7202 & ~(((unsigned HOST_WIDE_INT) GET_MODE_MASK (inner_mode)) >> 1))
7203 == 0))
7205 rtx temp = gen_rtx_ZERO_EXTEND (mode, XEXP (x, 0));
7206 rtx temp2 = expand_compound_operation (temp);
7208 /* Make sure this is a profitable operation. */
7209 if (set_src_cost (x, mode, optimize_this_for_speed_p)
7210 > set_src_cost (temp2, mode, optimize_this_for_speed_p))
7211 return temp2;
7212 else if (set_src_cost (x, mode, optimize_this_for_speed_p)
7213 > set_src_cost (temp, mode, optimize_this_for_speed_p))
7214 return temp;
7215 else
7216 return x;
7219 /* We can optimize some special cases of ZERO_EXTEND. */
7220 if (GET_CODE (x) == ZERO_EXTEND)
7222 /* (zero_extend:DI (truncate:SI foo:DI)) is just foo:DI if we
7223 know that the last value didn't have any inappropriate bits
7224 set. */
7225 if (GET_CODE (XEXP (x, 0)) == TRUNCATE
7226 && GET_MODE (XEXP (XEXP (x, 0), 0)) == mode
7227 && HWI_COMPUTABLE_MODE_P (mode)
7228 && (nonzero_bits (XEXP (XEXP (x, 0), 0), mode)
7229 & ~GET_MODE_MASK (inner_mode)) == 0)
7230 return XEXP (XEXP (x, 0), 0);
7232 /* Likewise for (zero_extend:DI (subreg:SI foo:DI 0)). */
7233 if (GET_CODE (XEXP (x, 0)) == SUBREG
7234 && GET_MODE (SUBREG_REG (XEXP (x, 0))) == mode
7235 && subreg_lowpart_p (XEXP (x, 0))
7236 && HWI_COMPUTABLE_MODE_P (mode)
7237 && (nonzero_bits (SUBREG_REG (XEXP (x, 0)), mode)
7238 & ~GET_MODE_MASK (inner_mode)) == 0)
7239 return SUBREG_REG (XEXP (x, 0));
7241 /* (zero_extend:DI (truncate:SI foo:DI)) is just foo:DI when foo
7242 is a comparison and STORE_FLAG_VALUE permits. This is like
7243 the first case, but it works even when MODE is larger
7244 than HOST_WIDE_INT. */
7245 if (GET_CODE (XEXP (x, 0)) == TRUNCATE
7246 && GET_MODE (XEXP (XEXP (x, 0), 0)) == mode
7247 && COMPARISON_P (XEXP (XEXP (x, 0), 0))
7248 && GET_MODE_PRECISION (inner_mode) <= HOST_BITS_PER_WIDE_INT
7249 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (inner_mode)) == 0)
7250 return XEXP (XEXP (x, 0), 0);
7252 /* Likewise for (zero_extend:DI (subreg:SI foo:DI 0)). */
7253 if (GET_CODE (XEXP (x, 0)) == SUBREG
7254 && GET_MODE (SUBREG_REG (XEXP (x, 0))) == mode
7255 && subreg_lowpart_p (XEXP (x, 0))
7256 && COMPARISON_P (SUBREG_REG (XEXP (x, 0)))
7257 && GET_MODE_PRECISION (inner_mode) <= HOST_BITS_PER_WIDE_INT
7258 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (inner_mode)) == 0)
7259 return SUBREG_REG (XEXP (x, 0));
7263 /* If we reach here, we want to return a pair of shifts. The inner
7264 shift is a left shift of BITSIZE - POS - LEN bits. The outer
7265 shift is a right shift of BITSIZE - LEN bits. It is arithmetic or
7266 logical depending on the value of UNSIGNEDP.
7268 If this was a ZERO_EXTEND or ZERO_EXTRACT, this pair of shifts will be
7269 converted into an AND of a shift.
7271 We must check for the case where the left shift would have a negative
7272 count. This can happen in a case like (x >> 31) & 255 on machines
7273 that can't shift by a constant. On those machines, we would first
7274 combine the shift with the AND to produce a variable-position
7275 extraction. Then the constant of 31 would be substituted in
7276 to produce such a position. */
7278 modewidth = GET_MODE_PRECISION (mode);
7279 if (modewidth >= pos + len)
7281 tem = gen_lowpart (mode, XEXP (x, 0));
7282 if (!tem || GET_CODE (tem) == CLOBBER)
7283 return x;
7284 tem = simplify_shift_const (NULL_RTX, ASHIFT, mode,
7285 tem, modewidth - pos - len);
7286 tem = simplify_shift_const (NULL_RTX, unsignedp ? LSHIFTRT : ASHIFTRT,
7287 mode, tem, modewidth - len);
7289 else if (unsignedp && len < HOST_BITS_PER_WIDE_INT)
7290 tem = simplify_and_const_int (NULL_RTX, mode,
7291 simplify_shift_const (NULL_RTX, LSHIFTRT,
7292 mode, XEXP (x, 0),
7293 pos),
7294 (HOST_WIDE_INT_1U << len) - 1);
7295 else
7296 /* Any other cases we can't handle. */
7297 return x;
7299 /* If we couldn't do this for some reason, return the original
7300 expression. */
7301 if (GET_CODE (tem) == CLOBBER)
7302 return x;
7304 return tem;
7307 /* X is a SET which contains an assignment of one object into
7308 a part of another (such as a bit-field assignment, STRICT_LOW_PART,
7309 or certain SUBREGS). If possible, convert it into a series of
7310 logical operations.
7312 We half-heartedly support variable positions, but do not at all
7313 support variable lengths. */
7315 static const_rtx
7316 expand_field_assignment (const_rtx x)
7318 rtx inner;
7319 rtx pos; /* Always counts from low bit. */
7320 int len;
7321 rtx mask, cleared, masked;
7322 scalar_int_mode compute_mode;
7324 /* Loop until we find something we can't simplify. */
7325 while (1)
7327 if (GET_CODE (SET_DEST (x)) == STRICT_LOW_PART
7328 && GET_CODE (XEXP (SET_DEST (x), 0)) == SUBREG)
7330 inner = SUBREG_REG (XEXP (SET_DEST (x), 0));
7331 len = GET_MODE_PRECISION (GET_MODE (XEXP (SET_DEST (x), 0)));
7332 pos = GEN_INT (subreg_lsb (XEXP (SET_DEST (x), 0)));
7334 else if (GET_CODE (SET_DEST (x)) == ZERO_EXTRACT
7335 && CONST_INT_P (XEXP (SET_DEST (x), 1)))
7337 inner = XEXP (SET_DEST (x), 0);
7338 len = INTVAL (XEXP (SET_DEST (x), 1));
7339 pos = XEXP (SET_DEST (x), 2);
7341 /* A constant position should stay within the width of INNER. */
7342 if (CONST_INT_P (pos)
7343 && INTVAL (pos) + len > GET_MODE_PRECISION (GET_MODE (inner)))
7344 break;
7346 if (BITS_BIG_ENDIAN)
7348 if (CONST_INT_P (pos))
7349 pos = GEN_INT (GET_MODE_PRECISION (GET_MODE (inner)) - len
7350 - INTVAL (pos));
7351 else if (GET_CODE (pos) == MINUS
7352 && CONST_INT_P (XEXP (pos, 1))
7353 && (INTVAL (XEXP (pos, 1))
7354 == GET_MODE_PRECISION (GET_MODE (inner)) - len))
7355 /* If position is ADJUST - X, new position is X. */
7356 pos = XEXP (pos, 0);
7357 else
7359 HOST_WIDE_INT prec = GET_MODE_PRECISION (GET_MODE (inner));
7360 pos = simplify_gen_binary (MINUS, GET_MODE (pos),
7361 gen_int_mode (prec - len,
7362 GET_MODE (pos)),
7363 pos);
7368 /* If the destination is a subreg that overwrites the whole of the inner
7369 register, we can move the subreg to the source. */
7370 else if (GET_CODE (SET_DEST (x)) == SUBREG
7371 /* We need SUBREGs to compute nonzero_bits properly. */
7372 && nonzero_sign_valid
7373 && !read_modify_subreg_p (SET_DEST (x)))
7375 x = gen_rtx_SET (SUBREG_REG (SET_DEST (x)),
7376 gen_lowpart
7377 (GET_MODE (SUBREG_REG (SET_DEST (x))),
7378 SET_SRC (x)));
7379 continue;
7381 else
7382 break;
7384 while (GET_CODE (inner) == SUBREG && subreg_lowpart_p (inner))
7385 inner = SUBREG_REG (inner);
7387 /* Don't attempt bitwise arithmetic on non scalar integer modes. */
7388 if (!is_a <scalar_int_mode> (GET_MODE (inner), &compute_mode))
7390 /* Don't do anything for vector or complex integral types. */
7391 if (! FLOAT_MODE_P (GET_MODE (inner)))
7392 break;
7394 /* Try to find an integral mode to pun with. */
7395 if (!int_mode_for_size (GET_MODE_BITSIZE (GET_MODE (inner)), 0)
7396 .exists (&compute_mode))
7397 break;
7399 inner = gen_lowpart (compute_mode, inner);
7402 /* Compute a mask of LEN bits, if we can do this on the host machine. */
7403 if (len >= HOST_BITS_PER_WIDE_INT)
7404 break;
7406 /* Don't try to compute in too wide unsupported modes. */
7407 if (!targetm.scalar_mode_supported_p (compute_mode))
7408 break;
7410 /* Now compute the equivalent expression. Make a copy of INNER
7411 for the SET_DEST in case it is a MEM into which we will substitute;
7412 we don't want shared RTL in that case. */
7413 mask = gen_int_mode ((HOST_WIDE_INT_1U << len) - 1,
7414 compute_mode);
7415 cleared = simplify_gen_binary (AND, compute_mode,
7416 simplify_gen_unary (NOT, compute_mode,
7417 simplify_gen_binary (ASHIFT,
7418 compute_mode,
7419 mask, pos),
7420 compute_mode),
7421 inner);
7422 masked = simplify_gen_binary (ASHIFT, compute_mode,
7423 simplify_gen_binary (
7424 AND, compute_mode,
7425 gen_lowpart (compute_mode, SET_SRC (x)),
7426 mask),
7427 pos);
7429 x = gen_rtx_SET (copy_rtx (inner),
7430 simplify_gen_binary (IOR, compute_mode,
7431 cleared, masked));
7434 return x;
7437 /* Return an RTX for a reference to LEN bits of INNER. If POS_RTX is nonzero,
7438 it is an RTX that represents the (variable) starting position; otherwise,
7439 POS is the (constant) starting bit position. Both are counted from the LSB.
7441 UNSIGNEDP is nonzero for an unsigned reference and zero for a signed one.
7443 IN_DEST is nonzero if this is a reference in the destination of a SET.
7444 This is used when a ZERO_ or SIGN_EXTRACT isn't needed. If nonzero,
7445 a STRICT_LOW_PART will be used, if zero, ZERO_EXTEND or SIGN_EXTEND will
7446 be used.
7448 IN_COMPARE is nonzero if we are in a COMPARE. This means that a
7449 ZERO_EXTRACT should be built even for bits starting at bit 0.
7451 MODE is the desired mode of the result (if IN_DEST == 0).
7453 The result is an RTX for the extraction or NULL_RTX if the target
7454 can't handle it. */
7456 static rtx
7457 make_extraction (machine_mode mode, rtx inner, HOST_WIDE_INT pos,
7458 rtx pos_rtx, unsigned HOST_WIDE_INT len, int unsignedp,
7459 int in_dest, int in_compare)
7461 /* This mode describes the size of the storage area
7462 to fetch the overall value from. Within that, we
7463 ignore the POS lowest bits, etc. */
7464 machine_mode is_mode = GET_MODE (inner);
7465 machine_mode inner_mode;
7466 scalar_int_mode wanted_inner_mode;
7467 scalar_int_mode wanted_inner_reg_mode = word_mode;
7468 scalar_int_mode pos_mode = word_mode;
7469 machine_mode extraction_mode = word_mode;
7470 rtx new_rtx = 0;
7471 rtx orig_pos_rtx = pos_rtx;
7472 HOST_WIDE_INT orig_pos;
7474 if (pos_rtx && CONST_INT_P (pos_rtx))
7475 pos = INTVAL (pos_rtx), pos_rtx = 0;
7477 if (GET_CODE (inner) == SUBREG
7478 && subreg_lowpart_p (inner)
7479 && (paradoxical_subreg_p (inner)
7480 /* If trying or potentionally trying to extract
7481 bits outside of is_mode, don't look through
7482 non-paradoxical SUBREGs. See PR82192. */
7483 || (pos_rtx == NULL_RTX
7484 && pos + len <= GET_MODE_PRECISION (is_mode))))
7486 /* If going from (subreg:SI (mem:QI ...)) to (mem:QI ...),
7487 consider just the QI as the memory to extract from.
7488 The subreg adds or removes high bits; its mode is
7489 irrelevant to the meaning of this extraction,
7490 since POS and LEN count from the lsb. */
7491 if (MEM_P (SUBREG_REG (inner)))
7492 is_mode = GET_MODE (SUBREG_REG (inner));
7493 inner = SUBREG_REG (inner);
7495 else if (GET_CODE (inner) == ASHIFT
7496 && CONST_INT_P (XEXP (inner, 1))
7497 && pos_rtx == 0 && pos == 0
7498 && len > UINTVAL (XEXP (inner, 1)))
7500 /* We're extracting the least significant bits of an rtx
7501 (ashift X (const_int C)), where LEN > C. Extract the
7502 least significant (LEN - C) bits of X, giving an rtx
7503 whose mode is MODE, then shift it left C times. */
7504 new_rtx = make_extraction (mode, XEXP (inner, 0),
7505 0, 0, len - INTVAL (XEXP (inner, 1)),
7506 unsignedp, in_dest, in_compare);
7507 if (new_rtx != 0)
7508 return gen_rtx_ASHIFT (mode, new_rtx, XEXP (inner, 1));
7510 else if (GET_CODE (inner) == TRUNCATE
7511 /* If trying or potentionally trying to extract
7512 bits outside of is_mode, don't look through
7513 TRUNCATE. See PR82192. */
7514 && pos_rtx == NULL_RTX
7515 && pos + len <= GET_MODE_PRECISION (is_mode))
7516 inner = XEXP (inner, 0);
7518 inner_mode = GET_MODE (inner);
7520 /* See if this can be done without an extraction. We never can if the
7521 width of the field is not the same as that of some integer mode. For
7522 registers, we can only avoid the extraction if the position is at the
7523 low-order bit and this is either not in the destination or we have the
7524 appropriate STRICT_LOW_PART operation available.
7526 For MEM, we can avoid an extract if the field starts on an appropriate
7527 boundary and we can change the mode of the memory reference. */
7529 scalar_int_mode tmode;
7530 if (int_mode_for_size (len, 1).exists (&tmode)
7531 && ((pos_rtx == 0 && (pos % BITS_PER_WORD) == 0
7532 && !MEM_P (inner)
7533 && (pos == 0 || REG_P (inner))
7534 && (inner_mode == tmode
7535 || !REG_P (inner)
7536 || TRULY_NOOP_TRUNCATION_MODES_P (tmode, inner_mode)
7537 || reg_truncated_to_mode (tmode, inner))
7538 && (! in_dest
7539 || (REG_P (inner)
7540 && have_insn_for (STRICT_LOW_PART, tmode))))
7541 || (MEM_P (inner) && pos_rtx == 0
7542 && (pos
7543 % (STRICT_ALIGNMENT ? GET_MODE_ALIGNMENT (tmode)
7544 : BITS_PER_UNIT)) == 0
7545 /* We can't do this if we are widening INNER_MODE (it
7546 may not be aligned, for one thing). */
7547 && !paradoxical_subreg_p (tmode, inner_mode)
7548 && (inner_mode == tmode
7549 || (! mode_dependent_address_p (XEXP (inner, 0),
7550 MEM_ADDR_SPACE (inner))
7551 && ! MEM_VOLATILE_P (inner))))))
7553 /* If INNER is a MEM, make a new MEM that encompasses just the desired
7554 field. If the original and current mode are the same, we need not
7555 adjust the offset. Otherwise, we do if bytes big endian.
7557 If INNER is not a MEM, get a piece consisting of just the field
7558 of interest (in this case POS % BITS_PER_WORD must be 0). */
7560 if (MEM_P (inner))
7562 HOST_WIDE_INT offset;
7564 /* POS counts from lsb, but make OFFSET count in memory order. */
7565 if (BYTES_BIG_ENDIAN)
7566 offset = (GET_MODE_PRECISION (is_mode) - len - pos) / BITS_PER_UNIT;
7567 else
7568 offset = pos / BITS_PER_UNIT;
7570 new_rtx = adjust_address_nv (inner, tmode, offset);
7572 else if (REG_P (inner))
7574 if (tmode != inner_mode)
7576 /* We can't call gen_lowpart in a DEST since we
7577 always want a SUBREG (see below) and it would sometimes
7578 return a new hard register. */
7579 if (pos || in_dest)
7581 unsigned int offset
7582 = subreg_offset_from_lsb (tmode, inner_mode, pos);
7584 /* Avoid creating invalid subregs, for example when
7585 simplifying (x>>32)&255. */
7586 if (!validate_subreg (tmode, inner_mode, inner, offset))
7587 return NULL_RTX;
7589 new_rtx = gen_rtx_SUBREG (tmode, inner, offset);
7591 else
7592 new_rtx = gen_lowpart (tmode, inner);
7594 else
7595 new_rtx = inner;
7597 else
7598 new_rtx = force_to_mode (inner, tmode,
7599 len >= HOST_BITS_PER_WIDE_INT
7600 ? HOST_WIDE_INT_M1U
7601 : (HOST_WIDE_INT_1U << len) - 1, 0);
7603 /* If this extraction is going into the destination of a SET,
7604 make a STRICT_LOW_PART unless we made a MEM. */
7606 if (in_dest)
7607 return (MEM_P (new_rtx) ? new_rtx
7608 : (GET_CODE (new_rtx) != SUBREG
7609 ? gen_rtx_CLOBBER (tmode, const0_rtx)
7610 : gen_rtx_STRICT_LOW_PART (VOIDmode, new_rtx)));
7612 if (mode == tmode)
7613 return new_rtx;
7615 if (CONST_SCALAR_INT_P (new_rtx))
7616 return simplify_unary_operation (unsignedp ? ZERO_EXTEND : SIGN_EXTEND,
7617 mode, new_rtx, tmode);
7619 /* If we know that no extraneous bits are set, and that the high
7620 bit is not set, convert the extraction to the cheaper of
7621 sign and zero extension, that are equivalent in these cases. */
7622 if (flag_expensive_optimizations
7623 && (HWI_COMPUTABLE_MODE_P (tmode)
7624 && ((nonzero_bits (new_rtx, tmode)
7625 & ~(((unsigned HOST_WIDE_INT)GET_MODE_MASK (tmode)) >> 1))
7626 == 0)))
7628 rtx temp = gen_rtx_ZERO_EXTEND (mode, new_rtx);
7629 rtx temp1 = gen_rtx_SIGN_EXTEND (mode, new_rtx);
7631 /* Prefer ZERO_EXTENSION, since it gives more information to
7632 backends. */
7633 if (set_src_cost (temp, mode, optimize_this_for_speed_p)
7634 <= set_src_cost (temp1, mode, optimize_this_for_speed_p))
7635 return temp;
7636 return temp1;
7639 /* Otherwise, sign- or zero-extend unless we already are in the
7640 proper mode. */
7642 return (gen_rtx_fmt_e (unsignedp ? ZERO_EXTEND : SIGN_EXTEND,
7643 mode, new_rtx));
7646 /* Unless this is a COMPARE or we have a funny memory reference,
7647 don't do anything with zero-extending field extracts starting at
7648 the low-order bit since they are simple AND operations. */
7649 if (pos_rtx == 0 && pos == 0 && ! in_dest
7650 && ! in_compare && unsignedp)
7651 return 0;
7653 /* Unless INNER is not MEM, reject this if we would be spanning bytes or
7654 if the position is not a constant and the length is not 1. In all
7655 other cases, we would only be going outside our object in cases when
7656 an original shift would have been undefined. */
7657 if (MEM_P (inner)
7658 && ((pos_rtx == 0 && pos + len > GET_MODE_PRECISION (is_mode))
7659 || (pos_rtx != 0 && len != 1)))
7660 return 0;
7662 enum extraction_pattern pattern = (in_dest ? EP_insv
7663 : unsignedp ? EP_extzv : EP_extv);
7665 /* If INNER is not from memory, we want it to have the mode of a register
7666 extraction pattern's structure operand, or word_mode if there is no
7667 such pattern. The same applies to extraction_mode and pos_mode
7668 and their respective operands.
7670 For memory, assume that the desired extraction_mode and pos_mode
7671 are the same as for a register operation, since at present we don't
7672 have named patterns for aligned memory structures. */
7673 struct extraction_insn insn;
7674 if (get_best_reg_extraction_insn (&insn, pattern,
7675 GET_MODE_BITSIZE (inner_mode), mode))
7677 wanted_inner_reg_mode = insn.struct_mode.require ();
7678 pos_mode = insn.pos_mode;
7679 extraction_mode = insn.field_mode;
7682 /* Never narrow an object, since that might not be safe. */
7684 if (mode != VOIDmode
7685 && partial_subreg_p (extraction_mode, mode))
7686 extraction_mode = mode;
7688 if (!MEM_P (inner))
7689 wanted_inner_mode = wanted_inner_reg_mode;
7690 else
7692 /* Be careful not to go beyond the extracted object and maintain the
7693 natural alignment of the memory. */
7694 wanted_inner_mode = smallest_int_mode_for_size (len);
7695 while (pos % GET_MODE_BITSIZE (wanted_inner_mode) + len
7696 > GET_MODE_BITSIZE (wanted_inner_mode))
7697 wanted_inner_mode = GET_MODE_WIDER_MODE (wanted_inner_mode).require ();
7700 orig_pos = pos;
7702 if (BITS_BIG_ENDIAN)
7704 /* POS is passed as if BITS_BIG_ENDIAN == 0, so we need to convert it to
7705 BITS_BIG_ENDIAN style. If position is constant, compute new
7706 position. Otherwise, build subtraction.
7707 Note that POS is relative to the mode of the original argument.
7708 If it's a MEM we need to recompute POS relative to that.
7709 However, if we're extracting from (or inserting into) a register,
7710 we want to recompute POS relative to wanted_inner_mode. */
7711 int width = (MEM_P (inner)
7712 ? GET_MODE_BITSIZE (is_mode)
7713 : GET_MODE_BITSIZE (wanted_inner_mode));
7715 if (pos_rtx == 0)
7716 pos = width - len - pos;
7717 else
7718 pos_rtx
7719 = gen_rtx_MINUS (GET_MODE (pos_rtx),
7720 gen_int_mode (width - len, GET_MODE (pos_rtx)),
7721 pos_rtx);
7722 /* POS may be less than 0 now, but we check for that below.
7723 Note that it can only be less than 0 if !MEM_P (inner). */
7726 /* If INNER has a wider mode, and this is a constant extraction, try to
7727 make it smaller and adjust the byte to point to the byte containing
7728 the value. */
7729 if (wanted_inner_mode != VOIDmode
7730 && inner_mode != wanted_inner_mode
7731 && ! pos_rtx
7732 && partial_subreg_p (wanted_inner_mode, is_mode)
7733 && MEM_P (inner)
7734 && ! mode_dependent_address_p (XEXP (inner, 0), MEM_ADDR_SPACE (inner))
7735 && ! MEM_VOLATILE_P (inner))
7737 int offset = 0;
7739 /* The computations below will be correct if the machine is big
7740 endian in both bits and bytes or little endian in bits and bytes.
7741 If it is mixed, we must adjust. */
7743 /* If bytes are big endian and we had a paradoxical SUBREG, we must
7744 adjust OFFSET to compensate. */
7745 if (BYTES_BIG_ENDIAN
7746 && paradoxical_subreg_p (is_mode, inner_mode))
7747 offset -= GET_MODE_SIZE (is_mode) - GET_MODE_SIZE (inner_mode);
7749 /* We can now move to the desired byte. */
7750 offset += (pos / GET_MODE_BITSIZE (wanted_inner_mode))
7751 * GET_MODE_SIZE (wanted_inner_mode);
7752 pos %= GET_MODE_BITSIZE (wanted_inner_mode);
7754 if (BYTES_BIG_ENDIAN != BITS_BIG_ENDIAN
7755 && is_mode != wanted_inner_mode)
7756 offset = (GET_MODE_SIZE (is_mode)
7757 - GET_MODE_SIZE (wanted_inner_mode) - offset);
7759 inner = adjust_address_nv (inner, wanted_inner_mode, offset);
7762 /* If INNER is not memory, get it into the proper mode. If we are changing
7763 its mode, POS must be a constant and smaller than the size of the new
7764 mode. */
7765 else if (!MEM_P (inner))
7767 /* On the LHS, don't create paradoxical subregs implicitely truncating
7768 the register unless TARGET_TRULY_NOOP_TRUNCATION. */
7769 if (in_dest
7770 && !TRULY_NOOP_TRUNCATION_MODES_P (GET_MODE (inner),
7771 wanted_inner_mode))
7772 return NULL_RTX;
7774 if (GET_MODE (inner) != wanted_inner_mode
7775 && (pos_rtx != 0
7776 || orig_pos + len > GET_MODE_BITSIZE (wanted_inner_mode)))
7777 return NULL_RTX;
7779 if (orig_pos < 0)
7780 return NULL_RTX;
7782 inner = force_to_mode (inner, wanted_inner_mode,
7783 pos_rtx
7784 || len + orig_pos >= HOST_BITS_PER_WIDE_INT
7785 ? HOST_WIDE_INT_M1U
7786 : (((HOST_WIDE_INT_1U << len) - 1)
7787 << orig_pos),
7791 /* Adjust mode of POS_RTX, if needed. If we want a wider mode, we
7792 have to zero extend. Otherwise, we can just use a SUBREG.
7794 We dealt with constant rtxes earlier, so pos_rtx cannot
7795 have VOIDmode at this point. */
7796 if (pos_rtx != 0
7797 && (GET_MODE_SIZE (pos_mode)
7798 > GET_MODE_SIZE (as_a <scalar_int_mode> (GET_MODE (pos_rtx)))))
7800 rtx temp = simplify_gen_unary (ZERO_EXTEND, pos_mode, pos_rtx,
7801 GET_MODE (pos_rtx));
7803 /* If we know that no extraneous bits are set, and that the high
7804 bit is not set, convert extraction to cheaper one - either
7805 SIGN_EXTENSION or ZERO_EXTENSION, that are equivalent in these
7806 cases. */
7807 if (flag_expensive_optimizations
7808 && (HWI_COMPUTABLE_MODE_P (GET_MODE (pos_rtx))
7809 && ((nonzero_bits (pos_rtx, GET_MODE (pos_rtx))
7810 & ~(((unsigned HOST_WIDE_INT)
7811 GET_MODE_MASK (GET_MODE (pos_rtx)))
7812 >> 1))
7813 == 0)))
7815 rtx temp1 = simplify_gen_unary (SIGN_EXTEND, pos_mode, pos_rtx,
7816 GET_MODE (pos_rtx));
7818 /* Prefer ZERO_EXTENSION, since it gives more information to
7819 backends. */
7820 if (set_src_cost (temp1, pos_mode, optimize_this_for_speed_p)
7821 < set_src_cost (temp, pos_mode, optimize_this_for_speed_p))
7822 temp = temp1;
7824 pos_rtx = temp;
7827 /* Make POS_RTX unless we already have it and it is correct. If we don't
7828 have a POS_RTX but we do have an ORIG_POS_RTX, the latter must
7829 be a CONST_INT. */
7830 if (pos_rtx == 0 && orig_pos_rtx != 0 && INTVAL (orig_pos_rtx) == pos)
7831 pos_rtx = orig_pos_rtx;
7833 else if (pos_rtx == 0)
7834 pos_rtx = GEN_INT (pos);
7836 /* Make the required operation. See if we can use existing rtx. */
7837 new_rtx = gen_rtx_fmt_eee (unsignedp ? ZERO_EXTRACT : SIGN_EXTRACT,
7838 extraction_mode, inner, GEN_INT (len), pos_rtx);
7839 if (! in_dest)
7840 new_rtx = gen_lowpart (mode, new_rtx);
7842 return new_rtx;
7845 /* See if X (of mode MODE) contains an ASHIFT of COUNT or more bits that
7846 can be commuted with any other operations in X. Return X without
7847 that shift if so. */
7849 static rtx
7850 extract_left_shift (scalar_int_mode mode, rtx x, int count)
7852 enum rtx_code code = GET_CODE (x);
7853 rtx tem;
7855 switch (code)
7857 case ASHIFT:
7858 /* This is the shift itself. If it is wide enough, we will return
7859 either the value being shifted if the shift count is equal to
7860 COUNT or a shift for the difference. */
7861 if (CONST_INT_P (XEXP (x, 1))
7862 && INTVAL (XEXP (x, 1)) >= count)
7863 return simplify_shift_const (NULL_RTX, ASHIFT, mode, XEXP (x, 0),
7864 INTVAL (XEXP (x, 1)) - count);
7865 break;
7867 case NEG: case NOT:
7868 if ((tem = extract_left_shift (mode, XEXP (x, 0), count)) != 0)
7869 return simplify_gen_unary (code, mode, tem, mode);
7871 break;
7873 case PLUS: case IOR: case XOR: case AND:
7874 /* If we can safely shift this constant and we find the inner shift,
7875 make a new operation. */
7876 if (CONST_INT_P (XEXP (x, 1))
7877 && (UINTVAL (XEXP (x, 1))
7878 & (((HOST_WIDE_INT_1U << count)) - 1)) == 0
7879 && (tem = extract_left_shift (mode, XEXP (x, 0), count)) != 0)
7881 HOST_WIDE_INT val = INTVAL (XEXP (x, 1)) >> count;
7882 return simplify_gen_binary (code, mode, tem,
7883 gen_int_mode (val, mode));
7885 break;
7887 default:
7888 break;
7891 return 0;
7894 /* Subroutine of make_compound_operation. *X_PTR is the rtx at the current
7895 level of the expression and MODE is its mode. IN_CODE is as for
7896 make_compound_operation. *NEXT_CODE_PTR is the value of IN_CODE
7897 that should be used when recursing on operands of *X_PTR.
7899 There are two possible actions:
7901 - Return null. This tells the caller to recurse on *X_PTR with IN_CODE
7902 equal to *NEXT_CODE_PTR, after which *X_PTR holds the final value.
7904 - Return a new rtx, which the caller returns directly. */
7906 static rtx
7907 make_compound_operation_int (scalar_int_mode mode, rtx *x_ptr,
7908 enum rtx_code in_code,
7909 enum rtx_code *next_code_ptr)
7911 rtx x = *x_ptr;
7912 enum rtx_code next_code = *next_code_ptr;
7913 enum rtx_code code = GET_CODE (x);
7914 int mode_width = GET_MODE_PRECISION (mode);
7915 rtx rhs, lhs;
7916 rtx new_rtx = 0;
7917 int i;
7918 rtx tem;
7919 scalar_int_mode inner_mode;
7920 bool equality_comparison = false;
7922 if (in_code == EQ)
7924 equality_comparison = true;
7925 in_code = COMPARE;
7928 /* Process depending on the code of this operation. If NEW is set
7929 nonzero, it will be returned. */
7931 switch (code)
7933 case ASHIFT:
7934 /* Convert shifts by constants into multiplications if inside
7935 an address. */
7936 if (in_code == MEM && CONST_INT_P (XEXP (x, 1))
7937 && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT
7938 && INTVAL (XEXP (x, 1)) >= 0)
7940 HOST_WIDE_INT count = INTVAL (XEXP (x, 1));
7941 HOST_WIDE_INT multval = HOST_WIDE_INT_1 << count;
7943 new_rtx = make_compound_operation (XEXP (x, 0), next_code);
7944 if (GET_CODE (new_rtx) == NEG)
7946 new_rtx = XEXP (new_rtx, 0);
7947 multval = -multval;
7949 multval = trunc_int_for_mode (multval, mode);
7950 new_rtx = gen_rtx_MULT (mode, new_rtx, gen_int_mode (multval, mode));
7952 break;
7954 case PLUS:
7955 lhs = XEXP (x, 0);
7956 rhs = XEXP (x, 1);
7957 lhs = make_compound_operation (lhs, next_code);
7958 rhs = make_compound_operation (rhs, next_code);
7959 if (GET_CODE (lhs) == MULT && GET_CODE (XEXP (lhs, 0)) == NEG)
7961 tem = simplify_gen_binary (MULT, mode, XEXP (XEXP (lhs, 0), 0),
7962 XEXP (lhs, 1));
7963 new_rtx = simplify_gen_binary (MINUS, mode, rhs, tem);
7965 else if (GET_CODE (lhs) == MULT
7966 && (CONST_INT_P (XEXP (lhs, 1)) && INTVAL (XEXP (lhs, 1)) < 0))
7968 tem = simplify_gen_binary (MULT, mode, XEXP (lhs, 0),
7969 simplify_gen_unary (NEG, mode,
7970 XEXP (lhs, 1),
7971 mode));
7972 new_rtx = simplify_gen_binary (MINUS, mode, rhs, tem);
7974 else
7976 SUBST (XEXP (x, 0), lhs);
7977 SUBST (XEXP (x, 1), rhs);
7979 maybe_swap_commutative_operands (x);
7980 return x;
7982 case MINUS:
7983 lhs = XEXP (x, 0);
7984 rhs = XEXP (x, 1);
7985 lhs = make_compound_operation (lhs, next_code);
7986 rhs = make_compound_operation (rhs, next_code);
7987 if (GET_CODE (rhs) == MULT && GET_CODE (XEXP (rhs, 0)) == NEG)
7989 tem = simplify_gen_binary (MULT, mode, XEXP (XEXP (rhs, 0), 0),
7990 XEXP (rhs, 1));
7991 return simplify_gen_binary (PLUS, mode, tem, lhs);
7993 else if (GET_CODE (rhs) == MULT
7994 && (CONST_INT_P (XEXP (rhs, 1)) && INTVAL (XEXP (rhs, 1)) < 0))
7996 tem = simplify_gen_binary (MULT, mode, XEXP (rhs, 0),
7997 simplify_gen_unary (NEG, mode,
7998 XEXP (rhs, 1),
7999 mode));
8000 return simplify_gen_binary (PLUS, mode, tem, lhs);
8002 else
8004 SUBST (XEXP (x, 0), lhs);
8005 SUBST (XEXP (x, 1), rhs);
8006 return x;
8009 case AND:
8010 /* If the second operand is not a constant, we can't do anything
8011 with it. */
8012 if (!CONST_INT_P (XEXP (x, 1)))
8013 break;
8015 /* If the constant is a power of two minus one and the first operand
8016 is a logical right shift, make an extraction. */
8017 if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
8018 && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
8020 new_rtx = make_compound_operation (XEXP (XEXP (x, 0), 0), next_code);
8021 new_rtx = make_extraction (mode, new_rtx, 0, XEXP (XEXP (x, 0), 1),
8022 i, 1, 0, in_code == COMPARE);
8025 /* Same as previous, but for (subreg (lshiftrt ...)) in first op. */
8026 else if (GET_CODE (XEXP (x, 0)) == SUBREG
8027 && subreg_lowpart_p (XEXP (x, 0))
8028 && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (XEXP (x, 0))),
8029 &inner_mode)
8030 && GET_CODE (SUBREG_REG (XEXP (x, 0))) == LSHIFTRT
8031 && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
8033 rtx inner_x0 = SUBREG_REG (XEXP (x, 0));
8034 new_rtx = make_compound_operation (XEXP (inner_x0, 0), next_code);
8035 new_rtx = make_extraction (inner_mode, new_rtx, 0,
8036 XEXP (inner_x0, 1),
8037 i, 1, 0, in_code == COMPARE);
8039 /* If we narrowed the mode when dropping the subreg, then we lose. */
8040 if (GET_MODE_SIZE (inner_mode) < GET_MODE_SIZE (mode))
8041 new_rtx = NULL;
8043 /* If that didn't give anything, see if the AND simplifies on
8044 its own. */
8045 if (!new_rtx && i >= 0)
8047 new_rtx = make_compound_operation (XEXP (x, 0), next_code);
8048 new_rtx = make_extraction (mode, new_rtx, 0, NULL_RTX, i, 1,
8049 0, in_code == COMPARE);
8052 /* Same as previous, but for (xor/ior (lshiftrt...) (lshiftrt...)). */
8053 else if ((GET_CODE (XEXP (x, 0)) == XOR
8054 || GET_CODE (XEXP (x, 0)) == IOR)
8055 && GET_CODE (XEXP (XEXP (x, 0), 0)) == LSHIFTRT
8056 && GET_CODE (XEXP (XEXP (x, 0), 1)) == LSHIFTRT
8057 && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
8059 /* Apply the distributive law, and then try to make extractions. */
8060 new_rtx = gen_rtx_fmt_ee (GET_CODE (XEXP (x, 0)), mode,
8061 gen_rtx_AND (mode, XEXP (XEXP (x, 0), 0),
8062 XEXP (x, 1)),
8063 gen_rtx_AND (mode, XEXP (XEXP (x, 0), 1),
8064 XEXP (x, 1)));
8065 new_rtx = make_compound_operation (new_rtx, in_code);
8068 /* If we are have (and (rotate X C) M) and C is larger than the number
8069 of bits in M, this is an extraction. */
8071 else if (GET_CODE (XEXP (x, 0)) == ROTATE
8072 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
8073 && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0
8074 && i <= INTVAL (XEXP (XEXP (x, 0), 1)))
8076 new_rtx = make_compound_operation (XEXP (XEXP (x, 0), 0), next_code);
8077 new_rtx = make_extraction (mode, new_rtx,
8078 (GET_MODE_PRECISION (mode)
8079 - INTVAL (XEXP (XEXP (x, 0), 1))),
8080 NULL_RTX, i, 1, 0, in_code == COMPARE);
8083 /* On machines without logical shifts, if the operand of the AND is
8084 a logical shift and our mask turns off all the propagated sign
8085 bits, we can replace the logical shift with an arithmetic shift. */
8086 else if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
8087 && !have_insn_for (LSHIFTRT, mode)
8088 && have_insn_for (ASHIFTRT, mode)
8089 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
8090 && INTVAL (XEXP (XEXP (x, 0), 1)) >= 0
8091 && INTVAL (XEXP (XEXP (x, 0), 1)) < HOST_BITS_PER_WIDE_INT
8092 && mode_width <= HOST_BITS_PER_WIDE_INT)
8094 unsigned HOST_WIDE_INT mask = GET_MODE_MASK (mode);
8096 mask >>= INTVAL (XEXP (XEXP (x, 0), 1));
8097 if ((INTVAL (XEXP (x, 1)) & ~mask) == 0)
8098 SUBST (XEXP (x, 0),
8099 gen_rtx_ASHIFTRT (mode,
8100 make_compound_operation (XEXP (XEXP (x,
8103 next_code),
8104 XEXP (XEXP (x, 0), 1)));
8107 /* If the constant is one less than a power of two, this might be
8108 representable by an extraction even if no shift is present.
8109 If it doesn't end up being a ZERO_EXTEND, we will ignore it unless
8110 we are in a COMPARE. */
8111 else if ((i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
8112 new_rtx = make_extraction (mode,
8113 make_compound_operation (XEXP (x, 0),
8114 next_code),
8115 0, NULL_RTX, i, 1, 0, in_code == COMPARE);
8117 /* If we are in a comparison and this is an AND with a power of two,
8118 convert this into the appropriate bit extract. */
8119 else if (in_code == COMPARE
8120 && (i = exact_log2 (UINTVAL (XEXP (x, 1)))) >= 0
8121 && (equality_comparison || i < GET_MODE_PRECISION (mode) - 1))
8122 new_rtx = make_extraction (mode,
8123 make_compound_operation (XEXP (x, 0),
8124 next_code),
8125 i, NULL_RTX, 1, 1, 0, 1);
8127 /* If the one operand is a paradoxical subreg of a register or memory and
8128 the constant (limited to the smaller mode) has only zero bits where
8129 the sub expression has known zero bits, this can be expressed as
8130 a zero_extend. */
8131 else if (GET_CODE (XEXP (x, 0)) == SUBREG)
8133 rtx sub;
8135 sub = XEXP (XEXP (x, 0), 0);
8136 machine_mode sub_mode = GET_MODE (sub);
8137 if ((REG_P (sub) || MEM_P (sub))
8138 && GET_MODE_PRECISION (sub_mode) < mode_width)
8140 unsigned HOST_WIDE_INT mode_mask = GET_MODE_MASK (sub_mode);
8141 unsigned HOST_WIDE_INT mask;
8143 /* original AND constant with all the known zero bits set */
8144 mask = UINTVAL (XEXP (x, 1)) | (~nonzero_bits (sub, sub_mode));
8145 if ((mask & mode_mask) == mode_mask)
8147 new_rtx = make_compound_operation (sub, next_code);
8148 new_rtx = make_extraction (mode, new_rtx, 0, 0,
8149 GET_MODE_PRECISION (sub_mode),
8150 1, 0, in_code == COMPARE);
8155 break;
8157 case LSHIFTRT:
8158 /* If the sign bit is known to be zero, replace this with an
8159 arithmetic shift. */
8160 if (have_insn_for (ASHIFTRT, mode)
8161 && ! have_insn_for (LSHIFTRT, mode)
8162 && mode_width <= HOST_BITS_PER_WIDE_INT
8163 && (nonzero_bits (XEXP (x, 0), mode) & (1 << (mode_width - 1))) == 0)
8165 new_rtx = gen_rtx_ASHIFTRT (mode,
8166 make_compound_operation (XEXP (x, 0),
8167 next_code),
8168 XEXP (x, 1));
8169 break;
8172 /* fall through */
8174 case ASHIFTRT:
8175 lhs = XEXP (x, 0);
8176 rhs = XEXP (x, 1);
8178 /* If we have (ashiftrt (ashift foo C1) C2) with C2 >= C1,
8179 this is a SIGN_EXTRACT. */
8180 if (CONST_INT_P (rhs)
8181 && GET_CODE (lhs) == ASHIFT
8182 && CONST_INT_P (XEXP (lhs, 1))
8183 && INTVAL (rhs) >= INTVAL (XEXP (lhs, 1))
8184 && INTVAL (XEXP (lhs, 1)) >= 0
8185 && INTVAL (rhs) < mode_width)
8187 new_rtx = make_compound_operation (XEXP (lhs, 0), next_code);
8188 new_rtx = make_extraction (mode, new_rtx,
8189 INTVAL (rhs) - INTVAL (XEXP (lhs, 1)),
8190 NULL_RTX, mode_width - INTVAL (rhs),
8191 code == LSHIFTRT, 0, in_code == COMPARE);
8192 break;
8195 /* See if we have operations between an ASHIFTRT and an ASHIFT.
8196 If so, try to merge the shifts into a SIGN_EXTEND. We could
8197 also do this for some cases of SIGN_EXTRACT, but it doesn't
8198 seem worth the effort; the case checked for occurs on Alpha. */
8200 if (!OBJECT_P (lhs)
8201 && ! (GET_CODE (lhs) == SUBREG
8202 && (OBJECT_P (SUBREG_REG (lhs))))
8203 && CONST_INT_P (rhs)
8204 && INTVAL (rhs) >= 0
8205 && INTVAL (rhs) < HOST_BITS_PER_WIDE_INT
8206 && INTVAL (rhs) < mode_width
8207 && (new_rtx = extract_left_shift (mode, lhs, INTVAL (rhs))) != 0)
8208 new_rtx = make_extraction (mode, make_compound_operation (new_rtx,
8209 next_code),
8210 0, NULL_RTX, mode_width - INTVAL (rhs),
8211 code == LSHIFTRT, 0, in_code == COMPARE);
8213 break;
8215 case SUBREG:
8216 /* Call ourselves recursively on the inner expression. If we are
8217 narrowing the object and it has a different RTL code from
8218 what it originally did, do this SUBREG as a force_to_mode. */
8220 rtx inner = SUBREG_REG (x), simplified;
8221 enum rtx_code subreg_code = in_code;
8223 /* If the SUBREG is masking of a logical right shift,
8224 make an extraction. */
8225 if (GET_CODE (inner) == LSHIFTRT
8226 && is_a <scalar_int_mode> (GET_MODE (inner), &inner_mode)
8227 && GET_MODE_SIZE (mode) < GET_MODE_SIZE (inner_mode)
8228 && CONST_INT_P (XEXP (inner, 1))
8229 && UINTVAL (XEXP (inner, 1)) < GET_MODE_PRECISION (inner_mode)
8230 && subreg_lowpart_p (x))
8232 new_rtx = make_compound_operation (XEXP (inner, 0), next_code);
8233 int width = GET_MODE_PRECISION (inner_mode)
8234 - INTVAL (XEXP (inner, 1));
8235 if (width > mode_width)
8236 width = mode_width;
8237 new_rtx = make_extraction (mode, new_rtx, 0, XEXP (inner, 1),
8238 width, 1, 0, in_code == COMPARE);
8239 break;
8242 /* If in_code is COMPARE, it isn't always safe to pass it through
8243 to the recursive make_compound_operation call. */
8244 if (subreg_code == COMPARE
8245 && (!subreg_lowpart_p (x)
8246 || GET_CODE (inner) == SUBREG
8247 /* (subreg:SI (and:DI (reg:DI) (const_int 0x800000000)) 0)
8248 is (const_int 0), rather than
8249 (subreg:SI (lshiftrt:DI (reg:DI) (const_int 35)) 0).
8250 Similarly (subreg:QI (and:SI (reg:SI) (const_int 0x80)) 0)
8251 for non-equality comparisons against 0 is not equivalent
8252 to (subreg:QI (lshiftrt:SI (reg:SI) (const_int 7)) 0). */
8253 || (GET_CODE (inner) == AND
8254 && CONST_INT_P (XEXP (inner, 1))
8255 && partial_subreg_p (x)
8256 && exact_log2 (UINTVAL (XEXP (inner, 1)))
8257 >= GET_MODE_BITSIZE (mode) - 1)))
8258 subreg_code = SET;
8260 tem = make_compound_operation (inner, subreg_code);
8262 simplified
8263 = simplify_subreg (mode, tem, GET_MODE (inner), SUBREG_BYTE (x));
8264 if (simplified)
8265 tem = simplified;
8267 if (GET_CODE (tem) != GET_CODE (inner)
8268 && partial_subreg_p (x)
8269 && subreg_lowpart_p (x))
8271 rtx newer
8272 = force_to_mode (tem, mode, HOST_WIDE_INT_M1U, 0);
8274 /* If we have something other than a SUBREG, we might have
8275 done an expansion, so rerun ourselves. */
8276 if (GET_CODE (newer) != SUBREG)
8277 newer = make_compound_operation (newer, in_code);
8279 /* force_to_mode can expand compounds. If it just re-expanded
8280 the compound, use gen_lowpart to convert to the desired
8281 mode. */
8282 if (rtx_equal_p (newer, x)
8283 /* Likewise if it re-expanded the compound only partially.
8284 This happens for SUBREG of ZERO_EXTRACT if they extract
8285 the same number of bits. */
8286 || (GET_CODE (newer) == SUBREG
8287 && (GET_CODE (SUBREG_REG (newer)) == LSHIFTRT
8288 || GET_CODE (SUBREG_REG (newer)) == ASHIFTRT)
8289 && GET_CODE (inner) == AND
8290 && rtx_equal_p (SUBREG_REG (newer), XEXP (inner, 0))))
8291 return gen_lowpart (GET_MODE (x), tem);
8293 return newer;
8296 if (simplified)
8297 return tem;
8299 break;
8301 default:
8302 break;
8305 if (new_rtx)
8306 *x_ptr = gen_lowpart (mode, new_rtx);
8307 *next_code_ptr = next_code;
8308 return NULL_RTX;
8311 /* Look at the expression rooted at X. Look for expressions
8312 equivalent to ZERO_EXTRACT, SIGN_EXTRACT, ZERO_EXTEND, SIGN_EXTEND.
8313 Form these expressions.
8315 Return the new rtx, usually just X.
8317 Also, for machines like the VAX that don't have logical shift insns,
8318 try to convert logical to arithmetic shift operations in cases where
8319 they are equivalent. This undoes the canonicalizations to logical
8320 shifts done elsewhere.
8322 We try, as much as possible, to re-use rtl expressions to save memory.
8324 IN_CODE says what kind of expression we are processing. Normally, it is
8325 SET. In a memory address it is MEM. When processing the arguments of
8326 a comparison or a COMPARE against zero, it is COMPARE, or EQ if more
8327 precisely it is an equality comparison against zero. */
8330 make_compound_operation (rtx x, enum rtx_code in_code)
8332 enum rtx_code code = GET_CODE (x);
8333 const char *fmt;
8334 int i, j;
8335 enum rtx_code next_code;
8336 rtx new_rtx, tem;
8338 /* Select the code to be used in recursive calls. Once we are inside an
8339 address, we stay there. If we have a comparison, set to COMPARE,
8340 but once inside, go back to our default of SET. */
8342 next_code = (code == MEM ? MEM
8343 : ((code == COMPARE || COMPARISON_P (x))
8344 && XEXP (x, 1) == const0_rtx) ? COMPARE
8345 : in_code == COMPARE || in_code == EQ ? SET : in_code);
8347 scalar_int_mode mode;
8348 if (is_a <scalar_int_mode> (GET_MODE (x), &mode))
8350 rtx new_rtx = make_compound_operation_int (mode, &x, in_code,
8351 &next_code);
8352 if (new_rtx)
8353 return new_rtx;
8354 code = GET_CODE (x);
8357 /* Now recursively process each operand of this operation. We need to
8358 handle ZERO_EXTEND specially so that we don't lose track of the
8359 inner mode. */
8360 if (code == ZERO_EXTEND)
8362 new_rtx = make_compound_operation (XEXP (x, 0), next_code);
8363 tem = simplify_const_unary_operation (ZERO_EXTEND, GET_MODE (x),
8364 new_rtx, GET_MODE (XEXP (x, 0)));
8365 if (tem)
8366 return tem;
8367 SUBST (XEXP (x, 0), new_rtx);
8368 return x;
8371 fmt = GET_RTX_FORMAT (code);
8372 for (i = 0; i < GET_RTX_LENGTH (code); i++)
8373 if (fmt[i] == 'e')
8375 new_rtx = make_compound_operation (XEXP (x, i), next_code);
8376 SUBST (XEXP (x, i), new_rtx);
8378 else if (fmt[i] == 'E')
8379 for (j = 0; j < XVECLEN (x, i); j++)
8381 new_rtx = make_compound_operation (XVECEXP (x, i, j), next_code);
8382 SUBST (XVECEXP (x, i, j), new_rtx);
8385 maybe_swap_commutative_operands (x);
8386 return x;
8389 /* Given M see if it is a value that would select a field of bits
8390 within an item, but not the entire word. Return -1 if not.
8391 Otherwise, return the starting position of the field, where 0 is the
8392 low-order bit.
8394 *PLEN is set to the length of the field. */
8396 static int
8397 get_pos_from_mask (unsigned HOST_WIDE_INT m, unsigned HOST_WIDE_INT *plen)
8399 /* Get the bit number of the first 1 bit from the right, -1 if none. */
8400 int pos = m ? ctz_hwi (m) : -1;
8401 int len = 0;
8403 if (pos >= 0)
8404 /* Now shift off the low-order zero bits and see if we have a
8405 power of two minus 1. */
8406 len = exact_log2 ((m >> pos) + 1);
8408 if (len <= 0)
8409 pos = -1;
8411 *plen = len;
8412 return pos;
8415 /* If X refers to a register that equals REG in value, replace these
8416 references with REG. */
8417 static rtx
8418 canon_reg_for_combine (rtx x, rtx reg)
8420 rtx op0, op1, op2;
8421 const char *fmt;
8422 int i;
8423 bool copied;
8425 enum rtx_code code = GET_CODE (x);
8426 switch (GET_RTX_CLASS (code))
8428 case RTX_UNARY:
8429 op0 = canon_reg_for_combine (XEXP (x, 0), reg);
8430 if (op0 != XEXP (x, 0))
8431 return simplify_gen_unary (GET_CODE (x), GET_MODE (x), op0,
8432 GET_MODE (reg));
8433 break;
8435 case RTX_BIN_ARITH:
8436 case RTX_COMM_ARITH:
8437 op0 = canon_reg_for_combine (XEXP (x, 0), reg);
8438 op1 = canon_reg_for_combine (XEXP (x, 1), reg);
8439 if (op0 != XEXP (x, 0) || op1 != XEXP (x, 1))
8440 return simplify_gen_binary (GET_CODE (x), GET_MODE (x), op0, op1);
8441 break;
8443 case RTX_COMPARE:
8444 case RTX_COMM_COMPARE:
8445 op0 = canon_reg_for_combine (XEXP (x, 0), reg);
8446 op1 = canon_reg_for_combine (XEXP (x, 1), reg);
8447 if (op0 != XEXP (x, 0) || op1 != XEXP (x, 1))
8448 return simplify_gen_relational (GET_CODE (x), GET_MODE (x),
8449 GET_MODE (op0), op0, op1);
8450 break;
8452 case RTX_TERNARY:
8453 case RTX_BITFIELD_OPS:
8454 op0 = canon_reg_for_combine (XEXP (x, 0), reg);
8455 op1 = canon_reg_for_combine (XEXP (x, 1), reg);
8456 op2 = canon_reg_for_combine (XEXP (x, 2), reg);
8457 if (op0 != XEXP (x, 0) || op1 != XEXP (x, 1) || op2 != XEXP (x, 2))
8458 return simplify_gen_ternary (GET_CODE (x), GET_MODE (x),
8459 GET_MODE (op0), op0, op1, op2);
8460 /* FALLTHRU */
8462 case RTX_OBJ:
8463 if (REG_P (x))
8465 if (rtx_equal_p (get_last_value (reg), x)
8466 || rtx_equal_p (reg, get_last_value (x)))
8467 return reg;
8468 else
8469 break;
8472 /* fall through */
8474 default:
8475 fmt = GET_RTX_FORMAT (code);
8476 copied = false;
8477 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
8478 if (fmt[i] == 'e')
8480 rtx op = canon_reg_for_combine (XEXP (x, i), reg);
8481 if (op != XEXP (x, i))
8483 if (!copied)
8485 copied = true;
8486 x = copy_rtx (x);
8488 XEXP (x, i) = op;
8491 else if (fmt[i] == 'E')
8493 int j;
8494 for (j = 0; j < XVECLEN (x, i); j++)
8496 rtx op = canon_reg_for_combine (XVECEXP (x, i, j), reg);
8497 if (op != XVECEXP (x, i, j))
8499 if (!copied)
8501 copied = true;
8502 x = copy_rtx (x);
8504 XVECEXP (x, i, j) = op;
8509 break;
8512 return x;
8515 /* Return X converted to MODE. If the value is already truncated to
8516 MODE we can just return a subreg even though in the general case we
8517 would need an explicit truncation. */
8519 static rtx
8520 gen_lowpart_or_truncate (machine_mode mode, rtx x)
8522 if (!CONST_INT_P (x)
8523 && partial_subreg_p (mode, GET_MODE (x))
8524 && !TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (x))
8525 && !(REG_P (x) && reg_truncated_to_mode (mode, x)))
8527 /* Bit-cast X into an integer mode. */
8528 if (!SCALAR_INT_MODE_P (GET_MODE (x)))
8529 x = gen_lowpart (int_mode_for_mode (GET_MODE (x)).require (), x);
8530 x = simplify_gen_unary (TRUNCATE, int_mode_for_mode (mode).require (),
8531 x, GET_MODE (x));
8534 return gen_lowpart (mode, x);
8537 /* See if X can be simplified knowing that we will only refer to it in
8538 MODE and will only refer to those bits that are nonzero in MASK.
8539 If other bits are being computed or if masking operations are done
8540 that select a superset of the bits in MASK, they can sometimes be
8541 ignored.
8543 Return a possibly simplified expression, but always convert X to
8544 MODE. If X is a CONST_INT, AND the CONST_INT with MASK.
8546 If JUST_SELECT is nonzero, don't optimize by noticing that bits in MASK
8547 are all off in X. This is used when X will be complemented, by either
8548 NOT, NEG, or XOR. */
8550 static rtx
8551 force_to_mode (rtx x, machine_mode mode, unsigned HOST_WIDE_INT mask,
8552 int just_select)
8554 enum rtx_code code = GET_CODE (x);
8555 int next_select = just_select || code == XOR || code == NOT || code == NEG;
8556 machine_mode op_mode;
8557 unsigned HOST_WIDE_INT nonzero;
8559 /* If this is a CALL or ASM_OPERANDS, don't do anything. Some of the
8560 code below will do the wrong thing since the mode of such an
8561 expression is VOIDmode.
8563 Also do nothing if X is a CLOBBER; this can happen if X was
8564 the return value from a call to gen_lowpart. */
8565 if (code == CALL || code == ASM_OPERANDS || code == CLOBBER)
8566 return x;
8568 /* We want to perform the operation in its present mode unless we know
8569 that the operation is valid in MODE, in which case we do the operation
8570 in MODE. */
8571 op_mode = ((GET_MODE_CLASS (mode) == GET_MODE_CLASS (GET_MODE (x))
8572 && have_insn_for (code, mode))
8573 ? mode : GET_MODE (x));
8575 /* It is not valid to do a right-shift in a narrower mode
8576 than the one it came in with. */
8577 if ((code == LSHIFTRT || code == ASHIFTRT)
8578 && partial_subreg_p (mode, GET_MODE (x)))
8579 op_mode = GET_MODE (x);
8581 /* Truncate MASK to fit OP_MODE. */
8582 if (op_mode)
8583 mask &= GET_MODE_MASK (op_mode);
8585 /* Determine what bits of X are guaranteed to be (non)zero. */
8586 nonzero = nonzero_bits (x, mode);
8588 /* If none of the bits in X are needed, return a zero. */
8589 if (!just_select && (nonzero & mask) == 0 && !side_effects_p (x))
8590 x = const0_rtx;
8592 /* If X is a CONST_INT, return a new one. Do this here since the
8593 test below will fail. */
8594 if (CONST_INT_P (x))
8596 if (SCALAR_INT_MODE_P (mode))
8597 return gen_int_mode (INTVAL (x) & mask, mode);
8598 else
8600 x = GEN_INT (INTVAL (x) & mask);
8601 return gen_lowpart_common (mode, x);
8605 /* If X is narrower than MODE and we want all the bits in X's mode, just
8606 get X in the proper mode. */
8607 if (paradoxical_subreg_p (mode, GET_MODE (x))
8608 && (GET_MODE_MASK (GET_MODE (x)) & ~mask) == 0)
8609 return gen_lowpart (mode, x);
8611 /* We can ignore the effect of a SUBREG if it narrows the mode or
8612 if the constant masks to zero all the bits the mode doesn't have. */
8613 if (GET_CODE (x) == SUBREG
8614 && subreg_lowpart_p (x)
8615 && (partial_subreg_p (x)
8616 || (0 == (mask
8617 & GET_MODE_MASK (GET_MODE (x))
8618 & ~GET_MODE_MASK (GET_MODE (SUBREG_REG (x)))))))
8619 return force_to_mode (SUBREG_REG (x), mode, mask, next_select);
8621 scalar_int_mode int_mode, xmode;
8622 if (is_a <scalar_int_mode> (mode, &int_mode)
8623 && is_a <scalar_int_mode> (GET_MODE (x), &xmode))
8624 /* OP_MODE is either MODE or XMODE, so it must be a scalar
8625 integer too. */
8626 return force_int_to_mode (x, int_mode, xmode,
8627 as_a <scalar_int_mode> (op_mode),
8628 mask, just_select);
8630 return gen_lowpart_or_truncate (mode, x);
8633 /* Subroutine of force_to_mode that handles cases in which both X and
8634 the result are scalar integers. MODE is the mode of the result,
8635 XMODE is the mode of X, and OP_MODE says which of MODE or XMODE
8636 is preferred for simplified versions of X. The other arguments
8637 are as for force_to_mode. */
8639 static rtx
8640 force_int_to_mode (rtx x, scalar_int_mode mode, scalar_int_mode xmode,
8641 scalar_int_mode op_mode, unsigned HOST_WIDE_INT mask,
8642 int just_select)
8644 enum rtx_code code = GET_CODE (x);
8645 int next_select = just_select || code == XOR || code == NOT || code == NEG;
8646 unsigned HOST_WIDE_INT fuller_mask;
8647 rtx op0, op1, temp;
8649 /* When we have an arithmetic operation, or a shift whose count we
8650 do not know, we need to assume that all bits up to the highest-order
8651 bit in MASK will be needed. This is how we form such a mask. */
8652 if (mask & (HOST_WIDE_INT_1U << (HOST_BITS_PER_WIDE_INT - 1)))
8653 fuller_mask = HOST_WIDE_INT_M1U;
8654 else
8655 fuller_mask = ((HOST_WIDE_INT_1U << (floor_log2 (mask) + 1))
8656 - 1);
8658 switch (code)
8660 case CLOBBER:
8661 /* If X is a (clobber (const_int)), return it since we know we are
8662 generating something that won't match. */
8663 return x;
8665 case SIGN_EXTEND:
8666 case ZERO_EXTEND:
8667 case ZERO_EXTRACT:
8668 case SIGN_EXTRACT:
8669 x = expand_compound_operation (x);
8670 if (GET_CODE (x) != code)
8671 return force_to_mode (x, mode, mask, next_select);
8672 break;
8674 case TRUNCATE:
8675 /* Similarly for a truncate. */
8676 return force_to_mode (XEXP (x, 0), mode, mask, next_select);
8678 case AND:
8679 /* If this is an AND with a constant, convert it into an AND
8680 whose constant is the AND of that constant with MASK. If it
8681 remains an AND of MASK, delete it since it is redundant. */
8683 if (CONST_INT_P (XEXP (x, 1)))
8685 x = simplify_and_const_int (x, op_mode, XEXP (x, 0),
8686 mask & INTVAL (XEXP (x, 1)));
8687 xmode = op_mode;
8689 /* If X is still an AND, see if it is an AND with a mask that
8690 is just some low-order bits. If so, and it is MASK, we don't
8691 need it. */
8693 if (GET_CODE (x) == AND && CONST_INT_P (XEXP (x, 1))
8694 && (INTVAL (XEXP (x, 1)) & GET_MODE_MASK (xmode)) == mask)
8695 x = XEXP (x, 0);
8697 /* If it remains an AND, try making another AND with the bits
8698 in the mode mask that aren't in MASK turned on. If the
8699 constant in the AND is wide enough, this might make a
8700 cheaper constant. */
8702 if (GET_CODE (x) == AND && CONST_INT_P (XEXP (x, 1))
8703 && GET_MODE_MASK (xmode) != mask
8704 && HWI_COMPUTABLE_MODE_P (xmode))
8706 unsigned HOST_WIDE_INT cval
8707 = UINTVAL (XEXP (x, 1)) | (GET_MODE_MASK (xmode) & ~mask);
8708 rtx y;
8710 y = simplify_gen_binary (AND, xmode, XEXP (x, 0),
8711 gen_int_mode (cval, xmode));
8712 if (set_src_cost (y, xmode, optimize_this_for_speed_p)
8713 < set_src_cost (x, xmode, optimize_this_for_speed_p))
8714 x = y;
8717 break;
8720 goto binop;
8722 case PLUS:
8723 /* In (and (plus FOO C1) M), if M is a mask that just turns off
8724 low-order bits (as in an alignment operation) and FOO is already
8725 aligned to that boundary, mask C1 to that boundary as well.
8726 This may eliminate that PLUS and, later, the AND. */
8729 unsigned int width = GET_MODE_PRECISION (mode);
8730 unsigned HOST_WIDE_INT smask = mask;
8732 /* If MODE is narrower than HOST_WIDE_INT and mask is a negative
8733 number, sign extend it. */
8735 if (width < HOST_BITS_PER_WIDE_INT
8736 && (smask & (HOST_WIDE_INT_1U << (width - 1))) != 0)
8737 smask |= HOST_WIDE_INT_M1U << width;
8739 if (CONST_INT_P (XEXP (x, 1))
8740 && pow2p_hwi (- smask)
8741 && (nonzero_bits (XEXP (x, 0), mode) & ~smask) == 0
8742 && (INTVAL (XEXP (x, 1)) & ~smask) != 0)
8743 return force_to_mode (plus_constant (xmode, XEXP (x, 0),
8744 (INTVAL (XEXP (x, 1)) & smask)),
8745 mode, smask, next_select);
8748 /* fall through */
8750 case MULT:
8751 /* Substituting into the operands of a widening MULT is not likely to
8752 create RTL matching a machine insn. */
8753 if (code == MULT
8754 && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
8755 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND)
8756 && (GET_CODE (XEXP (x, 1)) == ZERO_EXTEND
8757 || GET_CODE (XEXP (x, 1)) == SIGN_EXTEND)
8758 && REG_P (XEXP (XEXP (x, 0), 0))
8759 && REG_P (XEXP (XEXP (x, 1), 0)))
8760 return gen_lowpart_or_truncate (mode, x);
8762 /* For PLUS, MINUS and MULT, we need any bits less significant than the
8763 most significant bit in MASK since carries from those bits will
8764 affect the bits we are interested in. */
8765 mask = fuller_mask;
8766 goto binop;
8768 case MINUS:
8769 /* If X is (minus C Y) where C's least set bit is larger than any bit
8770 in the mask, then we may replace with (neg Y). */
8771 if (CONST_INT_P (XEXP (x, 0))
8772 && least_bit_hwi (UINTVAL (XEXP (x, 0))) > mask)
8774 x = simplify_gen_unary (NEG, xmode, XEXP (x, 1), xmode);
8775 return force_to_mode (x, mode, mask, next_select);
8778 /* Similarly, if C contains every bit in the fuller_mask, then we may
8779 replace with (not Y). */
8780 if (CONST_INT_P (XEXP (x, 0))
8781 && ((UINTVAL (XEXP (x, 0)) | fuller_mask) == UINTVAL (XEXP (x, 0))))
8783 x = simplify_gen_unary (NOT, xmode, XEXP (x, 1), xmode);
8784 return force_to_mode (x, mode, mask, next_select);
8787 mask = fuller_mask;
8788 goto binop;
8790 case IOR:
8791 case XOR:
8792 /* If X is (ior (lshiftrt FOO C1) C2), try to commute the IOR and
8793 LSHIFTRT so we end up with an (and (lshiftrt (ior ...) ...) ...)
8794 operation which may be a bitfield extraction. Ensure that the
8795 constant we form is not wider than the mode of X. */
8797 if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
8798 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
8799 && INTVAL (XEXP (XEXP (x, 0), 1)) >= 0
8800 && INTVAL (XEXP (XEXP (x, 0), 1)) < HOST_BITS_PER_WIDE_INT
8801 && CONST_INT_P (XEXP (x, 1))
8802 && ((INTVAL (XEXP (XEXP (x, 0), 1))
8803 + floor_log2 (INTVAL (XEXP (x, 1))))
8804 < GET_MODE_PRECISION (xmode))
8805 && (UINTVAL (XEXP (x, 1))
8806 & ~nonzero_bits (XEXP (x, 0), xmode)) == 0)
8808 temp = gen_int_mode ((INTVAL (XEXP (x, 1)) & mask)
8809 << INTVAL (XEXP (XEXP (x, 0), 1)),
8810 xmode);
8811 temp = simplify_gen_binary (GET_CODE (x), xmode,
8812 XEXP (XEXP (x, 0), 0), temp);
8813 x = simplify_gen_binary (LSHIFTRT, xmode, temp,
8814 XEXP (XEXP (x, 0), 1));
8815 return force_to_mode (x, mode, mask, next_select);
8818 binop:
8819 /* For most binary operations, just propagate into the operation and
8820 change the mode if we have an operation of that mode. */
8822 op0 = force_to_mode (XEXP (x, 0), mode, mask, next_select);
8823 op1 = force_to_mode (XEXP (x, 1), mode, mask, next_select);
8825 /* If we ended up truncating both operands, truncate the result of the
8826 operation instead. */
8827 if (GET_CODE (op0) == TRUNCATE
8828 && GET_CODE (op1) == TRUNCATE)
8830 op0 = XEXP (op0, 0);
8831 op1 = XEXP (op1, 0);
8834 op0 = gen_lowpart_or_truncate (op_mode, op0);
8835 op1 = gen_lowpart_or_truncate (op_mode, op1);
8837 if (op_mode != xmode || op0 != XEXP (x, 0) || op1 != XEXP (x, 1))
8839 x = simplify_gen_binary (code, op_mode, op0, op1);
8840 xmode = op_mode;
8842 break;
8844 case ASHIFT:
8845 /* For left shifts, do the same, but just for the first operand.
8846 However, we cannot do anything with shifts where we cannot
8847 guarantee that the counts are smaller than the size of the mode
8848 because such a count will have a different meaning in a
8849 wider mode. */
8851 if (! (CONST_INT_P (XEXP (x, 1))
8852 && INTVAL (XEXP (x, 1)) >= 0
8853 && INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (mode))
8854 && ! (GET_MODE (XEXP (x, 1)) != VOIDmode
8855 && (nonzero_bits (XEXP (x, 1), GET_MODE (XEXP (x, 1)))
8856 < (unsigned HOST_WIDE_INT) GET_MODE_PRECISION (mode))))
8857 break;
8859 /* If the shift count is a constant and we can do arithmetic in
8860 the mode of the shift, refine which bits we need. Otherwise, use the
8861 conservative form of the mask. */
8862 if (CONST_INT_P (XEXP (x, 1))
8863 && INTVAL (XEXP (x, 1)) >= 0
8864 && INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (op_mode)
8865 && HWI_COMPUTABLE_MODE_P (op_mode))
8866 mask >>= INTVAL (XEXP (x, 1));
8867 else
8868 mask = fuller_mask;
8870 op0 = gen_lowpart_or_truncate (op_mode,
8871 force_to_mode (XEXP (x, 0), op_mode,
8872 mask, next_select));
8874 if (op_mode != xmode || op0 != XEXP (x, 0))
8876 x = simplify_gen_binary (code, op_mode, op0, XEXP (x, 1));
8877 xmode = op_mode;
8879 break;
8881 case LSHIFTRT:
8882 /* Here we can only do something if the shift count is a constant,
8883 this shift constant is valid for the host, and we can do arithmetic
8884 in OP_MODE. */
8886 if (CONST_INT_P (XEXP (x, 1))
8887 && INTVAL (XEXP (x, 1)) >= 0
8888 && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT
8889 && HWI_COMPUTABLE_MODE_P (op_mode))
8891 rtx inner = XEXP (x, 0);
8892 unsigned HOST_WIDE_INT inner_mask;
8894 /* Select the mask of the bits we need for the shift operand. */
8895 inner_mask = mask << INTVAL (XEXP (x, 1));
8897 /* We can only change the mode of the shift if we can do arithmetic
8898 in the mode of the shift and INNER_MASK is no wider than the
8899 width of X's mode. */
8900 if ((inner_mask & ~GET_MODE_MASK (xmode)) != 0)
8901 op_mode = xmode;
8903 inner = force_to_mode (inner, op_mode, inner_mask, next_select);
8905 if (xmode != op_mode || inner != XEXP (x, 0))
8907 x = simplify_gen_binary (LSHIFTRT, op_mode, inner, XEXP (x, 1));
8908 xmode = op_mode;
8912 /* If we have (and (lshiftrt FOO C1) C2) where the combination of the
8913 shift and AND produces only copies of the sign bit (C2 is one less
8914 than a power of two), we can do this with just a shift. */
8916 if (GET_CODE (x) == LSHIFTRT
8917 && CONST_INT_P (XEXP (x, 1))
8918 /* The shift puts one of the sign bit copies in the least significant
8919 bit. */
8920 && ((INTVAL (XEXP (x, 1))
8921 + num_sign_bit_copies (XEXP (x, 0), GET_MODE (XEXP (x, 0))))
8922 >= GET_MODE_PRECISION (xmode))
8923 && pow2p_hwi (mask + 1)
8924 /* Number of bits left after the shift must be more than the mask
8925 needs. */
8926 && ((INTVAL (XEXP (x, 1)) + exact_log2 (mask + 1))
8927 <= GET_MODE_PRECISION (xmode))
8928 /* Must be more sign bit copies than the mask needs. */
8929 && ((int) num_sign_bit_copies (XEXP (x, 0), GET_MODE (XEXP (x, 0)))
8930 >= exact_log2 (mask + 1)))
8931 x = simplify_gen_binary (LSHIFTRT, xmode, XEXP (x, 0),
8932 GEN_INT (GET_MODE_PRECISION (xmode)
8933 - exact_log2 (mask + 1)));
8935 goto shiftrt;
8937 case ASHIFTRT:
8938 /* If we are just looking for the sign bit, we don't need this shift at
8939 all, even if it has a variable count. */
8940 if (val_signbit_p (xmode, mask))
8941 return force_to_mode (XEXP (x, 0), mode, mask, next_select);
8943 /* If this is a shift by a constant, get a mask that contains those bits
8944 that are not copies of the sign bit. We then have two cases: If
8945 MASK only includes those bits, this can be a logical shift, which may
8946 allow simplifications. If MASK is a single-bit field not within
8947 those bits, we are requesting a copy of the sign bit and hence can
8948 shift the sign bit to the appropriate location. */
8950 if (CONST_INT_P (XEXP (x, 1)) && INTVAL (XEXP (x, 1)) >= 0
8951 && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT)
8953 unsigned HOST_WIDE_INT nonzero;
8954 int i;
8956 /* If the considered data is wider than HOST_WIDE_INT, we can't
8957 represent a mask for all its bits in a single scalar.
8958 But we only care about the lower bits, so calculate these. */
8960 if (GET_MODE_PRECISION (xmode) > HOST_BITS_PER_WIDE_INT)
8962 nonzero = HOST_WIDE_INT_M1U;
8964 /* GET_MODE_PRECISION (GET_MODE (x)) - INTVAL (XEXP (x, 1))
8965 is the number of bits a full-width mask would have set.
8966 We need only shift if these are fewer than nonzero can
8967 hold. If not, we must keep all bits set in nonzero. */
8969 if (GET_MODE_PRECISION (xmode) - INTVAL (XEXP (x, 1))
8970 < HOST_BITS_PER_WIDE_INT)
8971 nonzero >>= INTVAL (XEXP (x, 1))
8972 + HOST_BITS_PER_WIDE_INT
8973 - GET_MODE_PRECISION (xmode);
8975 else
8977 nonzero = GET_MODE_MASK (xmode);
8978 nonzero >>= INTVAL (XEXP (x, 1));
8981 if ((mask & ~nonzero) == 0)
8983 x = simplify_shift_const (NULL_RTX, LSHIFTRT, xmode,
8984 XEXP (x, 0), INTVAL (XEXP (x, 1)));
8985 if (GET_CODE (x) != ASHIFTRT)
8986 return force_to_mode (x, mode, mask, next_select);
8989 else if ((i = exact_log2 (mask)) >= 0)
8991 x = simplify_shift_const
8992 (NULL_RTX, LSHIFTRT, xmode, XEXP (x, 0),
8993 GET_MODE_PRECISION (xmode) - 1 - i);
8995 if (GET_CODE (x) != ASHIFTRT)
8996 return force_to_mode (x, mode, mask, next_select);
9000 /* If MASK is 1, convert this to an LSHIFTRT. This can be done
9001 even if the shift count isn't a constant. */
9002 if (mask == 1)
9003 x = simplify_gen_binary (LSHIFTRT, xmode, XEXP (x, 0), XEXP (x, 1));
9005 shiftrt:
9007 /* If this is a zero- or sign-extension operation that just affects bits
9008 we don't care about, remove it. Be sure the call above returned
9009 something that is still a shift. */
9011 if ((GET_CODE (x) == LSHIFTRT || GET_CODE (x) == ASHIFTRT)
9012 && CONST_INT_P (XEXP (x, 1))
9013 && INTVAL (XEXP (x, 1)) >= 0
9014 && (INTVAL (XEXP (x, 1))
9015 <= GET_MODE_PRECISION (xmode) - (floor_log2 (mask) + 1))
9016 && GET_CODE (XEXP (x, 0)) == ASHIFT
9017 && XEXP (XEXP (x, 0), 1) == XEXP (x, 1))
9018 return force_to_mode (XEXP (XEXP (x, 0), 0), mode, mask,
9019 next_select);
9021 break;
9023 case ROTATE:
9024 case ROTATERT:
9025 /* If the shift count is constant and we can do computations
9026 in the mode of X, compute where the bits we care about are.
9027 Otherwise, we can't do anything. Don't change the mode of
9028 the shift or propagate MODE into the shift, though. */
9029 if (CONST_INT_P (XEXP (x, 1))
9030 && INTVAL (XEXP (x, 1)) >= 0)
9032 temp = simplify_binary_operation (code == ROTATE ? ROTATERT : ROTATE,
9033 xmode, gen_int_mode (mask, xmode),
9034 XEXP (x, 1));
9035 if (temp && CONST_INT_P (temp))
9036 x = simplify_gen_binary (code, xmode,
9037 force_to_mode (XEXP (x, 0), xmode,
9038 INTVAL (temp), next_select),
9039 XEXP (x, 1));
9041 break;
9043 case NEG:
9044 /* If we just want the low-order bit, the NEG isn't needed since it
9045 won't change the low-order bit. */
9046 if (mask == 1)
9047 return force_to_mode (XEXP (x, 0), mode, mask, just_select);
9049 /* We need any bits less significant than the most significant bit in
9050 MASK since carries from those bits will affect the bits we are
9051 interested in. */
9052 mask = fuller_mask;
9053 goto unop;
9055 case NOT:
9056 /* (not FOO) is (xor FOO CONST), so if FOO is an LSHIFTRT, we can do the
9057 same as the XOR case above. Ensure that the constant we form is not
9058 wider than the mode of X. */
9060 if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
9061 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
9062 && INTVAL (XEXP (XEXP (x, 0), 1)) >= 0
9063 && (INTVAL (XEXP (XEXP (x, 0), 1)) + floor_log2 (mask)
9064 < GET_MODE_PRECISION (xmode))
9065 && INTVAL (XEXP (XEXP (x, 0), 1)) < HOST_BITS_PER_WIDE_INT)
9067 temp = gen_int_mode (mask << INTVAL (XEXP (XEXP (x, 0), 1)), xmode);
9068 temp = simplify_gen_binary (XOR, xmode, XEXP (XEXP (x, 0), 0), temp);
9069 x = simplify_gen_binary (LSHIFTRT, xmode,
9070 temp, XEXP (XEXP (x, 0), 1));
9072 return force_to_mode (x, mode, mask, next_select);
9075 /* (and (not FOO) CONST) is (not (or FOO (not CONST))), so we must
9076 use the full mask inside the NOT. */
9077 mask = fuller_mask;
9079 unop:
9080 op0 = gen_lowpart_or_truncate (op_mode,
9081 force_to_mode (XEXP (x, 0), mode, mask,
9082 next_select));
9083 if (op_mode != xmode || op0 != XEXP (x, 0))
9085 x = simplify_gen_unary (code, op_mode, op0, op_mode);
9086 xmode = op_mode;
9088 break;
9090 case NE:
9091 /* (and (ne FOO 0) CONST) can be (and FOO CONST) if CONST is included
9092 in STORE_FLAG_VALUE and FOO has a single bit that might be nonzero,
9093 which is equal to STORE_FLAG_VALUE. */
9094 if ((mask & ~STORE_FLAG_VALUE) == 0
9095 && XEXP (x, 1) == const0_rtx
9096 && GET_MODE (XEXP (x, 0)) == mode
9097 && pow2p_hwi (nonzero_bits (XEXP (x, 0), mode))
9098 && (nonzero_bits (XEXP (x, 0), mode)
9099 == (unsigned HOST_WIDE_INT) STORE_FLAG_VALUE))
9100 return force_to_mode (XEXP (x, 0), mode, mask, next_select);
9102 break;
9104 case IF_THEN_ELSE:
9105 /* We have no way of knowing if the IF_THEN_ELSE can itself be
9106 written in a narrower mode. We play it safe and do not do so. */
9108 op0 = gen_lowpart_or_truncate (xmode,
9109 force_to_mode (XEXP (x, 1), mode,
9110 mask, next_select));
9111 op1 = gen_lowpart_or_truncate (xmode,
9112 force_to_mode (XEXP (x, 2), mode,
9113 mask, next_select));
9114 if (op0 != XEXP (x, 1) || op1 != XEXP (x, 2))
9115 x = simplify_gen_ternary (IF_THEN_ELSE, xmode,
9116 GET_MODE (XEXP (x, 0)), XEXP (x, 0),
9117 op0, op1);
9118 break;
9120 default:
9121 break;
9124 /* Ensure we return a value of the proper mode. */
9125 return gen_lowpart_or_truncate (mode, x);
9128 /* Return nonzero if X is an expression that has one of two values depending on
9129 whether some other value is zero or nonzero. In that case, we return the
9130 value that is being tested, *PTRUE is set to the value if the rtx being
9131 returned has a nonzero value, and *PFALSE is set to the other alternative.
9133 If we return zero, we set *PTRUE and *PFALSE to X. */
9135 static rtx
9136 if_then_else_cond (rtx x, rtx *ptrue, rtx *pfalse)
9138 machine_mode mode = GET_MODE (x);
9139 enum rtx_code code = GET_CODE (x);
9140 rtx cond0, cond1, true0, true1, false0, false1;
9141 unsigned HOST_WIDE_INT nz;
9142 scalar_int_mode int_mode;
9144 /* If we are comparing a value against zero, we are done. */
9145 if ((code == NE || code == EQ)
9146 && XEXP (x, 1) == const0_rtx)
9148 *ptrue = (code == NE) ? const_true_rtx : const0_rtx;
9149 *pfalse = (code == NE) ? const0_rtx : const_true_rtx;
9150 return XEXP (x, 0);
9153 /* If this is a unary operation whose operand has one of two values, apply
9154 our opcode to compute those values. */
9155 else if (UNARY_P (x)
9156 && (cond0 = if_then_else_cond (XEXP (x, 0), &true0, &false0)) != 0)
9158 *ptrue = simplify_gen_unary (code, mode, true0, GET_MODE (XEXP (x, 0)));
9159 *pfalse = simplify_gen_unary (code, mode, false0,
9160 GET_MODE (XEXP (x, 0)));
9161 return cond0;
9164 /* If this is a COMPARE, do nothing, since the IF_THEN_ELSE we would
9165 make can't possibly match and would suppress other optimizations. */
9166 else if (code == COMPARE)
9169 /* If this is a binary operation, see if either side has only one of two
9170 values. If either one does or if both do and they are conditional on
9171 the same value, compute the new true and false values. */
9172 else if (BINARY_P (x))
9174 rtx op0 = XEXP (x, 0);
9175 rtx op1 = XEXP (x, 1);
9176 cond0 = if_then_else_cond (op0, &true0, &false0);
9177 cond1 = if_then_else_cond (op1, &true1, &false1);
9179 if ((cond0 != 0 && cond1 != 0 && !rtx_equal_p (cond0, cond1))
9180 && (REG_P (op0) || REG_P (op1)))
9182 /* Try to enable a simplification by undoing work done by
9183 if_then_else_cond if it converted a REG into something more
9184 complex. */
9185 if (REG_P (op0))
9187 cond0 = 0;
9188 true0 = false0 = op0;
9190 else
9192 cond1 = 0;
9193 true1 = false1 = op1;
9197 if ((cond0 != 0 || cond1 != 0)
9198 && ! (cond0 != 0 && cond1 != 0 && !rtx_equal_p (cond0, cond1)))
9200 /* If if_then_else_cond returned zero, then true/false are the
9201 same rtl. We must copy one of them to prevent invalid rtl
9202 sharing. */
9203 if (cond0 == 0)
9204 true0 = copy_rtx (true0);
9205 else if (cond1 == 0)
9206 true1 = copy_rtx (true1);
9208 if (COMPARISON_P (x))
9210 *ptrue = simplify_gen_relational (code, mode, VOIDmode,
9211 true0, true1);
9212 *pfalse = simplify_gen_relational (code, mode, VOIDmode,
9213 false0, false1);
9215 else
9217 *ptrue = simplify_gen_binary (code, mode, true0, true1);
9218 *pfalse = simplify_gen_binary (code, mode, false0, false1);
9221 return cond0 ? cond0 : cond1;
9224 /* See if we have PLUS, IOR, XOR, MINUS or UMAX, where one of the
9225 operands is zero when the other is nonzero, and vice-versa,
9226 and STORE_FLAG_VALUE is 1 or -1. */
9228 if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
9229 && (code == PLUS || code == IOR || code == XOR || code == MINUS
9230 || code == UMAX)
9231 && GET_CODE (XEXP (x, 0)) == MULT && GET_CODE (XEXP (x, 1)) == MULT)
9233 rtx op0 = XEXP (XEXP (x, 0), 1);
9234 rtx op1 = XEXP (XEXP (x, 1), 1);
9236 cond0 = XEXP (XEXP (x, 0), 0);
9237 cond1 = XEXP (XEXP (x, 1), 0);
9239 if (COMPARISON_P (cond0)
9240 && COMPARISON_P (cond1)
9241 && ((GET_CODE (cond0) == reversed_comparison_code (cond1, NULL)
9242 && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 0))
9243 && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 1)))
9244 || ((swap_condition (GET_CODE (cond0))
9245 == reversed_comparison_code (cond1, NULL))
9246 && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 1))
9247 && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 0))))
9248 && ! side_effects_p (x))
9250 *ptrue = simplify_gen_binary (MULT, mode, op0, const_true_rtx);
9251 *pfalse = simplify_gen_binary (MULT, mode,
9252 (code == MINUS
9253 ? simplify_gen_unary (NEG, mode,
9254 op1, mode)
9255 : op1),
9256 const_true_rtx);
9257 return cond0;
9261 /* Similarly for MULT, AND and UMIN, except that for these the result
9262 is always zero. */
9263 if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
9264 && (code == MULT || code == AND || code == UMIN)
9265 && GET_CODE (XEXP (x, 0)) == MULT && GET_CODE (XEXP (x, 1)) == MULT)
9267 cond0 = XEXP (XEXP (x, 0), 0);
9268 cond1 = XEXP (XEXP (x, 1), 0);
9270 if (COMPARISON_P (cond0)
9271 && COMPARISON_P (cond1)
9272 && ((GET_CODE (cond0) == reversed_comparison_code (cond1, NULL)
9273 && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 0))
9274 && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 1)))
9275 || ((swap_condition (GET_CODE (cond0))
9276 == reversed_comparison_code (cond1, NULL))
9277 && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 1))
9278 && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 0))))
9279 && ! side_effects_p (x))
9281 *ptrue = *pfalse = const0_rtx;
9282 return cond0;
9287 else if (code == IF_THEN_ELSE)
9289 /* If we have IF_THEN_ELSE already, extract the condition and
9290 canonicalize it if it is NE or EQ. */
9291 cond0 = XEXP (x, 0);
9292 *ptrue = XEXP (x, 1), *pfalse = XEXP (x, 2);
9293 if (GET_CODE (cond0) == NE && XEXP (cond0, 1) == const0_rtx)
9294 return XEXP (cond0, 0);
9295 else if (GET_CODE (cond0) == EQ && XEXP (cond0, 1) == const0_rtx)
9297 *ptrue = XEXP (x, 2), *pfalse = XEXP (x, 1);
9298 return XEXP (cond0, 0);
9300 else
9301 return cond0;
9304 /* If X is a SUBREG, we can narrow both the true and false values
9305 if the inner expression, if there is a condition. */
9306 else if (code == SUBREG
9307 && 0 != (cond0 = if_then_else_cond (SUBREG_REG (x),
9308 &true0, &false0)))
9310 true0 = simplify_gen_subreg (mode, true0,
9311 GET_MODE (SUBREG_REG (x)), SUBREG_BYTE (x));
9312 false0 = simplify_gen_subreg (mode, false0,
9313 GET_MODE (SUBREG_REG (x)), SUBREG_BYTE (x));
9314 if (true0 && false0)
9316 *ptrue = true0;
9317 *pfalse = false0;
9318 return cond0;
9322 /* If X is a constant, this isn't special and will cause confusions
9323 if we treat it as such. Likewise if it is equivalent to a constant. */
9324 else if (CONSTANT_P (x)
9325 || ((cond0 = get_last_value (x)) != 0 && CONSTANT_P (cond0)))
9328 /* If we're in BImode, canonicalize on 0 and STORE_FLAG_VALUE, as that
9329 will be least confusing to the rest of the compiler. */
9330 else if (mode == BImode)
9332 *ptrue = GEN_INT (STORE_FLAG_VALUE), *pfalse = const0_rtx;
9333 return x;
9336 /* If X is known to be either 0 or -1, those are the true and
9337 false values when testing X. */
9338 else if (x == constm1_rtx || x == const0_rtx
9339 || (is_a <scalar_int_mode> (mode, &int_mode)
9340 && (num_sign_bit_copies (x, int_mode)
9341 == GET_MODE_PRECISION (int_mode))))
9343 *ptrue = constm1_rtx, *pfalse = const0_rtx;
9344 return x;
9347 /* Likewise for 0 or a single bit. */
9348 else if (HWI_COMPUTABLE_MODE_P (mode)
9349 && pow2p_hwi (nz = nonzero_bits (x, mode)))
9351 *ptrue = gen_int_mode (nz, mode), *pfalse = const0_rtx;
9352 return x;
9355 /* Otherwise fail; show no condition with true and false values the same. */
9356 *ptrue = *pfalse = x;
9357 return 0;
9360 /* Return the value of expression X given the fact that condition COND
9361 is known to be true when applied to REG as its first operand and VAL
9362 as its second. X is known to not be shared and so can be modified in
9363 place.
9365 We only handle the simplest cases, and specifically those cases that
9366 arise with IF_THEN_ELSE expressions. */
9368 static rtx
9369 known_cond (rtx x, enum rtx_code cond, rtx reg, rtx val)
9371 enum rtx_code code = GET_CODE (x);
9372 const char *fmt;
9373 int i, j;
9375 if (side_effects_p (x))
9376 return x;
9378 /* If either operand of the condition is a floating point value,
9379 then we have to avoid collapsing an EQ comparison. */
9380 if (cond == EQ
9381 && rtx_equal_p (x, reg)
9382 && ! FLOAT_MODE_P (GET_MODE (x))
9383 && ! FLOAT_MODE_P (GET_MODE (val)))
9384 return val;
9386 if (cond == UNEQ && rtx_equal_p (x, reg))
9387 return val;
9389 /* If X is (abs REG) and we know something about REG's relationship
9390 with zero, we may be able to simplify this. */
9392 if (code == ABS && rtx_equal_p (XEXP (x, 0), reg) && val == const0_rtx)
9393 switch (cond)
9395 case GE: case GT: case EQ:
9396 return XEXP (x, 0);
9397 case LT: case LE:
9398 return simplify_gen_unary (NEG, GET_MODE (XEXP (x, 0)),
9399 XEXP (x, 0),
9400 GET_MODE (XEXP (x, 0)));
9401 default:
9402 break;
9405 /* The only other cases we handle are MIN, MAX, and comparisons if the
9406 operands are the same as REG and VAL. */
9408 else if (COMPARISON_P (x) || COMMUTATIVE_ARITH_P (x))
9410 if (rtx_equal_p (XEXP (x, 0), val))
9412 std::swap (val, reg);
9413 cond = swap_condition (cond);
9416 if (rtx_equal_p (XEXP (x, 0), reg) && rtx_equal_p (XEXP (x, 1), val))
9418 if (COMPARISON_P (x))
9420 if (comparison_dominates_p (cond, code))
9421 return const_true_rtx;
9423 code = reversed_comparison_code (x, NULL);
9424 if (code != UNKNOWN
9425 && comparison_dominates_p (cond, code))
9426 return const0_rtx;
9427 else
9428 return x;
9430 else if (code == SMAX || code == SMIN
9431 || code == UMIN || code == UMAX)
9433 int unsignedp = (code == UMIN || code == UMAX);
9435 /* Do not reverse the condition when it is NE or EQ.
9436 This is because we cannot conclude anything about
9437 the value of 'SMAX (x, y)' when x is not equal to y,
9438 but we can when x equals y. */
9439 if ((code == SMAX || code == UMAX)
9440 && ! (cond == EQ || cond == NE))
9441 cond = reverse_condition (cond);
9443 switch (cond)
9445 case GE: case GT:
9446 return unsignedp ? x : XEXP (x, 1);
9447 case LE: case LT:
9448 return unsignedp ? x : XEXP (x, 0);
9449 case GEU: case GTU:
9450 return unsignedp ? XEXP (x, 1) : x;
9451 case LEU: case LTU:
9452 return unsignedp ? XEXP (x, 0) : x;
9453 default:
9454 break;
9459 else if (code == SUBREG)
9461 machine_mode inner_mode = GET_MODE (SUBREG_REG (x));
9462 rtx new_rtx, r = known_cond (SUBREG_REG (x), cond, reg, val);
9464 if (SUBREG_REG (x) != r)
9466 /* We must simplify subreg here, before we lose track of the
9467 original inner_mode. */
9468 new_rtx = simplify_subreg (GET_MODE (x), r,
9469 inner_mode, SUBREG_BYTE (x));
9470 if (new_rtx)
9471 return new_rtx;
9472 else
9473 SUBST (SUBREG_REG (x), r);
9476 return x;
9478 /* We don't have to handle SIGN_EXTEND here, because even in the
9479 case of replacing something with a modeless CONST_INT, a
9480 CONST_INT is already (supposed to be) a valid sign extension for
9481 its narrower mode, which implies it's already properly
9482 sign-extended for the wider mode. Now, for ZERO_EXTEND, the
9483 story is different. */
9484 else if (code == ZERO_EXTEND)
9486 machine_mode inner_mode = GET_MODE (XEXP (x, 0));
9487 rtx new_rtx, r = known_cond (XEXP (x, 0), cond, reg, val);
9489 if (XEXP (x, 0) != r)
9491 /* We must simplify the zero_extend here, before we lose
9492 track of the original inner_mode. */
9493 new_rtx = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
9494 r, inner_mode);
9495 if (new_rtx)
9496 return new_rtx;
9497 else
9498 SUBST (XEXP (x, 0), r);
9501 return x;
9504 fmt = GET_RTX_FORMAT (code);
9505 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
9507 if (fmt[i] == 'e')
9508 SUBST (XEXP (x, i), known_cond (XEXP (x, i), cond, reg, val));
9509 else if (fmt[i] == 'E')
9510 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
9511 SUBST (XVECEXP (x, i, j), known_cond (XVECEXP (x, i, j),
9512 cond, reg, val));
9515 return x;
9518 /* See if X and Y are equal for the purposes of seeing if we can rewrite an
9519 assignment as a field assignment. */
9521 static int
9522 rtx_equal_for_field_assignment_p (rtx x, rtx y, bool widen_x)
9524 if (widen_x && GET_MODE (x) != GET_MODE (y))
9526 if (paradoxical_subreg_p (GET_MODE (x), GET_MODE (y)))
9527 return 0;
9528 if (BYTES_BIG_ENDIAN != WORDS_BIG_ENDIAN)
9529 return 0;
9530 x = adjust_address_nv (x, GET_MODE (y),
9531 byte_lowpart_offset (GET_MODE (y),
9532 GET_MODE (x)));
9535 if (x == y || rtx_equal_p (x, y))
9536 return 1;
9538 if (x == 0 || y == 0 || GET_MODE (x) != GET_MODE (y))
9539 return 0;
9541 /* Check for a paradoxical SUBREG of a MEM compared with the MEM.
9542 Note that all SUBREGs of MEM are paradoxical; otherwise they
9543 would have been rewritten. */
9544 if (MEM_P (x) && GET_CODE (y) == SUBREG
9545 && MEM_P (SUBREG_REG (y))
9546 && rtx_equal_p (SUBREG_REG (y),
9547 gen_lowpart (GET_MODE (SUBREG_REG (y)), x)))
9548 return 1;
9550 if (MEM_P (y) && GET_CODE (x) == SUBREG
9551 && MEM_P (SUBREG_REG (x))
9552 && rtx_equal_p (SUBREG_REG (x),
9553 gen_lowpart (GET_MODE (SUBREG_REG (x)), y)))
9554 return 1;
9556 /* We used to see if get_last_value of X and Y were the same but that's
9557 not correct. In one direction, we'll cause the assignment to have
9558 the wrong destination and in the case, we'll import a register into this
9559 insn that might have already have been dead. So fail if none of the
9560 above cases are true. */
9561 return 0;
9564 /* See if X, a SET operation, can be rewritten as a bit-field assignment.
9565 Return that assignment if so.
9567 We only handle the most common cases. */
9569 static rtx
9570 make_field_assignment (rtx x)
9572 rtx dest = SET_DEST (x);
9573 rtx src = SET_SRC (x);
9574 rtx assign;
9575 rtx rhs, lhs;
9576 HOST_WIDE_INT c1;
9577 HOST_WIDE_INT pos;
9578 unsigned HOST_WIDE_INT len;
9579 rtx other;
9581 /* All the rules in this function are specific to scalar integers. */
9582 scalar_int_mode mode;
9583 if (!is_a <scalar_int_mode> (GET_MODE (dest), &mode))
9584 return x;
9586 /* If SRC was (and (not (ashift (const_int 1) POS)) DEST), this is
9587 a clear of a one-bit field. We will have changed it to
9588 (and (rotate (const_int -2) POS) DEST), so check for that. Also check
9589 for a SUBREG. */
9591 if (GET_CODE (src) == AND && GET_CODE (XEXP (src, 0)) == ROTATE
9592 && CONST_INT_P (XEXP (XEXP (src, 0), 0))
9593 && INTVAL (XEXP (XEXP (src, 0), 0)) == -2
9594 && rtx_equal_for_field_assignment_p (dest, XEXP (src, 1)))
9596 assign = make_extraction (VOIDmode, dest, 0, XEXP (XEXP (src, 0), 1),
9597 1, 1, 1, 0);
9598 if (assign != 0)
9599 return gen_rtx_SET (assign, const0_rtx);
9600 return x;
9603 if (GET_CODE (src) == AND && GET_CODE (XEXP (src, 0)) == SUBREG
9604 && subreg_lowpart_p (XEXP (src, 0))
9605 && partial_subreg_p (XEXP (src, 0))
9606 && GET_CODE (SUBREG_REG (XEXP (src, 0))) == ROTATE
9607 && CONST_INT_P (XEXP (SUBREG_REG (XEXP (src, 0)), 0))
9608 && INTVAL (XEXP (SUBREG_REG (XEXP (src, 0)), 0)) == -2
9609 && rtx_equal_for_field_assignment_p (dest, XEXP (src, 1)))
9611 assign = make_extraction (VOIDmode, dest, 0,
9612 XEXP (SUBREG_REG (XEXP (src, 0)), 1),
9613 1, 1, 1, 0);
9614 if (assign != 0)
9615 return gen_rtx_SET (assign, const0_rtx);
9616 return x;
9619 /* If SRC is (ior (ashift (const_int 1) POS) DEST), this is a set of a
9620 one-bit field. */
9621 if (GET_CODE (src) == IOR && GET_CODE (XEXP (src, 0)) == ASHIFT
9622 && XEXP (XEXP (src, 0), 0) == const1_rtx
9623 && rtx_equal_for_field_assignment_p (dest, XEXP (src, 1)))
9625 assign = make_extraction (VOIDmode, dest, 0, XEXP (XEXP (src, 0), 1),
9626 1, 1, 1, 0);
9627 if (assign != 0)
9628 return gen_rtx_SET (assign, const1_rtx);
9629 return x;
9632 /* If DEST is already a field assignment, i.e. ZERO_EXTRACT, and the
9633 SRC is an AND with all bits of that field set, then we can discard
9634 the AND. */
9635 if (GET_CODE (dest) == ZERO_EXTRACT
9636 && CONST_INT_P (XEXP (dest, 1))
9637 && GET_CODE (src) == AND
9638 && CONST_INT_P (XEXP (src, 1)))
9640 HOST_WIDE_INT width = INTVAL (XEXP (dest, 1));
9641 unsigned HOST_WIDE_INT and_mask = INTVAL (XEXP (src, 1));
9642 unsigned HOST_WIDE_INT ze_mask;
9644 if (width >= HOST_BITS_PER_WIDE_INT)
9645 ze_mask = -1;
9646 else
9647 ze_mask = ((unsigned HOST_WIDE_INT)1 << width) - 1;
9649 /* Complete overlap. We can remove the source AND. */
9650 if ((and_mask & ze_mask) == ze_mask)
9651 return gen_rtx_SET (dest, XEXP (src, 0));
9653 /* Partial overlap. We can reduce the source AND. */
9654 if ((and_mask & ze_mask) != and_mask)
9656 src = gen_rtx_AND (mode, XEXP (src, 0),
9657 gen_int_mode (and_mask & ze_mask, mode));
9658 return gen_rtx_SET (dest, src);
9662 /* The other case we handle is assignments into a constant-position
9663 field. They look like (ior/xor (and DEST C1) OTHER). If C1 represents
9664 a mask that has all one bits except for a group of zero bits and
9665 OTHER is known to have zeros where C1 has ones, this is such an
9666 assignment. Compute the position and length from C1. Shift OTHER
9667 to the appropriate position, force it to the required mode, and
9668 make the extraction. Check for the AND in both operands. */
9670 /* One or more SUBREGs might obscure the constant-position field
9671 assignment. The first one we are likely to encounter is an outer
9672 narrowing SUBREG, which we can just strip for the purposes of
9673 identifying the constant-field assignment. */
9674 scalar_int_mode src_mode = mode;
9675 if (GET_CODE (src) == SUBREG
9676 && subreg_lowpart_p (src)
9677 && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (src)), &src_mode))
9678 src = SUBREG_REG (src);
9680 if (GET_CODE (src) != IOR && GET_CODE (src) != XOR)
9681 return x;
9683 rhs = expand_compound_operation (XEXP (src, 0));
9684 lhs = expand_compound_operation (XEXP (src, 1));
9686 if (GET_CODE (rhs) == AND
9687 && CONST_INT_P (XEXP (rhs, 1))
9688 && rtx_equal_for_field_assignment_p (XEXP (rhs, 0), dest))
9689 c1 = INTVAL (XEXP (rhs, 1)), other = lhs;
9690 /* The second SUBREG that might get in the way is a paradoxical
9691 SUBREG around the first operand of the AND. We want to
9692 pretend the operand is as wide as the destination here. We
9693 do this by adjusting the MEM to wider mode for the sole
9694 purpose of the call to rtx_equal_for_field_assignment_p. Also
9695 note this trick only works for MEMs. */
9696 else if (GET_CODE (rhs) == AND
9697 && paradoxical_subreg_p (XEXP (rhs, 0))
9698 && MEM_P (SUBREG_REG (XEXP (rhs, 0)))
9699 && CONST_INT_P (XEXP (rhs, 1))
9700 && rtx_equal_for_field_assignment_p (SUBREG_REG (XEXP (rhs, 0)),
9701 dest, true))
9702 c1 = INTVAL (XEXP (rhs, 1)), other = lhs;
9703 else if (GET_CODE (lhs) == AND
9704 && CONST_INT_P (XEXP (lhs, 1))
9705 && rtx_equal_for_field_assignment_p (XEXP (lhs, 0), dest))
9706 c1 = INTVAL (XEXP (lhs, 1)), other = rhs;
9707 /* The second SUBREG that might get in the way is a paradoxical
9708 SUBREG around the first operand of the AND. We want to
9709 pretend the operand is as wide as the destination here. We
9710 do this by adjusting the MEM to wider mode for the sole
9711 purpose of the call to rtx_equal_for_field_assignment_p. Also
9712 note this trick only works for MEMs. */
9713 else if (GET_CODE (lhs) == AND
9714 && paradoxical_subreg_p (XEXP (lhs, 0))
9715 && MEM_P (SUBREG_REG (XEXP (lhs, 0)))
9716 && CONST_INT_P (XEXP (lhs, 1))
9717 && rtx_equal_for_field_assignment_p (SUBREG_REG (XEXP (lhs, 0)),
9718 dest, true))
9719 c1 = INTVAL (XEXP (lhs, 1)), other = rhs;
9720 else
9721 return x;
9723 pos = get_pos_from_mask ((~c1) & GET_MODE_MASK (mode), &len);
9724 if (pos < 0
9725 || pos + len > GET_MODE_PRECISION (mode)
9726 || GET_MODE_PRECISION (mode) > HOST_BITS_PER_WIDE_INT
9727 || (c1 & nonzero_bits (other, mode)) != 0)
9728 return x;
9730 assign = make_extraction (VOIDmode, dest, pos, NULL_RTX, len, 1, 1, 0);
9731 if (assign == 0)
9732 return x;
9734 /* The mode to use for the source is the mode of the assignment, or of
9735 what is inside a possible STRICT_LOW_PART. */
9736 machine_mode new_mode = (GET_CODE (assign) == STRICT_LOW_PART
9737 ? GET_MODE (XEXP (assign, 0)) : GET_MODE (assign));
9739 /* Shift OTHER right POS places and make it the source, restricting it
9740 to the proper length and mode. */
9742 src = canon_reg_for_combine (simplify_shift_const (NULL_RTX, LSHIFTRT,
9743 src_mode, other, pos),
9744 dest);
9745 src = force_to_mode (src, new_mode,
9746 len >= HOST_BITS_PER_WIDE_INT
9747 ? HOST_WIDE_INT_M1U
9748 : (HOST_WIDE_INT_1U << len) - 1,
9751 /* If SRC is masked by an AND that does not make a difference in
9752 the value being stored, strip it. */
9753 if (GET_CODE (assign) == ZERO_EXTRACT
9754 && CONST_INT_P (XEXP (assign, 1))
9755 && INTVAL (XEXP (assign, 1)) < HOST_BITS_PER_WIDE_INT
9756 && GET_CODE (src) == AND
9757 && CONST_INT_P (XEXP (src, 1))
9758 && UINTVAL (XEXP (src, 1))
9759 == (HOST_WIDE_INT_1U << INTVAL (XEXP (assign, 1))) - 1)
9760 src = XEXP (src, 0);
9762 return gen_rtx_SET (assign, src);
9765 /* See if X is of the form (+ (* a c) (* b c)) and convert to (* (+ a b) c)
9766 if so. */
9768 static rtx
9769 apply_distributive_law (rtx x)
9771 enum rtx_code code = GET_CODE (x);
9772 enum rtx_code inner_code;
9773 rtx lhs, rhs, other;
9774 rtx tem;
9776 /* Distributivity is not true for floating point as it can change the
9777 value. So we don't do it unless -funsafe-math-optimizations. */
9778 if (FLOAT_MODE_P (GET_MODE (x))
9779 && ! flag_unsafe_math_optimizations)
9780 return x;
9782 /* The outer operation can only be one of the following: */
9783 if (code != IOR && code != AND && code != XOR
9784 && code != PLUS && code != MINUS)
9785 return x;
9787 lhs = XEXP (x, 0);
9788 rhs = XEXP (x, 1);
9790 /* If either operand is a primitive we can't do anything, so get out
9791 fast. */
9792 if (OBJECT_P (lhs) || OBJECT_P (rhs))
9793 return x;
9795 lhs = expand_compound_operation (lhs);
9796 rhs = expand_compound_operation (rhs);
9797 inner_code = GET_CODE (lhs);
9798 if (inner_code != GET_CODE (rhs))
9799 return x;
9801 /* See if the inner and outer operations distribute. */
9802 switch (inner_code)
9804 case LSHIFTRT:
9805 case ASHIFTRT:
9806 case AND:
9807 case IOR:
9808 /* These all distribute except over PLUS. */
9809 if (code == PLUS || code == MINUS)
9810 return x;
9811 break;
9813 case MULT:
9814 if (code != PLUS && code != MINUS)
9815 return x;
9816 break;
9818 case ASHIFT:
9819 /* This is also a multiply, so it distributes over everything. */
9820 break;
9822 /* This used to handle SUBREG, but this turned out to be counter-
9823 productive, since (subreg (op ...)) usually is not handled by
9824 insn patterns, and this "optimization" therefore transformed
9825 recognizable patterns into unrecognizable ones. Therefore the
9826 SUBREG case was removed from here.
9828 It is possible that distributing SUBREG over arithmetic operations
9829 leads to an intermediate result than can then be optimized further,
9830 e.g. by moving the outer SUBREG to the other side of a SET as done
9831 in simplify_set. This seems to have been the original intent of
9832 handling SUBREGs here.
9834 However, with current GCC this does not appear to actually happen,
9835 at least on major platforms. If some case is found where removing
9836 the SUBREG case here prevents follow-on optimizations, distributing
9837 SUBREGs ought to be re-added at that place, e.g. in simplify_set. */
9839 default:
9840 return x;
9843 /* Set LHS and RHS to the inner operands (A and B in the example
9844 above) and set OTHER to the common operand (C in the example).
9845 There is only one way to do this unless the inner operation is
9846 commutative. */
9847 if (COMMUTATIVE_ARITH_P (lhs)
9848 && rtx_equal_p (XEXP (lhs, 0), XEXP (rhs, 0)))
9849 other = XEXP (lhs, 0), lhs = XEXP (lhs, 1), rhs = XEXP (rhs, 1);
9850 else if (COMMUTATIVE_ARITH_P (lhs)
9851 && rtx_equal_p (XEXP (lhs, 0), XEXP (rhs, 1)))
9852 other = XEXP (lhs, 0), lhs = XEXP (lhs, 1), rhs = XEXP (rhs, 0);
9853 else if (COMMUTATIVE_ARITH_P (lhs)
9854 && rtx_equal_p (XEXP (lhs, 1), XEXP (rhs, 0)))
9855 other = XEXP (lhs, 1), lhs = XEXP (lhs, 0), rhs = XEXP (rhs, 1);
9856 else if (rtx_equal_p (XEXP (lhs, 1), XEXP (rhs, 1)))
9857 other = XEXP (lhs, 1), lhs = XEXP (lhs, 0), rhs = XEXP (rhs, 0);
9858 else
9859 return x;
9861 /* Form the new inner operation, seeing if it simplifies first. */
9862 tem = simplify_gen_binary (code, GET_MODE (x), lhs, rhs);
9864 /* There is one exception to the general way of distributing:
9865 (a | c) ^ (b | c) -> (a ^ b) & ~c */
9866 if (code == XOR && inner_code == IOR)
9868 inner_code = AND;
9869 other = simplify_gen_unary (NOT, GET_MODE (x), other, GET_MODE (x));
9872 /* We may be able to continuing distributing the result, so call
9873 ourselves recursively on the inner operation before forming the
9874 outer operation, which we return. */
9875 return simplify_gen_binary (inner_code, GET_MODE (x),
9876 apply_distributive_law (tem), other);
9879 /* See if X is of the form (* (+ A B) C), and if so convert to
9880 (+ (* A C) (* B C)) and try to simplify.
9882 Most of the time, this results in no change. However, if some of
9883 the operands are the same or inverses of each other, simplifications
9884 will result.
9886 For example, (and (ior A B) (not B)) can occur as the result of
9887 expanding a bit field assignment. When we apply the distributive
9888 law to this, we get (ior (and (A (not B))) (and (B (not B)))),
9889 which then simplifies to (and (A (not B))).
9891 Note that no checks happen on the validity of applying the inverse
9892 distributive law. This is pointless since we can do it in the
9893 few places where this routine is called.
9895 N is the index of the term that is decomposed (the arithmetic operation,
9896 i.e. (+ A B) in the first example above). !N is the index of the term that
9897 is distributed, i.e. of C in the first example above. */
9898 static rtx
9899 distribute_and_simplify_rtx (rtx x, int n)
9901 machine_mode mode;
9902 enum rtx_code outer_code, inner_code;
9903 rtx decomposed, distributed, inner_op0, inner_op1, new_op0, new_op1, tmp;
9905 /* Distributivity is not true for floating point as it can change the
9906 value. So we don't do it unless -funsafe-math-optimizations. */
9907 if (FLOAT_MODE_P (GET_MODE (x))
9908 && ! flag_unsafe_math_optimizations)
9909 return NULL_RTX;
9911 decomposed = XEXP (x, n);
9912 if (!ARITHMETIC_P (decomposed))
9913 return NULL_RTX;
9915 mode = GET_MODE (x);
9916 outer_code = GET_CODE (x);
9917 distributed = XEXP (x, !n);
9919 inner_code = GET_CODE (decomposed);
9920 inner_op0 = XEXP (decomposed, 0);
9921 inner_op1 = XEXP (decomposed, 1);
9923 /* Special case (and (xor B C) (not A)), which is equivalent to
9924 (xor (ior A B) (ior A C)) */
9925 if (outer_code == AND && inner_code == XOR && GET_CODE (distributed) == NOT)
9927 distributed = XEXP (distributed, 0);
9928 outer_code = IOR;
9931 if (n == 0)
9933 /* Distribute the second term. */
9934 new_op0 = simplify_gen_binary (outer_code, mode, inner_op0, distributed);
9935 new_op1 = simplify_gen_binary (outer_code, mode, inner_op1, distributed);
9937 else
9939 /* Distribute the first term. */
9940 new_op0 = simplify_gen_binary (outer_code, mode, distributed, inner_op0);
9941 new_op1 = simplify_gen_binary (outer_code, mode, distributed, inner_op1);
9944 tmp = apply_distributive_law (simplify_gen_binary (inner_code, mode,
9945 new_op0, new_op1));
9946 if (GET_CODE (tmp) != outer_code
9947 && (set_src_cost (tmp, mode, optimize_this_for_speed_p)
9948 < set_src_cost (x, mode, optimize_this_for_speed_p)))
9949 return tmp;
9951 return NULL_RTX;
9954 /* Simplify a logical `and' of VAROP with the constant CONSTOP, to be done
9955 in MODE. Return an equivalent form, if different from (and VAROP
9956 (const_int CONSTOP)). Otherwise, return NULL_RTX. */
9958 static rtx
9959 simplify_and_const_int_1 (scalar_int_mode mode, rtx varop,
9960 unsigned HOST_WIDE_INT constop)
9962 unsigned HOST_WIDE_INT nonzero;
9963 unsigned HOST_WIDE_INT orig_constop;
9964 rtx orig_varop;
9965 int i;
9967 orig_varop = varop;
9968 orig_constop = constop;
9969 if (GET_CODE (varop) == CLOBBER)
9970 return NULL_RTX;
9972 /* Simplify VAROP knowing that we will be only looking at some of the
9973 bits in it.
9975 Note by passing in CONSTOP, we guarantee that the bits not set in
9976 CONSTOP are not significant and will never be examined. We must
9977 ensure that is the case by explicitly masking out those bits
9978 before returning. */
9979 varop = force_to_mode (varop, mode, constop, 0);
9981 /* If VAROP is a CLOBBER, we will fail so return it. */
9982 if (GET_CODE (varop) == CLOBBER)
9983 return varop;
9985 /* If VAROP is a CONST_INT, then we need to apply the mask in CONSTOP
9986 to VAROP and return the new constant. */
9987 if (CONST_INT_P (varop))
9988 return gen_int_mode (INTVAL (varop) & constop, mode);
9990 /* See what bits may be nonzero in VAROP. Unlike the general case of
9991 a call to nonzero_bits, here we don't care about bits outside
9992 MODE. */
9994 nonzero = nonzero_bits (varop, mode) & GET_MODE_MASK (mode);
9996 /* Turn off all bits in the constant that are known to already be zero.
9997 Thus, if the AND isn't needed at all, we will have CONSTOP == NONZERO_BITS
9998 which is tested below. */
10000 constop &= nonzero;
10002 /* If we don't have any bits left, return zero. */
10003 if (constop == 0)
10004 return const0_rtx;
10006 /* If VAROP is a NEG of something known to be zero or 1 and CONSTOP is
10007 a power of two, we can replace this with an ASHIFT. */
10008 if (GET_CODE (varop) == NEG && nonzero_bits (XEXP (varop, 0), mode) == 1
10009 && (i = exact_log2 (constop)) >= 0)
10010 return simplify_shift_const (NULL_RTX, ASHIFT, mode, XEXP (varop, 0), i);
10012 /* If VAROP is an IOR or XOR, apply the AND to both branches of the IOR
10013 or XOR, then try to apply the distributive law. This may eliminate
10014 operations if either branch can be simplified because of the AND.
10015 It may also make some cases more complex, but those cases probably
10016 won't match a pattern either with or without this. */
10018 if (GET_CODE (varop) == IOR || GET_CODE (varop) == XOR)
10020 scalar_int_mode varop_mode = as_a <scalar_int_mode> (GET_MODE (varop));
10021 return
10022 gen_lowpart
10023 (mode,
10024 apply_distributive_law
10025 (simplify_gen_binary (GET_CODE (varop), varop_mode,
10026 simplify_and_const_int (NULL_RTX, varop_mode,
10027 XEXP (varop, 0),
10028 constop),
10029 simplify_and_const_int (NULL_RTX, varop_mode,
10030 XEXP (varop, 1),
10031 constop))));
10034 /* If VAROP is PLUS, and the constant is a mask of low bits, distribute
10035 the AND and see if one of the operands simplifies to zero. If so, we
10036 may eliminate it. */
10038 if (GET_CODE (varop) == PLUS
10039 && pow2p_hwi (constop + 1))
10041 rtx o0, o1;
10043 o0 = simplify_and_const_int (NULL_RTX, mode, XEXP (varop, 0), constop);
10044 o1 = simplify_and_const_int (NULL_RTX, mode, XEXP (varop, 1), constop);
10045 if (o0 == const0_rtx)
10046 return o1;
10047 if (o1 == const0_rtx)
10048 return o0;
10051 /* Make a SUBREG if necessary. If we can't make it, fail. */
10052 varop = gen_lowpart (mode, varop);
10053 if (varop == NULL_RTX || GET_CODE (varop) == CLOBBER)
10054 return NULL_RTX;
10056 /* If we are only masking insignificant bits, return VAROP. */
10057 if (constop == nonzero)
10058 return varop;
10060 if (varop == orig_varop && constop == orig_constop)
10061 return NULL_RTX;
10063 /* Otherwise, return an AND. */
10064 return simplify_gen_binary (AND, mode, varop, gen_int_mode (constop, mode));
10068 /* We have X, a logical `and' of VAROP with the constant CONSTOP, to be done
10069 in MODE.
10071 Return an equivalent form, if different from X. Otherwise, return X. If
10072 X is zero, we are to always construct the equivalent form. */
10074 static rtx
10075 simplify_and_const_int (rtx x, scalar_int_mode mode, rtx varop,
10076 unsigned HOST_WIDE_INT constop)
10078 rtx tem = simplify_and_const_int_1 (mode, varop, constop);
10079 if (tem)
10080 return tem;
10082 if (!x)
10083 x = simplify_gen_binary (AND, GET_MODE (varop), varop,
10084 gen_int_mode (constop, mode));
10085 if (GET_MODE (x) != mode)
10086 x = gen_lowpart (mode, x);
10087 return x;
10090 /* Given a REG X of mode XMODE, compute which bits in X can be nonzero.
10091 We don't care about bits outside of those defined in MODE.
10093 For most X this is simply GET_MODE_MASK (GET_MODE (MODE)), but if X is
10094 a shift, AND, or zero_extract, we can do better. */
10096 static rtx
10097 reg_nonzero_bits_for_combine (const_rtx x, scalar_int_mode xmode,
10098 scalar_int_mode mode,
10099 unsigned HOST_WIDE_INT *nonzero)
10101 rtx tem;
10102 reg_stat_type *rsp;
10104 /* If X is a register whose nonzero bits value is current, use it.
10105 Otherwise, if X is a register whose value we can find, use that
10106 value. Otherwise, use the previously-computed global nonzero bits
10107 for this register. */
10109 rsp = &reg_stat[REGNO (x)];
10110 if (rsp->last_set_value != 0
10111 && (rsp->last_set_mode == mode
10112 || (GET_MODE_CLASS (rsp->last_set_mode) == MODE_INT
10113 && GET_MODE_CLASS (mode) == MODE_INT))
10114 && ((rsp->last_set_label >= label_tick_ebb_start
10115 && rsp->last_set_label < label_tick)
10116 || (rsp->last_set_label == label_tick
10117 && DF_INSN_LUID (rsp->last_set) < subst_low_luid)
10118 || (REGNO (x) >= FIRST_PSEUDO_REGISTER
10119 && REGNO (x) < reg_n_sets_max
10120 && REG_N_SETS (REGNO (x)) == 1
10121 && !REGNO_REG_SET_P
10122 (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb),
10123 REGNO (x)))))
10125 /* Note that, even if the precision of last_set_mode is lower than that
10126 of mode, record_value_for_reg invoked nonzero_bits on the register
10127 with nonzero_bits_mode (because last_set_mode is necessarily integral
10128 and HWI_COMPUTABLE_MODE_P in this case) so bits in nonzero_bits_mode
10129 are all valid, hence in mode too since nonzero_bits_mode is defined
10130 to the largest HWI_COMPUTABLE_MODE_P mode. */
10131 *nonzero &= rsp->last_set_nonzero_bits;
10132 return NULL;
10135 tem = get_last_value (x);
10136 if (tem)
10138 if (SHORT_IMMEDIATES_SIGN_EXTEND)
10139 tem = sign_extend_short_imm (tem, xmode, GET_MODE_PRECISION (mode));
10141 return tem;
10144 if (nonzero_sign_valid && rsp->nonzero_bits)
10146 unsigned HOST_WIDE_INT mask = rsp->nonzero_bits;
10148 if (GET_MODE_PRECISION (xmode) < GET_MODE_PRECISION (mode))
10149 /* We don't know anything about the upper bits. */
10150 mask |= GET_MODE_MASK (mode) ^ GET_MODE_MASK (xmode);
10152 *nonzero &= mask;
10155 return NULL;
10158 /* Given a reg X of mode XMODE, return the number of bits at the high-order
10159 end of X that are known to be equal to the sign bit. X will be used
10160 in mode MODE; the returned value will always be between 1 and the
10161 number of bits in MODE. */
10163 static rtx
10164 reg_num_sign_bit_copies_for_combine (const_rtx x, scalar_int_mode xmode,
10165 scalar_int_mode mode,
10166 unsigned int *result)
10168 rtx tem;
10169 reg_stat_type *rsp;
10171 rsp = &reg_stat[REGNO (x)];
10172 if (rsp->last_set_value != 0
10173 && rsp->last_set_mode == mode
10174 && ((rsp->last_set_label >= label_tick_ebb_start
10175 && rsp->last_set_label < label_tick)
10176 || (rsp->last_set_label == label_tick
10177 && DF_INSN_LUID (rsp->last_set) < subst_low_luid)
10178 || (REGNO (x) >= FIRST_PSEUDO_REGISTER
10179 && REGNO (x) < reg_n_sets_max
10180 && REG_N_SETS (REGNO (x)) == 1
10181 && !REGNO_REG_SET_P
10182 (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb),
10183 REGNO (x)))))
10185 *result = rsp->last_set_sign_bit_copies;
10186 return NULL;
10189 tem = get_last_value (x);
10190 if (tem != 0)
10191 return tem;
10193 if (nonzero_sign_valid && rsp->sign_bit_copies != 0
10194 && GET_MODE_PRECISION (xmode) == GET_MODE_PRECISION (mode))
10195 *result = rsp->sign_bit_copies;
10197 return NULL;
10200 /* Return the number of "extended" bits there are in X, when interpreted
10201 as a quantity in MODE whose signedness is indicated by UNSIGNEDP. For
10202 unsigned quantities, this is the number of high-order zero bits.
10203 For signed quantities, this is the number of copies of the sign bit
10204 minus 1. In both case, this function returns the number of "spare"
10205 bits. For example, if two quantities for which this function returns
10206 at least 1 are added, the addition is known not to overflow.
10208 This function will always return 0 unless called during combine, which
10209 implies that it must be called from a define_split. */
10211 unsigned int
10212 extended_count (const_rtx x, machine_mode mode, int unsignedp)
10214 if (nonzero_sign_valid == 0)
10215 return 0;
10217 scalar_int_mode int_mode;
10218 return (unsignedp
10219 ? (is_a <scalar_int_mode> (mode, &int_mode)
10220 && HWI_COMPUTABLE_MODE_P (int_mode)
10221 ? (unsigned int) (GET_MODE_PRECISION (int_mode) - 1
10222 - floor_log2 (nonzero_bits (x, int_mode)))
10223 : 0)
10224 : num_sign_bit_copies (x, mode) - 1);
10227 /* This function is called from `simplify_shift_const' to merge two
10228 outer operations. Specifically, we have already found that we need
10229 to perform operation *POP0 with constant *PCONST0 at the outermost
10230 position. We would now like to also perform OP1 with constant CONST1
10231 (with *POP0 being done last).
10233 Return 1 if we can do the operation and update *POP0 and *PCONST0 with
10234 the resulting operation. *PCOMP_P is set to 1 if we would need to
10235 complement the innermost operand, otherwise it is unchanged.
10237 MODE is the mode in which the operation will be done. No bits outside
10238 the width of this mode matter. It is assumed that the width of this mode
10239 is smaller than or equal to HOST_BITS_PER_WIDE_INT.
10241 If *POP0 or OP1 are UNKNOWN, it means no operation is required. Only NEG, PLUS,
10242 IOR, XOR, and AND are supported. We may set *POP0 to SET if the proper
10243 result is simply *PCONST0.
10245 If the resulting operation cannot be expressed as one operation, we
10246 return 0 and do not change *POP0, *PCONST0, and *PCOMP_P. */
10248 static int
10249 merge_outer_ops (enum rtx_code *pop0, HOST_WIDE_INT *pconst0, enum rtx_code op1, HOST_WIDE_INT const1, machine_mode mode, int *pcomp_p)
10251 enum rtx_code op0 = *pop0;
10252 HOST_WIDE_INT const0 = *pconst0;
10254 const0 &= GET_MODE_MASK (mode);
10255 const1 &= GET_MODE_MASK (mode);
10257 /* If OP0 is an AND, clear unimportant bits in CONST1. */
10258 if (op0 == AND)
10259 const1 &= const0;
10261 /* If OP0 or OP1 is UNKNOWN, this is easy. Similarly if they are the same or
10262 if OP0 is SET. */
10264 if (op1 == UNKNOWN || op0 == SET)
10265 return 1;
10267 else if (op0 == UNKNOWN)
10268 op0 = op1, const0 = const1;
10270 else if (op0 == op1)
10272 switch (op0)
10274 case AND:
10275 const0 &= const1;
10276 break;
10277 case IOR:
10278 const0 |= const1;
10279 break;
10280 case XOR:
10281 const0 ^= const1;
10282 break;
10283 case PLUS:
10284 const0 += const1;
10285 break;
10286 case NEG:
10287 op0 = UNKNOWN;
10288 break;
10289 default:
10290 break;
10294 /* Otherwise, if either is a PLUS or NEG, we can't do anything. */
10295 else if (op0 == PLUS || op1 == PLUS || op0 == NEG || op1 == NEG)
10296 return 0;
10298 /* If the two constants aren't the same, we can't do anything. The
10299 remaining six cases can all be done. */
10300 else if (const0 != const1)
10301 return 0;
10303 else
10304 switch (op0)
10306 case IOR:
10307 if (op1 == AND)
10308 /* (a & b) | b == b */
10309 op0 = SET;
10310 else /* op1 == XOR */
10311 /* (a ^ b) | b == a | b */
10313 break;
10315 case XOR:
10316 if (op1 == AND)
10317 /* (a & b) ^ b == (~a) & b */
10318 op0 = AND, *pcomp_p = 1;
10319 else /* op1 == IOR */
10320 /* (a | b) ^ b == a & ~b */
10321 op0 = AND, const0 = ~const0;
10322 break;
10324 case AND:
10325 if (op1 == IOR)
10326 /* (a | b) & b == b */
10327 op0 = SET;
10328 else /* op1 == XOR */
10329 /* (a ^ b) & b) == (~a) & b */
10330 *pcomp_p = 1;
10331 break;
10332 default:
10333 break;
10336 /* Check for NO-OP cases. */
10337 const0 &= GET_MODE_MASK (mode);
10338 if (const0 == 0
10339 && (op0 == IOR || op0 == XOR || op0 == PLUS))
10340 op0 = UNKNOWN;
10341 else if (const0 == 0 && op0 == AND)
10342 op0 = SET;
10343 else if ((unsigned HOST_WIDE_INT) const0 == GET_MODE_MASK (mode)
10344 && op0 == AND)
10345 op0 = UNKNOWN;
10347 *pop0 = op0;
10349 /* ??? Slightly redundant with the above mask, but not entirely.
10350 Moving this above means we'd have to sign-extend the mode mask
10351 for the final test. */
10352 if (op0 != UNKNOWN && op0 != NEG)
10353 *pconst0 = trunc_int_for_mode (const0, mode);
10355 return 1;
10358 /* A helper to simplify_shift_const_1 to determine the mode we can perform
10359 the shift in. The original shift operation CODE is performed on OP in
10360 ORIG_MODE. Return the wider mode MODE if we can perform the operation
10361 in that mode. Return ORIG_MODE otherwise. We can also assume that the
10362 result of the shift is subject to operation OUTER_CODE with operand
10363 OUTER_CONST. */
10365 static scalar_int_mode
10366 try_widen_shift_mode (enum rtx_code code, rtx op, int count,
10367 scalar_int_mode orig_mode, scalar_int_mode mode,
10368 enum rtx_code outer_code, HOST_WIDE_INT outer_const)
10370 gcc_assert (GET_MODE_PRECISION (mode) > GET_MODE_PRECISION (orig_mode));
10372 /* In general we can't perform in wider mode for right shift and rotate. */
10373 switch (code)
10375 case ASHIFTRT:
10376 /* We can still widen if the bits brought in from the left are identical
10377 to the sign bit of ORIG_MODE. */
10378 if (num_sign_bit_copies (op, mode)
10379 > (unsigned) (GET_MODE_PRECISION (mode)
10380 - GET_MODE_PRECISION (orig_mode)))
10381 return mode;
10382 return orig_mode;
10384 case LSHIFTRT:
10385 /* Similarly here but with zero bits. */
10386 if (HWI_COMPUTABLE_MODE_P (mode)
10387 && (nonzero_bits (op, mode) & ~GET_MODE_MASK (orig_mode)) == 0)
10388 return mode;
10390 /* We can also widen if the bits brought in will be masked off. This
10391 operation is performed in ORIG_MODE. */
10392 if (outer_code == AND)
10394 int care_bits = low_bitmask_len (orig_mode, outer_const);
10396 if (care_bits >= 0
10397 && GET_MODE_PRECISION (orig_mode) - care_bits >= count)
10398 return mode;
10400 /* fall through */
10402 case ROTATE:
10403 return orig_mode;
10405 case ROTATERT:
10406 gcc_unreachable ();
10408 default:
10409 return mode;
10413 /* Simplify a shift of VAROP by ORIG_COUNT bits. CODE says what kind
10414 of shift. The result of the shift is RESULT_MODE. Return NULL_RTX
10415 if we cannot simplify it. Otherwise, return a simplified value.
10417 The shift is normally computed in the widest mode we find in VAROP, as
10418 long as it isn't a different number of words than RESULT_MODE. Exceptions
10419 are ASHIFTRT and ROTATE, which are always done in their original mode. */
10421 static rtx
10422 simplify_shift_const_1 (enum rtx_code code, machine_mode result_mode,
10423 rtx varop, int orig_count)
10425 enum rtx_code orig_code = code;
10426 rtx orig_varop = varop;
10427 int count;
10428 machine_mode mode = result_mode;
10429 machine_mode shift_mode;
10430 scalar_int_mode tmode, inner_mode, int_mode, int_varop_mode, int_result_mode;
10431 unsigned int mode_words
10432 = (GET_MODE_SIZE (mode) + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD;
10433 /* We form (outer_op (code varop count) (outer_const)). */
10434 enum rtx_code outer_op = UNKNOWN;
10435 HOST_WIDE_INT outer_const = 0;
10436 int complement_p = 0;
10437 rtx new_rtx, x;
10439 /* Make sure and truncate the "natural" shift on the way in. We don't
10440 want to do this inside the loop as it makes it more difficult to
10441 combine shifts. */
10442 if (SHIFT_COUNT_TRUNCATED)
10443 orig_count &= GET_MODE_UNIT_BITSIZE (mode) - 1;
10445 /* If we were given an invalid count, don't do anything except exactly
10446 what was requested. */
10448 if (orig_count < 0 || orig_count >= (int) GET_MODE_UNIT_PRECISION (mode))
10449 return NULL_RTX;
10451 count = orig_count;
10453 /* Unless one of the branches of the `if' in this loop does a `continue',
10454 we will `break' the loop after the `if'. */
10456 while (count != 0)
10458 /* If we have an operand of (clobber (const_int 0)), fail. */
10459 if (GET_CODE (varop) == CLOBBER)
10460 return NULL_RTX;
10462 /* Convert ROTATERT to ROTATE. */
10463 if (code == ROTATERT)
10465 unsigned int bitsize = GET_MODE_UNIT_PRECISION (result_mode);
10466 code = ROTATE;
10467 count = bitsize - count;
10470 shift_mode = result_mode;
10471 if (shift_mode != mode)
10473 /* We only change the modes of scalar shifts. */
10474 int_mode = as_a <scalar_int_mode> (mode);
10475 int_result_mode = as_a <scalar_int_mode> (result_mode);
10476 shift_mode = try_widen_shift_mode (code, varop, count,
10477 int_result_mode, int_mode,
10478 outer_op, outer_const);
10481 scalar_int_mode shift_unit_mode
10482 = as_a <scalar_int_mode> (GET_MODE_INNER (shift_mode));
10484 /* Handle cases where the count is greater than the size of the mode
10485 minus 1. For ASHIFT, use the size minus one as the count (this can
10486 occur when simplifying (lshiftrt (ashiftrt ..))). For rotates,
10487 take the count modulo the size. For other shifts, the result is
10488 zero.
10490 Since these shifts are being produced by the compiler by combining
10491 multiple operations, each of which are defined, we know what the
10492 result is supposed to be. */
10494 if (count > (GET_MODE_PRECISION (shift_unit_mode) - 1))
10496 if (code == ASHIFTRT)
10497 count = GET_MODE_PRECISION (shift_unit_mode) - 1;
10498 else if (code == ROTATE || code == ROTATERT)
10499 count %= GET_MODE_PRECISION (shift_unit_mode);
10500 else
10502 /* We can't simply return zero because there may be an
10503 outer op. */
10504 varop = const0_rtx;
10505 count = 0;
10506 break;
10510 /* If we discovered we had to complement VAROP, leave. Making a NOT
10511 here would cause an infinite loop. */
10512 if (complement_p)
10513 break;
10515 if (shift_mode == shift_unit_mode)
10517 /* An arithmetic right shift of a quantity known to be -1 or 0
10518 is a no-op. */
10519 if (code == ASHIFTRT
10520 && (num_sign_bit_copies (varop, shift_unit_mode)
10521 == GET_MODE_PRECISION (shift_unit_mode)))
10523 count = 0;
10524 break;
10527 /* If we are doing an arithmetic right shift and discarding all but
10528 the sign bit copies, this is equivalent to doing a shift by the
10529 bitsize minus one. Convert it into that shift because it will
10530 often allow other simplifications. */
10532 if (code == ASHIFTRT
10533 && (count + num_sign_bit_copies (varop, shift_unit_mode)
10534 >= GET_MODE_PRECISION (shift_unit_mode)))
10535 count = GET_MODE_PRECISION (shift_unit_mode) - 1;
10537 /* We simplify the tests below and elsewhere by converting
10538 ASHIFTRT to LSHIFTRT if we know the sign bit is clear.
10539 `make_compound_operation' will convert it to an ASHIFTRT for
10540 those machines (such as VAX) that don't have an LSHIFTRT. */
10541 if (code == ASHIFTRT
10542 && HWI_COMPUTABLE_MODE_P (shift_unit_mode)
10543 && val_signbit_known_clear_p (shift_unit_mode,
10544 nonzero_bits (varop,
10545 shift_unit_mode)))
10546 code = LSHIFTRT;
10548 if (((code == LSHIFTRT
10549 && HWI_COMPUTABLE_MODE_P (shift_unit_mode)
10550 && !(nonzero_bits (varop, shift_unit_mode) >> count))
10551 || (code == ASHIFT
10552 && HWI_COMPUTABLE_MODE_P (shift_unit_mode)
10553 && !((nonzero_bits (varop, shift_unit_mode) << count)
10554 & GET_MODE_MASK (shift_unit_mode))))
10555 && !side_effects_p (varop))
10556 varop = const0_rtx;
10559 switch (GET_CODE (varop))
10561 case SIGN_EXTEND:
10562 case ZERO_EXTEND:
10563 case SIGN_EXTRACT:
10564 case ZERO_EXTRACT:
10565 new_rtx = expand_compound_operation (varop);
10566 if (new_rtx != varop)
10568 varop = new_rtx;
10569 continue;
10571 break;
10573 case MEM:
10574 /* The following rules apply only to scalars. */
10575 if (shift_mode != shift_unit_mode)
10576 break;
10577 int_mode = as_a <scalar_int_mode> (mode);
10579 /* If we have (xshiftrt (mem ...) C) and C is MODE_WIDTH
10580 minus the width of a smaller mode, we can do this with a
10581 SIGN_EXTEND or ZERO_EXTEND from the narrower memory location. */
10582 if ((code == ASHIFTRT || code == LSHIFTRT)
10583 && ! mode_dependent_address_p (XEXP (varop, 0),
10584 MEM_ADDR_SPACE (varop))
10585 && ! MEM_VOLATILE_P (varop)
10586 && (int_mode_for_size (GET_MODE_BITSIZE (int_mode) - count, 1)
10587 .exists (&tmode)))
10589 new_rtx = adjust_address_nv (varop, tmode,
10590 BYTES_BIG_ENDIAN ? 0
10591 : count / BITS_PER_UNIT);
10593 varop = gen_rtx_fmt_e (code == ASHIFTRT ? SIGN_EXTEND
10594 : ZERO_EXTEND, int_mode, new_rtx);
10595 count = 0;
10596 continue;
10598 break;
10600 case SUBREG:
10601 /* The following rules apply only to scalars. */
10602 if (shift_mode != shift_unit_mode)
10603 break;
10604 int_mode = as_a <scalar_int_mode> (mode);
10605 int_varop_mode = as_a <scalar_int_mode> (GET_MODE (varop));
10607 /* If VAROP is a SUBREG, strip it as long as the inner operand has
10608 the same number of words as what we've seen so far. Then store
10609 the widest mode in MODE. */
10610 if (subreg_lowpart_p (varop)
10611 && is_int_mode (GET_MODE (SUBREG_REG (varop)), &inner_mode)
10612 && GET_MODE_SIZE (inner_mode) > GET_MODE_SIZE (int_varop_mode)
10613 && (unsigned int) ((GET_MODE_SIZE (inner_mode)
10614 + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)
10615 == mode_words
10616 && GET_MODE_CLASS (int_varop_mode) == MODE_INT)
10618 varop = SUBREG_REG (varop);
10619 if (GET_MODE_SIZE (inner_mode) > GET_MODE_SIZE (int_mode))
10620 mode = inner_mode;
10621 continue;
10623 break;
10625 case MULT:
10626 /* Some machines use MULT instead of ASHIFT because MULT
10627 is cheaper. But it is still better on those machines to
10628 merge two shifts into one. */
10629 if (CONST_INT_P (XEXP (varop, 1))
10630 && exact_log2 (UINTVAL (XEXP (varop, 1))) >= 0)
10632 varop
10633 = simplify_gen_binary (ASHIFT, GET_MODE (varop),
10634 XEXP (varop, 0),
10635 GEN_INT (exact_log2 (
10636 UINTVAL (XEXP (varop, 1)))));
10637 continue;
10639 break;
10641 case UDIV:
10642 /* Similar, for when divides are cheaper. */
10643 if (CONST_INT_P (XEXP (varop, 1))
10644 && exact_log2 (UINTVAL (XEXP (varop, 1))) >= 0)
10646 varop
10647 = simplify_gen_binary (LSHIFTRT, GET_MODE (varop),
10648 XEXP (varop, 0),
10649 GEN_INT (exact_log2 (
10650 UINTVAL (XEXP (varop, 1)))));
10651 continue;
10653 break;
10655 case ASHIFTRT:
10656 /* If we are extracting just the sign bit of an arithmetic
10657 right shift, that shift is not needed. However, the sign
10658 bit of a wider mode may be different from what would be
10659 interpreted as the sign bit in a narrower mode, so, if
10660 the result is narrower, don't discard the shift. */
10661 if (code == LSHIFTRT
10662 && count == (GET_MODE_UNIT_BITSIZE (result_mode) - 1)
10663 && (GET_MODE_UNIT_BITSIZE (result_mode)
10664 >= GET_MODE_UNIT_BITSIZE (GET_MODE (varop))))
10666 varop = XEXP (varop, 0);
10667 continue;
10670 /* fall through */
10672 case LSHIFTRT:
10673 case ASHIFT:
10674 case ROTATE:
10675 /* The following rules apply only to scalars. */
10676 if (shift_mode != shift_unit_mode)
10677 break;
10678 int_mode = as_a <scalar_int_mode> (mode);
10679 int_varop_mode = as_a <scalar_int_mode> (GET_MODE (varop));
10680 int_result_mode = as_a <scalar_int_mode> (result_mode);
10682 /* Here we have two nested shifts. The result is usually the
10683 AND of a new shift with a mask. We compute the result below. */
10684 if (CONST_INT_P (XEXP (varop, 1))
10685 && INTVAL (XEXP (varop, 1)) >= 0
10686 && INTVAL (XEXP (varop, 1)) < GET_MODE_PRECISION (int_varop_mode)
10687 && HWI_COMPUTABLE_MODE_P (int_result_mode)
10688 && HWI_COMPUTABLE_MODE_P (int_mode))
10690 enum rtx_code first_code = GET_CODE (varop);
10691 unsigned int first_count = INTVAL (XEXP (varop, 1));
10692 unsigned HOST_WIDE_INT mask;
10693 rtx mask_rtx;
10695 /* We have one common special case. We can't do any merging if
10696 the inner code is an ASHIFTRT of a smaller mode. However, if
10697 we have (ashift:M1 (subreg:M1 (ashiftrt:M2 FOO C1) 0) C2)
10698 with C2 == GET_MODE_BITSIZE (M1) - GET_MODE_BITSIZE (M2),
10699 we can convert it to
10700 (ashiftrt:M1 (ashift:M1 (and:M1 (subreg:M1 FOO 0) C3) C2) C1).
10701 This simplifies certain SIGN_EXTEND operations. */
10702 if (code == ASHIFT && first_code == ASHIFTRT
10703 && count == (GET_MODE_PRECISION (int_result_mode)
10704 - GET_MODE_PRECISION (int_varop_mode)))
10706 /* C3 has the low-order C1 bits zero. */
10708 mask = GET_MODE_MASK (int_mode)
10709 & ~((HOST_WIDE_INT_1U << first_count) - 1);
10711 varop = simplify_and_const_int (NULL_RTX, int_result_mode,
10712 XEXP (varop, 0), mask);
10713 varop = simplify_shift_const (NULL_RTX, ASHIFT,
10714 int_result_mode, varop, count);
10715 count = first_count;
10716 code = ASHIFTRT;
10717 continue;
10720 /* If this was (ashiftrt (ashift foo C1) C2) and FOO has more
10721 than C1 high-order bits equal to the sign bit, we can convert
10722 this to either an ASHIFT or an ASHIFTRT depending on the
10723 two counts.
10725 We cannot do this if VAROP's mode is not SHIFT_UNIT_MODE. */
10727 if (code == ASHIFTRT && first_code == ASHIFT
10728 && int_varop_mode == shift_unit_mode
10729 && (num_sign_bit_copies (XEXP (varop, 0), shift_unit_mode)
10730 > first_count))
10732 varop = XEXP (varop, 0);
10733 count -= first_count;
10734 if (count < 0)
10736 count = -count;
10737 code = ASHIFT;
10740 continue;
10743 /* There are some cases we can't do. If CODE is ASHIFTRT,
10744 we can only do this if FIRST_CODE is also ASHIFTRT.
10746 We can't do the case when CODE is ROTATE and FIRST_CODE is
10747 ASHIFTRT.
10749 If the mode of this shift is not the mode of the outer shift,
10750 we can't do this if either shift is a right shift or ROTATE.
10752 Finally, we can't do any of these if the mode is too wide
10753 unless the codes are the same.
10755 Handle the case where the shift codes are the same
10756 first. */
10758 if (code == first_code)
10760 if (int_varop_mode != int_result_mode
10761 && (code == ASHIFTRT || code == LSHIFTRT
10762 || code == ROTATE))
10763 break;
10765 count += first_count;
10766 varop = XEXP (varop, 0);
10767 continue;
10770 if (code == ASHIFTRT
10771 || (code == ROTATE && first_code == ASHIFTRT)
10772 || GET_MODE_PRECISION (int_mode) > HOST_BITS_PER_WIDE_INT
10773 || (int_varop_mode != int_result_mode
10774 && (first_code == ASHIFTRT || first_code == LSHIFTRT
10775 || first_code == ROTATE
10776 || code == ROTATE)))
10777 break;
10779 /* To compute the mask to apply after the shift, shift the
10780 nonzero bits of the inner shift the same way the
10781 outer shift will. */
10783 mask_rtx = gen_int_mode (nonzero_bits (varop, int_varop_mode),
10784 int_result_mode);
10786 mask_rtx
10787 = simplify_const_binary_operation (code, int_result_mode,
10788 mask_rtx, GEN_INT (count));
10790 /* Give up if we can't compute an outer operation to use. */
10791 if (mask_rtx == 0
10792 || !CONST_INT_P (mask_rtx)
10793 || ! merge_outer_ops (&outer_op, &outer_const, AND,
10794 INTVAL (mask_rtx),
10795 int_result_mode, &complement_p))
10796 break;
10798 /* If the shifts are in the same direction, we add the
10799 counts. Otherwise, we subtract them. */
10800 if ((code == ASHIFTRT || code == LSHIFTRT)
10801 == (first_code == ASHIFTRT || first_code == LSHIFTRT))
10802 count += first_count;
10803 else
10804 count -= first_count;
10806 /* If COUNT is positive, the new shift is usually CODE,
10807 except for the two exceptions below, in which case it is
10808 FIRST_CODE. If the count is negative, FIRST_CODE should
10809 always be used */
10810 if (count > 0
10811 && ((first_code == ROTATE && code == ASHIFT)
10812 || (first_code == ASHIFTRT && code == LSHIFTRT)))
10813 code = first_code;
10814 else if (count < 0)
10815 code = first_code, count = -count;
10817 varop = XEXP (varop, 0);
10818 continue;
10821 /* If we have (A << B << C) for any shift, we can convert this to
10822 (A << C << B). This wins if A is a constant. Only try this if
10823 B is not a constant. */
10825 else if (GET_CODE (varop) == code
10826 && CONST_INT_P (XEXP (varop, 0))
10827 && !CONST_INT_P (XEXP (varop, 1)))
10829 /* For ((unsigned) (cstULL >> count)) >> cst2 we have to make
10830 sure the result will be masked. See PR70222. */
10831 if (code == LSHIFTRT
10832 && int_mode != int_result_mode
10833 && !merge_outer_ops (&outer_op, &outer_const, AND,
10834 GET_MODE_MASK (int_result_mode)
10835 >> orig_count, int_result_mode,
10836 &complement_p))
10837 break;
10838 /* For ((int) (cstLL >> count)) >> cst2 just give up. Queuing
10839 up outer sign extension (often left and right shift) is
10840 hardly more efficient than the original. See PR70429. */
10841 if (code == ASHIFTRT && int_mode != int_result_mode)
10842 break;
10844 rtx new_rtx = simplify_const_binary_operation (code, int_mode,
10845 XEXP (varop, 0),
10846 GEN_INT (count));
10847 varop = gen_rtx_fmt_ee (code, int_mode, new_rtx, XEXP (varop, 1));
10848 count = 0;
10849 continue;
10851 break;
10853 case NOT:
10854 /* The following rules apply only to scalars. */
10855 if (shift_mode != shift_unit_mode)
10856 break;
10858 /* Make this fit the case below. */
10859 varop = gen_rtx_XOR (mode, XEXP (varop, 0), constm1_rtx);
10860 continue;
10862 case IOR:
10863 case AND:
10864 case XOR:
10865 /* The following rules apply only to scalars. */
10866 if (shift_mode != shift_unit_mode)
10867 break;
10868 int_varop_mode = as_a <scalar_int_mode> (GET_MODE (varop));
10869 int_result_mode = as_a <scalar_int_mode> (result_mode);
10871 /* If we have (xshiftrt (ior (plus X (const_int -1)) X) C)
10872 with C the size of VAROP - 1 and the shift is logical if
10873 STORE_FLAG_VALUE is 1 and arithmetic if STORE_FLAG_VALUE is -1,
10874 we have an (le X 0) operation. If we have an arithmetic shift
10875 and STORE_FLAG_VALUE is 1 or we have a logical shift with
10876 STORE_FLAG_VALUE of -1, we have a (neg (le X 0)) operation. */
10878 if (GET_CODE (varop) == IOR && GET_CODE (XEXP (varop, 0)) == PLUS
10879 && XEXP (XEXP (varop, 0), 1) == constm1_rtx
10880 && (STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
10881 && (code == LSHIFTRT || code == ASHIFTRT)
10882 && count == (GET_MODE_PRECISION (int_varop_mode) - 1)
10883 && rtx_equal_p (XEXP (XEXP (varop, 0), 0), XEXP (varop, 1)))
10885 count = 0;
10886 varop = gen_rtx_LE (int_varop_mode, XEXP (varop, 1),
10887 const0_rtx);
10889 if (STORE_FLAG_VALUE == 1 ? code == ASHIFTRT : code == LSHIFTRT)
10890 varop = gen_rtx_NEG (int_varop_mode, varop);
10892 continue;
10895 /* If we have (shift (logical)), move the logical to the outside
10896 to allow it to possibly combine with another logical and the
10897 shift to combine with another shift. This also canonicalizes to
10898 what a ZERO_EXTRACT looks like. Also, some machines have
10899 (and (shift)) insns. */
10901 if (CONST_INT_P (XEXP (varop, 1))
10902 /* We can't do this if we have (ashiftrt (xor)) and the
10903 constant has its sign bit set in shift_unit_mode with
10904 shift_unit_mode wider than result_mode. */
10905 && !(code == ASHIFTRT && GET_CODE (varop) == XOR
10906 && int_result_mode != shift_unit_mode
10907 && 0 > trunc_int_for_mode (INTVAL (XEXP (varop, 1)),
10908 shift_unit_mode))
10909 && (new_rtx = simplify_const_binary_operation
10910 (code, int_result_mode,
10911 gen_int_mode (INTVAL (XEXP (varop, 1)), int_result_mode),
10912 GEN_INT (count))) != 0
10913 && CONST_INT_P (new_rtx)
10914 && merge_outer_ops (&outer_op, &outer_const, GET_CODE (varop),
10915 INTVAL (new_rtx), int_result_mode,
10916 &complement_p))
10918 varop = XEXP (varop, 0);
10919 continue;
10922 /* If we can't do that, try to simplify the shift in each arm of the
10923 logical expression, make a new logical expression, and apply
10924 the inverse distributive law. This also can't be done for
10925 (ashiftrt (xor)) where we've widened the shift and the constant
10926 changes the sign bit. */
10927 if (CONST_INT_P (XEXP (varop, 1))
10928 && !(code == ASHIFTRT && GET_CODE (varop) == XOR
10929 && int_result_mode != shift_unit_mode
10930 && 0 > trunc_int_for_mode (INTVAL (XEXP (varop, 1)),
10931 shift_unit_mode)))
10933 rtx lhs = simplify_shift_const (NULL_RTX, code, shift_unit_mode,
10934 XEXP (varop, 0), count);
10935 rtx rhs = simplify_shift_const (NULL_RTX, code, shift_unit_mode,
10936 XEXP (varop, 1), count);
10938 varop = simplify_gen_binary (GET_CODE (varop), shift_unit_mode,
10939 lhs, rhs);
10940 varop = apply_distributive_law (varop);
10942 count = 0;
10943 continue;
10945 break;
10947 case EQ:
10948 /* The following rules apply only to scalars. */
10949 if (shift_mode != shift_unit_mode)
10950 break;
10951 int_result_mode = as_a <scalar_int_mode> (result_mode);
10953 /* Convert (lshiftrt (eq FOO 0) C) to (xor FOO 1) if STORE_FLAG_VALUE
10954 says that the sign bit can be tested, FOO has mode MODE, C is
10955 GET_MODE_PRECISION (MODE) - 1, and FOO has only its low-order bit
10956 that may be nonzero. */
10957 if (code == LSHIFTRT
10958 && XEXP (varop, 1) == const0_rtx
10959 && GET_MODE (XEXP (varop, 0)) == int_result_mode
10960 && count == (GET_MODE_PRECISION (int_result_mode) - 1)
10961 && HWI_COMPUTABLE_MODE_P (int_result_mode)
10962 && STORE_FLAG_VALUE == -1
10963 && nonzero_bits (XEXP (varop, 0), int_result_mode) == 1
10964 && merge_outer_ops (&outer_op, &outer_const, XOR, 1,
10965 int_result_mode, &complement_p))
10967 varop = XEXP (varop, 0);
10968 count = 0;
10969 continue;
10971 break;
10973 case NEG:
10974 /* The following rules apply only to scalars. */
10975 if (shift_mode != shift_unit_mode)
10976 break;
10977 int_result_mode = as_a <scalar_int_mode> (result_mode);
10979 /* (lshiftrt (neg A) C) where A is either 0 or 1 and C is one less
10980 than the number of bits in the mode is equivalent to A. */
10981 if (code == LSHIFTRT
10982 && count == (GET_MODE_PRECISION (int_result_mode) - 1)
10983 && nonzero_bits (XEXP (varop, 0), int_result_mode) == 1)
10985 varop = XEXP (varop, 0);
10986 count = 0;
10987 continue;
10990 /* NEG commutes with ASHIFT since it is multiplication. Move the
10991 NEG outside to allow shifts to combine. */
10992 if (code == ASHIFT
10993 && merge_outer_ops (&outer_op, &outer_const, NEG, 0,
10994 int_result_mode, &complement_p))
10996 varop = XEXP (varop, 0);
10997 continue;
10999 break;
11001 case PLUS:
11002 /* The following rules apply only to scalars. */
11003 if (shift_mode != shift_unit_mode)
11004 break;
11005 int_result_mode = as_a <scalar_int_mode> (result_mode);
11007 /* (lshiftrt (plus A -1) C) where A is either 0 or 1 and C
11008 is one less than the number of bits in the mode is
11009 equivalent to (xor A 1). */
11010 if (code == LSHIFTRT
11011 && count == (GET_MODE_PRECISION (int_result_mode) - 1)
11012 && XEXP (varop, 1) == constm1_rtx
11013 && nonzero_bits (XEXP (varop, 0), int_result_mode) == 1
11014 && merge_outer_ops (&outer_op, &outer_const, XOR, 1,
11015 int_result_mode, &complement_p))
11017 count = 0;
11018 varop = XEXP (varop, 0);
11019 continue;
11022 /* If we have (xshiftrt (plus FOO BAR) C), and the only bits
11023 that might be nonzero in BAR are those being shifted out and those
11024 bits are known zero in FOO, we can replace the PLUS with FOO.
11025 Similarly in the other operand order. This code occurs when
11026 we are computing the size of a variable-size array. */
11028 if ((code == ASHIFTRT || code == LSHIFTRT)
11029 && count < HOST_BITS_PER_WIDE_INT
11030 && nonzero_bits (XEXP (varop, 1), int_result_mode) >> count == 0
11031 && (nonzero_bits (XEXP (varop, 1), int_result_mode)
11032 & nonzero_bits (XEXP (varop, 0), int_result_mode)) == 0)
11034 varop = XEXP (varop, 0);
11035 continue;
11037 else if ((code == ASHIFTRT || code == LSHIFTRT)
11038 && count < HOST_BITS_PER_WIDE_INT
11039 && HWI_COMPUTABLE_MODE_P (int_result_mode)
11040 && 0 == (nonzero_bits (XEXP (varop, 0), int_result_mode)
11041 >> count)
11042 && 0 == (nonzero_bits (XEXP (varop, 0), int_result_mode)
11043 & nonzero_bits (XEXP (varop, 1), int_result_mode)))
11045 varop = XEXP (varop, 1);
11046 continue;
11049 /* (ashift (plus foo C) N) is (plus (ashift foo N) C'). */
11050 if (code == ASHIFT
11051 && CONST_INT_P (XEXP (varop, 1))
11052 && (new_rtx = simplify_const_binary_operation
11053 (ASHIFT, int_result_mode,
11054 gen_int_mode (INTVAL (XEXP (varop, 1)), int_result_mode),
11055 GEN_INT (count))) != 0
11056 && CONST_INT_P (new_rtx)
11057 && merge_outer_ops (&outer_op, &outer_const, PLUS,
11058 INTVAL (new_rtx), int_result_mode,
11059 &complement_p))
11061 varop = XEXP (varop, 0);
11062 continue;
11065 /* Check for 'PLUS signbit', which is the canonical form of 'XOR
11066 signbit', and attempt to change the PLUS to an XOR and move it to
11067 the outer operation as is done above in the AND/IOR/XOR case
11068 leg for shift(logical). See details in logical handling above
11069 for reasoning in doing so. */
11070 if (code == LSHIFTRT
11071 && CONST_INT_P (XEXP (varop, 1))
11072 && mode_signbit_p (int_result_mode, XEXP (varop, 1))
11073 && (new_rtx = simplify_const_binary_operation
11074 (code, int_result_mode,
11075 gen_int_mode (INTVAL (XEXP (varop, 1)), int_result_mode),
11076 GEN_INT (count))) != 0
11077 && CONST_INT_P (new_rtx)
11078 && merge_outer_ops (&outer_op, &outer_const, XOR,
11079 INTVAL (new_rtx), int_result_mode,
11080 &complement_p))
11082 varop = XEXP (varop, 0);
11083 continue;
11086 break;
11088 case MINUS:
11089 /* The following rules apply only to scalars. */
11090 if (shift_mode != shift_unit_mode)
11091 break;
11092 int_varop_mode = as_a <scalar_int_mode> (GET_MODE (varop));
11094 /* If we have (xshiftrt (minus (ashiftrt X C)) X) C)
11095 with C the size of VAROP - 1 and the shift is logical if
11096 STORE_FLAG_VALUE is 1 and arithmetic if STORE_FLAG_VALUE is -1,
11097 we have a (gt X 0) operation. If the shift is arithmetic with
11098 STORE_FLAG_VALUE of 1 or logical with STORE_FLAG_VALUE == -1,
11099 we have a (neg (gt X 0)) operation. */
11101 if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
11102 && GET_CODE (XEXP (varop, 0)) == ASHIFTRT
11103 && count == (GET_MODE_PRECISION (int_varop_mode) - 1)
11104 && (code == LSHIFTRT || code == ASHIFTRT)
11105 && CONST_INT_P (XEXP (XEXP (varop, 0), 1))
11106 && INTVAL (XEXP (XEXP (varop, 0), 1)) == count
11107 && rtx_equal_p (XEXP (XEXP (varop, 0), 0), XEXP (varop, 1)))
11109 count = 0;
11110 varop = gen_rtx_GT (int_varop_mode, XEXP (varop, 1),
11111 const0_rtx);
11113 if (STORE_FLAG_VALUE == 1 ? code == ASHIFTRT : code == LSHIFTRT)
11114 varop = gen_rtx_NEG (int_varop_mode, varop);
11116 continue;
11118 break;
11120 case TRUNCATE:
11121 /* Change (lshiftrt (truncate (lshiftrt))) to (truncate (lshiftrt))
11122 if the truncate does not affect the value. */
11123 if (code == LSHIFTRT
11124 && GET_CODE (XEXP (varop, 0)) == LSHIFTRT
11125 && CONST_INT_P (XEXP (XEXP (varop, 0), 1))
11126 && (INTVAL (XEXP (XEXP (varop, 0), 1))
11127 >= (GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (varop, 0)))
11128 - GET_MODE_UNIT_PRECISION (GET_MODE (varop)))))
11130 rtx varop_inner = XEXP (varop, 0);
11132 varop_inner
11133 = gen_rtx_LSHIFTRT (GET_MODE (varop_inner),
11134 XEXP (varop_inner, 0),
11135 GEN_INT
11136 (count + INTVAL (XEXP (varop_inner, 1))));
11137 varop = gen_rtx_TRUNCATE (GET_MODE (varop), varop_inner);
11138 count = 0;
11139 continue;
11141 break;
11143 default:
11144 break;
11147 break;
11150 shift_mode = result_mode;
11151 if (shift_mode != mode)
11153 /* We only change the modes of scalar shifts. */
11154 int_mode = as_a <scalar_int_mode> (mode);
11155 int_result_mode = as_a <scalar_int_mode> (result_mode);
11156 shift_mode = try_widen_shift_mode (code, varop, count, int_result_mode,
11157 int_mode, outer_op, outer_const);
11160 /* We have now finished analyzing the shift. The result should be
11161 a shift of type CODE with SHIFT_MODE shifting VAROP COUNT places. If
11162 OUTER_OP is non-UNKNOWN, it is an operation that needs to be applied
11163 to the result of the shift. OUTER_CONST is the relevant constant,
11164 but we must turn off all bits turned off in the shift. */
11166 if (outer_op == UNKNOWN
11167 && orig_code == code && orig_count == count
11168 && varop == orig_varop
11169 && shift_mode == GET_MODE (varop))
11170 return NULL_RTX;
11172 /* Make a SUBREG if necessary. If we can't make it, fail. */
11173 varop = gen_lowpart (shift_mode, varop);
11174 if (varop == NULL_RTX || GET_CODE (varop) == CLOBBER)
11175 return NULL_RTX;
11177 /* If we have an outer operation and we just made a shift, it is
11178 possible that we could have simplified the shift were it not
11179 for the outer operation. So try to do the simplification
11180 recursively. */
11182 if (outer_op != UNKNOWN)
11183 x = simplify_shift_const_1 (code, shift_mode, varop, count);
11184 else
11185 x = NULL_RTX;
11187 if (x == NULL_RTX)
11188 x = simplify_gen_binary (code, shift_mode, varop, GEN_INT (count));
11190 /* If we were doing an LSHIFTRT in a wider mode than it was originally,
11191 turn off all the bits that the shift would have turned off. */
11192 if (orig_code == LSHIFTRT && result_mode != shift_mode)
11193 /* We only change the modes of scalar shifts. */
11194 x = simplify_and_const_int (NULL_RTX, as_a <scalar_int_mode> (shift_mode),
11195 x, GET_MODE_MASK (result_mode) >> orig_count);
11197 /* Do the remainder of the processing in RESULT_MODE. */
11198 x = gen_lowpart_or_truncate (result_mode, x);
11200 /* If COMPLEMENT_P is set, we have to complement X before doing the outer
11201 operation. */
11202 if (complement_p)
11203 x = simplify_gen_unary (NOT, result_mode, x, result_mode);
11205 if (outer_op != UNKNOWN)
11207 int_result_mode = as_a <scalar_int_mode> (result_mode);
11209 if (GET_RTX_CLASS (outer_op) != RTX_UNARY
11210 && GET_MODE_PRECISION (int_result_mode) < HOST_BITS_PER_WIDE_INT)
11211 outer_const = trunc_int_for_mode (outer_const, int_result_mode);
11213 if (outer_op == AND)
11214 x = simplify_and_const_int (NULL_RTX, int_result_mode, x, outer_const);
11215 else if (outer_op == SET)
11217 /* This means that we have determined that the result is
11218 equivalent to a constant. This should be rare. */
11219 if (!side_effects_p (x))
11220 x = GEN_INT (outer_const);
11222 else if (GET_RTX_CLASS (outer_op) == RTX_UNARY)
11223 x = simplify_gen_unary (outer_op, int_result_mode, x, int_result_mode);
11224 else
11225 x = simplify_gen_binary (outer_op, int_result_mode, x,
11226 GEN_INT (outer_const));
11229 return x;
11232 /* Simplify a shift of VAROP by COUNT bits. CODE says what kind of shift.
11233 The result of the shift is RESULT_MODE. If we cannot simplify it,
11234 return X or, if it is NULL, synthesize the expression with
11235 simplify_gen_binary. Otherwise, return a simplified value.
11237 The shift is normally computed in the widest mode we find in VAROP, as
11238 long as it isn't a different number of words than RESULT_MODE. Exceptions
11239 are ASHIFTRT and ROTATE, which are always done in their original mode. */
11241 static rtx
11242 simplify_shift_const (rtx x, enum rtx_code code, machine_mode result_mode,
11243 rtx varop, int count)
11245 rtx tem = simplify_shift_const_1 (code, result_mode, varop, count);
11246 if (tem)
11247 return tem;
11249 if (!x)
11250 x = simplify_gen_binary (code, GET_MODE (varop), varop, GEN_INT (count));
11251 if (GET_MODE (x) != result_mode)
11252 x = gen_lowpart (result_mode, x);
11253 return x;
11257 /* A subroutine of recog_for_combine. See there for arguments and
11258 return value. */
11260 static int
11261 recog_for_combine_1 (rtx *pnewpat, rtx_insn *insn, rtx *pnotes)
11263 rtx pat = *pnewpat;
11264 rtx pat_without_clobbers;
11265 int insn_code_number;
11266 int num_clobbers_to_add = 0;
11267 int i;
11268 rtx notes = NULL_RTX;
11269 rtx old_notes, old_pat;
11270 int old_icode;
11272 /* If PAT is a PARALLEL, check to see if it contains the CLOBBER
11273 we use to indicate that something didn't match. If we find such a
11274 thing, force rejection. */
11275 if (GET_CODE (pat) == PARALLEL)
11276 for (i = XVECLEN (pat, 0) - 1; i >= 0; i--)
11277 if (GET_CODE (XVECEXP (pat, 0, i)) == CLOBBER
11278 && XEXP (XVECEXP (pat, 0, i), 0) == const0_rtx)
11279 return -1;
11281 old_pat = PATTERN (insn);
11282 old_notes = REG_NOTES (insn);
11283 PATTERN (insn) = pat;
11284 REG_NOTES (insn) = NULL_RTX;
11286 insn_code_number = recog (pat, insn, &num_clobbers_to_add);
11287 if (dump_file && (dump_flags & TDF_DETAILS))
11289 if (insn_code_number < 0)
11290 fputs ("Failed to match this instruction:\n", dump_file);
11291 else
11292 fputs ("Successfully matched this instruction:\n", dump_file);
11293 print_rtl_single (dump_file, pat);
11296 /* If it isn't, there is the possibility that we previously had an insn
11297 that clobbered some register as a side effect, but the combined
11298 insn doesn't need to do that. So try once more without the clobbers
11299 unless this represents an ASM insn. */
11301 if (insn_code_number < 0 && ! check_asm_operands (pat)
11302 && GET_CODE (pat) == PARALLEL)
11304 int pos;
11306 for (pos = 0, i = 0; i < XVECLEN (pat, 0); i++)
11307 if (GET_CODE (XVECEXP (pat, 0, i)) != CLOBBER)
11309 if (i != pos)
11310 SUBST (XVECEXP (pat, 0, pos), XVECEXP (pat, 0, i));
11311 pos++;
11314 SUBST_INT (XVECLEN (pat, 0), pos);
11316 if (pos == 1)
11317 pat = XVECEXP (pat, 0, 0);
11319 PATTERN (insn) = pat;
11320 insn_code_number = recog (pat, insn, &num_clobbers_to_add);
11321 if (dump_file && (dump_flags & TDF_DETAILS))
11323 if (insn_code_number < 0)
11324 fputs ("Failed to match this instruction:\n", dump_file);
11325 else
11326 fputs ("Successfully matched this instruction:\n", dump_file);
11327 print_rtl_single (dump_file, pat);
11331 pat_without_clobbers = pat;
11333 PATTERN (insn) = old_pat;
11334 REG_NOTES (insn) = old_notes;
11336 /* Recognize all noop sets, these will be killed by followup pass. */
11337 if (insn_code_number < 0 && GET_CODE (pat) == SET && set_noop_p (pat))
11338 insn_code_number = NOOP_MOVE_INSN_CODE, num_clobbers_to_add = 0;
11340 /* If we had any clobbers to add, make a new pattern than contains
11341 them. Then check to make sure that all of them are dead. */
11342 if (num_clobbers_to_add)
11344 rtx newpat = gen_rtx_PARALLEL (VOIDmode,
11345 rtvec_alloc (GET_CODE (pat) == PARALLEL
11346 ? (XVECLEN (pat, 0)
11347 + num_clobbers_to_add)
11348 : num_clobbers_to_add + 1));
11350 if (GET_CODE (pat) == PARALLEL)
11351 for (i = 0; i < XVECLEN (pat, 0); i++)
11352 XVECEXP (newpat, 0, i) = XVECEXP (pat, 0, i);
11353 else
11354 XVECEXP (newpat, 0, 0) = pat;
11356 add_clobbers (newpat, insn_code_number);
11358 for (i = XVECLEN (newpat, 0) - num_clobbers_to_add;
11359 i < XVECLEN (newpat, 0); i++)
11361 if (REG_P (XEXP (XVECEXP (newpat, 0, i), 0))
11362 && ! reg_dead_at_p (XEXP (XVECEXP (newpat, 0, i), 0), insn))
11363 return -1;
11364 if (GET_CODE (XEXP (XVECEXP (newpat, 0, i), 0)) != SCRATCH)
11366 gcc_assert (REG_P (XEXP (XVECEXP (newpat, 0, i), 0)));
11367 notes = alloc_reg_note (REG_UNUSED,
11368 XEXP (XVECEXP (newpat, 0, i), 0), notes);
11371 pat = newpat;
11374 if (insn_code_number >= 0
11375 && insn_code_number != NOOP_MOVE_INSN_CODE)
11377 old_pat = PATTERN (insn);
11378 old_notes = REG_NOTES (insn);
11379 old_icode = INSN_CODE (insn);
11380 PATTERN (insn) = pat;
11381 REG_NOTES (insn) = notes;
11382 INSN_CODE (insn) = insn_code_number;
11384 /* Allow targets to reject combined insn. */
11385 if (!targetm.legitimate_combined_insn (insn))
11387 if (dump_file && (dump_flags & TDF_DETAILS))
11388 fputs ("Instruction not appropriate for target.",
11389 dump_file);
11391 /* Callers expect recog_for_combine to strip
11392 clobbers from the pattern on failure. */
11393 pat = pat_without_clobbers;
11394 notes = NULL_RTX;
11396 insn_code_number = -1;
11399 PATTERN (insn) = old_pat;
11400 REG_NOTES (insn) = old_notes;
11401 INSN_CODE (insn) = old_icode;
11404 *pnewpat = pat;
11405 *pnotes = notes;
11407 return insn_code_number;
11410 /* Change every ZERO_EXTRACT and ZERO_EXTEND of a SUBREG that can be
11411 expressed as an AND and maybe an LSHIFTRT, to that formulation.
11412 Return whether anything was so changed. */
11414 static bool
11415 change_zero_ext (rtx pat)
11417 bool changed = false;
11418 rtx *src = &SET_SRC (pat);
11420 subrtx_ptr_iterator::array_type array;
11421 FOR_EACH_SUBRTX_PTR (iter, array, src, NONCONST)
11423 rtx x = **iter;
11424 scalar_int_mode mode, inner_mode;
11425 if (!is_a <scalar_int_mode> (GET_MODE (x), &mode))
11426 continue;
11427 int size;
11429 if (GET_CODE (x) == ZERO_EXTRACT
11430 && CONST_INT_P (XEXP (x, 1))
11431 && CONST_INT_P (XEXP (x, 2))
11432 && is_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)), &inner_mode)
11433 && GET_MODE_PRECISION (inner_mode) <= GET_MODE_PRECISION (mode))
11435 size = INTVAL (XEXP (x, 1));
11437 int start = INTVAL (XEXP (x, 2));
11438 if (BITS_BIG_ENDIAN)
11439 start = GET_MODE_PRECISION (inner_mode) - size - start;
11441 if (start)
11442 x = gen_rtx_LSHIFTRT (inner_mode, XEXP (x, 0), GEN_INT (start));
11443 else
11444 x = XEXP (x, 0);
11445 if (mode != inner_mode)
11446 x = gen_lowpart_SUBREG (mode, x);
11448 else if (GET_CODE (x) == ZERO_EXTEND
11449 && GET_CODE (XEXP (x, 0)) == SUBREG
11450 && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (XEXP (x, 0))))
11451 && !paradoxical_subreg_p (XEXP (x, 0))
11452 && subreg_lowpart_p (XEXP (x, 0)))
11454 inner_mode = as_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)));
11455 size = GET_MODE_PRECISION (inner_mode);
11456 x = SUBREG_REG (XEXP (x, 0));
11457 if (GET_MODE (x) != mode)
11458 x = gen_lowpart_SUBREG (mode, x);
11460 else if (GET_CODE (x) == ZERO_EXTEND
11461 && REG_P (XEXP (x, 0))
11462 && HARD_REGISTER_P (XEXP (x, 0))
11463 && can_change_dest_mode (XEXP (x, 0), 0, mode))
11465 inner_mode = as_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)));
11466 size = GET_MODE_PRECISION (inner_mode);
11467 x = gen_rtx_REG (mode, REGNO (XEXP (x, 0)));
11469 else
11470 continue;
11472 if (!(GET_CODE (x) == LSHIFTRT
11473 && CONST_INT_P (XEXP (x, 1))
11474 && size + INTVAL (XEXP (x, 1)) == GET_MODE_PRECISION (mode)))
11476 wide_int mask = wi::mask (size, false, GET_MODE_PRECISION (mode));
11477 x = gen_rtx_AND (mode, x, immed_wide_int_const (mask, mode));
11480 SUBST (**iter, x);
11481 changed = true;
11484 if (changed)
11485 FOR_EACH_SUBRTX_PTR (iter, array, src, NONCONST)
11486 maybe_swap_commutative_operands (**iter);
11488 rtx *dst = &SET_DEST (pat);
11489 scalar_int_mode mode;
11490 if (GET_CODE (*dst) == ZERO_EXTRACT
11491 && REG_P (XEXP (*dst, 0))
11492 && is_a <scalar_int_mode> (GET_MODE (XEXP (*dst, 0)), &mode)
11493 && CONST_INT_P (XEXP (*dst, 1))
11494 && CONST_INT_P (XEXP (*dst, 2)))
11496 rtx reg = XEXP (*dst, 0);
11497 int width = INTVAL (XEXP (*dst, 1));
11498 int offset = INTVAL (XEXP (*dst, 2));
11499 int reg_width = GET_MODE_PRECISION (mode);
11500 if (BITS_BIG_ENDIAN)
11501 offset = reg_width - width - offset;
11503 rtx x, y, z, w;
11504 wide_int mask = wi::shifted_mask (offset, width, true, reg_width);
11505 wide_int mask2 = wi::shifted_mask (offset, width, false, reg_width);
11506 x = gen_rtx_AND (mode, reg, immed_wide_int_const (mask, mode));
11507 if (offset)
11508 y = gen_rtx_ASHIFT (mode, SET_SRC (pat), GEN_INT (offset));
11509 else
11510 y = SET_SRC (pat);
11511 z = gen_rtx_AND (mode, y, immed_wide_int_const (mask2, mode));
11512 w = gen_rtx_IOR (mode, x, z);
11513 SUBST (SET_DEST (pat), reg);
11514 SUBST (SET_SRC (pat), w);
11516 changed = true;
11519 return changed;
11522 /* Like recog, but we receive the address of a pointer to a new pattern.
11523 We try to match the rtx that the pointer points to.
11524 If that fails, we may try to modify or replace the pattern,
11525 storing the replacement into the same pointer object.
11527 Modifications include deletion or addition of CLOBBERs. If the
11528 instruction will still not match, we change ZERO_EXTEND and ZERO_EXTRACT
11529 to the equivalent AND and perhaps LSHIFTRT patterns, and try with that
11530 (and undo if that fails).
11532 PNOTES is a pointer to a location where any REG_UNUSED notes added for
11533 the CLOBBERs are placed.
11535 The value is the final insn code from the pattern ultimately matched,
11536 or -1. */
11538 static int
11539 recog_for_combine (rtx *pnewpat, rtx_insn *insn, rtx *pnotes)
11541 rtx pat = *pnewpat;
11542 int insn_code_number = recog_for_combine_1 (pnewpat, insn, pnotes);
11543 if (insn_code_number >= 0 || check_asm_operands (pat))
11544 return insn_code_number;
11546 void *marker = get_undo_marker ();
11547 bool changed = false;
11549 if (GET_CODE (pat) == SET)
11550 changed = change_zero_ext (pat);
11551 else if (GET_CODE (pat) == PARALLEL)
11553 int i;
11554 for (i = 0; i < XVECLEN (pat, 0); i++)
11556 rtx set = XVECEXP (pat, 0, i);
11557 if (GET_CODE (set) == SET)
11558 changed |= change_zero_ext (set);
11562 if (changed)
11564 insn_code_number = recog_for_combine_1 (pnewpat, insn, pnotes);
11566 if (insn_code_number < 0)
11567 undo_to_marker (marker);
11570 return insn_code_number;
11573 /* Like gen_lowpart_general but for use by combine. In combine it
11574 is not possible to create any new pseudoregs. However, it is
11575 safe to create invalid memory addresses, because combine will
11576 try to recognize them and all they will do is make the combine
11577 attempt fail.
11579 If for some reason this cannot do its job, an rtx
11580 (clobber (const_int 0)) is returned.
11581 An insn containing that will not be recognized. */
11583 static rtx
11584 gen_lowpart_for_combine (machine_mode omode, rtx x)
11586 machine_mode imode = GET_MODE (x);
11587 unsigned int osize = GET_MODE_SIZE (omode);
11588 unsigned int isize = GET_MODE_SIZE (imode);
11589 rtx result;
11591 if (omode == imode)
11592 return x;
11594 /* We can only support MODE being wider than a word if X is a
11595 constant integer or has a mode the same size. */
11596 if (GET_MODE_SIZE (omode) > UNITS_PER_WORD
11597 && ! (CONST_SCALAR_INT_P (x) || isize == osize))
11598 goto fail;
11600 /* X might be a paradoxical (subreg (mem)). In that case, gen_lowpart
11601 won't know what to do. So we will strip off the SUBREG here and
11602 process normally. */
11603 if (GET_CODE (x) == SUBREG && MEM_P (SUBREG_REG (x)))
11605 x = SUBREG_REG (x);
11607 /* For use in case we fall down into the address adjustments
11608 further below, we need to adjust the known mode and size of
11609 x; imode and isize, since we just adjusted x. */
11610 imode = GET_MODE (x);
11612 if (imode == omode)
11613 return x;
11615 isize = GET_MODE_SIZE (imode);
11618 result = gen_lowpart_common (omode, x);
11620 if (result)
11621 return result;
11623 if (MEM_P (x))
11625 /* Refuse to work on a volatile memory ref or one with a mode-dependent
11626 address. */
11627 if (MEM_VOLATILE_P (x)
11628 || mode_dependent_address_p (XEXP (x, 0), MEM_ADDR_SPACE (x)))
11629 goto fail;
11631 /* If we want to refer to something bigger than the original memref,
11632 generate a paradoxical subreg instead. That will force a reload
11633 of the original memref X. */
11634 if (paradoxical_subreg_p (omode, imode))
11635 return gen_rtx_SUBREG (omode, x, 0);
11637 HOST_WIDE_INT offset = byte_lowpart_offset (omode, imode);
11638 return adjust_address_nv (x, omode, offset);
11641 /* If X is a comparison operator, rewrite it in a new mode. This
11642 probably won't match, but may allow further simplifications. */
11643 else if (COMPARISON_P (x))
11644 return gen_rtx_fmt_ee (GET_CODE (x), omode, XEXP (x, 0), XEXP (x, 1));
11646 /* If we couldn't simplify X any other way, just enclose it in a
11647 SUBREG. Normally, this SUBREG won't match, but some patterns may
11648 include an explicit SUBREG or we may simplify it further in combine. */
11649 else
11651 rtx res;
11653 if (imode == VOIDmode)
11655 imode = int_mode_for_mode (omode).require ();
11656 x = gen_lowpart_common (imode, x);
11657 if (x == NULL)
11658 goto fail;
11660 res = lowpart_subreg (omode, x, imode);
11661 if (res)
11662 return res;
11665 fail:
11666 return gen_rtx_CLOBBER (omode, const0_rtx);
11669 /* Try to simplify a comparison between OP0 and a constant OP1,
11670 where CODE is the comparison code that will be tested, into a
11671 (CODE OP0 const0_rtx) form.
11673 The result is a possibly different comparison code to use.
11674 *POP1 may be updated. */
11676 static enum rtx_code
11677 simplify_compare_const (enum rtx_code code, machine_mode mode,
11678 rtx op0, rtx *pop1)
11680 scalar_int_mode int_mode;
11681 HOST_WIDE_INT const_op = INTVAL (*pop1);
11683 /* Get the constant we are comparing against and turn off all bits
11684 not on in our mode. */
11685 if (mode != VOIDmode)
11686 const_op = trunc_int_for_mode (const_op, mode);
11688 /* If we are comparing against a constant power of two and the value
11689 being compared can only have that single bit nonzero (e.g., it was
11690 `and'ed with that bit), we can replace this with a comparison
11691 with zero. */
11692 if (const_op
11693 && (code == EQ || code == NE || code == GE || code == GEU
11694 || code == LT || code == LTU)
11695 && is_a <scalar_int_mode> (mode, &int_mode)
11696 && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
11697 && pow2p_hwi (const_op & GET_MODE_MASK (int_mode))
11698 && (nonzero_bits (op0, int_mode)
11699 == (unsigned HOST_WIDE_INT) (const_op & GET_MODE_MASK (int_mode))))
11701 code = (code == EQ || code == GE || code == GEU ? NE : EQ);
11702 const_op = 0;
11705 /* Similarly, if we are comparing a value known to be either -1 or
11706 0 with -1, change it to the opposite comparison against zero. */
11707 if (const_op == -1
11708 && (code == EQ || code == NE || code == GT || code == LE
11709 || code == GEU || code == LTU)
11710 && is_a <scalar_int_mode> (mode, &int_mode)
11711 && num_sign_bit_copies (op0, int_mode) == GET_MODE_PRECISION (int_mode))
11713 code = (code == EQ || code == LE || code == GEU ? NE : EQ);
11714 const_op = 0;
11717 /* Do some canonicalizations based on the comparison code. We prefer
11718 comparisons against zero and then prefer equality comparisons.
11719 If we can reduce the size of a constant, we will do that too. */
11720 switch (code)
11722 case LT:
11723 /* < C is equivalent to <= (C - 1) */
11724 if (const_op > 0)
11726 const_op -= 1;
11727 code = LE;
11728 /* ... fall through to LE case below. */
11729 gcc_fallthrough ();
11731 else
11732 break;
11734 case LE:
11735 /* <= C is equivalent to < (C + 1); we do this for C < 0 */
11736 if (const_op < 0)
11738 const_op += 1;
11739 code = LT;
11742 /* If we are doing a <= 0 comparison on a value known to have
11743 a zero sign bit, we can replace this with == 0. */
11744 else if (const_op == 0
11745 && is_a <scalar_int_mode> (mode, &int_mode)
11746 && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
11747 && (nonzero_bits (op0, int_mode)
11748 & (HOST_WIDE_INT_1U << (GET_MODE_PRECISION (int_mode) - 1)))
11749 == 0)
11750 code = EQ;
11751 break;
11753 case GE:
11754 /* >= C is equivalent to > (C - 1). */
11755 if (const_op > 0)
11757 const_op -= 1;
11758 code = GT;
11759 /* ... fall through to GT below. */
11760 gcc_fallthrough ();
11762 else
11763 break;
11765 case GT:
11766 /* > C is equivalent to >= (C + 1); we do this for C < 0. */
11767 if (const_op < 0)
11769 const_op += 1;
11770 code = GE;
11773 /* If we are doing a > 0 comparison on a value known to have
11774 a zero sign bit, we can replace this with != 0. */
11775 else if (const_op == 0
11776 && is_a <scalar_int_mode> (mode, &int_mode)
11777 && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
11778 && (nonzero_bits (op0, int_mode)
11779 & (HOST_WIDE_INT_1U << (GET_MODE_PRECISION (int_mode) - 1)))
11780 == 0)
11781 code = NE;
11782 break;
11784 case LTU:
11785 /* < C is equivalent to <= (C - 1). */
11786 if (const_op > 0)
11788 const_op -= 1;
11789 code = LEU;
11790 /* ... fall through ... */
11791 gcc_fallthrough ();
11793 /* (unsigned) < 0x80000000 is equivalent to >= 0. */
11794 else if (is_a <scalar_int_mode> (mode, &int_mode)
11795 && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
11796 && ((unsigned HOST_WIDE_INT) const_op
11797 == HOST_WIDE_INT_1U << (GET_MODE_PRECISION (int_mode) - 1)))
11799 const_op = 0;
11800 code = GE;
11801 break;
11803 else
11804 break;
11806 case LEU:
11807 /* unsigned <= 0 is equivalent to == 0 */
11808 if (const_op == 0)
11809 code = EQ;
11810 /* (unsigned) <= 0x7fffffff is equivalent to >= 0. */
11811 else if (is_a <scalar_int_mode> (mode, &int_mode)
11812 && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
11813 && ((unsigned HOST_WIDE_INT) const_op
11814 == ((HOST_WIDE_INT_1U
11815 << (GET_MODE_PRECISION (int_mode) - 1)) - 1)))
11817 const_op = 0;
11818 code = GE;
11820 break;
11822 case GEU:
11823 /* >= C is equivalent to > (C - 1). */
11824 if (const_op > 1)
11826 const_op -= 1;
11827 code = GTU;
11828 /* ... fall through ... */
11829 gcc_fallthrough ();
11832 /* (unsigned) >= 0x80000000 is equivalent to < 0. */
11833 else if (is_a <scalar_int_mode> (mode, &int_mode)
11834 && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
11835 && ((unsigned HOST_WIDE_INT) const_op
11836 == HOST_WIDE_INT_1U << (GET_MODE_PRECISION (int_mode) - 1)))
11838 const_op = 0;
11839 code = LT;
11840 break;
11842 else
11843 break;
11845 case GTU:
11846 /* unsigned > 0 is equivalent to != 0 */
11847 if (const_op == 0)
11848 code = NE;
11849 /* (unsigned) > 0x7fffffff is equivalent to < 0. */
11850 else if (is_a <scalar_int_mode> (mode, &int_mode)
11851 && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
11852 && ((unsigned HOST_WIDE_INT) const_op
11853 == (HOST_WIDE_INT_1U
11854 << (GET_MODE_PRECISION (int_mode) - 1)) - 1))
11856 const_op = 0;
11857 code = LT;
11859 break;
11861 default:
11862 break;
11865 *pop1 = GEN_INT (const_op);
11866 return code;
11869 /* Simplify a comparison between *POP0 and *POP1 where CODE is the
11870 comparison code that will be tested.
11872 The result is a possibly different comparison code to use. *POP0 and
11873 *POP1 may be updated.
11875 It is possible that we might detect that a comparison is either always
11876 true or always false. However, we do not perform general constant
11877 folding in combine, so this knowledge isn't useful. Such tautologies
11878 should have been detected earlier. Hence we ignore all such cases. */
11880 static enum rtx_code
11881 simplify_comparison (enum rtx_code code, rtx *pop0, rtx *pop1)
11883 rtx op0 = *pop0;
11884 rtx op1 = *pop1;
11885 rtx tem, tem1;
11886 int i;
11887 scalar_int_mode mode, inner_mode, tmode;
11888 opt_scalar_int_mode tmode_iter;
11890 /* Try a few ways of applying the same transformation to both operands. */
11891 while (1)
11893 /* The test below this one won't handle SIGN_EXTENDs on these machines,
11894 so check specially. */
11895 if (!WORD_REGISTER_OPERATIONS
11896 && code != GTU && code != GEU && code != LTU && code != LEU
11897 && GET_CODE (op0) == ASHIFTRT && GET_CODE (op1) == ASHIFTRT
11898 && GET_CODE (XEXP (op0, 0)) == ASHIFT
11899 && GET_CODE (XEXP (op1, 0)) == ASHIFT
11900 && GET_CODE (XEXP (XEXP (op0, 0), 0)) == SUBREG
11901 && GET_CODE (XEXP (XEXP (op1, 0), 0)) == SUBREG
11902 && is_a <scalar_int_mode> (GET_MODE (op0), &mode)
11903 && (is_a <scalar_int_mode>
11904 (GET_MODE (SUBREG_REG (XEXP (XEXP (op0, 0), 0))), &inner_mode))
11905 && inner_mode == GET_MODE (SUBREG_REG (XEXP (XEXP (op1, 0), 0)))
11906 && CONST_INT_P (XEXP (op0, 1))
11907 && XEXP (op0, 1) == XEXP (op1, 1)
11908 && XEXP (op0, 1) == XEXP (XEXP (op0, 0), 1)
11909 && XEXP (op0, 1) == XEXP (XEXP (op1, 0), 1)
11910 && (INTVAL (XEXP (op0, 1))
11911 == (GET_MODE_PRECISION (mode)
11912 - GET_MODE_PRECISION (inner_mode))))
11914 op0 = SUBREG_REG (XEXP (XEXP (op0, 0), 0));
11915 op1 = SUBREG_REG (XEXP (XEXP (op1, 0), 0));
11918 /* If both operands are the same constant shift, see if we can ignore the
11919 shift. We can if the shift is a rotate or if the bits shifted out of
11920 this shift are known to be zero for both inputs and if the type of
11921 comparison is compatible with the shift. */
11922 if (GET_CODE (op0) == GET_CODE (op1)
11923 && HWI_COMPUTABLE_MODE_P (GET_MODE (op0))
11924 && ((GET_CODE (op0) == ROTATE && (code == NE || code == EQ))
11925 || ((GET_CODE (op0) == LSHIFTRT || GET_CODE (op0) == ASHIFT)
11926 && (code != GT && code != LT && code != GE && code != LE))
11927 || (GET_CODE (op0) == ASHIFTRT
11928 && (code != GTU && code != LTU
11929 && code != GEU && code != LEU)))
11930 && CONST_INT_P (XEXP (op0, 1))
11931 && INTVAL (XEXP (op0, 1)) >= 0
11932 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT
11933 && XEXP (op0, 1) == XEXP (op1, 1))
11935 machine_mode mode = GET_MODE (op0);
11936 unsigned HOST_WIDE_INT mask = GET_MODE_MASK (mode);
11937 int shift_count = INTVAL (XEXP (op0, 1));
11939 if (GET_CODE (op0) == LSHIFTRT || GET_CODE (op0) == ASHIFTRT)
11940 mask &= (mask >> shift_count) << shift_count;
11941 else if (GET_CODE (op0) == ASHIFT)
11942 mask = (mask & (mask << shift_count)) >> shift_count;
11944 if ((nonzero_bits (XEXP (op0, 0), mode) & ~mask) == 0
11945 && (nonzero_bits (XEXP (op1, 0), mode) & ~mask) == 0)
11946 op0 = XEXP (op0, 0), op1 = XEXP (op1, 0);
11947 else
11948 break;
11951 /* If both operands are AND's of a paradoxical SUBREG by constant, the
11952 SUBREGs are of the same mode, and, in both cases, the AND would
11953 be redundant if the comparison was done in the narrower mode,
11954 do the comparison in the narrower mode (e.g., we are AND'ing with 1
11955 and the operand's possibly nonzero bits are 0xffffff01; in that case
11956 if we only care about QImode, we don't need the AND). This case
11957 occurs if the output mode of an scc insn is not SImode and
11958 STORE_FLAG_VALUE == 1 (e.g., the 386).
11960 Similarly, check for a case where the AND's are ZERO_EXTEND
11961 operations from some narrower mode even though a SUBREG is not
11962 present. */
11964 else if (GET_CODE (op0) == AND && GET_CODE (op1) == AND
11965 && CONST_INT_P (XEXP (op0, 1))
11966 && CONST_INT_P (XEXP (op1, 1)))
11968 rtx inner_op0 = XEXP (op0, 0);
11969 rtx inner_op1 = XEXP (op1, 0);
11970 HOST_WIDE_INT c0 = INTVAL (XEXP (op0, 1));
11971 HOST_WIDE_INT c1 = INTVAL (XEXP (op1, 1));
11972 int changed = 0;
11974 if (paradoxical_subreg_p (inner_op0)
11975 && GET_CODE (inner_op1) == SUBREG
11976 && HWI_COMPUTABLE_MODE_P (GET_MODE (SUBREG_REG (inner_op0)))
11977 && (GET_MODE (SUBREG_REG (inner_op0))
11978 == GET_MODE (SUBREG_REG (inner_op1)))
11979 && (0 == ((~c0) & nonzero_bits (SUBREG_REG (inner_op0),
11980 GET_MODE (SUBREG_REG (inner_op0)))))
11981 && (0 == ((~c1) & nonzero_bits (SUBREG_REG (inner_op1),
11982 GET_MODE (SUBREG_REG (inner_op1))))))
11984 op0 = SUBREG_REG (inner_op0);
11985 op1 = SUBREG_REG (inner_op1);
11987 /* The resulting comparison is always unsigned since we masked
11988 off the original sign bit. */
11989 code = unsigned_condition (code);
11991 changed = 1;
11994 else if (c0 == c1)
11995 FOR_EACH_MODE_UNTIL (tmode,
11996 as_a <scalar_int_mode> (GET_MODE (op0)))
11997 if ((unsigned HOST_WIDE_INT) c0 == GET_MODE_MASK (tmode))
11999 op0 = gen_lowpart_or_truncate (tmode, inner_op0);
12000 op1 = gen_lowpart_or_truncate (tmode, inner_op1);
12001 code = unsigned_condition (code);
12002 changed = 1;
12003 break;
12006 if (! changed)
12007 break;
12010 /* If both operands are NOT, we can strip off the outer operation
12011 and adjust the comparison code for swapped operands; similarly for
12012 NEG, except that this must be an equality comparison. */
12013 else if ((GET_CODE (op0) == NOT && GET_CODE (op1) == NOT)
12014 || (GET_CODE (op0) == NEG && GET_CODE (op1) == NEG
12015 && (code == EQ || code == NE)))
12016 op0 = XEXP (op0, 0), op1 = XEXP (op1, 0), code = swap_condition (code);
12018 else
12019 break;
12022 /* If the first operand is a constant, swap the operands and adjust the
12023 comparison code appropriately, but don't do this if the second operand
12024 is already a constant integer. */
12025 if (swap_commutative_operands_p (op0, op1))
12027 std::swap (op0, op1);
12028 code = swap_condition (code);
12031 /* We now enter a loop during which we will try to simplify the comparison.
12032 For the most part, we only are concerned with comparisons with zero,
12033 but some things may really be comparisons with zero but not start
12034 out looking that way. */
12036 while (CONST_INT_P (op1))
12038 machine_mode raw_mode = GET_MODE (op0);
12039 scalar_int_mode int_mode;
12040 int equality_comparison_p;
12041 int sign_bit_comparison_p;
12042 int unsigned_comparison_p;
12043 HOST_WIDE_INT const_op;
12045 /* We only want to handle integral modes. This catches VOIDmode,
12046 CCmode, and the floating-point modes. An exception is that we
12047 can handle VOIDmode if OP0 is a COMPARE or a comparison
12048 operation. */
12050 if (GET_MODE_CLASS (raw_mode) != MODE_INT
12051 && ! (raw_mode == VOIDmode
12052 && (GET_CODE (op0) == COMPARE || COMPARISON_P (op0))))
12053 break;
12055 /* Try to simplify the compare to constant, possibly changing the
12056 comparison op, and/or changing op1 to zero. */
12057 code = simplify_compare_const (code, raw_mode, op0, &op1);
12058 const_op = INTVAL (op1);
12060 /* Compute some predicates to simplify code below. */
12062 equality_comparison_p = (code == EQ || code == NE);
12063 sign_bit_comparison_p = ((code == LT || code == GE) && const_op == 0);
12064 unsigned_comparison_p = (code == LTU || code == LEU || code == GTU
12065 || code == GEU);
12067 /* If this is a sign bit comparison and we can do arithmetic in
12068 MODE, say that we will only be needing the sign bit of OP0. */
12069 if (sign_bit_comparison_p
12070 && is_a <scalar_int_mode> (raw_mode, &int_mode)
12071 && HWI_COMPUTABLE_MODE_P (int_mode))
12072 op0 = force_to_mode (op0, int_mode,
12073 HOST_WIDE_INT_1U
12074 << (GET_MODE_PRECISION (int_mode) - 1),
12077 if (COMPARISON_P (op0))
12079 /* We can't do anything if OP0 is a condition code value, rather
12080 than an actual data value. */
12081 if (const_op != 0
12082 || CC0_P (XEXP (op0, 0))
12083 || GET_MODE_CLASS (GET_MODE (XEXP (op0, 0))) == MODE_CC)
12084 break;
12086 /* Get the two operands being compared. */
12087 if (GET_CODE (XEXP (op0, 0)) == COMPARE)
12088 tem = XEXP (XEXP (op0, 0), 0), tem1 = XEXP (XEXP (op0, 0), 1);
12089 else
12090 tem = XEXP (op0, 0), tem1 = XEXP (op0, 1);
12092 /* Check for the cases where we simply want the result of the
12093 earlier test or the opposite of that result. */
12094 if (code == NE || code == EQ
12095 || (val_signbit_known_set_p (raw_mode, STORE_FLAG_VALUE)
12096 && (code == LT || code == GE)))
12098 enum rtx_code new_code;
12099 if (code == LT || code == NE)
12100 new_code = GET_CODE (op0);
12101 else
12102 new_code = reversed_comparison_code (op0, NULL);
12104 if (new_code != UNKNOWN)
12106 code = new_code;
12107 op0 = tem;
12108 op1 = tem1;
12109 continue;
12112 break;
12115 if (raw_mode == VOIDmode)
12116 break;
12117 scalar_int_mode mode = as_a <scalar_int_mode> (raw_mode);
12119 /* Now try cases based on the opcode of OP0. If none of the cases
12120 does a "continue", we exit this loop immediately after the
12121 switch. */
12123 unsigned int mode_width = GET_MODE_PRECISION (mode);
12124 unsigned HOST_WIDE_INT mask = GET_MODE_MASK (mode);
12125 switch (GET_CODE (op0))
12127 case ZERO_EXTRACT:
12128 /* If we are extracting a single bit from a variable position in
12129 a constant that has only a single bit set and are comparing it
12130 with zero, we can convert this into an equality comparison
12131 between the position and the location of the single bit. */
12132 /* Except we can't if SHIFT_COUNT_TRUNCATED is set, since we might
12133 have already reduced the shift count modulo the word size. */
12134 if (!SHIFT_COUNT_TRUNCATED
12135 && CONST_INT_P (XEXP (op0, 0))
12136 && XEXP (op0, 1) == const1_rtx
12137 && equality_comparison_p && const_op == 0
12138 && (i = exact_log2 (UINTVAL (XEXP (op0, 0)))) >= 0)
12140 if (BITS_BIG_ENDIAN)
12141 i = BITS_PER_WORD - 1 - i;
12143 op0 = XEXP (op0, 2);
12144 op1 = GEN_INT (i);
12145 const_op = i;
12147 /* Result is nonzero iff shift count is equal to I. */
12148 code = reverse_condition (code);
12149 continue;
12152 /* fall through */
12154 case SIGN_EXTRACT:
12155 tem = expand_compound_operation (op0);
12156 if (tem != op0)
12158 op0 = tem;
12159 continue;
12161 break;
12163 case NOT:
12164 /* If testing for equality, we can take the NOT of the constant. */
12165 if (equality_comparison_p
12166 && (tem = simplify_unary_operation (NOT, mode, op1, mode)) != 0)
12168 op0 = XEXP (op0, 0);
12169 op1 = tem;
12170 continue;
12173 /* If just looking at the sign bit, reverse the sense of the
12174 comparison. */
12175 if (sign_bit_comparison_p)
12177 op0 = XEXP (op0, 0);
12178 code = (code == GE ? LT : GE);
12179 continue;
12181 break;
12183 case NEG:
12184 /* If testing for equality, we can take the NEG of the constant. */
12185 if (equality_comparison_p
12186 && (tem = simplify_unary_operation (NEG, mode, op1, mode)) != 0)
12188 op0 = XEXP (op0, 0);
12189 op1 = tem;
12190 continue;
12193 /* The remaining cases only apply to comparisons with zero. */
12194 if (const_op != 0)
12195 break;
12197 /* When X is ABS or is known positive,
12198 (neg X) is < 0 if and only if X != 0. */
12200 if (sign_bit_comparison_p
12201 && (GET_CODE (XEXP (op0, 0)) == ABS
12202 || (mode_width <= HOST_BITS_PER_WIDE_INT
12203 && (nonzero_bits (XEXP (op0, 0), mode)
12204 & (HOST_WIDE_INT_1U << (mode_width - 1)))
12205 == 0)))
12207 op0 = XEXP (op0, 0);
12208 code = (code == LT ? NE : EQ);
12209 continue;
12212 /* If we have NEG of something whose two high-order bits are the
12213 same, we know that "(-a) < 0" is equivalent to "a > 0". */
12214 if (num_sign_bit_copies (op0, mode) >= 2)
12216 op0 = XEXP (op0, 0);
12217 code = swap_condition (code);
12218 continue;
12220 break;
12222 case ROTATE:
12223 /* If we are testing equality and our count is a constant, we
12224 can perform the inverse operation on our RHS. */
12225 if (equality_comparison_p && CONST_INT_P (XEXP (op0, 1))
12226 && (tem = simplify_binary_operation (ROTATERT, mode,
12227 op1, XEXP (op0, 1))) != 0)
12229 op0 = XEXP (op0, 0);
12230 op1 = tem;
12231 continue;
12234 /* If we are doing a < 0 or >= 0 comparison, it means we are testing
12235 a particular bit. Convert it to an AND of a constant of that
12236 bit. This will be converted into a ZERO_EXTRACT. */
12237 if (const_op == 0 && sign_bit_comparison_p
12238 && CONST_INT_P (XEXP (op0, 1))
12239 && mode_width <= HOST_BITS_PER_WIDE_INT)
12241 op0 = simplify_and_const_int (NULL_RTX, mode, XEXP (op0, 0),
12242 (HOST_WIDE_INT_1U
12243 << (mode_width - 1
12244 - INTVAL (XEXP (op0, 1)))));
12245 code = (code == LT ? NE : EQ);
12246 continue;
12249 /* Fall through. */
12251 case ABS:
12252 /* ABS is ignorable inside an equality comparison with zero. */
12253 if (const_op == 0 && equality_comparison_p)
12255 op0 = XEXP (op0, 0);
12256 continue;
12258 break;
12260 case SIGN_EXTEND:
12261 /* Can simplify (compare (zero/sign_extend FOO) CONST) to
12262 (compare FOO CONST) if CONST fits in FOO's mode and we
12263 are either testing inequality or have an unsigned
12264 comparison with ZERO_EXTEND or a signed comparison with
12265 SIGN_EXTEND. But don't do it if we don't have a compare
12266 insn of the given mode, since we'd have to revert it
12267 later on, and then we wouldn't know whether to sign- or
12268 zero-extend. */
12269 if (is_int_mode (GET_MODE (XEXP (op0, 0)), &mode)
12270 && ! unsigned_comparison_p
12271 && HWI_COMPUTABLE_MODE_P (mode)
12272 && trunc_int_for_mode (const_op, mode) == const_op
12273 && have_insn_for (COMPARE, mode))
12275 op0 = XEXP (op0, 0);
12276 continue;
12278 break;
12280 case SUBREG:
12281 /* Check for the case where we are comparing A - C1 with C2, that is
12283 (subreg:MODE (plus (A) (-C1))) op (C2)
12285 with C1 a constant, and try to lift the SUBREG, i.e. to do the
12286 comparison in the wider mode. One of the following two conditions
12287 must be true in order for this to be valid:
12289 1. The mode extension results in the same bit pattern being added
12290 on both sides and the comparison is equality or unsigned. As
12291 C2 has been truncated to fit in MODE, the pattern can only be
12292 all 0s or all 1s.
12294 2. The mode extension results in the sign bit being copied on
12295 each side.
12297 The difficulty here is that we have predicates for A but not for
12298 (A - C1) so we need to check that C1 is within proper bounds so
12299 as to perturbate A as little as possible. */
12301 if (mode_width <= HOST_BITS_PER_WIDE_INT
12302 && subreg_lowpart_p (op0)
12303 && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (op0)),
12304 &inner_mode)
12305 && GET_MODE_PRECISION (inner_mode) > mode_width
12306 && GET_CODE (SUBREG_REG (op0)) == PLUS
12307 && CONST_INT_P (XEXP (SUBREG_REG (op0), 1)))
12309 rtx a = XEXP (SUBREG_REG (op0), 0);
12310 HOST_WIDE_INT c1 = -INTVAL (XEXP (SUBREG_REG (op0), 1));
12312 if ((c1 > 0
12313 && (unsigned HOST_WIDE_INT) c1
12314 < HOST_WIDE_INT_1U << (mode_width - 1)
12315 && (equality_comparison_p || unsigned_comparison_p)
12316 /* (A - C1) zero-extends if it is positive and sign-extends
12317 if it is negative, C2 both zero- and sign-extends. */
12318 && ((0 == (nonzero_bits (a, inner_mode)
12319 & ~GET_MODE_MASK (mode))
12320 && const_op >= 0)
12321 /* (A - C1) sign-extends if it is positive and 1-extends
12322 if it is negative, C2 both sign- and 1-extends. */
12323 || (num_sign_bit_copies (a, inner_mode)
12324 > (unsigned int) (GET_MODE_PRECISION (inner_mode)
12325 - mode_width)
12326 && const_op < 0)))
12327 || ((unsigned HOST_WIDE_INT) c1
12328 < HOST_WIDE_INT_1U << (mode_width - 2)
12329 /* (A - C1) always sign-extends, like C2. */
12330 && num_sign_bit_copies (a, inner_mode)
12331 > (unsigned int) (GET_MODE_PRECISION (inner_mode)
12332 - (mode_width - 1))))
12334 op0 = SUBREG_REG (op0);
12335 continue;
12339 /* If the inner mode is narrower and we are extracting the low part,
12340 we can treat the SUBREG as if it were a ZERO_EXTEND. */
12341 if (paradoxical_subreg_p (op0))
12343 else if (subreg_lowpart_p (op0)
12344 && GET_MODE_CLASS (mode) == MODE_INT
12345 && is_int_mode (GET_MODE (SUBREG_REG (op0)), &inner_mode)
12346 && (code == NE || code == EQ)
12347 && GET_MODE_PRECISION (inner_mode) <= HOST_BITS_PER_WIDE_INT
12348 && !paradoxical_subreg_p (op0)
12349 && (nonzero_bits (SUBREG_REG (op0), inner_mode)
12350 & ~GET_MODE_MASK (mode)) == 0)
12352 /* Remove outer subregs that don't do anything. */
12353 tem = gen_lowpart (inner_mode, op1);
12355 if ((nonzero_bits (tem, inner_mode)
12356 & ~GET_MODE_MASK (mode)) == 0)
12358 op0 = SUBREG_REG (op0);
12359 op1 = tem;
12360 continue;
12362 break;
12364 else
12365 break;
12367 /* FALLTHROUGH */
12369 case ZERO_EXTEND:
12370 if (is_int_mode (GET_MODE (XEXP (op0, 0)), &mode)
12371 && (unsigned_comparison_p || equality_comparison_p)
12372 && HWI_COMPUTABLE_MODE_P (mode)
12373 && (unsigned HOST_WIDE_INT) const_op <= GET_MODE_MASK (mode)
12374 && const_op >= 0
12375 && have_insn_for (COMPARE, mode))
12377 op0 = XEXP (op0, 0);
12378 continue;
12380 break;
12382 case PLUS:
12383 /* (eq (plus X A) B) -> (eq X (minus B A)). We can only do
12384 this for equality comparisons due to pathological cases involving
12385 overflows. */
12386 if (equality_comparison_p
12387 && 0 != (tem = simplify_binary_operation (MINUS, mode,
12388 op1, XEXP (op0, 1))))
12390 op0 = XEXP (op0, 0);
12391 op1 = tem;
12392 continue;
12395 /* (plus (abs X) (const_int -1)) is < 0 if and only if X == 0. */
12396 if (const_op == 0 && XEXP (op0, 1) == constm1_rtx
12397 && GET_CODE (XEXP (op0, 0)) == ABS && sign_bit_comparison_p)
12399 op0 = XEXP (XEXP (op0, 0), 0);
12400 code = (code == LT ? EQ : NE);
12401 continue;
12403 break;
12405 case MINUS:
12406 /* We used to optimize signed comparisons against zero, but that
12407 was incorrect. Unsigned comparisons against zero (GTU, LEU)
12408 arrive here as equality comparisons, or (GEU, LTU) are
12409 optimized away. No need to special-case them. */
12411 /* (eq (minus A B) C) -> (eq A (plus B C)) or
12412 (eq B (minus A C)), whichever simplifies. We can only do
12413 this for equality comparisons due to pathological cases involving
12414 overflows. */
12415 if (equality_comparison_p
12416 && 0 != (tem = simplify_binary_operation (PLUS, mode,
12417 XEXP (op0, 1), op1)))
12419 op0 = XEXP (op0, 0);
12420 op1 = tem;
12421 continue;
12424 if (equality_comparison_p
12425 && 0 != (tem = simplify_binary_operation (MINUS, mode,
12426 XEXP (op0, 0), op1)))
12428 op0 = XEXP (op0, 1);
12429 op1 = tem;
12430 continue;
12433 /* The sign bit of (minus (ashiftrt X C) X), where C is the number
12434 of bits in X minus 1, is one iff X > 0. */
12435 if (sign_bit_comparison_p && GET_CODE (XEXP (op0, 0)) == ASHIFTRT
12436 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
12437 && UINTVAL (XEXP (XEXP (op0, 0), 1)) == mode_width - 1
12438 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), XEXP (op0, 1)))
12440 op0 = XEXP (op0, 1);
12441 code = (code == GE ? LE : GT);
12442 continue;
12444 break;
12446 case XOR:
12447 /* (eq (xor A B) C) -> (eq A (xor B C)). This is a simplification
12448 if C is zero or B is a constant. */
12449 if (equality_comparison_p
12450 && 0 != (tem = simplify_binary_operation (XOR, mode,
12451 XEXP (op0, 1), op1)))
12453 op0 = XEXP (op0, 0);
12454 op1 = tem;
12455 continue;
12457 break;
12460 case IOR:
12461 /* The sign bit of (ior (plus X (const_int -1)) X) is nonzero
12462 iff X <= 0. */
12463 if (sign_bit_comparison_p && GET_CODE (XEXP (op0, 0)) == PLUS
12464 && XEXP (XEXP (op0, 0), 1) == constm1_rtx
12465 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), XEXP (op0, 1)))
12467 op0 = XEXP (op0, 1);
12468 code = (code == GE ? GT : LE);
12469 continue;
12471 break;
12473 case AND:
12474 /* Convert (and (xshift 1 X) Y) to (and (lshiftrt Y X) 1). This
12475 will be converted to a ZERO_EXTRACT later. */
12476 if (const_op == 0 && equality_comparison_p
12477 && GET_CODE (XEXP (op0, 0)) == ASHIFT
12478 && XEXP (XEXP (op0, 0), 0) == const1_rtx)
12480 op0 = gen_rtx_LSHIFTRT (mode, XEXP (op0, 1),
12481 XEXP (XEXP (op0, 0), 1));
12482 op0 = simplify_and_const_int (NULL_RTX, mode, op0, 1);
12483 continue;
12486 /* If we are comparing (and (lshiftrt X C1) C2) for equality with
12487 zero and X is a comparison and C1 and C2 describe only bits set
12488 in STORE_FLAG_VALUE, we can compare with X. */
12489 if (const_op == 0 && equality_comparison_p
12490 && mode_width <= HOST_BITS_PER_WIDE_INT
12491 && CONST_INT_P (XEXP (op0, 1))
12492 && GET_CODE (XEXP (op0, 0)) == LSHIFTRT
12493 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
12494 && INTVAL (XEXP (XEXP (op0, 0), 1)) >= 0
12495 && INTVAL (XEXP (XEXP (op0, 0), 1)) < HOST_BITS_PER_WIDE_INT)
12497 mask = ((INTVAL (XEXP (op0, 1)) & GET_MODE_MASK (mode))
12498 << INTVAL (XEXP (XEXP (op0, 0), 1)));
12499 if ((~STORE_FLAG_VALUE & mask) == 0
12500 && (COMPARISON_P (XEXP (XEXP (op0, 0), 0))
12501 || ((tem = get_last_value (XEXP (XEXP (op0, 0), 0))) != 0
12502 && COMPARISON_P (tem))))
12504 op0 = XEXP (XEXP (op0, 0), 0);
12505 continue;
12509 /* If we are doing an equality comparison of an AND of a bit equal
12510 to the sign bit, replace this with a LT or GE comparison of
12511 the underlying value. */
12512 if (equality_comparison_p
12513 && const_op == 0
12514 && CONST_INT_P (XEXP (op0, 1))
12515 && mode_width <= HOST_BITS_PER_WIDE_INT
12516 && ((INTVAL (XEXP (op0, 1)) & GET_MODE_MASK (mode))
12517 == HOST_WIDE_INT_1U << (mode_width - 1)))
12519 op0 = XEXP (op0, 0);
12520 code = (code == EQ ? GE : LT);
12521 continue;
12524 /* If this AND operation is really a ZERO_EXTEND from a narrower
12525 mode, the constant fits within that mode, and this is either an
12526 equality or unsigned comparison, try to do this comparison in
12527 the narrower mode.
12529 Note that in:
12531 (ne:DI (and:DI (reg:DI 4) (const_int 0xffffffff)) (const_int 0))
12532 -> (ne:DI (reg:SI 4) (const_int 0))
12534 unless TARGET_TRULY_NOOP_TRUNCATION allows it or the register is
12535 known to hold a value of the required mode the
12536 transformation is invalid. */
12537 if ((equality_comparison_p || unsigned_comparison_p)
12538 && CONST_INT_P (XEXP (op0, 1))
12539 && (i = exact_log2 ((UINTVAL (XEXP (op0, 1))
12540 & GET_MODE_MASK (mode))
12541 + 1)) >= 0
12542 && const_op >> i == 0
12543 && int_mode_for_size (i, 1).exists (&tmode))
12545 op0 = gen_lowpart_or_truncate (tmode, XEXP (op0, 0));
12546 continue;
12549 /* If this is (and:M1 (subreg:M1 X:M2 0) (const_int C1)) where C1
12550 fits in both M1 and M2 and the SUBREG is either paradoxical
12551 or represents the low part, permute the SUBREG and the AND
12552 and try again. */
12553 if (GET_CODE (XEXP (op0, 0)) == SUBREG
12554 && CONST_INT_P (XEXP (op0, 1)))
12556 unsigned HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
12557 /* Require an integral mode, to avoid creating something like
12558 (AND:SF ...). */
12559 if ((is_a <scalar_int_mode>
12560 (GET_MODE (SUBREG_REG (XEXP (op0, 0))), &tmode))
12561 /* It is unsafe to commute the AND into the SUBREG if the
12562 SUBREG is paradoxical and WORD_REGISTER_OPERATIONS is
12563 not defined. As originally written the upper bits
12564 have a defined value due to the AND operation.
12565 However, if we commute the AND inside the SUBREG then
12566 they no longer have defined values and the meaning of
12567 the code has been changed.
12568 Also C1 should not change value in the smaller mode,
12569 see PR67028 (a positive C1 can become negative in the
12570 smaller mode, so that the AND does no longer mask the
12571 upper bits). */
12572 && ((WORD_REGISTER_OPERATIONS
12573 && mode_width > GET_MODE_PRECISION (tmode)
12574 && mode_width <= BITS_PER_WORD
12575 && trunc_int_for_mode (c1, tmode) == (HOST_WIDE_INT) c1)
12576 || (mode_width <= GET_MODE_PRECISION (tmode)
12577 && subreg_lowpart_p (XEXP (op0, 0))))
12578 && mode_width <= HOST_BITS_PER_WIDE_INT
12579 && HWI_COMPUTABLE_MODE_P (tmode)
12580 && (c1 & ~mask) == 0
12581 && (c1 & ~GET_MODE_MASK (tmode)) == 0
12582 && c1 != mask
12583 && c1 != GET_MODE_MASK (tmode))
12585 op0 = simplify_gen_binary (AND, tmode,
12586 SUBREG_REG (XEXP (op0, 0)),
12587 gen_int_mode (c1, tmode));
12588 op0 = gen_lowpart (mode, op0);
12589 continue;
12593 /* Convert (ne (and (not X) 1) 0) to (eq (and X 1) 0). */
12594 if (const_op == 0 && equality_comparison_p
12595 && XEXP (op0, 1) == const1_rtx
12596 && GET_CODE (XEXP (op0, 0)) == NOT)
12598 op0 = simplify_and_const_int (NULL_RTX, mode,
12599 XEXP (XEXP (op0, 0), 0), 1);
12600 code = (code == NE ? EQ : NE);
12601 continue;
12604 /* Convert (ne (and (lshiftrt (not X)) 1) 0) to
12605 (eq (and (lshiftrt X) 1) 0).
12606 Also handle the case where (not X) is expressed using xor. */
12607 if (const_op == 0 && equality_comparison_p
12608 && XEXP (op0, 1) == const1_rtx
12609 && GET_CODE (XEXP (op0, 0)) == LSHIFTRT)
12611 rtx shift_op = XEXP (XEXP (op0, 0), 0);
12612 rtx shift_count = XEXP (XEXP (op0, 0), 1);
12614 if (GET_CODE (shift_op) == NOT
12615 || (GET_CODE (shift_op) == XOR
12616 && CONST_INT_P (XEXP (shift_op, 1))
12617 && CONST_INT_P (shift_count)
12618 && HWI_COMPUTABLE_MODE_P (mode)
12619 && (UINTVAL (XEXP (shift_op, 1))
12620 == HOST_WIDE_INT_1U
12621 << INTVAL (shift_count))))
12624 = gen_rtx_LSHIFTRT (mode, XEXP (shift_op, 0), shift_count);
12625 op0 = simplify_and_const_int (NULL_RTX, mode, op0, 1);
12626 code = (code == NE ? EQ : NE);
12627 continue;
12630 break;
12632 case ASHIFT:
12633 /* If we have (compare (ashift FOO N) (const_int C)) and
12634 the high order N bits of FOO (N+1 if an inequality comparison)
12635 are known to be zero, we can do this by comparing FOO with C
12636 shifted right N bits so long as the low-order N bits of C are
12637 zero. */
12638 if (CONST_INT_P (XEXP (op0, 1))
12639 && INTVAL (XEXP (op0, 1)) >= 0
12640 && ((INTVAL (XEXP (op0, 1)) + ! equality_comparison_p)
12641 < HOST_BITS_PER_WIDE_INT)
12642 && (((unsigned HOST_WIDE_INT) const_op
12643 & ((HOST_WIDE_INT_1U << INTVAL (XEXP (op0, 1)))
12644 - 1)) == 0)
12645 && mode_width <= HOST_BITS_PER_WIDE_INT
12646 && (nonzero_bits (XEXP (op0, 0), mode)
12647 & ~(mask >> (INTVAL (XEXP (op0, 1))
12648 + ! equality_comparison_p))) == 0)
12650 /* We must perform a logical shift, not an arithmetic one,
12651 as we want the top N bits of C to be zero. */
12652 unsigned HOST_WIDE_INT temp = const_op & GET_MODE_MASK (mode);
12654 temp >>= INTVAL (XEXP (op0, 1));
12655 op1 = gen_int_mode (temp, mode);
12656 op0 = XEXP (op0, 0);
12657 continue;
12660 /* If we are doing a sign bit comparison, it means we are testing
12661 a particular bit. Convert it to the appropriate AND. */
12662 if (sign_bit_comparison_p && CONST_INT_P (XEXP (op0, 1))
12663 && mode_width <= HOST_BITS_PER_WIDE_INT)
12665 op0 = simplify_and_const_int (NULL_RTX, mode, XEXP (op0, 0),
12666 (HOST_WIDE_INT_1U
12667 << (mode_width - 1
12668 - INTVAL (XEXP (op0, 1)))));
12669 code = (code == LT ? NE : EQ);
12670 continue;
12673 /* If this an equality comparison with zero and we are shifting
12674 the low bit to the sign bit, we can convert this to an AND of the
12675 low-order bit. */
12676 if (const_op == 0 && equality_comparison_p
12677 && CONST_INT_P (XEXP (op0, 1))
12678 && UINTVAL (XEXP (op0, 1)) == mode_width - 1)
12680 op0 = simplify_and_const_int (NULL_RTX, mode, XEXP (op0, 0), 1);
12681 continue;
12683 break;
12685 case ASHIFTRT:
12686 /* If this is an equality comparison with zero, we can do this
12687 as a logical shift, which might be much simpler. */
12688 if (equality_comparison_p && const_op == 0
12689 && CONST_INT_P (XEXP (op0, 1)))
12691 op0 = simplify_shift_const (NULL_RTX, LSHIFTRT, mode,
12692 XEXP (op0, 0),
12693 INTVAL (XEXP (op0, 1)));
12694 continue;
12697 /* If OP0 is a sign extension and CODE is not an unsigned comparison,
12698 do the comparison in a narrower mode. */
12699 if (! unsigned_comparison_p
12700 && CONST_INT_P (XEXP (op0, 1))
12701 && GET_CODE (XEXP (op0, 0)) == ASHIFT
12702 && XEXP (op0, 1) == XEXP (XEXP (op0, 0), 1)
12703 && (int_mode_for_size (mode_width - INTVAL (XEXP (op0, 1)), 1)
12704 .exists (&tmode))
12705 && (((unsigned HOST_WIDE_INT) const_op
12706 + (GET_MODE_MASK (tmode) >> 1) + 1)
12707 <= GET_MODE_MASK (tmode)))
12709 op0 = gen_lowpart (tmode, XEXP (XEXP (op0, 0), 0));
12710 continue;
12713 /* Likewise if OP0 is a PLUS of a sign extension with a
12714 constant, which is usually represented with the PLUS
12715 between the shifts. */
12716 if (! unsigned_comparison_p
12717 && CONST_INT_P (XEXP (op0, 1))
12718 && GET_CODE (XEXP (op0, 0)) == PLUS
12719 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
12720 && GET_CODE (XEXP (XEXP (op0, 0), 0)) == ASHIFT
12721 && XEXP (op0, 1) == XEXP (XEXP (XEXP (op0, 0), 0), 1)
12722 && (int_mode_for_size (mode_width - INTVAL (XEXP (op0, 1)), 1)
12723 .exists (&tmode))
12724 && (((unsigned HOST_WIDE_INT) const_op
12725 + (GET_MODE_MASK (tmode) >> 1) + 1)
12726 <= GET_MODE_MASK (tmode)))
12728 rtx inner = XEXP (XEXP (XEXP (op0, 0), 0), 0);
12729 rtx add_const = XEXP (XEXP (op0, 0), 1);
12730 rtx new_const = simplify_gen_binary (ASHIFTRT, mode,
12731 add_const, XEXP (op0, 1));
12733 op0 = simplify_gen_binary (PLUS, tmode,
12734 gen_lowpart (tmode, inner),
12735 new_const);
12736 continue;
12739 /* FALLTHROUGH */
12740 case LSHIFTRT:
12741 /* If we have (compare (xshiftrt FOO N) (const_int C)) and
12742 the low order N bits of FOO are known to be zero, we can do this
12743 by comparing FOO with C shifted left N bits so long as no
12744 overflow occurs. Even if the low order N bits of FOO aren't known
12745 to be zero, if the comparison is >= or < we can use the same
12746 optimization and for > or <= by setting all the low
12747 order N bits in the comparison constant. */
12748 if (CONST_INT_P (XEXP (op0, 1))
12749 && INTVAL (XEXP (op0, 1)) > 0
12750 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT
12751 && mode_width <= HOST_BITS_PER_WIDE_INT
12752 && (((unsigned HOST_WIDE_INT) const_op
12753 + (GET_CODE (op0) != LSHIFTRT
12754 ? ((GET_MODE_MASK (mode) >> INTVAL (XEXP (op0, 1)) >> 1)
12755 + 1)
12756 : 0))
12757 <= GET_MODE_MASK (mode) >> INTVAL (XEXP (op0, 1))))
12759 unsigned HOST_WIDE_INT low_bits
12760 = (nonzero_bits (XEXP (op0, 0), mode)
12761 & ((HOST_WIDE_INT_1U
12762 << INTVAL (XEXP (op0, 1))) - 1));
12763 if (low_bits == 0 || !equality_comparison_p)
12765 /* If the shift was logical, then we must make the condition
12766 unsigned. */
12767 if (GET_CODE (op0) == LSHIFTRT)
12768 code = unsigned_condition (code);
12770 const_op = (unsigned HOST_WIDE_INT) const_op
12771 << INTVAL (XEXP (op0, 1));
12772 if (low_bits != 0
12773 && (code == GT || code == GTU
12774 || code == LE || code == LEU))
12775 const_op
12776 |= ((HOST_WIDE_INT_1 << INTVAL (XEXP (op0, 1))) - 1);
12777 op1 = GEN_INT (const_op);
12778 op0 = XEXP (op0, 0);
12779 continue;
12783 /* If we are using this shift to extract just the sign bit, we
12784 can replace this with an LT or GE comparison. */
12785 if (const_op == 0
12786 && (equality_comparison_p || sign_bit_comparison_p)
12787 && CONST_INT_P (XEXP (op0, 1))
12788 && UINTVAL (XEXP (op0, 1)) == mode_width - 1)
12790 op0 = XEXP (op0, 0);
12791 code = (code == NE || code == GT ? LT : GE);
12792 continue;
12794 break;
12796 default:
12797 break;
12800 break;
12803 /* Now make any compound operations involved in this comparison. Then,
12804 check for an outmost SUBREG on OP0 that is not doing anything or is
12805 paradoxical. The latter transformation must only be performed when
12806 it is known that the "extra" bits will be the same in op0 and op1 or
12807 that they don't matter. There are three cases to consider:
12809 1. SUBREG_REG (op0) is a register. In this case the bits are don't
12810 care bits and we can assume they have any convenient value. So
12811 making the transformation is safe.
12813 2. SUBREG_REG (op0) is a memory and LOAD_EXTEND_OP is UNKNOWN.
12814 In this case the upper bits of op0 are undefined. We should not make
12815 the simplification in that case as we do not know the contents of
12816 those bits.
12818 3. SUBREG_REG (op0) is a memory and LOAD_EXTEND_OP is not UNKNOWN.
12819 In that case we know those bits are zeros or ones. We must also be
12820 sure that they are the same as the upper bits of op1.
12822 We can never remove a SUBREG for a non-equality comparison because
12823 the sign bit is in a different place in the underlying object. */
12825 rtx_code op0_mco_code = SET;
12826 if (op1 == const0_rtx)
12827 op0_mco_code = code == NE || code == EQ ? EQ : COMPARE;
12829 op0 = make_compound_operation (op0, op0_mco_code);
12830 op1 = make_compound_operation (op1, SET);
12832 if (GET_CODE (op0) == SUBREG && subreg_lowpart_p (op0)
12833 && is_int_mode (GET_MODE (op0), &mode)
12834 && is_int_mode (GET_MODE (SUBREG_REG (op0)), &inner_mode)
12835 && (code == NE || code == EQ))
12837 if (paradoxical_subreg_p (op0))
12839 /* For paradoxical subregs, allow case 1 as above. Case 3 isn't
12840 implemented. */
12841 if (REG_P (SUBREG_REG (op0)))
12843 op0 = SUBREG_REG (op0);
12844 op1 = gen_lowpart (inner_mode, op1);
12847 else if (GET_MODE_PRECISION (inner_mode) <= HOST_BITS_PER_WIDE_INT
12848 && (nonzero_bits (SUBREG_REG (op0), inner_mode)
12849 & ~GET_MODE_MASK (mode)) == 0)
12851 tem = gen_lowpart (inner_mode, op1);
12853 if ((nonzero_bits (tem, inner_mode) & ~GET_MODE_MASK (mode)) == 0)
12854 op0 = SUBREG_REG (op0), op1 = tem;
12858 /* We now do the opposite procedure: Some machines don't have compare
12859 insns in all modes. If OP0's mode is an integer mode smaller than a
12860 word and we can't do a compare in that mode, see if there is a larger
12861 mode for which we can do the compare. There are a number of cases in
12862 which we can use the wider mode. */
12864 if (is_int_mode (GET_MODE (op0), &mode)
12865 && GET_MODE_SIZE (mode) < UNITS_PER_WORD
12866 && ! have_insn_for (COMPARE, mode))
12867 FOR_EACH_WIDER_MODE (tmode_iter, mode)
12869 tmode = tmode_iter.require ();
12870 if (!HWI_COMPUTABLE_MODE_P (tmode))
12871 break;
12872 if (have_insn_for (COMPARE, tmode))
12874 int zero_extended;
12876 /* If this is a test for negative, we can make an explicit
12877 test of the sign bit. Test this first so we can use
12878 a paradoxical subreg to extend OP0. */
12880 if (op1 == const0_rtx && (code == LT || code == GE)
12881 && HWI_COMPUTABLE_MODE_P (mode))
12883 unsigned HOST_WIDE_INT sign
12884 = HOST_WIDE_INT_1U << (GET_MODE_BITSIZE (mode) - 1);
12885 op0 = simplify_gen_binary (AND, tmode,
12886 gen_lowpart (tmode, op0),
12887 gen_int_mode (sign, tmode));
12888 code = (code == LT) ? NE : EQ;
12889 break;
12892 /* If the only nonzero bits in OP0 and OP1 are those in the
12893 narrower mode and this is an equality or unsigned comparison,
12894 we can use the wider mode. Similarly for sign-extended
12895 values, in which case it is true for all comparisons. */
12896 zero_extended = ((code == EQ || code == NE
12897 || code == GEU || code == GTU
12898 || code == LEU || code == LTU)
12899 && (nonzero_bits (op0, tmode)
12900 & ~GET_MODE_MASK (mode)) == 0
12901 && ((CONST_INT_P (op1)
12902 || (nonzero_bits (op1, tmode)
12903 & ~GET_MODE_MASK (mode)) == 0)));
12905 if (zero_extended
12906 || ((num_sign_bit_copies (op0, tmode)
12907 > (unsigned int) (GET_MODE_PRECISION (tmode)
12908 - GET_MODE_PRECISION (mode)))
12909 && (num_sign_bit_copies (op1, tmode)
12910 > (unsigned int) (GET_MODE_PRECISION (tmode)
12911 - GET_MODE_PRECISION (mode)))))
12913 /* If OP0 is an AND and we don't have an AND in MODE either,
12914 make a new AND in the proper mode. */
12915 if (GET_CODE (op0) == AND
12916 && !have_insn_for (AND, mode))
12917 op0 = simplify_gen_binary (AND, tmode,
12918 gen_lowpart (tmode,
12919 XEXP (op0, 0)),
12920 gen_lowpart (tmode,
12921 XEXP (op0, 1)));
12922 else
12924 if (zero_extended)
12926 op0 = simplify_gen_unary (ZERO_EXTEND, tmode,
12927 op0, mode);
12928 op1 = simplify_gen_unary (ZERO_EXTEND, tmode,
12929 op1, mode);
12931 else
12933 op0 = simplify_gen_unary (SIGN_EXTEND, tmode,
12934 op0, mode);
12935 op1 = simplify_gen_unary (SIGN_EXTEND, tmode,
12936 op1, mode);
12938 break;
12944 /* We may have changed the comparison operands. Re-canonicalize. */
12945 if (swap_commutative_operands_p (op0, op1))
12947 std::swap (op0, op1);
12948 code = swap_condition (code);
12951 /* If this machine only supports a subset of valid comparisons, see if we
12952 can convert an unsupported one into a supported one. */
12953 target_canonicalize_comparison (&code, &op0, &op1, 0);
12955 *pop0 = op0;
12956 *pop1 = op1;
12958 return code;
12961 /* Utility function for record_value_for_reg. Count number of
12962 rtxs in X. */
12963 static int
12964 count_rtxs (rtx x)
12966 enum rtx_code code = GET_CODE (x);
12967 const char *fmt;
12968 int i, j, ret = 1;
12970 if (GET_RTX_CLASS (code) == RTX_BIN_ARITH
12971 || GET_RTX_CLASS (code) == RTX_COMM_ARITH)
12973 rtx x0 = XEXP (x, 0);
12974 rtx x1 = XEXP (x, 1);
12976 if (x0 == x1)
12977 return 1 + 2 * count_rtxs (x0);
12979 if ((GET_RTX_CLASS (GET_CODE (x1)) == RTX_BIN_ARITH
12980 || GET_RTX_CLASS (GET_CODE (x1)) == RTX_COMM_ARITH)
12981 && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
12982 return 2 + 2 * count_rtxs (x0)
12983 + count_rtxs (x == XEXP (x1, 0)
12984 ? XEXP (x1, 1) : XEXP (x1, 0));
12986 if ((GET_RTX_CLASS (GET_CODE (x0)) == RTX_BIN_ARITH
12987 || GET_RTX_CLASS (GET_CODE (x0)) == RTX_COMM_ARITH)
12988 && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
12989 return 2 + 2 * count_rtxs (x1)
12990 + count_rtxs (x == XEXP (x0, 0)
12991 ? XEXP (x0, 1) : XEXP (x0, 0));
12994 fmt = GET_RTX_FORMAT (code);
12995 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
12996 if (fmt[i] == 'e')
12997 ret += count_rtxs (XEXP (x, i));
12998 else if (fmt[i] == 'E')
12999 for (j = 0; j < XVECLEN (x, i); j++)
13000 ret += count_rtxs (XVECEXP (x, i, j));
13002 return ret;
13005 /* Utility function for following routine. Called when X is part of a value
13006 being stored into last_set_value. Sets last_set_table_tick
13007 for each register mentioned. Similar to mention_regs in cse.c */
13009 static void
13010 update_table_tick (rtx x)
13012 enum rtx_code code = GET_CODE (x);
13013 const char *fmt = GET_RTX_FORMAT (code);
13014 int i, j;
13016 if (code == REG)
13018 unsigned int regno = REGNO (x);
13019 unsigned int endregno = END_REGNO (x);
13020 unsigned int r;
13022 for (r = regno; r < endregno; r++)
13024 reg_stat_type *rsp = &reg_stat[r];
13025 rsp->last_set_table_tick = label_tick;
13028 return;
13031 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
13032 if (fmt[i] == 'e')
13034 /* Check for identical subexpressions. If x contains
13035 identical subexpression we only have to traverse one of
13036 them. */
13037 if (i == 0 && ARITHMETIC_P (x))
13039 /* Note that at this point x1 has already been
13040 processed. */
13041 rtx x0 = XEXP (x, 0);
13042 rtx x1 = XEXP (x, 1);
13044 /* If x0 and x1 are identical then there is no need to
13045 process x0. */
13046 if (x0 == x1)
13047 break;
13049 /* If x0 is identical to a subexpression of x1 then while
13050 processing x1, x0 has already been processed. Thus we
13051 are done with x. */
13052 if (ARITHMETIC_P (x1)
13053 && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
13054 break;
13056 /* If x1 is identical to a subexpression of x0 then we
13057 still have to process the rest of x0. */
13058 if (ARITHMETIC_P (x0)
13059 && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
13061 update_table_tick (XEXP (x0, x1 == XEXP (x0, 0) ? 1 : 0));
13062 break;
13066 update_table_tick (XEXP (x, i));
13068 else if (fmt[i] == 'E')
13069 for (j = 0; j < XVECLEN (x, i); j++)
13070 update_table_tick (XVECEXP (x, i, j));
13073 /* Record that REG is set to VALUE in insn INSN. If VALUE is zero, we
13074 are saying that the register is clobbered and we no longer know its
13075 value. If INSN is zero, don't update reg_stat[].last_set; this is
13076 only permitted with VALUE also zero and is used to invalidate the
13077 register. */
13079 static void
13080 record_value_for_reg (rtx reg, rtx_insn *insn, rtx value)
13082 unsigned int regno = REGNO (reg);
13083 unsigned int endregno = END_REGNO (reg);
13084 unsigned int i;
13085 reg_stat_type *rsp;
13087 /* If VALUE contains REG and we have a previous value for REG, substitute
13088 the previous value. */
13089 if (value && insn && reg_overlap_mentioned_p (reg, value))
13091 rtx tem;
13093 /* Set things up so get_last_value is allowed to see anything set up to
13094 our insn. */
13095 subst_low_luid = DF_INSN_LUID (insn);
13096 tem = get_last_value (reg);
13098 /* If TEM is simply a binary operation with two CLOBBERs as operands,
13099 it isn't going to be useful and will take a lot of time to process,
13100 so just use the CLOBBER. */
13102 if (tem)
13104 if (ARITHMETIC_P (tem)
13105 && GET_CODE (XEXP (tem, 0)) == CLOBBER
13106 && GET_CODE (XEXP (tem, 1)) == CLOBBER)
13107 tem = XEXP (tem, 0);
13108 else if (count_occurrences (value, reg, 1) >= 2)
13110 /* If there are two or more occurrences of REG in VALUE,
13111 prevent the value from growing too much. */
13112 if (count_rtxs (tem) > MAX_LAST_VALUE_RTL)
13113 tem = gen_rtx_CLOBBER (GET_MODE (tem), const0_rtx);
13116 value = replace_rtx (copy_rtx (value), reg, tem);
13120 /* For each register modified, show we don't know its value, that
13121 we don't know about its bitwise content, that its value has been
13122 updated, and that we don't know the location of the death of the
13123 register. */
13124 for (i = regno; i < endregno; i++)
13126 rsp = &reg_stat[i];
13128 if (insn)
13129 rsp->last_set = insn;
13131 rsp->last_set_value = 0;
13132 rsp->last_set_mode = VOIDmode;
13133 rsp->last_set_nonzero_bits = 0;
13134 rsp->last_set_sign_bit_copies = 0;
13135 rsp->last_death = 0;
13136 rsp->truncated_to_mode = VOIDmode;
13139 /* Mark registers that are being referenced in this value. */
13140 if (value)
13141 update_table_tick (value);
13143 /* Now update the status of each register being set.
13144 If someone is using this register in this block, set this register
13145 to invalid since we will get confused between the two lives in this
13146 basic block. This makes using this register always invalid. In cse, we
13147 scan the table to invalidate all entries using this register, but this
13148 is too much work for us. */
13150 for (i = regno; i < endregno; i++)
13152 rsp = &reg_stat[i];
13153 rsp->last_set_label = label_tick;
13154 if (!insn
13155 || (value && rsp->last_set_table_tick >= label_tick_ebb_start))
13156 rsp->last_set_invalid = 1;
13157 else
13158 rsp->last_set_invalid = 0;
13161 /* The value being assigned might refer to X (like in "x++;"). In that
13162 case, we must replace it with (clobber (const_int 0)) to prevent
13163 infinite loops. */
13164 rsp = &reg_stat[regno];
13165 if (value && !get_last_value_validate (&value, insn, label_tick, 0))
13167 value = copy_rtx (value);
13168 if (!get_last_value_validate (&value, insn, label_tick, 1))
13169 value = 0;
13172 /* For the main register being modified, update the value, the mode, the
13173 nonzero bits, and the number of sign bit copies. */
13175 rsp->last_set_value = value;
13177 if (value)
13179 machine_mode mode = GET_MODE (reg);
13180 subst_low_luid = DF_INSN_LUID (insn);
13181 rsp->last_set_mode = mode;
13182 if (GET_MODE_CLASS (mode) == MODE_INT
13183 && HWI_COMPUTABLE_MODE_P (mode))
13184 mode = nonzero_bits_mode;
13185 rsp->last_set_nonzero_bits = nonzero_bits (value, mode);
13186 rsp->last_set_sign_bit_copies
13187 = num_sign_bit_copies (value, GET_MODE (reg));
13191 /* Called via note_stores from record_dead_and_set_regs to handle one
13192 SET or CLOBBER in an insn. DATA is the instruction in which the
13193 set is occurring. */
13195 static void
13196 record_dead_and_set_regs_1 (rtx dest, const_rtx setter, void *data)
13198 rtx_insn *record_dead_insn = (rtx_insn *) data;
13200 if (GET_CODE (dest) == SUBREG)
13201 dest = SUBREG_REG (dest);
13203 if (!record_dead_insn)
13205 if (REG_P (dest))
13206 record_value_for_reg (dest, NULL, NULL_RTX);
13207 return;
13210 if (REG_P (dest))
13212 /* If we are setting the whole register, we know its value. Otherwise
13213 show that we don't know the value. We can handle SUBREG in
13214 some cases. */
13215 if (GET_CODE (setter) == SET && dest == SET_DEST (setter))
13216 record_value_for_reg (dest, record_dead_insn, SET_SRC (setter));
13217 else if (GET_CODE (setter) == SET
13218 && GET_CODE (SET_DEST (setter)) == SUBREG
13219 && SUBREG_REG (SET_DEST (setter)) == dest
13220 && GET_MODE_PRECISION (GET_MODE (dest)) <= BITS_PER_WORD
13221 && subreg_lowpart_p (SET_DEST (setter)))
13222 record_value_for_reg (dest, record_dead_insn,
13223 gen_lowpart (GET_MODE (dest),
13224 SET_SRC (setter)));
13225 else
13226 record_value_for_reg (dest, record_dead_insn, NULL_RTX);
13228 else if (MEM_P (dest)
13229 /* Ignore pushes, they clobber nothing. */
13230 && ! push_operand (dest, GET_MODE (dest)))
13231 mem_last_set = DF_INSN_LUID (record_dead_insn);
13234 /* Update the records of when each REG was most recently set or killed
13235 for the things done by INSN. This is the last thing done in processing
13236 INSN in the combiner loop.
13238 We update reg_stat[], in particular fields last_set, last_set_value,
13239 last_set_mode, last_set_nonzero_bits, last_set_sign_bit_copies,
13240 last_death, and also the similar information mem_last_set (which insn
13241 most recently modified memory) and last_call_luid (which insn was the
13242 most recent subroutine call). */
13244 static void
13245 record_dead_and_set_regs (rtx_insn *insn)
13247 rtx link;
13248 unsigned int i;
13250 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
13252 if (REG_NOTE_KIND (link) == REG_DEAD
13253 && REG_P (XEXP (link, 0)))
13255 unsigned int regno = REGNO (XEXP (link, 0));
13256 unsigned int endregno = END_REGNO (XEXP (link, 0));
13258 for (i = regno; i < endregno; i++)
13260 reg_stat_type *rsp;
13262 rsp = &reg_stat[i];
13263 rsp->last_death = insn;
13266 else if (REG_NOTE_KIND (link) == REG_INC)
13267 record_value_for_reg (XEXP (link, 0), insn, NULL_RTX);
13270 if (CALL_P (insn))
13272 hard_reg_set_iterator hrsi;
13273 EXECUTE_IF_SET_IN_HARD_REG_SET (regs_invalidated_by_call, 0, i, hrsi)
13275 reg_stat_type *rsp;
13277 rsp = &reg_stat[i];
13278 rsp->last_set_invalid = 1;
13279 rsp->last_set = insn;
13280 rsp->last_set_value = 0;
13281 rsp->last_set_mode = VOIDmode;
13282 rsp->last_set_nonzero_bits = 0;
13283 rsp->last_set_sign_bit_copies = 0;
13284 rsp->last_death = 0;
13285 rsp->truncated_to_mode = VOIDmode;
13288 last_call_luid = mem_last_set = DF_INSN_LUID (insn);
13290 /* We can't combine into a call pattern. Remember, though, that
13291 the return value register is set at this LUID. We could
13292 still replace a register with the return value from the
13293 wrong subroutine call! */
13294 note_stores (PATTERN (insn), record_dead_and_set_regs_1, NULL_RTX);
13296 else
13297 note_stores (PATTERN (insn), record_dead_and_set_regs_1, insn);
13300 /* If a SUBREG has the promoted bit set, it is in fact a property of the
13301 register present in the SUBREG, so for each such SUBREG go back and
13302 adjust nonzero and sign bit information of the registers that are
13303 known to have some zero/sign bits set.
13305 This is needed because when combine blows the SUBREGs away, the
13306 information on zero/sign bits is lost and further combines can be
13307 missed because of that. */
13309 static void
13310 record_promoted_value (rtx_insn *insn, rtx subreg)
13312 struct insn_link *links;
13313 rtx set;
13314 unsigned int regno = REGNO (SUBREG_REG (subreg));
13315 machine_mode mode = GET_MODE (subreg);
13317 if (!HWI_COMPUTABLE_MODE_P (mode))
13318 return;
13320 for (links = LOG_LINKS (insn); links;)
13322 reg_stat_type *rsp;
13324 insn = links->insn;
13325 set = single_set (insn);
13327 if (! set || !REG_P (SET_DEST (set))
13328 || REGNO (SET_DEST (set)) != regno
13329 || GET_MODE (SET_DEST (set)) != GET_MODE (SUBREG_REG (subreg)))
13331 links = links->next;
13332 continue;
13335 rsp = &reg_stat[regno];
13336 if (rsp->last_set == insn)
13338 if (SUBREG_PROMOTED_UNSIGNED_P (subreg))
13339 rsp->last_set_nonzero_bits &= GET_MODE_MASK (mode);
13342 if (REG_P (SET_SRC (set)))
13344 regno = REGNO (SET_SRC (set));
13345 links = LOG_LINKS (insn);
13347 else
13348 break;
13352 /* Check if X, a register, is known to contain a value already
13353 truncated to MODE. In this case we can use a subreg to refer to
13354 the truncated value even though in the generic case we would need
13355 an explicit truncation. */
13357 static bool
13358 reg_truncated_to_mode (machine_mode mode, const_rtx x)
13360 reg_stat_type *rsp = &reg_stat[REGNO (x)];
13361 machine_mode truncated = rsp->truncated_to_mode;
13363 if (truncated == 0
13364 || rsp->truncation_label < label_tick_ebb_start)
13365 return false;
13366 if (!partial_subreg_p (mode, truncated))
13367 return true;
13368 if (TRULY_NOOP_TRUNCATION_MODES_P (mode, truncated))
13369 return true;
13370 return false;
13373 /* If X is a hard reg or a subreg record the mode that the register is
13374 accessed in. For non-TARGET_TRULY_NOOP_TRUNCATION targets we might be
13375 able to turn a truncate into a subreg using this information. Return true
13376 if traversing X is complete. */
13378 static bool
13379 record_truncated_value (rtx x)
13381 machine_mode truncated_mode;
13382 reg_stat_type *rsp;
13384 if (GET_CODE (x) == SUBREG && REG_P (SUBREG_REG (x)))
13386 machine_mode original_mode = GET_MODE (SUBREG_REG (x));
13387 truncated_mode = GET_MODE (x);
13389 if (!partial_subreg_p (truncated_mode, original_mode))
13390 return true;
13392 truncated_mode = GET_MODE (x);
13393 if (TRULY_NOOP_TRUNCATION_MODES_P (truncated_mode, original_mode))
13394 return true;
13396 x = SUBREG_REG (x);
13398 /* ??? For hard-regs we now record everything. We might be able to
13399 optimize this using last_set_mode. */
13400 else if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER)
13401 truncated_mode = GET_MODE (x);
13402 else
13403 return false;
13405 rsp = &reg_stat[REGNO (x)];
13406 if (rsp->truncated_to_mode == 0
13407 || rsp->truncation_label < label_tick_ebb_start
13408 || partial_subreg_p (truncated_mode, rsp->truncated_to_mode))
13410 rsp->truncated_to_mode = truncated_mode;
13411 rsp->truncation_label = label_tick;
13414 return true;
13417 /* Callback for note_uses. Find hardregs and subregs of pseudos and
13418 the modes they are used in. This can help truning TRUNCATEs into
13419 SUBREGs. */
13421 static void
13422 record_truncated_values (rtx *loc, void *data ATTRIBUTE_UNUSED)
13424 subrtx_var_iterator::array_type array;
13425 FOR_EACH_SUBRTX_VAR (iter, array, *loc, NONCONST)
13426 if (record_truncated_value (*iter))
13427 iter.skip_subrtxes ();
13430 /* Scan X for promoted SUBREGs. For each one found,
13431 note what it implies to the registers used in it. */
13433 static void
13434 check_promoted_subreg (rtx_insn *insn, rtx x)
13436 if (GET_CODE (x) == SUBREG
13437 && SUBREG_PROMOTED_VAR_P (x)
13438 && REG_P (SUBREG_REG (x)))
13439 record_promoted_value (insn, x);
13440 else
13442 const char *format = GET_RTX_FORMAT (GET_CODE (x));
13443 int i, j;
13445 for (i = 0; i < GET_RTX_LENGTH (GET_CODE (x)); i++)
13446 switch (format[i])
13448 case 'e':
13449 check_promoted_subreg (insn, XEXP (x, i));
13450 break;
13451 case 'V':
13452 case 'E':
13453 if (XVEC (x, i) != 0)
13454 for (j = 0; j < XVECLEN (x, i); j++)
13455 check_promoted_subreg (insn, XVECEXP (x, i, j));
13456 break;
13461 /* Verify that all the registers and memory references mentioned in *LOC are
13462 still valid. *LOC was part of a value set in INSN when label_tick was
13463 equal to TICK. Return 0 if some are not. If REPLACE is nonzero, replace
13464 the invalid references with (clobber (const_int 0)) and return 1. This
13465 replacement is useful because we often can get useful information about
13466 the form of a value (e.g., if it was produced by a shift that always
13467 produces -1 or 0) even though we don't know exactly what registers it
13468 was produced from. */
13470 static int
13471 get_last_value_validate (rtx *loc, rtx_insn *insn, int tick, int replace)
13473 rtx x = *loc;
13474 const char *fmt = GET_RTX_FORMAT (GET_CODE (x));
13475 int len = GET_RTX_LENGTH (GET_CODE (x));
13476 int i, j;
13478 if (REG_P (x))
13480 unsigned int regno = REGNO (x);
13481 unsigned int endregno = END_REGNO (x);
13482 unsigned int j;
13484 for (j = regno; j < endregno; j++)
13486 reg_stat_type *rsp = &reg_stat[j];
13487 if (rsp->last_set_invalid
13488 /* If this is a pseudo-register that was only set once and not
13489 live at the beginning of the function, it is always valid. */
13490 || (! (regno >= FIRST_PSEUDO_REGISTER
13491 && regno < reg_n_sets_max
13492 && REG_N_SETS (regno) == 1
13493 && (!REGNO_REG_SET_P
13494 (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb),
13495 regno)))
13496 && rsp->last_set_label > tick))
13498 if (replace)
13499 *loc = gen_rtx_CLOBBER (GET_MODE (x), const0_rtx);
13500 return replace;
13504 return 1;
13506 /* If this is a memory reference, make sure that there were no stores after
13507 it that might have clobbered the value. We don't have alias info, so we
13508 assume any store invalidates it. Moreover, we only have local UIDs, so
13509 we also assume that there were stores in the intervening basic blocks. */
13510 else if (MEM_P (x) && !MEM_READONLY_P (x)
13511 && (tick != label_tick || DF_INSN_LUID (insn) <= mem_last_set))
13513 if (replace)
13514 *loc = gen_rtx_CLOBBER (GET_MODE (x), const0_rtx);
13515 return replace;
13518 for (i = 0; i < len; i++)
13520 if (fmt[i] == 'e')
13522 /* Check for identical subexpressions. If x contains
13523 identical subexpression we only have to traverse one of
13524 them. */
13525 if (i == 1 && ARITHMETIC_P (x))
13527 /* Note that at this point x0 has already been checked
13528 and found valid. */
13529 rtx x0 = XEXP (x, 0);
13530 rtx x1 = XEXP (x, 1);
13532 /* If x0 and x1 are identical then x is also valid. */
13533 if (x0 == x1)
13534 return 1;
13536 /* If x1 is identical to a subexpression of x0 then
13537 while checking x0, x1 has already been checked. Thus
13538 it is valid and so as x. */
13539 if (ARITHMETIC_P (x0)
13540 && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
13541 return 1;
13543 /* If x0 is identical to a subexpression of x1 then x is
13544 valid iff the rest of x1 is valid. */
13545 if (ARITHMETIC_P (x1)
13546 && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
13547 return
13548 get_last_value_validate (&XEXP (x1,
13549 x0 == XEXP (x1, 0) ? 1 : 0),
13550 insn, tick, replace);
13553 if (get_last_value_validate (&XEXP (x, i), insn, tick,
13554 replace) == 0)
13555 return 0;
13557 else if (fmt[i] == 'E')
13558 for (j = 0; j < XVECLEN (x, i); j++)
13559 if (get_last_value_validate (&XVECEXP (x, i, j),
13560 insn, tick, replace) == 0)
13561 return 0;
13564 /* If we haven't found a reason for it to be invalid, it is valid. */
13565 return 1;
13568 /* Get the last value assigned to X, if known. Some registers
13569 in the value may be replaced with (clobber (const_int 0)) if their value
13570 is known longer known reliably. */
13572 static rtx
13573 get_last_value (const_rtx x)
13575 unsigned int regno;
13576 rtx value;
13577 reg_stat_type *rsp;
13579 /* If this is a non-paradoxical SUBREG, get the value of its operand and
13580 then convert it to the desired mode. If this is a paradoxical SUBREG,
13581 we cannot predict what values the "extra" bits might have. */
13582 if (GET_CODE (x) == SUBREG
13583 && subreg_lowpart_p (x)
13584 && !paradoxical_subreg_p (x)
13585 && (value = get_last_value (SUBREG_REG (x))) != 0)
13586 return gen_lowpart (GET_MODE (x), value);
13588 if (!REG_P (x))
13589 return 0;
13591 regno = REGNO (x);
13592 rsp = &reg_stat[regno];
13593 value = rsp->last_set_value;
13595 /* If we don't have a value, or if it isn't for this basic block and
13596 it's either a hard register, set more than once, or it's a live
13597 at the beginning of the function, return 0.
13599 Because if it's not live at the beginning of the function then the reg
13600 is always set before being used (is never used without being set).
13601 And, if it's set only once, and it's always set before use, then all
13602 uses must have the same last value, even if it's not from this basic
13603 block. */
13605 if (value == 0
13606 || (rsp->last_set_label < label_tick_ebb_start
13607 && (regno < FIRST_PSEUDO_REGISTER
13608 || regno >= reg_n_sets_max
13609 || REG_N_SETS (regno) != 1
13610 || REGNO_REG_SET_P
13611 (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb), regno))))
13612 return 0;
13614 /* If the value was set in a later insn than the ones we are processing,
13615 we can't use it even if the register was only set once. */
13616 if (rsp->last_set_label == label_tick
13617 && DF_INSN_LUID (rsp->last_set) >= subst_low_luid)
13618 return 0;
13620 /* If fewer bits were set than what we are asked for now, we cannot use
13621 the value. */
13622 if (GET_MODE_PRECISION (rsp->last_set_mode)
13623 < GET_MODE_PRECISION (GET_MODE (x)))
13624 return 0;
13626 /* If the value has all its registers valid, return it. */
13627 if (get_last_value_validate (&value, rsp->last_set, rsp->last_set_label, 0))
13628 return value;
13630 /* Otherwise, make a copy and replace any invalid register with
13631 (clobber (const_int 0)). If that fails for some reason, return 0. */
13633 value = copy_rtx (value);
13634 if (get_last_value_validate (&value, rsp->last_set, rsp->last_set_label, 1))
13635 return value;
13637 return 0;
13640 /* Return nonzero if expression X refers to a REG or to memory
13641 that is set in an instruction more recent than FROM_LUID. */
13643 static int
13644 use_crosses_set_p (const_rtx x, int from_luid)
13646 const char *fmt;
13647 int i;
13648 enum rtx_code code = GET_CODE (x);
13650 if (code == REG)
13652 unsigned int regno = REGNO (x);
13653 unsigned endreg = END_REGNO (x);
13655 #ifdef PUSH_ROUNDING
13656 /* Don't allow uses of the stack pointer to be moved,
13657 because we don't know whether the move crosses a push insn. */
13658 if (regno == STACK_POINTER_REGNUM && PUSH_ARGS)
13659 return 1;
13660 #endif
13661 for (; regno < endreg; regno++)
13663 reg_stat_type *rsp = &reg_stat[regno];
13664 if (rsp->last_set
13665 && rsp->last_set_label == label_tick
13666 && DF_INSN_LUID (rsp->last_set) > from_luid)
13667 return 1;
13669 return 0;
13672 if (code == MEM && mem_last_set > from_luid)
13673 return 1;
13675 fmt = GET_RTX_FORMAT (code);
13677 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
13679 if (fmt[i] == 'E')
13681 int j;
13682 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
13683 if (use_crosses_set_p (XVECEXP (x, i, j), from_luid))
13684 return 1;
13686 else if (fmt[i] == 'e'
13687 && use_crosses_set_p (XEXP (x, i), from_luid))
13688 return 1;
13690 return 0;
13693 /* Define three variables used for communication between the following
13694 routines. */
13696 static unsigned int reg_dead_regno, reg_dead_endregno;
13697 static int reg_dead_flag;
13699 /* Function called via note_stores from reg_dead_at_p.
13701 If DEST is within [reg_dead_regno, reg_dead_endregno), set
13702 reg_dead_flag to 1 if X is a CLOBBER and to -1 it is a SET. */
13704 static void
13705 reg_dead_at_p_1 (rtx dest, const_rtx x, void *data ATTRIBUTE_UNUSED)
13707 unsigned int regno, endregno;
13709 if (!REG_P (dest))
13710 return;
13712 regno = REGNO (dest);
13713 endregno = END_REGNO (dest);
13714 if (reg_dead_endregno > regno && reg_dead_regno < endregno)
13715 reg_dead_flag = (GET_CODE (x) == CLOBBER) ? 1 : -1;
13718 /* Return nonzero if REG is known to be dead at INSN.
13720 We scan backwards from INSN. If we hit a REG_DEAD note or a CLOBBER
13721 referencing REG, it is dead. If we hit a SET referencing REG, it is
13722 live. Otherwise, see if it is live or dead at the start of the basic
13723 block we are in. Hard regs marked as being live in NEWPAT_USED_REGS
13724 must be assumed to be always live. */
13726 static int
13727 reg_dead_at_p (rtx reg, rtx_insn *insn)
13729 basic_block block;
13730 unsigned int i;
13732 /* Set variables for reg_dead_at_p_1. */
13733 reg_dead_regno = REGNO (reg);
13734 reg_dead_endregno = END_REGNO (reg);
13736 reg_dead_flag = 0;
13738 /* Check that reg isn't mentioned in NEWPAT_USED_REGS. For fixed registers
13739 we allow the machine description to decide whether use-and-clobber
13740 patterns are OK. */
13741 if (reg_dead_regno < FIRST_PSEUDO_REGISTER)
13743 for (i = reg_dead_regno; i < reg_dead_endregno; i++)
13744 if (!fixed_regs[i] && TEST_HARD_REG_BIT (newpat_used_regs, i))
13745 return 0;
13748 /* Scan backwards until we find a REG_DEAD note, SET, CLOBBER, or
13749 beginning of basic block. */
13750 block = BLOCK_FOR_INSN (insn);
13751 for (;;)
13753 if (INSN_P (insn))
13755 if (find_regno_note (insn, REG_UNUSED, reg_dead_regno))
13756 return 1;
13758 note_stores (PATTERN (insn), reg_dead_at_p_1, NULL);
13759 if (reg_dead_flag)
13760 return reg_dead_flag == 1 ? 1 : 0;
13762 if (find_regno_note (insn, REG_DEAD, reg_dead_regno))
13763 return 1;
13766 if (insn == BB_HEAD (block))
13767 break;
13769 insn = PREV_INSN (insn);
13772 /* Look at live-in sets for the basic block that we were in. */
13773 for (i = reg_dead_regno; i < reg_dead_endregno; i++)
13774 if (REGNO_REG_SET_P (df_get_live_in (block), i))
13775 return 0;
13777 return 1;
13780 /* Note hard registers in X that are used. */
13782 static void
13783 mark_used_regs_combine (rtx x)
13785 RTX_CODE code = GET_CODE (x);
13786 unsigned int regno;
13787 int i;
13789 switch (code)
13791 case LABEL_REF:
13792 case SYMBOL_REF:
13793 case CONST:
13794 CASE_CONST_ANY:
13795 case PC:
13796 case ADDR_VEC:
13797 case ADDR_DIFF_VEC:
13798 case ASM_INPUT:
13799 /* CC0 must die in the insn after it is set, so we don't need to take
13800 special note of it here. */
13801 case CC0:
13802 return;
13804 case CLOBBER:
13805 /* If we are clobbering a MEM, mark any hard registers inside the
13806 address as used. */
13807 if (MEM_P (XEXP (x, 0)))
13808 mark_used_regs_combine (XEXP (XEXP (x, 0), 0));
13809 return;
13811 case REG:
13812 regno = REGNO (x);
13813 /* A hard reg in a wide mode may really be multiple registers.
13814 If so, mark all of them just like the first. */
13815 if (regno < FIRST_PSEUDO_REGISTER)
13817 /* None of this applies to the stack, frame or arg pointers. */
13818 if (regno == STACK_POINTER_REGNUM
13819 || (!HARD_FRAME_POINTER_IS_FRAME_POINTER
13820 && regno == HARD_FRAME_POINTER_REGNUM)
13821 || (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
13822 && regno == ARG_POINTER_REGNUM && fixed_regs[regno])
13823 || regno == FRAME_POINTER_REGNUM)
13824 return;
13826 add_to_hard_reg_set (&newpat_used_regs, GET_MODE (x), regno);
13828 return;
13830 case SET:
13832 /* If setting a MEM, or a SUBREG of a MEM, then note any hard regs in
13833 the address. */
13834 rtx testreg = SET_DEST (x);
13836 while (GET_CODE (testreg) == SUBREG
13837 || GET_CODE (testreg) == ZERO_EXTRACT
13838 || GET_CODE (testreg) == STRICT_LOW_PART)
13839 testreg = XEXP (testreg, 0);
13841 if (MEM_P (testreg))
13842 mark_used_regs_combine (XEXP (testreg, 0));
13844 mark_used_regs_combine (SET_SRC (x));
13846 return;
13848 default:
13849 break;
13852 /* Recursively scan the operands of this expression. */
13855 const char *fmt = GET_RTX_FORMAT (code);
13857 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
13859 if (fmt[i] == 'e')
13860 mark_used_regs_combine (XEXP (x, i));
13861 else if (fmt[i] == 'E')
13863 int j;
13865 for (j = 0; j < XVECLEN (x, i); j++)
13866 mark_used_regs_combine (XVECEXP (x, i, j));
13872 /* Remove register number REGNO from the dead registers list of INSN.
13874 Return the note used to record the death, if there was one. */
13877 remove_death (unsigned int regno, rtx_insn *insn)
13879 rtx note = find_regno_note (insn, REG_DEAD, regno);
13881 if (note)
13882 remove_note (insn, note);
13884 return note;
13887 /* For each register (hardware or pseudo) used within expression X, if its
13888 death is in an instruction with luid between FROM_LUID (inclusive) and
13889 TO_INSN (exclusive), put a REG_DEAD note for that register in the
13890 list headed by PNOTES.
13892 That said, don't move registers killed by maybe_kill_insn.
13894 This is done when X is being merged by combination into TO_INSN. These
13895 notes will then be distributed as needed. */
13897 static void
13898 move_deaths (rtx x, rtx maybe_kill_insn, int from_luid, rtx_insn *to_insn,
13899 rtx *pnotes)
13901 const char *fmt;
13902 int len, i;
13903 enum rtx_code code = GET_CODE (x);
13905 if (code == REG)
13907 unsigned int regno = REGNO (x);
13908 rtx_insn *where_dead = reg_stat[regno].last_death;
13910 /* Don't move the register if it gets killed in between from and to. */
13911 if (maybe_kill_insn && reg_set_p (x, maybe_kill_insn)
13912 && ! reg_referenced_p (x, maybe_kill_insn))
13913 return;
13915 if (where_dead
13916 && BLOCK_FOR_INSN (where_dead) == BLOCK_FOR_INSN (to_insn)
13917 && DF_INSN_LUID (where_dead) >= from_luid
13918 && DF_INSN_LUID (where_dead) < DF_INSN_LUID (to_insn))
13920 rtx note = remove_death (regno, where_dead);
13922 /* It is possible for the call above to return 0. This can occur
13923 when last_death points to I2 or I1 that we combined with.
13924 In that case make a new note.
13926 We must also check for the case where X is a hard register
13927 and NOTE is a death note for a range of hard registers
13928 including X. In that case, we must put REG_DEAD notes for
13929 the remaining registers in place of NOTE. */
13931 if (note != 0 && regno < FIRST_PSEUDO_REGISTER
13932 && partial_subreg_p (GET_MODE (x), GET_MODE (XEXP (note, 0))))
13934 unsigned int deadregno = REGNO (XEXP (note, 0));
13935 unsigned int deadend = END_REGNO (XEXP (note, 0));
13936 unsigned int ourend = END_REGNO (x);
13937 unsigned int i;
13939 for (i = deadregno; i < deadend; i++)
13940 if (i < regno || i >= ourend)
13941 add_reg_note (where_dead, REG_DEAD, regno_reg_rtx[i]);
13944 /* If we didn't find any note, or if we found a REG_DEAD note that
13945 covers only part of the given reg, and we have a multi-reg hard
13946 register, then to be safe we must check for REG_DEAD notes
13947 for each register other than the first. They could have
13948 their own REG_DEAD notes lying around. */
13949 else if ((note == 0
13950 || (note != 0
13951 && partial_subreg_p (GET_MODE (XEXP (note, 0)),
13952 GET_MODE (x))))
13953 && regno < FIRST_PSEUDO_REGISTER
13954 && REG_NREGS (x) > 1)
13956 unsigned int ourend = END_REGNO (x);
13957 unsigned int i, offset;
13958 rtx oldnotes = 0;
13960 if (note)
13961 offset = hard_regno_nregs (regno, GET_MODE (XEXP (note, 0)));
13962 else
13963 offset = 1;
13965 for (i = regno + offset; i < ourend; i++)
13966 move_deaths (regno_reg_rtx[i],
13967 maybe_kill_insn, from_luid, to_insn, &oldnotes);
13970 if (note != 0 && GET_MODE (XEXP (note, 0)) == GET_MODE (x))
13972 XEXP (note, 1) = *pnotes;
13973 *pnotes = note;
13975 else
13976 *pnotes = alloc_reg_note (REG_DEAD, x, *pnotes);
13979 return;
13982 else if (GET_CODE (x) == SET)
13984 rtx dest = SET_DEST (x);
13986 move_deaths (SET_SRC (x), maybe_kill_insn, from_luid, to_insn, pnotes);
13988 /* In the case of a ZERO_EXTRACT, a STRICT_LOW_PART, or a SUBREG
13989 that accesses one word of a multi-word item, some
13990 piece of everything register in the expression is used by
13991 this insn, so remove any old death. */
13992 /* ??? So why do we test for equality of the sizes? */
13994 if (GET_CODE (dest) == ZERO_EXTRACT
13995 || GET_CODE (dest) == STRICT_LOW_PART
13996 || (GET_CODE (dest) == SUBREG
13997 && !read_modify_subreg_p (dest)))
13999 move_deaths (dest, maybe_kill_insn, from_luid, to_insn, pnotes);
14000 return;
14003 /* If this is some other SUBREG, we know it replaces the entire
14004 value, so use that as the destination. */
14005 if (GET_CODE (dest) == SUBREG)
14006 dest = SUBREG_REG (dest);
14008 /* If this is a MEM, adjust deaths of anything used in the address.
14009 For a REG (the only other possibility), the entire value is
14010 being replaced so the old value is not used in this insn. */
14012 if (MEM_P (dest))
14013 move_deaths (XEXP (dest, 0), maybe_kill_insn, from_luid,
14014 to_insn, pnotes);
14015 return;
14018 else if (GET_CODE (x) == CLOBBER)
14019 return;
14021 len = GET_RTX_LENGTH (code);
14022 fmt = GET_RTX_FORMAT (code);
14024 for (i = 0; i < len; i++)
14026 if (fmt[i] == 'E')
14028 int j;
14029 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
14030 move_deaths (XVECEXP (x, i, j), maybe_kill_insn, from_luid,
14031 to_insn, pnotes);
14033 else if (fmt[i] == 'e')
14034 move_deaths (XEXP (x, i), maybe_kill_insn, from_luid, to_insn, pnotes);
14038 /* Return 1 if X is the target of a bit-field assignment in BODY, the
14039 pattern of an insn. X must be a REG. */
14041 static int
14042 reg_bitfield_target_p (rtx x, rtx body)
14044 int i;
14046 if (GET_CODE (body) == SET)
14048 rtx dest = SET_DEST (body);
14049 rtx target;
14050 unsigned int regno, tregno, endregno, endtregno;
14052 if (GET_CODE (dest) == ZERO_EXTRACT)
14053 target = XEXP (dest, 0);
14054 else if (GET_CODE (dest) == STRICT_LOW_PART)
14055 target = SUBREG_REG (XEXP (dest, 0));
14056 else
14057 return 0;
14059 if (GET_CODE (target) == SUBREG)
14060 target = SUBREG_REG (target);
14062 if (!REG_P (target))
14063 return 0;
14065 tregno = REGNO (target), regno = REGNO (x);
14066 if (tregno >= FIRST_PSEUDO_REGISTER || regno >= FIRST_PSEUDO_REGISTER)
14067 return target == x;
14069 endtregno = end_hard_regno (GET_MODE (target), tregno);
14070 endregno = end_hard_regno (GET_MODE (x), regno);
14072 return endregno > tregno && regno < endtregno;
14075 else if (GET_CODE (body) == PARALLEL)
14076 for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
14077 if (reg_bitfield_target_p (x, XVECEXP (body, 0, i)))
14078 return 1;
14080 return 0;
14083 /* Given a chain of REG_NOTES originally from FROM_INSN, try to place them
14084 as appropriate. I3 and I2 are the insns resulting from the combination
14085 insns including FROM (I2 may be zero).
14087 ELIM_I2 and ELIM_I1 are either zero or registers that we know will
14088 not need REG_DEAD notes because they are being substituted for. This
14089 saves searching in the most common cases.
14091 Each note in the list is either ignored or placed on some insns, depending
14092 on the type of note. */
14094 static void
14095 distribute_notes (rtx notes, rtx_insn *from_insn, rtx_insn *i3, rtx_insn *i2,
14096 rtx elim_i2, rtx elim_i1, rtx elim_i0)
14098 rtx note, next_note;
14099 rtx tem_note;
14100 rtx_insn *tem_insn;
14102 for (note = notes; note; note = next_note)
14104 rtx_insn *place = 0, *place2 = 0;
14106 next_note = XEXP (note, 1);
14107 switch (REG_NOTE_KIND (note))
14109 case REG_BR_PROB:
14110 case REG_BR_PRED:
14111 /* Doesn't matter much where we put this, as long as it's somewhere.
14112 It is preferable to keep these notes on branches, which is most
14113 likely to be i3. */
14114 place = i3;
14115 break;
14117 case REG_NON_LOCAL_GOTO:
14118 if (JUMP_P (i3))
14119 place = i3;
14120 else
14122 gcc_assert (i2 && JUMP_P (i2));
14123 place = i2;
14125 break;
14127 case REG_EH_REGION:
14128 /* These notes must remain with the call or trapping instruction. */
14129 if (CALL_P (i3))
14130 place = i3;
14131 else if (i2 && CALL_P (i2))
14132 place = i2;
14133 else
14135 gcc_assert (cfun->can_throw_non_call_exceptions);
14136 if (may_trap_p (i3))
14137 place = i3;
14138 else if (i2 && may_trap_p (i2))
14139 place = i2;
14140 /* ??? Otherwise assume we've combined things such that we
14141 can now prove that the instructions can't trap. Drop the
14142 note in this case. */
14144 break;
14146 case REG_ARGS_SIZE:
14147 /* ??? How to distribute between i3-i1. Assume i3 contains the
14148 entire adjustment. Assert i3 contains at least some adjust. */
14149 if (!noop_move_p (i3))
14151 int old_size, args_size = INTVAL (XEXP (note, 0));
14152 /* fixup_args_size_notes looks at REG_NORETURN note,
14153 so ensure the note is placed there first. */
14154 if (CALL_P (i3))
14156 rtx *np;
14157 for (np = &next_note; *np; np = &XEXP (*np, 1))
14158 if (REG_NOTE_KIND (*np) == REG_NORETURN)
14160 rtx n = *np;
14161 *np = XEXP (n, 1);
14162 XEXP (n, 1) = REG_NOTES (i3);
14163 REG_NOTES (i3) = n;
14164 break;
14167 old_size = fixup_args_size_notes (PREV_INSN (i3), i3, args_size);
14168 /* emit_call_1 adds for !ACCUMULATE_OUTGOING_ARGS
14169 REG_ARGS_SIZE note to all noreturn calls, allow that here. */
14170 gcc_assert (old_size != args_size
14171 || (CALL_P (i3)
14172 && !ACCUMULATE_OUTGOING_ARGS
14173 && find_reg_note (i3, REG_NORETURN, NULL_RTX)));
14175 break;
14177 case REG_NORETURN:
14178 case REG_SETJMP:
14179 case REG_TM:
14180 case REG_CALL_DECL:
14181 case REG_CALL_NOCF_CHECK:
14182 /* These notes must remain with the call. It should not be
14183 possible for both I2 and I3 to be a call. */
14184 if (CALL_P (i3))
14185 place = i3;
14186 else
14188 gcc_assert (i2 && CALL_P (i2));
14189 place = i2;
14191 break;
14193 case REG_UNUSED:
14194 /* Any clobbers for i3 may still exist, and so we must process
14195 REG_UNUSED notes from that insn.
14197 Any clobbers from i2 or i1 can only exist if they were added by
14198 recog_for_combine. In that case, recog_for_combine created the
14199 necessary REG_UNUSED notes. Trying to keep any original
14200 REG_UNUSED notes from these insns can cause incorrect output
14201 if it is for the same register as the original i3 dest.
14202 In that case, we will notice that the register is set in i3,
14203 and then add a REG_UNUSED note for the destination of i3, which
14204 is wrong. However, it is possible to have REG_UNUSED notes from
14205 i2 or i1 for register which were both used and clobbered, so
14206 we keep notes from i2 or i1 if they will turn into REG_DEAD
14207 notes. */
14209 /* If this register is set or clobbered in I3, put the note there
14210 unless there is one already. */
14211 if (reg_set_p (XEXP (note, 0), PATTERN (i3)))
14213 if (from_insn != i3)
14214 break;
14216 if (! (REG_P (XEXP (note, 0))
14217 ? find_regno_note (i3, REG_UNUSED, REGNO (XEXP (note, 0)))
14218 : find_reg_note (i3, REG_UNUSED, XEXP (note, 0))))
14219 place = i3;
14221 /* Otherwise, if this register is used by I3, then this register
14222 now dies here, so we must put a REG_DEAD note here unless there
14223 is one already. */
14224 else if (reg_referenced_p (XEXP (note, 0), PATTERN (i3))
14225 && ! (REG_P (XEXP (note, 0))
14226 ? find_regno_note (i3, REG_DEAD,
14227 REGNO (XEXP (note, 0)))
14228 : find_reg_note (i3, REG_DEAD, XEXP (note, 0))))
14230 PUT_REG_NOTE_KIND (note, REG_DEAD);
14231 place = i3;
14233 break;
14235 case REG_EQUAL:
14236 case REG_EQUIV:
14237 case REG_NOALIAS:
14238 /* These notes say something about results of an insn. We can
14239 only support them if they used to be on I3 in which case they
14240 remain on I3. Otherwise they are ignored.
14242 If the note refers to an expression that is not a constant, we
14243 must also ignore the note since we cannot tell whether the
14244 equivalence is still true. It might be possible to do
14245 slightly better than this (we only have a problem if I2DEST
14246 or I1DEST is present in the expression), but it doesn't
14247 seem worth the trouble. */
14249 if (from_insn == i3
14250 && (XEXP (note, 0) == 0 || CONSTANT_P (XEXP (note, 0))))
14251 place = i3;
14252 break;
14254 case REG_INC:
14255 /* These notes say something about how a register is used. They must
14256 be present on any use of the register in I2 or I3. */
14257 if (reg_mentioned_p (XEXP (note, 0), PATTERN (i3)))
14258 place = i3;
14260 if (i2 && reg_mentioned_p (XEXP (note, 0), PATTERN (i2)))
14262 if (place)
14263 place2 = i2;
14264 else
14265 place = i2;
14267 break;
14269 case REG_LABEL_TARGET:
14270 case REG_LABEL_OPERAND:
14271 /* This can show up in several ways -- either directly in the
14272 pattern, or hidden off in the constant pool with (or without?)
14273 a REG_EQUAL note. */
14274 /* ??? Ignore the without-reg_equal-note problem for now. */
14275 if (reg_mentioned_p (XEXP (note, 0), PATTERN (i3))
14276 || ((tem_note = find_reg_note (i3, REG_EQUAL, NULL_RTX))
14277 && GET_CODE (XEXP (tem_note, 0)) == LABEL_REF
14278 && label_ref_label (XEXP (tem_note, 0)) == XEXP (note, 0)))
14279 place = i3;
14281 if (i2
14282 && (reg_mentioned_p (XEXP (note, 0), PATTERN (i2))
14283 || ((tem_note = find_reg_note (i2, REG_EQUAL, NULL_RTX))
14284 && GET_CODE (XEXP (tem_note, 0)) == LABEL_REF
14285 && label_ref_label (XEXP (tem_note, 0)) == XEXP (note, 0))))
14287 if (place)
14288 place2 = i2;
14289 else
14290 place = i2;
14293 /* For REG_LABEL_TARGET on a JUMP_P, we prefer to put the note
14294 as a JUMP_LABEL or decrement LABEL_NUSES if it's already
14295 there. */
14296 if (place && JUMP_P (place)
14297 && REG_NOTE_KIND (note) == REG_LABEL_TARGET
14298 && (JUMP_LABEL (place) == NULL
14299 || JUMP_LABEL (place) == XEXP (note, 0)))
14301 rtx label = JUMP_LABEL (place);
14303 if (!label)
14304 JUMP_LABEL (place) = XEXP (note, 0);
14305 else if (LABEL_P (label))
14306 LABEL_NUSES (label)--;
14309 if (place2 && JUMP_P (place2)
14310 && REG_NOTE_KIND (note) == REG_LABEL_TARGET
14311 && (JUMP_LABEL (place2) == NULL
14312 || JUMP_LABEL (place2) == XEXP (note, 0)))
14314 rtx label = JUMP_LABEL (place2);
14316 if (!label)
14317 JUMP_LABEL (place2) = XEXP (note, 0);
14318 else if (LABEL_P (label))
14319 LABEL_NUSES (label)--;
14320 place2 = 0;
14322 break;
14324 case REG_NONNEG:
14325 /* This note says something about the value of a register prior
14326 to the execution of an insn. It is too much trouble to see
14327 if the note is still correct in all situations. It is better
14328 to simply delete it. */
14329 break;
14331 case REG_DEAD:
14332 /* If we replaced the right hand side of FROM_INSN with a
14333 REG_EQUAL note, the original use of the dying register
14334 will not have been combined into I3 and I2. In such cases,
14335 FROM_INSN is guaranteed to be the first of the combined
14336 instructions, so we simply need to search back before
14337 FROM_INSN for the previous use or set of this register,
14338 then alter the notes there appropriately.
14340 If the register is used as an input in I3, it dies there.
14341 Similarly for I2, if it is nonzero and adjacent to I3.
14343 If the register is not used as an input in either I3 or I2
14344 and it is not one of the registers we were supposed to eliminate,
14345 there are two possibilities. We might have a non-adjacent I2
14346 or we might have somehow eliminated an additional register
14347 from a computation. For example, we might have had A & B where
14348 we discover that B will always be zero. In this case we will
14349 eliminate the reference to A.
14351 In both cases, we must search to see if we can find a previous
14352 use of A and put the death note there. */
14354 if (from_insn
14355 && from_insn == i2mod
14356 && !reg_overlap_mentioned_p (XEXP (note, 0), i2mod_new_rhs))
14357 tem_insn = from_insn;
14358 else
14360 if (from_insn
14361 && CALL_P (from_insn)
14362 && find_reg_fusage (from_insn, USE, XEXP (note, 0)))
14363 place = from_insn;
14364 else if (i2 && reg_set_p (XEXP (note, 0), PATTERN (i2)))
14366 /* If the new I2 sets the same register that is marked
14367 dead in the note, we do not in general know where to
14368 put the note. One important case we _can_ handle is
14369 when the note comes from I3. */
14370 if (from_insn == i3)
14371 place = i3;
14372 else
14373 break;
14375 else if (reg_referenced_p (XEXP (note, 0), PATTERN (i3)))
14376 place = i3;
14377 else if (i2 != 0 && next_nonnote_nondebug_insn (i2) == i3
14378 && reg_referenced_p (XEXP (note, 0), PATTERN (i2)))
14379 place = i2;
14380 else if ((rtx_equal_p (XEXP (note, 0), elim_i2)
14381 && !(i2mod
14382 && reg_overlap_mentioned_p (XEXP (note, 0),
14383 i2mod_old_rhs)))
14384 || rtx_equal_p (XEXP (note, 0), elim_i1)
14385 || rtx_equal_p (XEXP (note, 0), elim_i0))
14386 break;
14387 tem_insn = i3;
14390 if (place == 0)
14392 basic_block bb = this_basic_block;
14394 for (tem_insn = PREV_INSN (tem_insn); place == 0; tem_insn = PREV_INSN (tem_insn))
14396 if (!NONDEBUG_INSN_P (tem_insn))
14398 if (tem_insn == BB_HEAD (bb))
14399 break;
14400 continue;
14403 /* If the register is being set at TEM_INSN, see if that is all
14404 TEM_INSN is doing. If so, delete TEM_INSN. Otherwise, make this
14405 into a REG_UNUSED note instead. Don't delete sets to
14406 global register vars. */
14407 if ((REGNO (XEXP (note, 0)) >= FIRST_PSEUDO_REGISTER
14408 || !global_regs[REGNO (XEXP (note, 0))])
14409 && reg_set_p (XEXP (note, 0), PATTERN (tem_insn)))
14411 rtx set = single_set (tem_insn);
14412 rtx inner_dest = 0;
14413 rtx_insn *cc0_setter = NULL;
14415 if (set != 0)
14416 for (inner_dest = SET_DEST (set);
14417 (GET_CODE (inner_dest) == STRICT_LOW_PART
14418 || GET_CODE (inner_dest) == SUBREG
14419 || GET_CODE (inner_dest) == ZERO_EXTRACT);
14420 inner_dest = XEXP (inner_dest, 0))
14423 /* Verify that it was the set, and not a clobber that
14424 modified the register.
14426 CC0 targets must be careful to maintain setter/user
14427 pairs. If we cannot delete the setter due to side
14428 effects, mark the user with an UNUSED note instead
14429 of deleting it. */
14431 if (set != 0 && ! side_effects_p (SET_SRC (set))
14432 && rtx_equal_p (XEXP (note, 0), inner_dest)
14433 && (!HAVE_cc0
14434 || (! reg_mentioned_p (cc0_rtx, SET_SRC (set))
14435 || ((cc0_setter = prev_cc0_setter (tem_insn)) != NULL
14436 && sets_cc0_p (PATTERN (cc0_setter)) > 0))))
14438 /* Move the notes and links of TEM_INSN elsewhere.
14439 This might delete other dead insns recursively.
14440 First set the pattern to something that won't use
14441 any register. */
14442 rtx old_notes = REG_NOTES (tem_insn);
14444 PATTERN (tem_insn) = pc_rtx;
14445 REG_NOTES (tem_insn) = NULL;
14447 distribute_notes (old_notes, tem_insn, tem_insn, NULL,
14448 NULL_RTX, NULL_RTX, NULL_RTX);
14449 distribute_links (LOG_LINKS (tem_insn));
14451 unsigned int regno = REGNO (XEXP (note, 0));
14452 reg_stat_type *rsp = &reg_stat[regno];
14453 if (rsp->last_set == tem_insn)
14454 record_value_for_reg (XEXP (note, 0), NULL, NULL_RTX);
14456 SET_INSN_DELETED (tem_insn);
14457 if (tem_insn == i2)
14458 i2 = NULL;
14460 /* Delete the setter too. */
14461 if (cc0_setter)
14463 PATTERN (cc0_setter) = pc_rtx;
14464 old_notes = REG_NOTES (cc0_setter);
14465 REG_NOTES (cc0_setter) = NULL;
14467 distribute_notes (old_notes, cc0_setter,
14468 cc0_setter, NULL,
14469 NULL_RTX, NULL_RTX, NULL_RTX);
14470 distribute_links (LOG_LINKS (cc0_setter));
14472 SET_INSN_DELETED (cc0_setter);
14473 if (cc0_setter == i2)
14474 i2 = NULL;
14477 else
14479 PUT_REG_NOTE_KIND (note, REG_UNUSED);
14481 /* If there isn't already a REG_UNUSED note, put one
14482 here. Do not place a REG_DEAD note, even if
14483 the register is also used here; that would not
14484 match the algorithm used in lifetime analysis
14485 and can cause the consistency check in the
14486 scheduler to fail. */
14487 if (! find_regno_note (tem_insn, REG_UNUSED,
14488 REGNO (XEXP (note, 0))))
14489 place = tem_insn;
14490 break;
14493 else if (reg_referenced_p (XEXP (note, 0), PATTERN (tem_insn))
14494 || (CALL_P (tem_insn)
14495 && find_reg_fusage (tem_insn, USE, XEXP (note, 0))))
14497 place = tem_insn;
14499 /* If we are doing a 3->2 combination, and we have a
14500 register which formerly died in i3 and was not used
14501 by i2, which now no longer dies in i3 and is used in
14502 i2 but does not die in i2, and place is between i2
14503 and i3, then we may need to move a link from place to
14504 i2. */
14505 if (i2 && DF_INSN_LUID (place) > DF_INSN_LUID (i2)
14506 && from_insn
14507 && DF_INSN_LUID (from_insn) > DF_INSN_LUID (i2)
14508 && reg_referenced_p (XEXP (note, 0), PATTERN (i2)))
14510 struct insn_link *links = LOG_LINKS (place);
14511 LOG_LINKS (place) = NULL;
14512 distribute_links (links);
14514 break;
14517 if (tem_insn == BB_HEAD (bb))
14518 break;
14523 /* If the register is set or already dead at PLACE, we needn't do
14524 anything with this note if it is still a REG_DEAD note.
14525 We check here if it is set at all, not if is it totally replaced,
14526 which is what `dead_or_set_p' checks, so also check for it being
14527 set partially. */
14529 if (place && REG_NOTE_KIND (note) == REG_DEAD)
14531 unsigned int regno = REGNO (XEXP (note, 0));
14532 reg_stat_type *rsp = &reg_stat[regno];
14534 if (dead_or_set_p (place, XEXP (note, 0))
14535 || reg_bitfield_target_p (XEXP (note, 0), PATTERN (place)))
14537 /* Unless the register previously died in PLACE, clear
14538 last_death. [I no longer understand why this is
14539 being done.] */
14540 if (rsp->last_death != place)
14541 rsp->last_death = 0;
14542 place = 0;
14544 else
14545 rsp->last_death = place;
14547 /* If this is a death note for a hard reg that is occupying
14548 multiple registers, ensure that we are still using all
14549 parts of the object. If we find a piece of the object
14550 that is unused, we must arrange for an appropriate REG_DEAD
14551 note to be added for it. However, we can't just emit a USE
14552 and tag the note to it, since the register might actually
14553 be dead; so we recourse, and the recursive call then finds
14554 the previous insn that used this register. */
14556 if (place && REG_NREGS (XEXP (note, 0)) > 1)
14558 unsigned int endregno = END_REGNO (XEXP (note, 0));
14559 bool all_used = true;
14560 unsigned int i;
14562 for (i = regno; i < endregno; i++)
14563 if ((! refers_to_regno_p (i, PATTERN (place))
14564 && ! find_regno_fusage (place, USE, i))
14565 || dead_or_set_regno_p (place, i))
14567 all_used = false;
14568 break;
14571 if (! all_used)
14573 /* Put only REG_DEAD notes for pieces that are
14574 not already dead or set. */
14576 for (i = regno; i < endregno;
14577 i += hard_regno_nregs (i, reg_raw_mode[i]))
14579 rtx piece = regno_reg_rtx[i];
14580 basic_block bb = this_basic_block;
14582 if (! dead_or_set_p (place, piece)
14583 && ! reg_bitfield_target_p (piece,
14584 PATTERN (place)))
14586 rtx new_note = alloc_reg_note (REG_DEAD, piece,
14587 NULL_RTX);
14589 distribute_notes (new_note, place, place,
14590 NULL, NULL_RTX, NULL_RTX,
14591 NULL_RTX);
14593 else if (! refers_to_regno_p (i, PATTERN (place))
14594 && ! find_regno_fusage (place, USE, i))
14595 for (tem_insn = PREV_INSN (place); ;
14596 tem_insn = PREV_INSN (tem_insn))
14598 if (!NONDEBUG_INSN_P (tem_insn))
14600 if (tem_insn == BB_HEAD (bb))
14601 break;
14602 continue;
14604 if (dead_or_set_p (tem_insn, piece)
14605 || reg_bitfield_target_p (piece,
14606 PATTERN (tem_insn)))
14608 add_reg_note (tem_insn, REG_UNUSED, piece);
14609 break;
14614 place = 0;
14618 break;
14620 default:
14621 /* Any other notes should not be present at this point in the
14622 compilation. */
14623 gcc_unreachable ();
14626 if (place)
14628 XEXP (note, 1) = REG_NOTES (place);
14629 REG_NOTES (place) = note;
14632 if (place2)
14633 add_shallow_copy_of_reg_note (place2, note);
14637 /* Similarly to above, distribute the LOG_LINKS that used to be present on
14638 I3, I2, and I1 to new locations. This is also called to add a link
14639 pointing at I3 when I3's destination is changed. */
14641 static void
14642 distribute_links (struct insn_link *links)
14644 struct insn_link *link, *next_link;
14646 for (link = links; link; link = next_link)
14648 rtx_insn *place = 0;
14649 rtx_insn *insn;
14650 rtx set, reg;
14652 next_link = link->next;
14654 /* If the insn that this link points to is a NOTE, ignore it. */
14655 if (NOTE_P (link->insn))
14656 continue;
14658 set = 0;
14659 rtx pat = PATTERN (link->insn);
14660 if (GET_CODE (pat) == SET)
14661 set = pat;
14662 else if (GET_CODE (pat) == PARALLEL)
14664 int i;
14665 for (i = 0; i < XVECLEN (pat, 0); i++)
14667 set = XVECEXP (pat, 0, i);
14668 if (GET_CODE (set) != SET)
14669 continue;
14671 reg = SET_DEST (set);
14672 while (GET_CODE (reg) == ZERO_EXTRACT
14673 || GET_CODE (reg) == STRICT_LOW_PART
14674 || GET_CODE (reg) == SUBREG)
14675 reg = XEXP (reg, 0);
14677 if (!REG_P (reg))
14678 continue;
14680 if (REGNO (reg) == link->regno)
14681 break;
14683 if (i == XVECLEN (pat, 0))
14684 continue;
14686 else
14687 continue;
14689 reg = SET_DEST (set);
14691 while (GET_CODE (reg) == ZERO_EXTRACT
14692 || GET_CODE (reg) == STRICT_LOW_PART
14693 || GET_CODE (reg) == SUBREG)
14694 reg = XEXP (reg, 0);
14696 /* A LOG_LINK is defined as being placed on the first insn that uses
14697 a register and points to the insn that sets the register. Start
14698 searching at the next insn after the target of the link and stop
14699 when we reach a set of the register or the end of the basic block.
14701 Note that this correctly handles the link that used to point from
14702 I3 to I2. Also note that not much searching is typically done here
14703 since most links don't point very far away. */
14705 for (insn = NEXT_INSN (link->insn);
14706 (insn && (this_basic_block->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
14707 || BB_HEAD (this_basic_block->next_bb) != insn));
14708 insn = NEXT_INSN (insn))
14709 if (DEBUG_INSN_P (insn))
14710 continue;
14711 else if (INSN_P (insn) && reg_overlap_mentioned_p (reg, PATTERN (insn)))
14713 if (reg_referenced_p (reg, PATTERN (insn)))
14714 place = insn;
14715 break;
14717 else if (CALL_P (insn)
14718 && find_reg_fusage (insn, USE, reg))
14720 place = insn;
14721 break;
14723 else if (INSN_P (insn) && reg_set_p (reg, insn))
14724 break;
14726 /* If we found a place to put the link, place it there unless there
14727 is already a link to the same insn as LINK at that point. */
14729 if (place)
14731 struct insn_link *link2;
14733 FOR_EACH_LOG_LINK (link2, place)
14734 if (link2->insn == link->insn && link2->regno == link->regno)
14735 break;
14737 if (link2 == NULL)
14739 link->next = LOG_LINKS (place);
14740 LOG_LINKS (place) = link;
14742 /* Set added_links_insn to the earliest insn we added a
14743 link to. */
14744 if (added_links_insn == 0
14745 || DF_INSN_LUID (added_links_insn) > DF_INSN_LUID (place))
14746 added_links_insn = place;
14752 /* Check for any register or memory mentioned in EQUIV that is not
14753 mentioned in EXPR. This is used to restrict EQUIV to "specializations"
14754 of EXPR where some registers may have been replaced by constants. */
14756 static bool
14757 unmentioned_reg_p (rtx equiv, rtx expr)
14759 subrtx_iterator::array_type array;
14760 FOR_EACH_SUBRTX (iter, array, equiv, NONCONST)
14762 const_rtx x = *iter;
14763 if ((REG_P (x) || MEM_P (x))
14764 && !reg_mentioned_p (x, expr))
14765 return true;
14767 return false;
14770 DEBUG_FUNCTION void
14771 dump_combine_stats (FILE *file)
14773 fprintf
14774 (file,
14775 ";; Combiner statistics: %d attempts, %d substitutions (%d requiring new space),\n;; %d successes.\n\n",
14776 combine_attempts, combine_merges, combine_extras, combine_successes);
14779 void
14780 dump_combine_total_stats (FILE *file)
14782 fprintf
14783 (file,
14784 "\n;; Combiner totals: %d attempts, %d substitutions (%d requiring new space),\n;; %d successes.\n",
14785 total_attempts, total_merges, total_extras, total_successes);
14788 /* Try combining insns through substitution. */
14789 static unsigned int
14790 rest_of_handle_combine (void)
14792 int rebuild_jump_labels_after_combine;
14794 df_set_flags (DF_LR_RUN_DCE + DF_DEFER_INSN_RESCAN);
14795 df_note_add_problem ();
14796 df_analyze ();
14798 regstat_init_n_sets_and_refs ();
14799 reg_n_sets_max = max_reg_num ();
14801 rebuild_jump_labels_after_combine
14802 = combine_instructions (get_insns (), max_reg_num ());
14804 /* Combining insns may have turned an indirect jump into a
14805 direct jump. Rebuild the JUMP_LABEL fields of jumping
14806 instructions. */
14807 if (rebuild_jump_labels_after_combine)
14809 if (dom_info_available_p (CDI_DOMINATORS))
14810 free_dominance_info (CDI_DOMINATORS);
14811 timevar_push (TV_JUMP);
14812 rebuild_jump_labels (get_insns ());
14813 cleanup_cfg (0);
14814 timevar_pop (TV_JUMP);
14817 regstat_free_n_sets_and_refs ();
14818 return 0;
14821 namespace {
14823 const pass_data pass_data_combine =
14825 RTL_PASS, /* type */
14826 "combine", /* name */
14827 OPTGROUP_NONE, /* optinfo_flags */
14828 TV_COMBINE, /* tv_id */
14829 PROP_cfglayout, /* properties_required */
14830 0, /* properties_provided */
14831 0, /* properties_destroyed */
14832 0, /* todo_flags_start */
14833 TODO_df_finish, /* todo_flags_finish */
14836 class pass_combine : public rtl_opt_pass
14838 public:
14839 pass_combine (gcc::context *ctxt)
14840 : rtl_opt_pass (pass_data_combine, ctxt)
14843 /* opt_pass methods: */
14844 virtual bool gate (function *) { return (optimize > 0); }
14845 virtual unsigned int execute (function *)
14847 return rest_of_handle_combine ();
14850 }; // class pass_combine
14852 } // anon namespace
14854 rtl_opt_pass *
14855 make_pass_combine (gcc::context *ctxt)
14857 return new pass_combine (ctxt);