Fix -Wimplicit-fallthrough in combine.c
[official-gcc.git] / gcc / combine.c
blob 757ae6fc93e48bd2a7a289c1009a27c01d3232f1
1 /* Optimize by combining instructions for GNU compiler.
2 Copyright (C) 1987-2017 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
20 /* This module is essentially the "combiner" phase of the U. of Arizona
21 Portable Optimizer, but redone to work on our list-structured
22 representation for RTL instead of their string representation.
24 The LOG_LINKS of each insn identify the most recent assignment
25 to each REG used in the insn. It is a list of previous insns,
26 each of which contains a SET for a REG that is used in this insn
27 and not used or set in between. LOG_LINKs never cross basic blocks.
28 They were set up by the preceding pass (lifetime analysis).
30 We try to combine each pair of insns joined by a logical link.
31 We also try to combine triplets of insns A, B and C when C has
32 a link back to B and B has a link back to A. Likewise for a
33 small number of quadruplets of insns A, B, C and D for which
34 there's high likelihood of success.
36 LOG_LINKS does not have links for use of the CC0. They don't
37 need to, because the insn that sets the CC0 is always immediately
38 before the insn that tests it. So we always regard a branch
39 insn as having a logical link to the preceding insn. The same is true
40 for an insn explicitly using CC0.
42 We check (with use_crosses_set_p) to avoid combining in such a way
43 as to move a computation to a place where its value would be different.
45 Combination is done by mathematically substituting the previous
46 insn(s) values for the regs they set into the expressions in
47 the later insns that refer to these regs. If the result is a valid insn
48 for our target machine, according to the machine description,
49 we install it, delete the earlier insns, and update the data flow
50 information (LOG_LINKS and REG_NOTES) for what we did.
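   As an illustrative sketch (an editor's example, not part of the original
   sources): given

       insn A:  (set (reg:SI 100) (plus:SI (reg:SI 99) (const_int 4)))
       insn B:  (set (mem:SI (reg:SI 100)) (const_int 0))

   where B has a LOG_LINK back to A and reg 100 dies in B, substituting A's
   SET_SRC into B yields

       (set (mem:SI (plus:SI (reg:SI 99) (const_int 4))) (const_int 0))

   and, if the target recognizes that pattern, A is deleted and B is replaced
   by the combined insn.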
52 There are a few exceptions where the dataflow information isn't
53 completely updated (however this is only a local issue since it is
54 regenerated before the next pass that uses it):
56 - reg_live_length is not updated
57 - reg_n_refs is not adjusted in the rare case when a register is
58 no longer required in a computation
59 - there are extremely rare cases (see distribute_notes) when a
60 REG_DEAD note is lost
61 - a LOG_LINKS entry that refers to an insn with multiple SETs may be
62 removed because there is no way to know which register it was
63 linking
65 To simplify substitution, we combine only when the earlier insn(s)
66 consist of only a single assignment. To simplify updating afterward,
67 we never combine when a subroutine call appears in the middle.
69 Since we do not represent assignments to CC0 explicitly except when that
70 is all an insn does, there is no LOG_LINKS entry in an insn that uses
71 the condition code for the insn that set the condition code.
72 Fortunately, these two insns must be consecutive.
73 Therefore, every JUMP_INSN is taken to have an implicit logical link
74 to the preceding insn. This is not quite right, since non-jumps can
75 also use the condition code; but in practice such insns would not
76 combine anyway. */
78 #include "config.h"
79 #include "system.h"
80 #include "coretypes.h"
81 #include "backend.h"
82 #include "target.h"
83 #include "rtl.h"
84 #include "tree.h"
85 #include "cfghooks.h"
86 #include "predict.h"
87 #include "df.h"
88 #include "memmodel.h"
89 #include "tm_p.h"
90 #include "optabs.h"
91 #include "regs.h"
92 #include "emit-rtl.h"
93 #include "recog.h"
94 #include "cgraph.h"
95 #include "stor-layout.h"
96 #include "cfgrtl.h"
97 #include "cfgcleanup.h"
98 /* Include expr.h after insn-config.h so we get HAVE_conditional_move. */
99 #include "explow.h"
100 #include "insn-attr.h"
101 #include "rtlhooks-def.h"
102 #include "params.h"
103 #include "tree-pass.h"
104 #include "valtrack.h"
105 #include "rtl-iter.h"
106 #include "print-rtl.h"
108 /* Number of attempts to combine instructions in this function. */
110 static int combine_attempts;
112 /* Number of attempts that got as far as substitution in this function. */
114 static int combine_merges;
116 /* Number of instructions combined with added SETs in this function. */
118 static int combine_extras;
120 /* Number of instructions combined in this function. */
122 static int combine_successes;
124 /* Totals over entire compilation. */
126 static int total_attempts, total_merges, total_extras, total_successes;
128 /* combine_instructions may try to replace the right hand side of the
129 second instruction with the value of an associated REG_EQUAL note
130 before throwing it at try_combine. That is problematic when there
131 is a REG_DEAD note for a register used in the old right hand side
132 and can cause distribute_notes to do wrong things. This is the
133 second instruction if it has been so modified, null otherwise. */
135 static rtx_insn *i2mod;
137 /* When I2MOD is nonnull, this is a copy of the old right hand side. */
139 static rtx i2mod_old_rhs;
141 /* When I2MOD is nonnull, this is a copy of the new right hand side. */
143 static rtx i2mod_new_rhs;
145 struct reg_stat_type {
146 /* Record last point of death of (hard or pseudo) register n. */
147 rtx_insn *last_death;
149 /* Record last point of modification of (hard or pseudo) register n. */
150 rtx_insn *last_set;
152 /* The next group of fields allows the recording of the last value assigned
153 to (hard or pseudo) register n. We use this information to see if an
154 operation being processed is redundant given a prior operation performed
155 on the register. For example, an `and' with a constant is redundant if
156 all the zero bits are already known to be turned off.
158 We use an approach similar to that used by cse, but change it in the
159 following ways:
161 (1) We do not want to reinitialize at each label.
162 (2) It is useful, but not critical, to know the actual value assigned
163 to a register. Often just its form is helpful.
165 Therefore, we maintain the following fields:
167 last_set_value the last value assigned
168 last_set_label records the value of label_tick when the
169 register was assigned
170 last_set_table_tick records the value of label_tick when a
171 value using the register is assigned
172 last_set_invalid set to nonzero when it is not valid
173 to use the value of this register in some
174 register's value
176 To understand the usage of these tables, it is important to understand
177 the distinction between the value in last_set_value being valid and
178 the register being validly contained in some other expression in the
179 table.
181 (The next two parameters are out of date).
183 reg_stat[i].last_set_value is valid if it is nonzero, and either
184 reg_n_sets[i] is 1 or reg_stat[i].last_set_label == label_tick.
186 Register I may validly appear in any expression returned for the value
187 of another register if reg_n_sets[i] is 1. It may also appear in the
188 value for register J if reg_stat[j].last_set_invalid is zero, or
189 reg_stat[i].last_set_label < reg_stat[j].last_set_label.
191 If an expression is found in the table containing a register which may
192 not validly appear in an expression, the register is replaced by
193 something that won't match, (clobber (const_int 0)). */
195 /* Record last value assigned to (hard or pseudo) register n. */
197 rtx last_set_value;
199 /* Record the value of label_tick when an expression involving register n
200 is placed in last_set_value. */
202 int last_set_table_tick;
204 /* Record the value of label_tick when the value for register n is placed in
205 last_set_value. */
207 int last_set_label;
209 /* These fields are maintained in parallel with last_set_value and are
210 used to store the mode in which the register was last set, the bits
211 that were known to be zero when it was last set, and the number of
 212    sign bit copies it was known to have when it was last set.  */
214 unsigned HOST_WIDE_INT last_set_nonzero_bits;
215 char last_set_sign_bit_copies;
216 ENUM_BITFIELD(machine_mode) last_set_mode : 8;
218 /* Set nonzero if references to register n in expressions should not be
219 used. last_set_invalid is set nonzero when this register is being
220 assigned to and last_set_table_tick == label_tick. */
222 char last_set_invalid;
224 /* Some registers that are set more than once and used in more than one
225 basic block are nevertheless always set in similar ways. For example,
226 a QImode register may be loaded from memory in two places on a machine
227 where byte loads zero extend.
229 We record in the following fields if a register has some leading bits
230 that are always equal to the sign bit, and what we know about the
231 nonzero bits of a register, specifically which bits are known to be
232 zero.
234 If an entry is zero, it means that we don't know anything special. */
236 unsigned char sign_bit_copies;
238 unsigned HOST_WIDE_INT nonzero_bits;
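  /* Illustrative example (an editor's note, not from the original sources):
     for an SImode pseudo that is only ever loaded by zero-extending byte
     loads, nonzero_bits would be 0xff and sign_bit_copies would be 24,
     since bits 31..8 are known to be zero and therefore equal to the sign
     bit.  */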
240 /* Record the value of the label_tick when the last truncation
241 happened. The field truncated_to_mode is only valid if
242 truncation_label == label_tick. */
244 int truncation_label;
246 /* Record the last truncation seen for this register. If truncation
247 is not a nop to this mode we might be able to save an explicit
248 truncation if we know that value already contains a truncated
249 value. */
251 ENUM_BITFIELD(machine_mode) truncated_to_mode : 8;
};
255 static vec<reg_stat_type> reg_stat;
257 /* One plus the highest pseudo for which we track REG_N_SETS.
258 regstat_init_n_sets_and_refs allocates the array for REG_N_SETS just once,
259 but during combine_split_insns new pseudos can be created. As we don't have
260 updated DF information in that case, it is hard to initialize the array
261 after growing. The combiner only cares about REG_N_SETS (regno) == 1,
262 so instead of growing the arrays, just assume all newly created pseudos
263 during combine might be set multiple times. */
265 static unsigned int reg_n_sets_max;
267 /* Record the luid of the last insn that invalidated memory
268 (anything that writes memory, and subroutine calls, but not pushes). */
270 static int mem_last_set;
272 /* Record the luid of the last CALL_INSN
273 so we can tell whether a potential combination crosses any calls. */
275 static int last_call_luid;
277 /* When `subst' is called, this is the insn that is being modified
278 (by combining in a previous insn). The PATTERN of this insn
279 is still the old pattern partially modified and it should not be
280 looked at, but this may be used to examine the successors of the insn
281 to judge whether a simplification is valid. */
283 static rtx_insn *subst_insn;
285 /* This is the lowest LUID that `subst' is currently dealing with.
286 get_last_value will not return a value if the register was set at or
287 after this LUID. If not for this mechanism, we could get confused if
288 I2 or I1 in try_combine were an insn that used the old value of a register
289 to obtain a new value. In that case, we might erroneously get the
290 new value of the register when we wanted the old one. */
292 static int subst_low_luid;
294 /* This contains any hard registers that are used in newpat; reg_dead_at_p
295 must consider all these registers to be always live. */
297 static HARD_REG_SET newpat_used_regs;
299 /* This is an insn to which a LOG_LINKS entry has been added. If this
 300    insn is earlier than I2 or I3, combine should rescan starting at
301 that location. */
303 static rtx_insn *added_links_insn;
305 /* Basic block in which we are performing combines. */
306 static basic_block this_basic_block;
307 static bool optimize_this_for_speed_p;
310 /* Length of the currently allocated uid_insn_cost array. */
312 static int max_uid_known;
314 /* The following array records the insn_cost for every insn
315 in the instruction stream. */
317 static int *uid_insn_cost;
319 /* The following array records the LOG_LINKS for every insn in the
320 instruction stream as struct insn_link pointers. */
322 struct insn_link {
323 rtx_insn *insn;
324 unsigned int regno;
325 struct insn_link *next;
};
328 static struct insn_link **uid_log_links;
330 static inline int
331 insn_uid_check (const_rtx insn)
333 int uid = INSN_UID (insn);
334 gcc_checking_assert (uid <= max_uid_known);
335 return uid;
338 #define INSN_COST(INSN) (uid_insn_cost[insn_uid_check (INSN)])
339 #define LOG_LINKS(INSN) (uid_log_links[insn_uid_check (INSN)])
341 #define FOR_EACH_LOG_LINK(L, INSN) \
342 for ((L) = LOG_LINKS (INSN); (L); (L) = (L)->next)
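/* Typical use of the accessors above (an illustrative sketch by the editor;
   use_insn, def_insn and regno are placeholder names):

     struct insn_link *link;
     FOR_EACH_LOG_LINK (link, use_insn)
       if (link->insn == def_insn && link->regno == regno)
	 break;

   After the loop, link is nonnull exactly when use_insn has a log link
   recording that def_insn supplies the value of register regno.  */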
344 /* Links for LOG_LINKS are allocated from this obstack. */
346 static struct obstack insn_link_obstack;
348 /* Allocate a link. */
350 static inline struct insn_link *
351 alloc_insn_link (rtx_insn *insn, unsigned int regno, struct insn_link *next)
353 struct insn_link *l
354 = (struct insn_link *) obstack_alloc (&insn_link_obstack,
355 sizeof (struct insn_link));
356 l->insn = insn;
357 l->regno = regno;
358 l->next = next;
359 return l;
362 /* Incremented for each basic block. */
364 static int label_tick;
366 /* Reset to label_tick for each extended basic block in scanning order. */
368 static int label_tick_ebb_start;
370 /* Mode used to compute significance in reg_stat[].nonzero_bits. It is the
371 largest integer mode that can fit in HOST_BITS_PER_WIDE_INT. */
373 static scalar_int_mode nonzero_bits_mode;
375 /* Nonzero when reg_stat[].nonzero_bits and reg_stat[].sign_bit_copies can
376 be safely used. It is zero while computing them and after combine has
377 completed. This former test prevents propagating values based on
378 previously set values, which can be incorrect if a variable is modified
379 in a loop. */
381 static int nonzero_sign_valid;
384 /* Record one modification to rtl structure
385 to be undone by storing old_contents into *where. */
387 enum undo_kind { UNDO_RTX, UNDO_INT, UNDO_MODE, UNDO_LINKS };
389 struct undo
{
391 struct undo *next;
392 enum undo_kind kind;
393 union { rtx r; int i; machine_mode m; struct insn_link *l; } old_contents;
394 union { rtx *r; int *i; struct insn_link **l; } where;
};
397 /* Record a bunch of changes to be undone, up to MAX_UNDO of them.
398 num_undo says how many are currently recorded.
400 other_insn is nonzero if we have modified some other insn in the process
401 of working on subst_insn. It must be verified too. */
403 struct undobuf
{
405 struct undo *undos;
406 struct undo *frees;
407 rtx_insn *other_insn;
};
410 static struct undobuf undobuf;
412 /* Number of times the pseudo being substituted for
413 was found and replaced. */
415 static int n_occurrences;
417 static rtx reg_nonzero_bits_for_combine (const_rtx, scalar_int_mode,
418 scalar_int_mode,
419 unsigned HOST_WIDE_INT *);
420 static rtx reg_num_sign_bit_copies_for_combine (const_rtx, scalar_int_mode,
421 scalar_int_mode,
422 unsigned int *);
423 static void do_SUBST (rtx *, rtx);
424 static void do_SUBST_INT (int *, int);
425 static void init_reg_last (void);
426 static void setup_incoming_promotions (rtx_insn *);
427 static void set_nonzero_bits_and_sign_copies (rtx, const_rtx, void *);
428 static int cant_combine_insn_p (rtx_insn *);
429 static int can_combine_p (rtx_insn *, rtx_insn *, rtx_insn *, rtx_insn *,
430 rtx_insn *, rtx_insn *, rtx *, rtx *);
431 static int combinable_i3pat (rtx_insn *, rtx *, rtx, rtx, rtx, int, int, rtx *);
432 static int contains_muldiv (rtx);
433 static rtx_insn *try_combine (rtx_insn *, rtx_insn *, rtx_insn *, rtx_insn *,
434 int *, rtx_insn *);
435 static void undo_all (void);
436 static void undo_commit (void);
437 static rtx *find_split_point (rtx *, rtx_insn *, bool);
438 static rtx subst (rtx, rtx, rtx, int, int, int);
439 static rtx combine_simplify_rtx (rtx, machine_mode, int, int);
440 static rtx simplify_if_then_else (rtx);
441 static rtx simplify_set (rtx);
442 static rtx simplify_logical (rtx);
443 static rtx expand_compound_operation (rtx);
444 static const_rtx expand_field_assignment (const_rtx);
445 static rtx make_extraction (machine_mode, rtx, HOST_WIDE_INT,
446 rtx, unsigned HOST_WIDE_INT, int, int, int);
447 static int get_pos_from_mask (unsigned HOST_WIDE_INT,
448 unsigned HOST_WIDE_INT *);
449 static rtx canon_reg_for_combine (rtx, rtx);
450 static rtx force_int_to_mode (rtx, scalar_int_mode, scalar_int_mode,
451 scalar_int_mode, unsigned HOST_WIDE_INT, int);
452 static rtx force_to_mode (rtx, machine_mode,
453 unsigned HOST_WIDE_INT, int);
454 static rtx if_then_else_cond (rtx, rtx *, rtx *);
455 static rtx known_cond (rtx, enum rtx_code, rtx, rtx);
456 static int rtx_equal_for_field_assignment_p (rtx, rtx, bool = false);
457 static rtx make_field_assignment (rtx);
458 static rtx apply_distributive_law (rtx);
459 static rtx distribute_and_simplify_rtx (rtx, int);
460 static rtx simplify_and_const_int_1 (scalar_int_mode, rtx,
461 unsigned HOST_WIDE_INT);
462 static rtx simplify_and_const_int (rtx, scalar_int_mode, rtx,
463 unsigned HOST_WIDE_INT);
464 static int merge_outer_ops (enum rtx_code *, HOST_WIDE_INT *, enum rtx_code,
465 HOST_WIDE_INT, machine_mode, int *);
466 static rtx simplify_shift_const_1 (enum rtx_code, machine_mode, rtx, int);
467 static rtx simplify_shift_const (rtx, enum rtx_code, machine_mode, rtx,
468 int);
469 static int recog_for_combine (rtx *, rtx_insn *, rtx *);
470 static rtx gen_lowpart_for_combine (machine_mode, rtx);
471 static enum rtx_code simplify_compare_const (enum rtx_code, machine_mode,
472 rtx, rtx *);
473 static enum rtx_code simplify_comparison (enum rtx_code, rtx *, rtx *);
474 static void update_table_tick (rtx);
475 static void record_value_for_reg (rtx, rtx_insn *, rtx);
476 static void check_promoted_subreg (rtx_insn *, rtx);
477 static void record_dead_and_set_regs_1 (rtx, const_rtx, void *);
478 static void record_dead_and_set_regs (rtx_insn *);
479 static int get_last_value_validate (rtx *, rtx_insn *, int, int);
480 static rtx get_last_value (const_rtx);
481 static int use_crosses_set_p (const_rtx, int);
482 static void reg_dead_at_p_1 (rtx, const_rtx, void *);
483 static int reg_dead_at_p (rtx, rtx_insn *);
484 static void move_deaths (rtx, rtx, int, rtx_insn *, rtx *);
485 static int reg_bitfield_target_p (rtx, rtx);
486 static void distribute_notes (rtx, rtx_insn *, rtx_insn *, rtx_insn *, rtx, rtx, rtx);
487 static void distribute_links (struct insn_link *);
488 static void mark_used_regs_combine (rtx);
489 static void record_promoted_value (rtx_insn *, rtx);
490 static bool unmentioned_reg_p (rtx, rtx);
491 static void record_truncated_values (rtx *, void *);
492 static bool reg_truncated_to_mode (machine_mode, const_rtx);
493 static rtx gen_lowpart_or_truncate (machine_mode, rtx);
496 /* It is not safe to use ordinary gen_lowpart in combine.
497 See comments in gen_lowpart_for_combine. */
498 #undef RTL_HOOKS_GEN_LOWPART
499 #define RTL_HOOKS_GEN_LOWPART gen_lowpart_for_combine
501 /* Our implementation of gen_lowpart never emits a new pseudo. */
502 #undef RTL_HOOKS_GEN_LOWPART_NO_EMIT
503 #define RTL_HOOKS_GEN_LOWPART_NO_EMIT gen_lowpart_for_combine
505 #undef RTL_HOOKS_REG_NONZERO_REG_BITS
506 #define RTL_HOOKS_REG_NONZERO_REG_BITS reg_nonzero_bits_for_combine
508 #undef RTL_HOOKS_REG_NUM_SIGN_BIT_COPIES
509 #define RTL_HOOKS_REG_NUM_SIGN_BIT_COPIES reg_num_sign_bit_copies_for_combine
511 #undef RTL_HOOKS_REG_TRUNCATED_TO_MODE
512 #define RTL_HOOKS_REG_TRUNCATED_TO_MODE reg_truncated_to_mode
514 static const struct rtl_hooks combine_rtl_hooks = RTL_HOOKS_INITIALIZER;
517 /* Convenience wrapper for the canonicalize_comparison target hook.
518 Target hooks cannot use enum rtx_code. */
519 static inline void
520 target_canonicalize_comparison (enum rtx_code *code, rtx *op0, rtx *op1,
521 bool op0_preserve_value)
523 int code_int = (int)*code;
524 targetm.canonicalize_comparison (&code_int, op0, op1, op0_preserve_value);
525 *code = (enum rtx_code)code_int;
528 /* Try to split PATTERN found in INSN. This returns NULL_RTX if
529 PATTERN can not be split. Otherwise, it returns an insn sequence.
530 This is a wrapper around split_insns which ensures that the
531 reg_stat vector is made larger if the splitter creates a new
532 register. */
534 static rtx_insn *
535 combine_split_insns (rtx pattern, rtx_insn *insn)
537 rtx_insn *ret;
538 unsigned int nregs;
540 ret = split_insns (pattern, insn);
541 nregs = max_reg_num ();
542 if (nregs > reg_stat.length ())
543 reg_stat.safe_grow_cleared (nregs);
544 return ret;
547 /* This is used by find_single_use to locate an rtx in LOC that
548 contains exactly one use of DEST, which is typically either a REG
549 or CC0. It returns a pointer to the innermost rtx expression
550 containing DEST. Appearances of DEST that are being used to
551 totally replace it are not counted. */
553 static rtx *
554 find_single_use_1 (rtx dest, rtx *loc)
556 rtx x = *loc;
557 enum rtx_code code = GET_CODE (x);
558 rtx *result = NULL;
559 rtx *this_result;
560 int i;
561 const char *fmt;
563 switch (code)
565 case CONST:
566 case LABEL_REF:
567 case SYMBOL_REF:
568 CASE_CONST_ANY:
569 case CLOBBER:
570 return 0;
572 case SET:
573 /* If the destination is anything other than CC0, PC, a REG or a SUBREG
574 of a REG that occupies all of the REG, the insn uses DEST if
575 it is mentioned in the destination or the source. Otherwise, we
 576    need only check the source.  */
577 if (GET_CODE (SET_DEST (x)) != CC0
578 && GET_CODE (SET_DEST (x)) != PC
579 && !REG_P (SET_DEST (x))
580 && ! (GET_CODE (SET_DEST (x)) == SUBREG
581 && REG_P (SUBREG_REG (SET_DEST (x)))
582 && (((GET_MODE_SIZE (GET_MODE (SUBREG_REG (SET_DEST (x))))
583 + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)
584 == ((GET_MODE_SIZE (GET_MODE (SET_DEST (x)))
585 + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD))))
586 break;
588 return find_single_use_1 (dest, &SET_SRC (x));
590 case MEM:
591 case SUBREG:
592 return find_single_use_1 (dest, &XEXP (x, 0));
594 default:
595 break;
598 /* If it wasn't one of the common cases above, check each expression and
599 vector of this code. Look for a unique usage of DEST. */
601 fmt = GET_RTX_FORMAT (code);
602 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
604 if (fmt[i] == 'e')
606 if (dest == XEXP (x, i)
607 || (REG_P (dest) && REG_P (XEXP (x, i))
608 && REGNO (dest) == REGNO (XEXP (x, i))))
609 this_result = loc;
610 else
611 this_result = find_single_use_1 (dest, &XEXP (x, i));
613 if (result == NULL)
614 result = this_result;
615 else if (this_result)
616 /* Duplicate usage. */
617 return NULL;
619 else if (fmt[i] == 'E')
621 int j;
623 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
625 if (XVECEXP (x, i, j) == dest
626 || (REG_P (dest)
627 && REG_P (XVECEXP (x, i, j))
628 && REGNO (XVECEXP (x, i, j)) == REGNO (dest)))
629 this_result = loc;
630 else
631 this_result = find_single_use_1 (dest, &XVECEXP (x, i, j));
633 if (result == NULL)
634 result = this_result;
635 else if (this_result)
636 return NULL;
641 return result;
645 /* See if DEST, produced in INSN, is used only a single time in the
646 sequel. If so, return a pointer to the innermost rtx expression in which
647 it is used.
649 If PLOC is nonzero, *PLOC is set to the insn containing the single use.
651 If DEST is cc0_rtx, we look only at the next insn. In that case, we don't
652 care about REG_DEAD notes or LOG_LINKS.
654 Otherwise, we find the single use by finding an insn that has a
655 LOG_LINKS pointing at INSN and has a REG_DEAD note for DEST. If DEST is
656 only referenced once in that insn, we know that it must be the first
657 and last insn referencing DEST. */
659 static rtx *
660 find_single_use (rtx dest, rtx_insn *insn, rtx_insn **ploc)
662 basic_block bb;
663 rtx_insn *next;
664 rtx *result;
665 struct insn_link *link;
667 if (dest == cc0_rtx)
669 next = NEXT_INSN (insn);
670 if (next == 0
671 || (!NONJUMP_INSN_P (next) && !JUMP_P (next)))
672 return 0;
674 result = find_single_use_1 (dest, &PATTERN (next));
675 if (result && ploc)
676 *ploc = next;
677 return result;
680 if (!REG_P (dest))
681 return 0;
683 bb = BLOCK_FOR_INSN (insn);
684 for (next = NEXT_INSN (insn);
685 next && BLOCK_FOR_INSN (next) == bb;
686 next = NEXT_INSN (next))
687 if (NONDEBUG_INSN_P (next) && dead_or_set_p (next, dest))
689 FOR_EACH_LOG_LINK (link, next)
690 if (link->insn == insn && link->regno == REGNO (dest))
691 break;
693 if (link)
695 result = find_single_use_1 (dest, &PATTERN (next));
696 if (ploc)
697 *ploc = next;
698 return result;
702 return 0;
705 /* Substitute NEWVAL, an rtx expression, into INTO, a place in some
706 insn. The substitution can be undone by undo_all. If INTO is already
707 set to NEWVAL, do not record this change. Because computing NEWVAL might
708 also call SUBST, we have to compute it before we put anything into
709 the undo table. */
711 static void
712 do_SUBST (rtx *into, rtx newval)
714 struct undo *buf;
715 rtx oldval = *into;
717 if (oldval == newval)
718 return;
720 /* We'd like to catch as many invalid transformations here as
721 possible. Unfortunately, there are way too many mode changes
722 that are perfectly valid, so we'd waste too much effort for
723 little gain doing the checks here. Focus on catching invalid
724 transformations involving integer constants. */
725 if (GET_MODE_CLASS (GET_MODE (oldval)) == MODE_INT
726 && CONST_INT_P (newval))
728 /* Sanity check that we're replacing oldval with a CONST_INT
729 that is a valid sign-extension for the original mode. */
730 gcc_assert (INTVAL (newval)
731 == trunc_int_for_mode (INTVAL (newval), GET_MODE (oldval)));
733 /* Replacing the operand of a SUBREG or a ZERO_EXTEND with a
734 CONST_INT is not valid, because after the replacement, the
735 original mode would be gone. Unfortunately, we can't tell
736 when do_SUBST is called to replace the operand thereof, so we
737 perform this test on oldval instead, checking whether an
738 invalid replacement took place before we got here. */
739 gcc_assert (!(GET_CODE (oldval) == SUBREG
740 && CONST_INT_P (SUBREG_REG (oldval))));
741 gcc_assert (!(GET_CODE (oldval) == ZERO_EXTEND
742 && CONST_INT_P (XEXP (oldval, 0))));
745 if (undobuf.frees)
746 buf = undobuf.frees, undobuf.frees = buf->next;
747 else
748 buf = XNEW (struct undo);
750 buf->kind = UNDO_RTX;
751 buf->where.r = into;
752 buf->old_contents.r = oldval;
753 *into = newval;
755 buf->next = undobuf.undos, undobuf.undos = buf;
758 #define SUBST(INTO, NEWVAL) do_SUBST (&(INTO), (NEWVAL))
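/* Illustrative use (an editor's sketch; pat and newsrc are placeholder
   names for a single-SET pattern and its new source): tentatively replace
   the source of the SET so the change is recorded in undobuf and can be
   reverted by undo_all if the combination is abandoned:

     SUBST (SET_SRC (pat), newsrc);
*/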
760 /* Similar to SUBST, but NEWVAL is an int expression. Note that substitution
761 for the value of a HOST_WIDE_INT value (including CONST_INT) is
762 not safe. */
764 static void
765 do_SUBST_INT (int *into, int newval)
767 struct undo *buf;
768 int oldval = *into;
770 if (oldval == newval)
771 return;
773 if (undobuf.frees)
774 buf = undobuf.frees, undobuf.frees = buf->next;
775 else
776 buf = XNEW (struct undo);
778 buf->kind = UNDO_INT;
779 buf->where.i = into;
780 buf->old_contents.i = oldval;
781 *into = newval;
783 buf->next = undobuf.undos, undobuf.undos = buf;
786 #define SUBST_INT(INTO, NEWVAL) do_SUBST_INT (&(INTO), (NEWVAL))
788 /* Similar to SUBST, but just substitute the mode. This is used when
789 changing the mode of a pseudo-register, so that any other
790 references to the entry in the regno_reg_rtx array will change as
791 well. */
793 static void
794 do_SUBST_MODE (rtx *into, machine_mode newval)
796 struct undo *buf;
797 machine_mode oldval = GET_MODE (*into);
799 if (oldval == newval)
800 return;
802 if (undobuf.frees)
803 buf = undobuf.frees, undobuf.frees = buf->next;
804 else
805 buf = XNEW (struct undo);
807 buf->kind = UNDO_MODE;
808 buf->where.r = into;
809 buf->old_contents.m = oldval;
810 adjust_reg_mode (*into, newval);
812 buf->next = undobuf.undos, undobuf.undos = buf;
815 #define SUBST_MODE(INTO, NEWVAL) do_SUBST_MODE (&(INTO), (NEWVAL))
817 /* Similar to SUBST, but NEWVAL is a LOG_LINKS expression. */
819 static void
820 do_SUBST_LINK (struct insn_link **into, struct insn_link *newval)
822 struct undo *buf;
823 struct insn_link * oldval = *into;
825 if (oldval == newval)
826 return;
828 if (undobuf.frees)
829 buf = undobuf.frees, undobuf.frees = buf->next;
830 else
831 buf = XNEW (struct undo);
833 buf->kind = UNDO_LINKS;
834 buf->where.l = into;
835 buf->old_contents.l = oldval;
836 *into = newval;
838 buf->next = undobuf.undos, undobuf.undos = buf;
841 #define SUBST_LINK(oldval, newval) do_SUBST_LINK (&oldval, newval)
843 /* Subroutine of try_combine. Determine whether the replacement patterns
844 NEWPAT, NEWI2PAT and NEWOTHERPAT are cheaper according to insn_cost
845 than the original sequence I0, I1, I2, I3 and undobuf.other_insn. Note
846 that I0, I1 and/or NEWI2PAT may be NULL_RTX. Similarly, NEWOTHERPAT and
847 undobuf.other_insn may also both be NULL_RTX. Return false if the cost
848 of all the instructions can be estimated and the replacements are more
849 expensive than the original sequence. */
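/* Worked example (illustrative only, not from the original sources): if I2
   and I3 have recorded costs 4 and 4, old_cost is 8.  A replacement NEWPAT
   of cost 6 (with no NEWI2PAT) gives new_cost == 6 <= 8, so the combination
   is allowed; a replacement of cost 10 would be rejected because both totals
   are known (> 0) and new_cost > old_cost.  */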
851 static bool
852 combine_validate_cost (rtx_insn *i0, rtx_insn *i1, rtx_insn *i2, rtx_insn *i3,
853 rtx newpat, rtx newi2pat, rtx newotherpat)
855 int i0_cost, i1_cost, i2_cost, i3_cost;
856 int new_i2_cost, new_i3_cost;
857 int old_cost, new_cost;
859 /* Lookup the original insn_costs. */
860 i2_cost = INSN_COST (i2);
861 i3_cost = INSN_COST (i3);
863 if (i1)
865 i1_cost = INSN_COST (i1);
866 if (i0)
868 i0_cost = INSN_COST (i0);
869 old_cost = (i0_cost > 0 && i1_cost > 0 && i2_cost > 0 && i3_cost > 0
870 ? i0_cost + i1_cost + i2_cost + i3_cost : 0);
872 else
874 old_cost = (i1_cost > 0 && i2_cost > 0 && i3_cost > 0
875 ? i1_cost + i2_cost + i3_cost : 0);
876 i0_cost = 0;
879 else
881 old_cost = (i2_cost > 0 && i3_cost > 0) ? i2_cost + i3_cost : 0;
882 i1_cost = i0_cost = 0;
885 /* If we have split a PARALLEL I2 to I1,I2, we have counted its cost twice;
886 correct that. */
887 if (old_cost && i1 && INSN_UID (i1) == INSN_UID (i2))
888 old_cost -= i1_cost;
891 /* Calculate the replacement insn_costs. */
892 rtx tmp = PATTERN (i3);
893 PATTERN (i3) = newpat;
894 int tmpi = INSN_CODE (i3);
895 INSN_CODE (i3) = -1;
896 new_i3_cost = insn_cost (i3, optimize_this_for_speed_p);
897 PATTERN (i3) = tmp;
898 INSN_CODE (i3) = tmpi;
899 if (newi2pat)
901 tmp = PATTERN (i2);
902 PATTERN (i2) = newi2pat;
903 tmpi = INSN_CODE (i2);
904 INSN_CODE (i2) = -1;
905 new_i2_cost = insn_cost (i2, optimize_this_for_speed_p);
906 PATTERN (i2) = tmp;
907 INSN_CODE (i2) = tmpi;
908 new_cost = (new_i2_cost > 0 && new_i3_cost > 0)
909 ? new_i2_cost + new_i3_cost : 0;
911 else
913 new_cost = new_i3_cost;
914 new_i2_cost = 0;
917 if (undobuf.other_insn)
919 int old_other_cost, new_other_cost;
921 old_other_cost = INSN_COST (undobuf.other_insn);
922 tmp = PATTERN (undobuf.other_insn);
923 PATTERN (undobuf.other_insn) = newotherpat;
924 tmpi = INSN_CODE (undobuf.other_insn);
925 INSN_CODE (undobuf.other_insn) = -1;
926 new_other_cost = insn_cost (undobuf.other_insn,
927 optimize_this_for_speed_p);
928 PATTERN (undobuf.other_insn) = tmp;
929 INSN_CODE (undobuf.other_insn) = tmpi;
930 if (old_other_cost > 0 && new_other_cost > 0)
932 old_cost += old_other_cost;
933 new_cost += new_other_cost;
935 else
936 old_cost = 0;
939 /* Disallow this combination if both new_cost and old_cost are greater than
940 zero, and new_cost is greater than old cost. */
941 int reject = old_cost > 0 && new_cost > old_cost;
943 if (dump_file)
945 fprintf (dump_file, "%s combination of insns ",
946 reject ? "rejecting" : "allowing");
947 if (i0)
948 fprintf (dump_file, "%d, ", INSN_UID (i0));
949 if (i1 && INSN_UID (i1) != INSN_UID (i2))
950 fprintf (dump_file, "%d, ", INSN_UID (i1));
951 fprintf (dump_file, "%d and %d\n", INSN_UID (i2), INSN_UID (i3));
953 fprintf (dump_file, "original costs ");
954 if (i0)
955 fprintf (dump_file, "%d + ", i0_cost);
956 if (i1 && INSN_UID (i1) != INSN_UID (i2))
957 fprintf (dump_file, "%d + ", i1_cost);
958 fprintf (dump_file, "%d + %d = %d\n", i2_cost, i3_cost, old_cost);
960 if (newi2pat)
961 fprintf (dump_file, "replacement costs %d + %d = %d\n",
962 new_i2_cost, new_i3_cost, new_cost);
963 else
964 fprintf (dump_file, "replacement cost %d\n", new_cost);
967 if (reject)
968 return false;
970 /* Update the uid_insn_cost array with the replacement costs. */
971 INSN_COST (i2) = new_i2_cost;
972 INSN_COST (i3) = new_i3_cost;
973 if (i1)
975 INSN_COST (i1) = 0;
976 if (i0)
977 INSN_COST (i0) = 0;
980 return true;
984 /* Delete any insns that copy a register to itself. */
986 static void
987 delete_noop_moves (void)
989 rtx_insn *insn, *next;
990 basic_block bb;
992 FOR_EACH_BB_FN (bb, cfun)
994 for (insn = BB_HEAD (bb); insn != NEXT_INSN (BB_END (bb)); insn = next)
996 next = NEXT_INSN (insn);
997 if (INSN_P (insn) && noop_move_p (insn))
999 if (dump_file)
1000 fprintf (dump_file, "deleting noop move %d\n", INSN_UID (insn));
1002 delete_insn_and_edges (insn);
1009 /* Return false if we do not want to (or cannot) combine DEF. */
1010 static bool
1011 can_combine_def_p (df_ref def)
1013 /* Do not consider if it is pre/post modification in MEM. */
1014 if (DF_REF_FLAGS (def) & DF_REF_PRE_POST_MODIFY)
1015 return false;
1017 unsigned int regno = DF_REF_REGNO (def);
1019 /* Do not combine frame pointer adjustments. */
1020 if ((regno == FRAME_POINTER_REGNUM
1021 && (!reload_completed || frame_pointer_needed))
1022 || (!HARD_FRAME_POINTER_IS_FRAME_POINTER
1023 && regno == HARD_FRAME_POINTER_REGNUM
1024 && (!reload_completed || frame_pointer_needed))
1025 || (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
1026 && regno == ARG_POINTER_REGNUM && fixed_regs[regno]))
1027 return false;
1029 return true;
1032 /* Return false if we do not want to (or cannot) combine USE. */
1033 static bool
1034 can_combine_use_p (df_ref use)
1036 /* Do not consider the usage of the stack pointer by function call. */
1037 if (DF_REF_FLAGS (use) & DF_REF_CALL_STACK_USAGE)
1038 return false;
1040 return true;
1043 /* Fill in log links field for all insns. */
1045 static void
1046 create_log_links (void)
1048 basic_block bb;
1049 rtx_insn **next_use;
1050 rtx_insn *insn;
1051 df_ref def, use;
1053 next_use = XCNEWVEC (rtx_insn *, max_reg_num ());
1055 /* Pass through each block from the end, recording the uses of each
1056 register and establishing log links when def is encountered.
1057 Note that we do not clear next_use array in order to save time,
1058 so we have to test whether the use is in the same basic block as def.
1060 There are a few cases below when we do not consider the definition or
 1061    usage -- these are carried over from what the original flow.c did.  Don't ask me why it is
1062 done this way; I don't know and if it works, I don't want to know. */
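  /* Illustrative example (not part of the original sources): if insn 10 sets
     pseudo 100 and insn 12 is the next insn in the same basic block to use
     pseudo 100, the backward scan below records next_use[100] = insn 12 when
     it reaches insn 12, and then adds a LOG_LINK (insn 10, regno 100) to
     insn 12 when it reaches the definition in insn 10.  */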
1064 FOR_EACH_BB_FN (bb, cfun)
1066 FOR_BB_INSNS_REVERSE (bb, insn)
1068 if (!NONDEBUG_INSN_P (insn))
1069 continue;
1071 /* Log links are created only once. */
1072 gcc_assert (!LOG_LINKS (insn));
1074 FOR_EACH_INSN_DEF (def, insn)
1076 unsigned int regno = DF_REF_REGNO (def);
1077 rtx_insn *use_insn;
1079 if (!next_use[regno])
1080 continue;
1082 if (!can_combine_def_p (def))
1083 continue;
1085 use_insn = next_use[regno];
1086 next_use[regno] = NULL;
1088 if (BLOCK_FOR_INSN (use_insn) != bb)
1089 continue;
1091 /* flow.c claimed:
1093 We don't build a LOG_LINK for hard registers contained
1094 in ASM_OPERANDs. If these registers get replaced,
1095 we might wind up changing the semantics of the insn,
1096 even if reload can make what appear to be valid
1097 assignments later. */
1098 if (regno < FIRST_PSEUDO_REGISTER
1099 && asm_noperands (PATTERN (use_insn)) >= 0)
1100 continue;
1102 /* Don't add duplicate links between instructions. */
1103 struct insn_link *links;
1104 FOR_EACH_LOG_LINK (links, use_insn)
1105 if (insn == links->insn && regno == links->regno)
1106 break;
1108 if (!links)
1109 LOG_LINKS (use_insn)
1110 = alloc_insn_link (insn, regno, LOG_LINKS (use_insn));
1113 FOR_EACH_INSN_USE (use, insn)
1114 if (can_combine_use_p (use))
1115 next_use[DF_REF_REGNO (use)] = insn;
1119 free (next_use);
1122 /* Walk the LOG_LINKS of insn B to see if we find a reference to A. Return
1123 true if we found a LOG_LINK that proves that A feeds B. This only works
1124 if there are no instructions between A and B which could have a link
1125 depending on A, since in that case we would not record a link for B.
1126 We also check the implicit dependency created by a cc0 setter/user
1127 pair. */
1129 static bool
1130 insn_a_feeds_b (rtx_insn *a, rtx_insn *b)
1132 struct insn_link *links;
1133 FOR_EACH_LOG_LINK (links, b)
1134 if (links->insn == a)
1135 return true;
1136 if (HAVE_cc0 && sets_cc0_p (a))
1137 return true;
1138 return false;
1141 /* Main entry point for combiner. F is the first insn of the function.
1142 NREGS is the first unused pseudo-reg number.
1144 Return nonzero if the combiner has turned an indirect jump
1145 instruction into a direct jump. */
1146 static int
1147 combine_instructions (rtx_insn *f, unsigned int nregs)
1149 rtx_insn *insn, *next;
1150 rtx_insn *prev;
1151 struct insn_link *links, *nextlinks;
1152 rtx_insn *first;
1153 basic_block last_bb;
1155 int new_direct_jump_p = 0;
1157 for (first = f; first && !NONDEBUG_INSN_P (first); )
1158 first = NEXT_INSN (first);
1159 if (!first)
1160 return 0;
1162 combine_attempts = 0;
1163 combine_merges = 0;
1164 combine_extras = 0;
1165 combine_successes = 0;
1167 rtl_hooks = combine_rtl_hooks;
1169 reg_stat.safe_grow_cleared (nregs);
1171 init_recog_no_volatile ();
1173 /* Allocate array for insn info. */
1174 max_uid_known = get_max_uid ();
1175 uid_log_links = XCNEWVEC (struct insn_link *, max_uid_known + 1);
1176 uid_insn_cost = XCNEWVEC (int, max_uid_known + 1);
1177 gcc_obstack_init (&insn_link_obstack);
1179 nonzero_bits_mode = int_mode_for_size (HOST_BITS_PER_WIDE_INT, 0).require ();
1181 /* Don't use reg_stat[].nonzero_bits when computing it. This can cause
1182 problems when, for example, we have j <<= 1 in a loop. */
1184 nonzero_sign_valid = 0;
1185 label_tick = label_tick_ebb_start = 1;
1187 /* Scan all SETs and see if we can deduce anything about what
1188 bits are known to be zero for some registers and how many copies
1189 of the sign bit are known to exist for those registers.
1191 Also set any known values so that we can use it while searching
1192 for what bits are known to be set. */
1194 setup_incoming_promotions (first);
1195 /* Allow the entry block and the first block to fall into the same EBB.
1196 Conceptually the incoming promotions are assigned to the entry block. */
1197 last_bb = ENTRY_BLOCK_PTR_FOR_FN (cfun);
1199 create_log_links ();
1200 FOR_EACH_BB_FN (this_basic_block, cfun)
1202 optimize_this_for_speed_p = optimize_bb_for_speed_p (this_basic_block);
1203 last_call_luid = 0;
1204 mem_last_set = -1;
1206 label_tick++;
1207 if (!single_pred_p (this_basic_block)
1208 || single_pred (this_basic_block) != last_bb)
1209 label_tick_ebb_start = label_tick;
1210 last_bb = this_basic_block;
1212 FOR_BB_INSNS (this_basic_block, insn)
1213 if (INSN_P (insn) && BLOCK_FOR_INSN (insn))
1215 rtx links;
1217 subst_low_luid = DF_INSN_LUID (insn);
1218 subst_insn = insn;
1220 note_stores (PATTERN (insn), set_nonzero_bits_and_sign_copies,
1221 insn);
1222 record_dead_and_set_regs (insn);
1224 if (AUTO_INC_DEC)
1225 for (links = REG_NOTES (insn); links; links = XEXP (links, 1))
1226 if (REG_NOTE_KIND (links) == REG_INC)
1227 set_nonzero_bits_and_sign_copies (XEXP (links, 0), NULL_RTX,
1228 insn);
1230 /* Record the current insn_cost of this instruction. */
1231 if (NONJUMP_INSN_P (insn))
1232 INSN_COST (insn) = insn_cost (insn, optimize_this_for_speed_p);
1233 if (dump_file)
1235 fprintf (dump_file, "insn_cost %d for ", INSN_COST (insn));
1236 dump_insn_slim (dump_file, insn);
1241 nonzero_sign_valid = 1;
1243 /* Now scan all the insns in forward order. */
1244 label_tick = label_tick_ebb_start = 1;
1245 init_reg_last ();
1246 setup_incoming_promotions (first);
1247 last_bb = ENTRY_BLOCK_PTR_FOR_FN (cfun);
1248 int max_combine = PARAM_VALUE (PARAM_MAX_COMBINE_INSNS);
1250 FOR_EACH_BB_FN (this_basic_block, cfun)
1252 rtx_insn *last_combined_insn = NULL;
1254 /* Ignore instruction combination in basic blocks that are going to
1255 be removed as unreachable anyway. See PR82386. */
1256 if (EDGE_COUNT (this_basic_block->preds) == 0)
1257 continue;
1259 optimize_this_for_speed_p = optimize_bb_for_speed_p (this_basic_block);
1260 last_call_luid = 0;
1261 mem_last_set = -1;
1263 label_tick++;
1264 if (!single_pred_p (this_basic_block)
1265 || single_pred (this_basic_block) != last_bb)
1266 label_tick_ebb_start = label_tick;
1267 last_bb = this_basic_block;
1269 rtl_profile_for_bb (this_basic_block);
1270 for (insn = BB_HEAD (this_basic_block);
1271 insn != NEXT_INSN (BB_END (this_basic_block));
1272 insn = next ? next : NEXT_INSN (insn))
1274 next = 0;
1275 if (!NONDEBUG_INSN_P (insn))
1276 continue;
1278 while (last_combined_insn
1279 && (!NONDEBUG_INSN_P (last_combined_insn)
1280 || last_combined_insn->deleted ()))
1281 last_combined_insn = PREV_INSN (last_combined_insn);
1282 if (last_combined_insn == NULL_RTX
1283 || BLOCK_FOR_INSN (last_combined_insn) != this_basic_block
1284 || DF_INSN_LUID (last_combined_insn) <= DF_INSN_LUID (insn))
1285 last_combined_insn = insn;
1287 /* See if we know about function return values before this
1288 insn based upon SUBREG flags. */
1289 check_promoted_subreg (insn, PATTERN (insn));
1291 /* See if we can find hardregs and subreg of pseudos in
1292 narrower modes. This could help turning TRUNCATEs
1293 into SUBREGs. */
1294 note_uses (&PATTERN (insn), record_truncated_values, NULL);
1296 /* Try this insn with each insn it links back to. */
1298 FOR_EACH_LOG_LINK (links, insn)
1299 if ((next = try_combine (insn, links->insn, NULL,
1300 NULL, &new_direct_jump_p,
1301 last_combined_insn)) != 0)
1303 statistics_counter_event (cfun, "two-insn combine", 1);
1304 goto retry;
1307 /* Try each sequence of three linked insns ending with this one. */
1309 if (max_combine >= 3)
1310 FOR_EACH_LOG_LINK (links, insn)
1312 rtx_insn *link = links->insn;
1314 /* If the linked insn has been replaced by a note, then there
1315 is no point in pursuing this chain any further. */
1316 if (NOTE_P (link))
1317 continue;
1319 FOR_EACH_LOG_LINK (nextlinks, link)
1320 if ((next = try_combine (insn, link, nextlinks->insn,
1321 NULL, &new_direct_jump_p,
1322 last_combined_insn)) != 0)
1324 statistics_counter_event (cfun, "three-insn combine", 1);
1325 goto retry;
1329 /* Try to combine a jump insn that uses CC0
1330 with a preceding insn that sets CC0, and maybe with its
1331 logical predecessor as well.
1332 This is how we make decrement-and-branch insns.
1333 We need this special code because data flow connections
1334 via CC0 do not get entered in LOG_LINKS. */
1336 if (HAVE_cc0
1337 && JUMP_P (insn)
1338 && (prev = prev_nonnote_insn (insn)) != 0
1339 && NONJUMP_INSN_P (prev)
1340 && sets_cc0_p (PATTERN (prev)))
1342 if ((next = try_combine (insn, prev, NULL, NULL,
1343 &new_direct_jump_p,
1344 last_combined_insn)) != 0)
1345 goto retry;
1347 FOR_EACH_LOG_LINK (nextlinks, prev)
1348 if ((next = try_combine (insn, prev, nextlinks->insn,
1349 NULL, &new_direct_jump_p,
1350 last_combined_insn)) != 0)
1351 goto retry;
1354 /* Do the same for an insn that explicitly references CC0. */
1355 if (HAVE_cc0 && NONJUMP_INSN_P (insn)
1356 && (prev = prev_nonnote_insn (insn)) != 0
1357 && NONJUMP_INSN_P (prev)
1358 && sets_cc0_p (PATTERN (prev))
1359 && GET_CODE (PATTERN (insn)) == SET
1360 && reg_mentioned_p (cc0_rtx, SET_SRC (PATTERN (insn))))
1362 if ((next = try_combine (insn, prev, NULL, NULL,
1363 &new_direct_jump_p,
1364 last_combined_insn)) != 0)
1365 goto retry;
1367 FOR_EACH_LOG_LINK (nextlinks, prev)
1368 if ((next = try_combine (insn, prev, nextlinks->insn,
1369 NULL, &new_direct_jump_p,
1370 last_combined_insn)) != 0)
1371 goto retry;
1374 /* Finally, see if any of the insns that this insn links to
1375 explicitly references CC0. If so, try this insn, that insn,
1376 and its predecessor if it sets CC0. */
1377 if (HAVE_cc0)
1379 FOR_EACH_LOG_LINK (links, insn)
1380 if (NONJUMP_INSN_P (links->insn)
1381 && GET_CODE (PATTERN (links->insn)) == SET
1382 && reg_mentioned_p (cc0_rtx, SET_SRC (PATTERN (links->insn)))
1383 && (prev = prev_nonnote_insn (links->insn)) != 0
1384 && NONJUMP_INSN_P (prev)
1385 && sets_cc0_p (PATTERN (prev))
1386 && (next = try_combine (insn, links->insn,
1387 prev, NULL, &new_direct_jump_p,
1388 last_combined_insn)) != 0)
1389 goto retry;
1392 /* Try combining an insn with two different insns whose results it
1393 uses. */
1394 if (max_combine >= 3)
1395 FOR_EACH_LOG_LINK (links, insn)
1396 for (nextlinks = links->next; nextlinks;
1397 nextlinks = nextlinks->next)
1398 if ((next = try_combine (insn, links->insn,
1399 nextlinks->insn, NULL,
1400 &new_direct_jump_p,
1401 last_combined_insn)) != 0)
1404 statistics_counter_event (cfun, "three-insn combine", 1);
1405 goto retry;
1408 /* Try four-instruction combinations. */
1409 if (max_combine >= 4)
1410 FOR_EACH_LOG_LINK (links, insn)
1412 struct insn_link *next1;
1413 rtx_insn *link = links->insn;
1415 /* If the linked insn has been replaced by a note, then there
1416 is no point in pursuing this chain any further. */
1417 if (NOTE_P (link))
1418 continue;
1420 FOR_EACH_LOG_LINK (next1, link)
1422 rtx_insn *link1 = next1->insn;
1423 if (NOTE_P (link1))
1424 continue;
1425 /* I0 -> I1 -> I2 -> I3. */
1426 FOR_EACH_LOG_LINK (nextlinks, link1)
1427 if ((next = try_combine (insn, link, link1,
1428 nextlinks->insn,
1429 &new_direct_jump_p,
1430 last_combined_insn)) != 0)
1432 statistics_counter_event (cfun, "four-insn combine", 1);
1433 goto retry;
1435 /* I0, I1 -> I2, I2 -> I3. */
1436 for (nextlinks = next1->next; nextlinks;
1437 nextlinks = nextlinks->next)
1438 if ((next = try_combine (insn, link, link1,
1439 nextlinks->insn,
1440 &new_direct_jump_p,
1441 last_combined_insn)) != 0)
1443 statistics_counter_event (cfun, "four-insn combine", 1);
1444 goto retry;
1448 for (next1 = links->next; next1; next1 = next1->next)
1450 rtx_insn *link1 = next1->insn;
1451 if (NOTE_P (link1))
1452 continue;
1453 /* I0 -> I2; I1, I2 -> I3. */
1454 FOR_EACH_LOG_LINK (nextlinks, link)
1455 if ((next = try_combine (insn, link, link1,
1456 nextlinks->insn,
1457 &new_direct_jump_p,
1458 last_combined_insn)) != 0)
1460 statistics_counter_event (cfun, "four-insn combine", 1);
1461 goto retry;
1463 /* I0 -> I1; I1, I2 -> I3. */
1464 FOR_EACH_LOG_LINK (nextlinks, link1)
1465 if ((next = try_combine (insn, link, link1,
1466 nextlinks->insn,
1467 &new_direct_jump_p,
1468 last_combined_insn)) != 0)
1470 statistics_counter_event (cfun, "four-insn combine", 1);
1471 goto retry;
1476 /* Try this insn with each REG_EQUAL note it links back to. */
1477 FOR_EACH_LOG_LINK (links, insn)
1479 rtx set, note;
1480 rtx_insn *temp = links->insn;
1481 if ((set = single_set (temp)) != 0
1482 && (note = find_reg_equal_equiv_note (temp)) != 0
1483 && (note = XEXP (note, 0), GET_CODE (note)) != EXPR_LIST
 1484    /* Avoid using a register that may have already been marked
1485 dead by an earlier instruction. */
1486 && ! unmentioned_reg_p (note, SET_SRC (set))
1487 && (GET_MODE (note) == VOIDmode
1488 ? SCALAR_INT_MODE_P (GET_MODE (SET_DEST (set)))
1489 : (GET_MODE (SET_DEST (set)) == GET_MODE (note)
1490 && (GET_CODE (SET_DEST (set)) != ZERO_EXTRACT
1491 || (GET_MODE (XEXP (SET_DEST (set), 0))
1492 == GET_MODE (note))))))
1494 /* Temporarily replace the set's source with the
1495 contents of the REG_EQUAL note. The insn will
1496 be deleted or recognized by try_combine. */
1497 rtx orig_src = SET_SRC (set);
1498 rtx orig_dest = SET_DEST (set);
1499 if (GET_CODE (SET_DEST (set)) == ZERO_EXTRACT)
1500 SET_DEST (set) = XEXP (SET_DEST (set), 0);
1501 SET_SRC (set) = note;
1502 i2mod = temp;
1503 i2mod_old_rhs = copy_rtx (orig_src);
1504 i2mod_new_rhs = copy_rtx (note);
1505 next = try_combine (insn, i2mod, NULL, NULL,
1506 &new_direct_jump_p,
1507 last_combined_insn);
1508 i2mod = NULL;
1509 if (next)
1511 statistics_counter_event (cfun, "insn-with-note combine", 1);
1512 goto retry;
1514 SET_SRC (set) = orig_src;
1515 SET_DEST (set) = orig_dest;
1519 if (!NOTE_P (insn))
1520 record_dead_and_set_regs (insn);
1522 retry:
1527 default_rtl_profile ();
1528 clear_bb_flags ();
1529 new_direct_jump_p |= purge_all_dead_edges ();
1530 delete_noop_moves ();
1532 /* Clean up. */
1533 obstack_free (&insn_link_obstack, NULL);
1534 free (uid_log_links);
1535 free (uid_insn_cost);
1536 reg_stat.release ();
1539 struct undo *undo, *next;
1540 for (undo = undobuf.frees; undo; undo = next)
1542 next = undo->next;
1543 free (undo);
1545 undobuf.frees = 0;
1548 total_attempts += combine_attempts;
1549 total_merges += combine_merges;
1550 total_extras += combine_extras;
1551 total_successes += combine_successes;
1553 nonzero_sign_valid = 0;
1554 rtl_hooks = general_rtl_hooks;
1556 /* Make recognizer allow volatile MEMs again. */
1557 init_recog ();
1559 return new_direct_jump_p;
1562 /* Wipe the last_xxx fields of reg_stat in preparation for another pass. */
1564 static void
1565 init_reg_last (void)
1567 unsigned int i;
1568 reg_stat_type *p;
1570 FOR_EACH_VEC_ELT (reg_stat, i, p)
1571 memset (p, 0, offsetof (reg_stat_type, sign_bit_copies));
1574 /* Set up any promoted values for incoming argument registers. */
1576 static void
1577 setup_incoming_promotions (rtx_insn *first)
1579 tree arg;
1580 bool strictly_local = false;
1582 for (arg = DECL_ARGUMENTS (current_function_decl); arg;
1583 arg = DECL_CHAIN (arg))
1585 rtx x, reg = DECL_INCOMING_RTL (arg);
1586 int uns1, uns3;
1587 machine_mode mode1, mode2, mode3, mode4;
1589 /* Only continue if the incoming argument is in a register. */
1590 if (!REG_P (reg))
1591 continue;
1593 /* Determine, if possible, whether all call sites of the current
1594 function lie within the current compilation unit. (This does
1595 take into account the exporting of a function via taking its
1596 address, and so forth.) */
1597 strictly_local = cgraph_node::local_info (current_function_decl)->local;
1599 /* The mode and signedness of the argument before any promotions happen
1600 (equal to the mode of the pseudo holding it at that stage). */
1601 mode1 = TYPE_MODE (TREE_TYPE (arg));
1602 uns1 = TYPE_UNSIGNED (TREE_TYPE (arg));
1604 /* The mode and signedness of the argument after any source language and
1605 TARGET_PROMOTE_PROTOTYPES-driven promotions. */
1606 mode2 = TYPE_MODE (DECL_ARG_TYPE (arg));
1607 uns3 = TYPE_UNSIGNED (DECL_ARG_TYPE (arg));
1609 /* The mode and signedness of the argument as it is actually passed,
1610 see assign_parm_setup_reg in function.c. */
1611 mode3 = promote_function_mode (TREE_TYPE (arg), mode1, &uns3,
1612 TREE_TYPE (cfun->decl), 0);
1614 /* The mode of the register in which the argument is being passed. */
1615 mode4 = GET_MODE (reg);
1617 /* Eliminate sign extensions in the callee when:
1618 (a) A mode promotion has occurred; */
1619 if (mode1 == mode3)
1620 continue;
1621 /* (b) The mode of the register is the same as the mode of
1622 the argument as it is passed; */
1623 if (mode3 != mode4)
1624 continue;
1625 /* (c) There's no language level extension; */
1626 if (mode1 == mode2)
1628 /* (c.1) All callers are from the current compilation unit. If that's
1629 the case we don't have to rely on an ABI, we only have to know
1630 what we're generating right now, and we know that we will do the
1631 mode1 to mode2 promotion with the given sign. */
1632 else if (!strictly_local)
1633 continue;
1634 /* (c.2) The combination of the two promotions is useful. This is
1635 true when the signs match, or if the first promotion is unsigned.
 1636    In the latter case, (sign_extend (zero_extend x)) is the same as
1637 (zero_extend (zero_extend x)), so make sure to force UNS3 true. */
1638 else if (uns1)
1639 uns3 = true;
1640 else if (uns3)
1641 continue;
1643 /* Record that the value was promoted from mode1 to mode3,
1644 so that any sign extension at the head of the current
1645 function may be eliminated. */
1646 x = gen_rtx_CLOBBER (mode1, const0_rtx);
1647 x = gen_rtx_fmt_e ((uns3 ? ZERO_EXTEND : SIGN_EXTEND), mode3, x);
1648 record_value_for_reg (reg, first, x);
1652 /* If MODE has a precision lower than PREC and SRC is a non-negative constant
1653 that would appear negative in MODE, sign-extend SRC for use in nonzero_bits
1654 because some machines (maybe most) will actually do the sign-extension and
1655 this is the conservative approach.
1657 ??? For 2.5, try to tighten up the MD files in this regard instead of this
1658 kludge. */
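/* For example (an editor's illustration, not from the original sources):
   with MODE == QImode and PREC == 32, a SRC of (const_int 128) has its
   QImode sign bit set, so it is rewritten as
   GEN_INT (128 | ~GET_MODE_MASK (QImode)), i.e. (const_int -128), matching
   what a sign-extending machine would hold in the register.  */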
1660 static rtx
1661 sign_extend_short_imm (rtx src, machine_mode mode, unsigned int prec)
1663 scalar_int_mode int_mode;
1664 if (CONST_INT_P (src)
1665 && is_a <scalar_int_mode> (mode, &int_mode)
1666 && GET_MODE_PRECISION (int_mode) < prec
1667 && INTVAL (src) > 0
1668 && val_signbit_known_set_p (int_mode, INTVAL (src)))
1669 src = GEN_INT (INTVAL (src) | ~GET_MODE_MASK (int_mode));
1671 return src;
1674 /* Update RSP for pseudo-register X from INSN's REG_EQUAL note (if one exists)
1675 and SET. */
1677 static void
1678 update_rsp_from_reg_equal (reg_stat_type *rsp, rtx_insn *insn, const_rtx set,
1679 rtx x)
1681 rtx reg_equal_note = insn ? find_reg_equal_equiv_note (insn) : NULL_RTX;
1682 unsigned HOST_WIDE_INT bits = 0;
1683 rtx reg_equal = NULL, src = SET_SRC (set);
1684 unsigned int num = 0;
1686 if (reg_equal_note)
1687 reg_equal = XEXP (reg_equal_note, 0);
1689 if (SHORT_IMMEDIATES_SIGN_EXTEND)
1691 src = sign_extend_short_imm (src, GET_MODE (x), BITS_PER_WORD);
1692 if (reg_equal)
1693 reg_equal = sign_extend_short_imm (reg_equal, GET_MODE (x), BITS_PER_WORD);
1696 /* Don't call nonzero_bits if it cannot change anything. */
1697 if (rsp->nonzero_bits != HOST_WIDE_INT_M1U)
1699 bits = nonzero_bits (src, nonzero_bits_mode);
1700 if (reg_equal && bits)
1701 bits &= nonzero_bits (reg_equal, nonzero_bits_mode);
1702 rsp->nonzero_bits |= bits;
1705 /* Don't call num_sign_bit_copies if it cannot change anything. */
1706 if (rsp->sign_bit_copies != 1)
1708 num = num_sign_bit_copies (SET_SRC (set), GET_MODE (x));
1709 if (reg_equal && num != GET_MODE_PRECISION (GET_MODE (x)))
1711 unsigned int numeq = num_sign_bit_copies (reg_equal, GET_MODE (x));
1712 if (num == 0 || numeq > num)
1713 num = numeq;
1715 if (rsp->sign_bit_copies == 0 || num < rsp->sign_bit_copies)
1716 rsp->sign_bit_copies = num;
1720 /* Called via note_stores. If X is a pseudo that is narrower than
1721 HOST_BITS_PER_WIDE_INT and is being set, record what bits are known zero.
1723 If we are setting only a portion of X and we can't figure out what
1724 portion, assume all bits will be used since we don't know what will
1725 be happening.
1727 Similarly, set how many bits of X are known to be copies of the sign bit
1728 at all locations in the function. This is the smallest number implied
1729 by any set of X. */
1731 static void
1732 set_nonzero_bits_and_sign_copies (rtx x, const_rtx set, void *data)
1734 rtx_insn *insn = (rtx_insn *) data;
1735 scalar_int_mode mode;
1737 if (REG_P (x)
1738 && REGNO (x) >= FIRST_PSEUDO_REGISTER
1739 /* If this register is undefined at the start of the file, we can't
1740 say what its contents were. */
1741 && ! REGNO_REG_SET_P
1742 (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb), REGNO (x))
1743 && is_a <scalar_int_mode> (GET_MODE (x), &mode)
1744 && HWI_COMPUTABLE_MODE_P (mode))
1746 reg_stat_type *rsp = &reg_stat[REGNO (x)];
1748 if (set == 0 || GET_CODE (set) == CLOBBER)
1750 rsp->nonzero_bits = GET_MODE_MASK (mode);
1751 rsp->sign_bit_copies = 1;
1752 return;
1755 /* If this register is being initialized using itself, and the
1756 register is uninitialized in this basic block, and there are
1757 no LOG_LINKS which set the register, then part of the
1758 register is uninitialized. In that case we can't assume
1759 anything about the number of nonzero bits.
1761 ??? We could do better if we checked this in
1762 reg_{nonzero_bits,num_sign_bit_copies}_for_combine. Then we
1763 could avoid making assumptions about the insn which initially
1764 sets the register, while still using the information in other
1765 insns. We would have to be careful to check every insn
1766 involved in the combination. */
1768 if (insn
1769 && reg_referenced_p (x, PATTERN (insn))
1770 && !REGNO_REG_SET_P (DF_LR_IN (BLOCK_FOR_INSN (insn)),
1771 REGNO (x)))
1773 struct insn_link *link;
1775 FOR_EACH_LOG_LINK (link, insn)
1776 if (dead_or_set_p (link->insn, x))
1777 break;
1778 if (!link)
1780 rsp->nonzero_bits = GET_MODE_MASK (mode);
1781 rsp->sign_bit_copies = 1;
1782 return;
1786 /* If this is a complex assignment, see if we can convert it into a
1787 simple assignment. */
1788 set = expand_field_assignment (set);
1790 /* If this is a simple assignment, or we have a paradoxical SUBREG,
1791 set what we know about X. */
1793 if (SET_DEST (set) == x
1794 || (paradoxical_subreg_p (SET_DEST (set))
1795 && SUBREG_REG (SET_DEST (set)) == x))
1796 update_rsp_from_reg_equal (rsp, insn, set, x);
1797 else
1799 rsp->nonzero_bits = GET_MODE_MASK (mode);
1800 rsp->sign_bit_copies = 1;
1805 /* See if INSN can be combined into I3. PRED, PRED2, SUCC and SUCC2 are,
1806 optionally, insns that were previously combined into I3 or that will be
1807 combined into the merger of INSN and I3. The order is PRED, PRED2,
1808 INSN, SUCC, SUCC2, I3.
1810 Return 0 if the combination is not allowed for any reason.
1812 If the combination is allowed, *PDEST will be set to the single
1813 destination of INSN and *PSRC to the single source, and this function
1814 will return 1. */
1816 static int
1817 can_combine_p (rtx_insn *insn, rtx_insn *i3, rtx_insn *pred ATTRIBUTE_UNUSED,
1818 rtx_insn *pred2 ATTRIBUTE_UNUSED, rtx_insn *succ, rtx_insn *succ2,
1819 rtx *pdest, rtx *psrc)
1821 int i;
1822 const_rtx set = 0;
1823 rtx src, dest;
1824 rtx_insn *p;
1825 rtx link;
1826 bool all_adjacent = true;
1827 int (*is_volatile_p) (const_rtx);
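/* Track whether INSN, SUCC, SUCC2 and I3 are strictly consecutive active
insns; several of the checks below apply only when they are not all
adjacent. */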
1829 if (succ)
1831 if (succ2)
1833 if (next_active_insn (succ2) != i3)
1834 all_adjacent = false;
1835 if (next_active_insn (succ) != succ2)
1836 all_adjacent = false;
1838 else if (next_active_insn (succ) != i3)
1839 all_adjacent = false;
1840 if (next_active_insn (insn) != succ)
1841 all_adjacent = false;
1843 else if (next_active_insn (insn) != i3)
1844 all_adjacent = false;
1846 /* Can combine only if previous insn is a SET of a REG, a SUBREG or CC0,
1847 or a PARALLEL consisting of such a SET and CLOBBERs.
1849 If INSN has CLOBBER parallel parts, ignore them for our processing.
1850 By definition, these happen during the execution of the insn. When it
1851 is merged with another insn, all bets are off. If they are, in fact,
1852 needed and aren't also supplied in I3, they may be added by
1853 recog_for_combine. Otherwise, it won't match.
1855 We can also ignore a SET whose SET_DEST is mentioned in a REG_UNUSED
1856 note.
1858 Get the source and destination of INSN. If more than one, can't
1859 combine. */
1861 if (GET_CODE (PATTERN (insn)) == SET)
1862 set = PATTERN (insn);
1863 else if (GET_CODE (PATTERN (insn)) == PARALLEL
1864 && GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == SET)
1866 for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
1868 rtx elt = XVECEXP (PATTERN (insn), 0, i);
1870 switch (GET_CODE (elt))
1872 /* This is important to combine floating point insns
1873 for the SH4 port. */
1874 case USE:
1875 /* Combining an isolated USE doesn't make sense.
1876 We depend here on combinable_i3pat to reject them. */
1877 /* The code below this loop only verifies that the inputs of
1878 the SET in INSN do not change. We call reg_set_between_p
1879 to verify that the REG in the USE does not change between
1880 I3 and INSN.
1881 If the USE in INSN was for a pseudo register, the matching
1882 insn pattern will likely match any register; combining this
1883 with any other USE would only be safe if we knew that the
1884 used registers have identical values, or if there was
1885 something to tell them apart, e.g. different modes. For
1886 now, we forgo such complicated tests and simply disallow
1887 combining of USES of pseudo registers with any other USE. */
1888 if (REG_P (XEXP (elt, 0))
1889 && GET_CODE (PATTERN (i3)) == PARALLEL)
1891 rtx i3pat = PATTERN (i3);
1892 int i = XVECLEN (i3pat, 0) - 1;
1893 unsigned int regno = REGNO (XEXP (elt, 0));
1897 rtx i3elt = XVECEXP (i3pat, 0, i);
1899 if (GET_CODE (i3elt) == USE
1900 && REG_P (XEXP (i3elt, 0))
1901 && (REGNO (XEXP (i3elt, 0)) == regno
1902 ? reg_set_between_p (XEXP (elt, 0),
1903 PREV_INSN (insn), i3)
1904 : regno >= FIRST_PSEUDO_REGISTER))
1905 return 0;
1907 while (--i >= 0);
1909 break;
1911 /* We can ignore CLOBBERs. */
1912 case CLOBBER:
1913 break;
1915 case SET:
1916 /* Ignore SETs whose result isn't used but not those that
1917 have side-effects. */
1918 if (find_reg_note (insn, REG_UNUSED, SET_DEST (elt))
1919 && insn_nothrow_p (insn)
1920 && !side_effects_p (elt))
1921 break;
1923 /* If we have already found a SET, this is a second one and
1924 so we cannot combine with this insn. */
1925 if (set)
1926 return 0;
1928 set = elt;
1929 break;
1931 default:
1932 /* Anything else means we can't combine. */
1933 return 0;
1937 if (set == 0
1938 /* If SET_SRC is an ASM_OPERANDS we can't throw away these CLOBBERs,
1939 so don't do anything with it. */
1940 || GET_CODE (SET_SRC (set)) == ASM_OPERANDS)
1941 return 0;
1943 else
1944 return 0;
1946 if (set == 0)
1947 return 0;
1949 /* The simplification in expand_field_assignment may call back to
1950 get_last_value, so set safe guard here. */
1951 subst_low_luid = DF_INSN_LUID (insn);
1953 set = expand_field_assignment (set);
1954 src = SET_SRC (set), dest = SET_DEST (set);
1956 /* Do not eliminate a user-specified register if it is in an
1957 asm input, because that may break the register asm usage defined
1958 in the GCC manual.
1959 Be aware that this may cover more cases than we expect, but this
1960 should be harmless. */
1961 if (REG_P (dest) && REG_USERVAR_P (dest) && HARD_REGISTER_P (dest)
1962 && extract_asm_operands (PATTERN (i3)))
1963 return 0;
1965 /* Don't eliminate a store in the stack pointer. */
1966 if (dest == stack_pointer_rtx
1967 /* Don't combine with an insn that sets a register to itself if it has
1968 a REG_EQUAL note. This may be part of a LIBCALL sequence. */
1969 || (rtx_equal_p (src, dest) && find_reg_note (insn, REG_EQUAL, NULL_RTX))
1970 /* Can't merge an ASM_OPERANDS. */
1971 || GET_CODE (src) == ASM_OPERANDS
1972 /* Can't merge a function call. */
1973 || GET_CODE (src) == CALL
1974 /* Don't eliminate a function call argument. */
1975 || (CALL_P (i3)
1976 && (find_reg_fusage (i3, USE, dest)
1977 || (REG_P (dest)
1978 && REGNO (dest) < FIRST_PSEUDO_REGISTER
1979 && global_regs[REGNO (dest)])))
1980 /* Don't substitute into an incremented register. */
1981 || FIND_REG_INC_NOTE (i3, dest)
1982 || (succ && FIND_REG_INC_NOTE (succ, dest))
1983 || (succ2 && FIND_REG_INC_NOTE (succ2, dest))
1984 /* Don't substitute into a non-local goto; this confuses the CFG. */
1985 || (JUMP_P (i3) && find_reg_note (i3, REG_NON_LOCAL_GOTO, NULL_RTX))
1986 /* Make sure that DEST is not used after INSN but before SUCC, or
1987 after SUCC and before SUCC2, or after SUCC2 but before I3. */
1988 || (!all_adjacent
1989 && ((succ2
1990 && (reg_used_between_p (dest, succ2, i3)
1991 || reg_used_between_p (dest, succ, succ2)))
1992 || (!succ2 && succ && reg_used_between_p (dest, succ, i3))
1993 || (succ
1994 /* SUCC and SUCC2 can be split halves from a PARALLEL; in
1995 that case SUCC is not in the insn stream, so use SUCC2
1996 instead for this test. */
1997 && reg_used_between_p (dest, insn,
1998 succ2
1999 && INSN_UID (succ) == INSN_UID (succ2)
2000 ? succ2 : succ))))
2001 /* Make sure that the value that is to be substituted for the register
2002 does not use any registers whose values alter in between. However,
2003 if the insns are adjacent, a use can't cross a set even though we
2004 think it might (this can happen for a sequence of insns each setting
2005 the same destination; last_set of that register might point to
2006 a NOTE). If INSN has a REG_EQUIV note, the register is always
2007 equivalent to the memory so the substitution is valid even if there
2008 are intervening stores. Also, don't move a volatile asm or
2009 UNSPEC_VOLATILE across any other insns. */
2010 || (! all_adjacent
2011 && (((!MEM_P (src)
2012 || ! find_reg_note (insn, REG_EQUIV, src))
2013 && use_crosses_set_p (src, DF_INSN_LUID (insn)))
2014 || (GET_CODE (src) == ASM_OPERANDS && MEM_VOLATILE_P (src))
2015 || GET_CODE (src) == UNSPEC_VOLATILE))
2016 /* Don't combine across a CALL_INSN, because that would possibly
2017 change whether the life span of some REGs crosses calls or not,
2018 and it is a pain to update that information.
2019 Exception: if source is a constant, moving it later can't hurt.
2020 Accept that as a special case. */
2021 || (DF_INSN_LUID (insn) < last_call_luid && ! CONSTANT_P (src)))
2022 return 0;
2024 /* DEST must either be a REG or CC0. */
2025 if (REG_P (dest))
2027 /* If register alignment is being enforced for multi-word items in all
2028 cases except for parameters, it is possible to have a register copy
2029 insn referencing a hard register that is not allowed to contain the
2030 mode being copied and which would not be valid as an operand of most
2031 insns. Eliminate this problem by not combining with such an insn.
2033 Also, on some machines we don't want to extend the life of a hard
2034 register. */
2036 if (REG_P (src)
2037 && ((REGNO (dest) < FIRST_PSEUDO_REGISTER
2038 && !targetm.hard_regno_mode_ok (REGNO (dest), GET_MODE (dest)))
2039 /* Don't extend the life of a hard register unless it is a
2040 user variable (if we have few registers) or it can't
2041 fit into the desired register (meaning something special
2042 is going on).
2043 Also avoid substituting a return register into I3, because
2044 reload can't handle a conflict with constraints of other
2045 inputs. */
2046 || (REGNO (src) < FIRST_PSEUDO_REGISTER
2047 && !targetm.hard_regno_mode_ok (REGNO (src),
2048 GET_MODE (src)))))
2049 return 0;
2051 else if (GET_CODE (dest) != CC0)
2052 return 0;
2055 if (GET_CODE (PATTERN (i3)) == PARALLEL)
2056 for (i = XVECLEN (PATTERN (i3), 0) - 1; i >= 0; i--)
2057 if (GET_CODE (XVECEXP (PATTERN (i3), 0, i)) == CLOBBER)
2059 rtx reg = XEXP (XVECEXP (PATTERN (i3), 0, i), 0);
2061 /* If the clobber represents an earlyclobber operand, we must not
2062 substitute an expression containing the clobbered register.
2063 As we do not analyze the constraint strings here, we have to
2064 make the conservative assumption. However, if the register is
2065 a fixed hard reg, the clobber cannot represent any operand;
2066 we leave it up to the machine description to either accept or
2067 reject use-and-clobber patterns. */
2068 if (!REG_P (reg)
2069 || REGNO (reg) >= FIRST_PSEUDO_REGISTER
2070 || !fixed_regs[REGNO (reg)])
2071 if (reg_overlap_mentioned_p (reg, src))
2072 return 0;
2075 /* If INSN contains anything volatile, or is an `asm' (whether volatile
2076 or not), reject, unless nothing volatile comes between it and I3. */
2078 if (GET_CODE (src) == ASM_OPERANDS || volatile_refs_p (src))
2080 /* Make sure neither succ nor succ2 contains a volatile reference. */
2081 if (succ2 != 0 && volatile_refs_p (PATTERN (succ2)))
2082 return 0;
2083 if (succ != 0 && volatile_refs_p (PATTERN (succ)))
2084 return 0;
2085 /* We'll check insns between INSN and I3 below. */
2088 /* If INSN is an asm, and DEST is a hard register, reject, since it has
2089 to be an explicit register variable, and was chosen for a reason. */
2091 if (GET_CODE (src) == ASM_OPERANDS
2092 && REG_P (dest) && REGNO (dest) < FIRST_PSEUDO_REGISTER)
2093 return 0;
2095 /* If INSN contains volatile references (specifically volatile MEMs),
2096 we cannot combine across any other volatile references.
2097 Even if INSN doesn't contain volatile references, any intervening
2098 volatile insn might affect machine state. */
2100 is_volatile_p = volatile_refs_p (PATTERN (insn))
2101 ? volatile_refs_p
2102 : volatile_insn_p;
2104 for (p = NEXT_INSN (insn); p != i3; p = NEXT_INSN (p))
2105 if (INSN_P (p) && p != succ && p != succ2 && is_volatile_p (PATTERN (p)))
2106 return 0;
2108 /* If INSN contains an autoincrement or autodecrement, make sure that
2109 register is not used between there and I3, and not already used in
2110 I3 either. Neither must it be used in PRED or SUCC, if they exist.
2111 Also insist that I3 not be a jump; if it were one
2112 and the incremented register were spilled, we would lose. */
2114 if (AUTO_INC_DEC)
2115 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2116 if (REG_NOTE_KIND (link) == REG_INC
2117 && (JUMP_P (i3)
2118 || reg_used_between_p (XEXP (link, 0), insn, i3)
2119 || (pred != NULL_RTX
2120 && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (pred)))
2121 || (pred2 != NULL_RTX
2122 && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (pred2)))
2123 || (succ != NULL_RTX
2124 && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (succ)))
2125 || (succ2 != NULL_RTX
2126 && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (succ2)))
2127 || reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (i3))))
2128 return 0;
2130 /* Don't combine an insn that follows a CC0-setting insn.
2131 An insn that uses CC0 must not be separated from the one that sets it.
2132 We do, however, allow I2 to follow a CC0-setting insn if that insn
2133 is passed as I1; in that case it will be deleted also.
2134 We also allow combining in this case if all the insns are adjacent
2135 because that would leave the two CC0 insns adjacent as well.
2136 It would be more logical to test whether CC0 occurs inside I1 or I2,
2137 but that would be much slower, and this ought to be equivalent. */
2139 if (HAVE_cc0)
2141 p = prev_nonnote_insn (insn);
2142 if (p && p != pred && NONJUMP_INSN_P (p) && sets_cc0_p (PATTERN (p))
2143 && ! all_adjacent)
2144 return 0;
2147 /* If we get here, we have passed all the tests and the combination is
2148 to be allowed. */
2150 *pdest = dest;
2151 *psrc = src;
2153 return 1;
2156 /* LOC is the location within I3 that contains its pattern or the component
2157 of a PARALLEL of the pattern. We validate that it is valid for combining.
2159 One problem is that if I3 modifies its output, as opposed to replacing it
2160 entirely, we can't allow the output to contain I2DEST, I1DEST or I0DEST, as
2161 doing so would produce an insn that is not equivalent to the original insns.
2163 Consider:
2165 (set (reg:DI 101) (reg:DI 100))
2166 (set (subreg:SI (reg:DI 101) 0) <foo>)
2168 This is NOT equivalent to:
2170 (parallel [(set (subreg:SI (reg:DI 100) 0) <foo>)
2171 (set (reg:DI 101) (reg:DI 100))])
2173 Not only does this modify 100 (in which case it might still be valid
2174 if 100 were dead in I2), it sets 101 to the ORIGINAL value of 100.
2176 We can also run into a problem if I2 sets a register that I1
2177 uses and I1 gets directly substituted into I3 (not via I2). In that
2178 case, we would be getting the wrong value of I2DEST into I3, so we
2179 must reject the combination. This case occurs when I2 and I1 both
2180 feed into I3, rather than when I1 feeds into I2, which feeds into I3.
2181 If I1_NOT_IN_SRC is nonzero, it means that finding I1 in the source
2182 of a SET must prevent combination from occurring. The same situation
2183 can occur for I0, in which case I0_NOT_IN_SRC is set.
2185 Before doing the above check, we first try to expand a field assignment
2186 into a set of logical operations.
2188 If PI3_DEST_KILLED is nonzero, it is a pointer to a location in which
2189 we place a register that is both set and used within I3. If more than one
2190 such register is detected, we fail.
2192 Return 1 if the combination is valid, zero otherwise. */
2194 static int
2195 combinable_i3pat (rtx_insn *i3, rtx *loc, rtx i2dest, rtx i1dest, rtx i0dest,
2196 int i1_not_in_src, int i0_not_in_src, rtx *pi3dest_killed)
2198 rtx x = *loc;
2200 if (GET_CODE (x) == SET)
2202 rtx set = x;
2203 rtx dest = SET_DEST (set);
2204 rtx src = SET_SRC (set);
2205 rtx inner_dest = dest;
2206 rtx subdest;
2208 while (GET_CODE (inner_dest) == STRICT_LOW_PART
2209 || GET_CODE (inner_dest) == SUBREG
2210 || GET_CODE (inner_dest) == ZERO_EXTRACT)
2211 inner_dest = XEXP (inner_dest, 0);
2213 /* Check for the case where I3 modifies its output, as discussed
2214 above. We don't want to prevent pseudos from being combined
2215 into the address of a MEM, so only prevent the combination if
2216 i1 or i2 set the same MEM. */
2217 if ((inner_dest != dest &&
2218 (!MEM_P (inner_dest)
2219 || rtx_equal_p (i2dest, inner_dest)
2220 || (i1dest && rtx_equal_p (i1dest, inner_dest))
2221 || (i0dest && rtx_equal_p (i0dest, inner_dest)))
2222 && (reg_overlap_mentioned_p (i2dest, inner_dest)
2223 || (i1dest && reg_overlap_mentioned_p (i1dest, inner_dest))
2224 || (i0dest && reg_overlap_mentioned_p (i0dest, inner_dest))))
2226 /* This is the same test done in can_combine_p except we can't test
2227 all_adjacent; we don't have to, since this instruction will stay
2228 in place, thus we are not considering increasing the lifetime of
2229 INNER_DEST.
2231 Also, if this insn sets a function argument, combining it with
2232 something that might need a spill could clobber a previous
2233 function argument; the all_adjacent test in can_combine_p also
2234 checks this; here, we do a more specific test for this case. */
2236 || (REG_P (inner_dest)
2237 && REGNO (inner_dest) < FIRST_PSEUDO_REGISTER
2238 && !targetm.hard_regno_mode_ok (REGNO (inner_dest),
2239 GET_MODE (inner_dest)))
2240 || (i1_not_in_src && reg_overlap_mentioned_p (i1dest, src))
2241 || (i0_not_in_src && reg_overlap_mentioned_p (i0dest, src)))
2242 return 0;
2244 /* If DEST is used in I3, it is being killed in this insn, so
2245 record that for later. We have to consider paradoxical
2246 subregs here, since they kill the whole register, but we
2247 ignore partial subregs, STRICT_LOW_PART, etc.
2248 Never add REG_DEAD notes for the FRAME_POINTER_REGNUM or the
2249 STACK_POINTER_REGNUM, since these are always considered to be
2250 live. Similarly for ARG_POINTER_REGNUM if it is fixed. */
2251 subdest = dest;
2252 if (GET_CODE (subdest) == SUBREG && !partial_subreg_p (subdest))
2253 subdest = SUBREG_REG (subdest);
2254 if (pi3dest_killed
2255 && REG_P (subdest)
2256 && reg_referenced_p (subdest, PATTERN (i3))
2257 && REGNO (subdest) != FRAME_POINTER_REGNUM
2258 && (HARD_FRAME_POINTER_IS_FRAME_POINTER
2259 || REGNO (subdest) != HARD_FRAME_POINTER_REGNUM)
2260 && (FRAME_POINTER_REGNUM == ARG_POINTER_REGNUM
2261 || (REGNO (subdest) != ARG_POINTER_REGNUM
2262 || ! fixed_regs [REGNO (subdest)]))
2263 && REGNO (subdest) != STACK_POINTER_REGNUM)
2265 if (*pi3dest_killed)
2266 return 0;
2268 *pi3dest_killed = subdest;
2272 else if (GET_CODE (x) == PARALLEL)
2274 int i;
2276 for (i = 0; i < XVECLEN (x, 0); i++)
2277 if (! combinable_i3pat (i3, &XVECEXP (x, 0, i), i2dest, i1dest, i0dest,
2278 i1_not_in_src, i0_not_in_src, pi3dest_killed))
2279 return 0;
2282 return 1;
2285 /* Return 1 if X is an arithmetic expression that contains a multiplication
2286 or division. We don't count multiplications by powers of two here. */
2288 static int
2289 contains_muldiv (rtx x)
2291 switch (GET_CODE (x))
2293 case MOD: case DIV: case UMOD: case UDIV:
2294 return 1;
2296 case MULT:
2297 return ! (CONST_INT_P (XEXP (x, 1))
2298 && pow2p_hwi (UINTVAL (XEXP (x, 1))));
2299 default:
2300 if (BINARY_P (x))
2301 return contains_muldiv (XEXP (x, 0))
2302 || contains_muldiv (XEXP (x, 1));
2304 if (UNARY_P (x))
2305 return contains_muldiv (XEXP (x, 0));
2307 return 0;
2311 /* Determine whether INSN can be used in a combination. Return nonzero if
2312 not. This is used in try_combine to detect early some cases where we
2313 can't perform combinations. */
2315 static int
2316 cant_combine_insn_p (rtx_insn *insn)
2318 rtx set;
2319 rtx src, dest;
2321 /* If this isn't really an insn, we can't do anything.
2322 This can occur when flow deletes an insn that it has merged into an
2323 auto-increment address. */
2324 if (!NONDEBUG_INSN_P (insn))
2325 return 1;
2327 /* Never combine loads and stores involving hard regs that are likely
2328 to be spilled. The register allocator can usually handle such
2329 reg-reg moves by tying. If we allow the combiner to make
2330 substitutions of likely-spilled regs, reload might die.
2331 As an exception, we allow combinations involving fixed regs; these are
2332 not available to the register allocator so there's no risk involved. */
2334 set = single_set (insn);
2335 if (! set)
2336 return 0;
2337 src = SET_SRC (set);
2338 dest = SET_DEST (set);
2339 if (GET_CODE (src) == SUBREG)
2340 src = SUBREG_REG (src);
2341 if (GET_CODE (dest) == SUBREG)
2342 dest = SUBREG_REG (dest);
2343 if (REG_P (src) && REG_P (dest)
2344 && ((HARD_REGISTER_P (src)
2345 && ! TEST_HARD_REG_BIT (fixed_reg_set, REGNO (src))
2346 && targetm.class_likely_spilled_p (REGNO_REG_CLASS (REGNO (src))))
2347 || (HARD_REGISTER_P (dest)
2348 && ! TEST_HARD_REG_BIT (fixed_reg_set, REGNO (dest))
2349 && targetm.class_likely_spilled_p (REGNO_REG_CLASS (REGNO (dest))))))
2350 return 1;
2352 return 0;
2355 struct likely_spilled_retval_info
2357 unsigned regno, nregs;
2358 unsigned mask;
2361 /* Called via note_stores by likely_spilled_retval_p. Remove from info->mask
2362 hard registers that are known to be written to / clobbered in full. */
2363 static void
2364 likely_spilled_retval_1 (rtx x, const_rtx set, void *data)
2366 struct likely_spilled_retval_info *const info =
2367 (struct likely_spilled_retval_info *) data;
2368 unsigned regno, nregs;
2369 unsigned new_mask;
2371 if (!REG_P (XEXP (set, 0)))
2372 return;
2373 regno = REGNO (x);
2374 if (regno >= info->regno + info->nregs)
2375 return;
2376 nregs = REG_NREGS (x);
2377 if (regno + nregs <= info->regno)
2378 return;
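/* Build a mask with one bit for each hard register written by this store
(writing it as (2 << (nregs - 1)) - 1 presumably avoids shifting by the
full width when NREGS is 32), align it to INFO->regno, and clear those
registers from INFO->mask. */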
2379 new_mask = (2U << (nregs - 1)) - 1;
2380 if (regno < info->regno)
2381 new_mask >>= info->regno - regno;
2382 else
2383 new_mask <<= regno - info->regno;
2384 info->mask &= ~new_mask;
2387 /* Return nonzero iff part of the return value is live during INSN, and
2388 it is likely spilled. This can happen when more than one insn is needed
2389 to copy the return value, e.g. when we consider combining into the
2390 second copy insn for a complex value. */
2392 static int
2393 likely_spilled_retval_p (rtx_insn *insn)
2395 rtx_insn *use = BB_END (this_basic_block);
2396 rtx reg;
2397 rtx_insn *p;
2398 unsigned regno, nregs;
2399 /* We assume here that no machine mode needs more than
2400 32 hard registers when the value overlaps with a register
2401 for which TARGET_FUNCTION_VALUE_REGNO_P is true. */
2402 unsigned mask;
2403 struct likely_spilled_retval_info info;
2405 if (!NONJUMP_INSN_P (use) || GET_CODE (PATTERN (use)) != USE || insn == use)
2406 return 0;
2407 reg = XEXP (PATTERN (use), 0);
2408 if (!REG_P (reg) || !targetm.calls.function_value_regno_p (REGNO (reg)))
2409 return 0;
2410 regno = REGNO (reg);
2411 nregs = REG_NREGS (reg);
2412 if (nregs == 1)
2413 return 0;
2414 mask = (2U << (nregs - 1)) - 1;
2416 /* Disregard parts of the return value that are set later. */
2417 info.regno = regno;
2418 info.nregs = nregs;
2419 info.mask = mask;
2420 for (p = PREV_INSN (use); info.mask && p != insn; p = PREV_INSN (p))
2421 if (INSN_P (p))
2422 note_stores (PATTERN (p), likely_spilled_retval_1, &info);
2423 mask = info.mask;
2425 /* Check if any of the (probably) live return value registers is
2426 likely spilled. */
2427 nregs --;
2430 if ((mask & 1 << nregs)
2431 && targetm.class_likely_spilled_p (REGNO_REG_CLASS (regno + nregs)))
2432 return 1;
2433 } while (nregs--);
2434 return 0;
2437 /* Adjust INSN after we made a change to its destination.
2439 Changing the destination can invalidate notes that say something about
2440 the results of the insn and a LOG_LINK pointing to the insn. */
2442 static void
2443 adjust_for_new_dest (rtx_insn *insn)
2445 /* For notes, be conservative and simply remove them. */
2446 remove_reg_equal_equiv_notes (insn);
2448 /* The new insn will have a destination that was previously the destination
2449 of an insn just above it. Call distribute_links to make a LOG_LINK from
2450 the next use of that destination. */
2452 rtx set = single_set (insn);
2453 gcc_assert (set);
2455 rtx reg = SET_DEST (set);
2457 while (GET_CODE (reg) == ZERO_EXTRACT
2458 || GET_CODE (reg) == STRICT_LOW_PART
2459 || GET_CODE (reg) == SUBREG)
2460 reg = XEXP (reg, 0);
2461 gcc_assert (REG_P (reg));
2463 distribute_links (alloc_insn_link (insn, REGNO (reg), NULL));
2465 df_insn_rescan (insn);
2468 /* Return TRUE if combine can reuse reg X in mode MODE.
2469 ADDED_SETS is nonzero if the original set is still required. */
2470 static bool
2471 can_change_dest_mode (rtx x, int added_sets, machine_mode mode)
2473 unsigned int regno;
2475 if (!REG_P (x))
2476 return false;
2478 /* Don't change between modes with different underlying register sizes,
2479 since this could lead to invalid subregs. */
2480 if (REGMODE_NATURAL_SIZE (mode)
2481 != REGMODE_NATURAL_SIZE (GET_MODE (x)))
2482 return false;
2484 regno = REGNO (x);
2485 /* Allow hard registers if the new mode is legal, and occupies no more
2486 registers than the old mode. */
2487 if (regno < FIRST_PSEUDO_REGISTER)
2488 return (targetm.hard_regno_mode_ok (regno, mode)
2489 && REG_NREGS (x) >= hard_regno_nregs (regno, mode));
2491 /* Or a pseudo that is only used once. */
2492 return (regno < reg_n_sets_max
2493 && REG_N_SETS (regno) == 1
2494 && !added_sets
2495 && !REG_USERVAR_P (x));
2499 /* Check whether X, the destination of a set, refers to part of
2500 the register specified by REG. */
2502 static bool
2503 reg_subword_p (rtx x, rtx reg)
2505 /* Check that reg is an integer mode register. */
2506 if (!REG_P (reg) || GET_MODE_CLASS (GET_MODE (reg)) != MODE_INT)
2507 return false;
2509 if (GET_CODE (x) == STRICT_LOW_PART
2510 || GET_CODE (x) == ZERO_EXTRACT)
2511 x = XEXP (x, 0);
2513 return GET_CODE (x) == SUBREG
2514 && SUBREG_REG (x) == reg
2515 && GET_MODE_CLASS (GET_MODE (x)) == MODE_INT;
2518 /* Delete the unconditional jump INSN and adjust the CFG correspondingly.
2519 Note that the INSN should be deleted *after* removing dead edges, so
2520 that the kept edge is the fallthrough edge for a (set (pc) (pc))
2521 but not for a (set (pc) (label_ref FOO)). */
2523 static void
2524 update_cfg_for_uncondjump (rtx_insn *insn)
2526 basic_block bb = BLOCK_FOR_INSN (insn);
2527 gcc_assert (BB_END (bb) == insn);
2529 purge_dead_edges (bb);
2531 delete_insn (insn);
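/* If a single successor remains, the removed jump has become a fallthrough;
mark the edge as such and drop any barrier left behind in the footer. */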
2532 if (EDGE_COUNT (bb->succs) == 1)
2534 rtx_insn *insn;
2536 single_succ_edge (bb)->flags |= EDGE_FALLTHRU;
2538 /* Remove barriers from the footer if there are any. */
2539 for (insn = BB_FOOTER (bb); insn; insn = NEXT_INSN (insn))
2540 if (BARRIER_P (insn))
2542 if (PREV_INSN (insn))
2543 SET_NEXT_INSN (PREV_INSN (insn)) = NEXT_INSN (insn);
2544 else
2545 BB_FOOTER (bb) = NEXT_INSN (insn);
2546 if (NEXT_INSN (insn))
2547 SET_PREV_INSN (NEXT_INSN (insn)) = PREV_INSN (insn);
2549 else if (LABEL_P (insn))
2550 break;
2554 /* Return whether PAT is a PARALLEL of exactly N register SETs followed
2555 by an arbitrary number of CLOBBERs. */
2556 static bool
2557 is_parallel_of_n_reg_sets (rtx pat, int n)
2559 if (GET_CODE (pat) != PARALLEL)
2560 return false;
2562 int len = XVECLEN (pat, 0);
2563 if (len < n)
2564 return false;
2566 int i;
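/* The first N elements must be SETs of registers; every remaining element
must be a CLOBBER (and not a CLOBBER of const0_rtx). */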
2567 for (i = 0; i < n; i++)
2568 if (GET_CODE (XVECEXP (pat, 0, i)) != SET
2569 || !REG_P (SET_DEST (XVECEXP (pat, 0, i))))
2570 return false;
2571 for ( ; i < len; i++)
2572 if (GET_CODE (XVECEXP (pat, 0, i)) != CLOBBER
2573 || XEXP (XVECEXP (pat, 0, i), 0) == const0_rtx)
2574 return false;
2576 return true;
2579 /* Return whether INSN, a PARALLEL of N register SETs (and maybe some
2580 CLOBBERs), can be split into individual SETs in that order, without
2581 changing semantics. */
2582 static bool
2583 can_split_parallel_of_n_reg_sets (rtx_insn *insn, int n)
2585 if (!insn_nothrow_p (insn))
2586 return false;
2588 rtx pat = PATTERN (insn);
2590 int i, j;
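/* Each of the first N SETs must have a side-effect-free source, and its
destination must not be referenced by any later SET in the PARALLEL;
otherwise emitting the SETs one by one could change the result. */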
2591 for (i = 0; i < n; i++)
2593 if (side_effects_p (SET_SRC (XVECEXP (pat, 0, i))))
2594 return false;
2596 rtx reg = SET_DEST (XVECEXP (pat, 0, i));
2598 for (j = i + 1; j < n; j++)
2599 if (reg_referenced_p (reg, XVECEXP (pat, 0, j)))
2600 return false;
2603 return true;
2606 /* Try to combine the insns I0, I1 and I2 into I3.
2607 Here I0, I1 and I2 appear earlier than I3.
2608 I0 and I1 can be zero; then we combine just I2 into I3, or I1 and I2 into
2609 I3.
2611 If we are combining more than two insns and the resulting insn is not
2612 recognized, try splitting it into two insns. If that happens, I2 and I3
2613 are retained and I1/I0 are pseudo-deleted by turning them into a NOTE.
2614 Otherwise, I0, I1 and I2 are pseudo-deleted.
2616 Return 0 if the combination does not work. Then nothing is changed.
2617 If we did the combination, return the insn at which combine should
2618 resume scanning.
2620 Set NEW_DIRECT_JUMP_P to a nonzero value if try_combine creates a
2621 new direct jump instruction.
2623 LAST_COMBINED_INSN is either I3, or some insn after I3 that has
2624 been I3 passed to an earlier try_combine within the same basic
2625 block. */
2627 static rtx_insn *
2628 try_combine (rtx_insn *i3, rtx_insn *i2, rtx_insn *i1, rtx_insn *i0,
2629 int *new_direct_jump_p, rtx_insn *last_combined_insn)
2631 /* New patterns for I3 and I2, respectively. */
2632 rtx newpat, newi2pat = 0;
2633 rtvec newpat_vec_with_clobbers = 0;
2634 int substed_i2 = 0, substed_i1 = 0, substed_i0 = 0;
2635 /* Indicates need to preserve SET in I0, I1 or I2 in I3 if it is not
2636 dead. */
2637 int added_sets_0, added_sets_1, added_sets_2;
2638 /* Total number of SETs to put into I3. */
2639 int total_sets;
2640 /* Nonzero if I2's or I1's body now appears in I3. */
2641 int i2_is_used = 0, i1_is_used = 0;
2642 /* INSN_CODEs for new I3, new I2, and user of condition code. */
2643 int insn_code_number, i2_code_number = 0, other_code_number = 0;
2644 /* Contains I3 if the destination of I3 is used in its source, which means
2645 that the old life of I3 is being killed. If that usage is placed into
2646 I2 and not in I3, a REG_DEAD note must be made. */
2647 rtx i3dest_killed = 0;
2648 /* SET_DEST and SET_SRC of I2, I1 and I0. */
2649 rtx i2dest = 0, i2src = 0, i1dest = 0, i1src = 0, i0dest = 0, i0src = 0;
2650 /* Copy of SET_SRC of I1 and I0, if needed. */
2651 rtx i1src_copy = 0, i0src_copy = 0, i0src_copy2 = 0;
2652 /* Set if I2DEST was reused as a scratch register. */
2653 bool i2scratch = false;
2654 /* The PATTERNs of I0, I1, and I2, or a copy of them in certain cases. */
2655 rtx i0pat = 0, i1pat = 0, i2pat = 0;
2656 /* Indicates if I2DEST or I1DEST is in I2SRC or I1SRC. */
2657 int i2dest_in_i2src = 0, i1dest_in_i1src = 0, i2dest_in_i1src = 0;
2658 int i0dest_in_i0src = 0, i1dest_in_i0src = 0, i2dest_in_i0src = 0;
2659 int i2dest_killed = 0, i1dest_killed = 0, i0dest_killed = 0;
2660 int i1_feeds_i2_n = 0, i0_feeds_i2_n = 0, i0_feeds_i1_n = 0;
2661 /* Notes that must be added to REG_NOTES in I3 and I2. */
2662 rtx new_i3_notes, new_i2_notes;
2663 /* Notes that we substituted I3 into I2 instead of the normal case. */
2664 int i3_subst_into_i2 = 0;
2665 /* Notes that I1, I2 or I3 is a MULT operation. */
2666 int have_mult = 0;
2667 int swap_i2i3 = 0;
2668 int changed_i3_dest = 0;
2670 int maxreg;
2671 rtx_insn *temp_insn;
2672 rtx temp_expr;
2673 struct insn_link *link;
2674 rtx other_pat = 0;
2675 rtx new_other_notes;
2676 int i;
2677 scalar_int_mode dest_mode, temp_mode;
2679 /* Immediately return if any of I0,I1,I2 are the same insn (I3 can
2680 never be). */
2681 if (i1 == i2 || i0 == i2 || (i0 && i0 == i1))
2682 return 0;
2684 /* Only try four-insn combinations when there's high likelihood of
2685 success. Look for simple insns, such as loads of constants or
2686 binary operations involving a constant. */
2687 if (i0)
2689 int i;
2690 int ngood = 0;
2691 int nshift = 0;
2692 rtx set0, set3;
2694 if (!flag_expensive_optimizations)
2695 return 0;
2697 for (i = 0; i < 4; i++)
2699 rtx_insn *insn = i == 0 ? i0 : i == 1 ? i1 : i == 2 ? i2 : i3;
2700 rtx set = single_set (insn);
2701 rtx src;
2702 if (!set)
2703 continue;
2704 src = SET_SRC (set);
2705 if (CONSTANT_P (src))
2707 ngood += 2;
2708 break;
2710 else if (BINARY_P (src) && CONSTANT_P (XEXP (src, 1)))
2711 ngood++;
2712 else if (GET_CODE (src) == ASHIFT || GET_CODE (src) == ASHIFTRT
2713 || GET_CODE (src) == LSHIFTRT)
2714 nshift++;
2717 /* If I0 loads a memory and I3 sets the same memory, then I1 and I2
2718 are likely manipulating its value. Ideally we'll be able to combine
2719 all four insns into a bitfield insertion of some kind.
2721 Note the source in I0 might be inside a sign/zero extension and the
2722 memory modes in I0 and I3 might be different. So extract the address
2723 from the destination of I3 and search for it in the source of I0.
2725 In the event that there's a match but the source/dest do not actually
2726 refer to the same memory, the worst that happens is we try some
2727 combinations that we wouldn't have otherwise. */
2728 if ((set0 = single_set (i0))
2729 /* Ensure the source of SET0 is a MEM, possibly buried inside
2730 an extension. */
2731 && (GET_CODE (SET_SRC (set0)) == MEM
2732 || ((GET_CODE (SET_SRC (set0)) == ZERO_EXTEND
2733 || GET_CODE (SET_SRC (set0)) == SIGN_EXTEND)
2734 && GET_CODE (XEXP (SET_SRC (set0), 0)) == MEM))
2735 && (set3 = single_set (i3))
2736 /* Ensure the destination of SET3 is a MEM. */
2737 && GET_CODE (SET_DEST (set3)) == MEM
2738 /* Would it be better to extract the base address for the MEM
2739 in SET3 and look for that? I don't have cases where it matters
2740 but I could envision such cases. */
2741 && rtx_referenced_p (XEXP (SET_DEST (set3), 0), SET_SRC (set0)))
2742 ngood += 2;
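/* Require at least two "good" insns (a constant load counts double) or
at least two shift insns before attempting a four-insn combination. */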
2744 if (ngood < 2 && nshift < 2)
2745 return 0;
2748 /* Exit early if one of the insns involved can't be used for
2749 combinations. */
2750 if (CALL_P (i2)
2751 || (i1 && CALL_P (i1))
2752 || (i0 && CALL_P (i0))
2753 || cant_combine_insn_p (i3)
2754 || cant_combine_insn_p (i2)
2755 || (i1 && cant_combine_insn_p (i1))
2756 || (i0 && cant_combine_insn_p (i0))
2757 || likely_spilled_retval_p (i3))
2758 return 0;
2760 combine_attempts++;
2761 undobuf.other_insn = 0;
2763 /* Reset the hard register usage information. */
2764 CLEAR_HARD_REG_SET (newpat_used_regs);
2766 if (dump_file && (dump_flags & TDF_DETAILS))
2768 if (i0)
2769 fprintf (dump_file, "\nTrying %d, %d, %d -> %d:\n",
2770 INSN_UID (i0), INSN_UID (i1), INSN_UID (i2), INSN_UID (i3));
2771 else if (i1)
2772 fprintf (dump_file, "\nTrying %d, %d -> %d:\n",
2773 INSN_UID (i1), INSN_UID (i2), INSN_UID (i3));
2774 else
2775 fprintf (dump_file, "\nTrying %d -> %d:\n",
2776 INSN_UID (i2), INSN_UID (i3));
2779 /* If multiple insns feed into one of I2 or I3, they can be in any
2780 order. To simplify the code below, reorder them in sequence. */
2781 if (i0 && DF_INSN_LUID (i0) > DF_INSN_LUID (i2))
2782 std::swap (i0, i2);
2783 if (i0 && DF_INSN_LUID (i0) > DF_INSN_LUID (i1))
2784 std::swap (i0, i1);
2785 if (i1 && DF_INSN_LUID (i1) > DF_INSN_LUID (i2))
2786 std::swap (i1, i2);
2788 added_links_insn = 0;
2790 /* First check for one important special case that the code below will
2791 not handle. Namely, the case where I1 is zero, I2 is a PARALLEL
2792 and I3 is a SET whose SET_SRC is a SET_DEST in I2. In that case,
2793 we may be able to replace that destination with the destination of I3.
2794 This occurs in the common code where we compute both a quotient and
2795 remainder into a structure, in which case we want to do the computation
2796 directly into the structure to avoid register-register copies.
2798 Note that this case handles both multiple sets in I2 and also cases
2799 where I2 has a number of CLOBBERs inside the PARALLEL.
2801 We make very conservative checks below and only try to handle the
2802 most common cases of this. For example, we only handle the case
2803 where I2 and I3 are adjacent to avoid making difficult register
2804 usage tests. */
2806 if (i1 == 0 && NONJUMP_INSN_P (i3) && GET_CODE (PATTERN (i3)) == SET
2807 && REG_P (SET_SRC (PATTERN (i3)))
2808 && REGNO (SET_SRC (PATTERN (i3))) >= FIRST_PSEUDO_REGISTER
2809 && find_reg_note (i3, REG_DEAD, SET_SRC (PATTERN (i3)))
2810 && GET_CODE (PATTERN (i2)) == PARALLEL
2811 && ! side_effects_p (SET_DEST (PATTERN (i3)))
2812 /* If the dest of I3 is a ZERO_EXTRACT or STRICT_LOW_PART, the code
2813 below would need to check what is inside (and reg_overlap_mentioned_p
2814 doesn't support those codes anyway). Don't allow those destinations;
2815 the resulting insn isn't likely to be recognized anyway. */
2816 && GET_CODE (SET_DEST (PATTERN (i3))) != ZERO_EXTRACT
2817 && GET_CODE (SET_DEST (PATTERN (i3))) != STRICT_LOW_PART
2818 && ! reg_overlap_mentioned_p (SET_SRC (PATTERN (i3)),
2819 SET_DEST (PATTERN (i3)))
2820 && next_active_insn (i2) == i3)
2822 rtx p2 = PATTERN (i2);
2824 /* Make sure that the destination of I3,
2825 which we are going to substitute into one output of I2,
2826 is not used within another output of I2. We must avoid making this:
2827 (parallel [(set (mem (reg 69)) ...)
2828 (set (reg 69) ...)])
2829 which is not well-defined as to order of actions.
2830 (Besides, reload can't handle output reloads for this.)
2832 The problem can also happen if the dest of I3 is a memory ref,
2833 if another dest in I2 is an indirect memory ref.
2835 Neither can this PARALLEL be an asm. We do not allow combining
2836 that usually (see can_combine_p), so do not here either. */
2837 bool ok = true;
2838 for (i = 0; ok && i < XVECLEN (p2, 0); i++)
2840 if ((GET_CODE (XVECEXP (p2, 0, i)) == SET
2841 || GET_CODE (XVECEXP (p2, 0, i)) == CLOBBER)
2842 && reg_overlap_mentioned_p (SET_DEST (PATTERN (i3)),
2843 SET_DEST (XVECEXP (p2, 0, i))))
2844 ok = false;
2845 else if (GET_CODE (XVECEXP (p2, 0, i)) == SET
2846 && GET_CODE (SET_SRC (XVECEXP (p2, 0, i))) == ASM_OPERANDS)
2847 ok = false;
2850 if (ok)
2851 for (i = 0; i < XVECLEN (p2, 0); i++)
2852 if (GET_CODE (XVECEXP (p2, 0, i)) == SET
2853 && SET_DEST (XVECEXP (p2, 0, i)) == SET_SRC (PATTERN (i3)))
2855 combine_merges++;
2857 subst_insn = i3;
2858 subst_low_luid = DF_INSN_LUID (i2);
2860 added_sets_2 = added_sets_1 = added_sets_0 = 0;
2861 i2src = SET_SRC (XVECEXP (p2, 0, i));
2862 i2dest = SET_DEST (XVECEXP (p2, 0, i));
2863 i2dest_killed = dead_or_set_p (i2, i2dest);
2865 /* Replace the dest in I2 with our dest and make the resulting
2866 insn the new pattern for I3. Then skip to where we validate
2867 the pattern. Everything was set up above. */
2868 SUBST (SET_DEST (XVECEXP (p2, 0, i)), SET_DEST (PATTERN (i3)));
2869 newpat = p2;
2870 i3_subst_into_i2 = 1;
2871 goto validate_replacement;
2875 /* If I2 is setting a pseudo to a constant and I3 is setting some
2876 sub-part of it to another constant, merge them by making a new
2877 constant. */
2878 if (i1 == 0
2879 && (temp_expr = single_set (i2)) != 0
2880 && is_a <scalar_int_mode> (GET_MODE (SET_DEST (temp_expr)), &temp_mode)
2881 && CONST_SCALAR_INT_P (SET_SRC (temp_expr))
2882 && GET_CODE (PATTERN (i3)) == SET
2883 && CONST_SCALAR_INT_P (SET_SRC (PATTERN (i3)))
2884 && reg_subword_p (SET_DEST (PATTERN (i3)), SET_DEST (temp_expr)))
2886 rtx dest = SET_DEST (PATTERN (i3));
2887 rtx temp_dest = SET_DEST (temp_expr);
2888 int offset = -1;
2889 int width = 0;
2891 if (GET_CODE (dest) == ZERO_EXTRACT)
2893 if (CONST_INT_P (XEXP (dest, 1))
2894 && CONST_INT_P (XEXP (dest, 2))
2895 && is_a <scalar_int_mode> (GET_MODE (XEXP (dest, 0)),
2896 &dest_mode))
2898 width = INTVAL (XEXP (dest, 1));
2899 offset = INTVAL (XEXP (dest, 2));
2900 dest = XEXP (dest, 0);
2901 if (BITS_BIG_ENDIAN)
2902 offset = GET_MODE_PRECISION (dest_mode) - width - offset;
2905 else
2907 if (GET_CODE (dest) == STRICT_LOW_PART)
2908 dest = XEXP (dest, 0);
2909 if (is_a <scalar_int_mode> (GET_MODE (dest), &dest_mode))
2911 width = GET_MODE_PRECISION (dest_mode);
2912 offset = 0;
2916 if (offset >= 0)
2918 /* If this is the low part, we're done. */
2919 if (subreg_lowpart_p (dest))
2921 /* Handle the case where inner is twice the size of outer. */
2922 else if (GET_MODE_PRECISION (temp_mode)
2923 == 2 * GET_MODE_PRECISION (dest_mode))
2924 offset += GET_MODE_PRECISION (dest_mode);
2925 /* Otherwise give up for now. */
2926 else
2927 offset = -1;
2930 if (offset >= 0)
2932 rtx inner = SET_SRC (PATTERN (i3));
2933 rtx outer = SET_SRC (temp_expr);
2935 wide_int o = wi::insert (rtx_mode_t (outer, temp_mode),
2936 rtx_mode_t (inner, dest_mode),
2937 offset, width);
2939 combine_merges++;
2940 subst_insn = i3;
2941 subst_low_luid = DF_INSN_LUID (i2);
2942 added_sets_2 = added_sets_1 = added_sets_0 = 0;
2943 i2dest = temp_dest;
2944 i2dest_killed = dead_or_set_p (i2, i2dest);
2946 /* Replace the source in I2 with the new constant and make the
2947 resulting insn the new pattern for I3. Then skip to where we
2948 validate the pattern. Everything was set up above. */
2949 SUBST (SET_SRC (temp_expr),
2950 immed_wide_int_const (o, temp_mode));
2952 newpat = PATTERN (i2);
2954 /* The dest of I3 has been replaced with the dest of I2. */
2955 changed_i3_dest = 1;
2956 goto validate_replacement;
2960 /* If we have no I1 and I2 looks like:
2961 (parallel [(set (reg:CC X) (compare:CC OP (const_int 0)))
2962 (set Y OP)])
2963 make up a dummy I1 that is
2964 (set Y OP)
2965 and change I2 to be
2966 (set (reg:CC X) (compare:CC Y (const_int 0)))
2968 (We can ignore any trailing CLOBBERs.)
2970 This undoes a previous combination and allows us to match a branch-and-
2971 decrement insn. */
2973 if (!HAVE_cc0 && i1 == 0
2974 && is_parallel_of_n_reg_sets (PATTERN (i2), 2)
2975 && (GET_MODE_CLASS (GET_MODE (SET_DEST (XVECEXP (PATTERN (i2), 0, 0))))
2976 == MODE_CC)
2977 && GET_CODE (SET_SRC (XVECEXP (PATTERN (i2), 0, 0))) == COMPARE
2978 && XEXP (SET_SRC (XVECEXP (PATTERN (i2), 0, 0)), 1) == const0_rtx
2979 && rtx_equal_p (XEXP (SET_SRC (XVECEXP (PATTERN (i2), 0, 0)), 0),
2980 SET_SRC (XVECEXP (PATTERN (i2), 0, 1)))
2981 && !reg_used_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 0)), i2, i3)
2982 && !reg_used_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 1)), i2, i3))
2984 /* We make I1 with the same INSN_UID as I2. This gives it
2985 the same DF_INSN_LUID for value tracking. Our fake I1 will
2986 never appear in the insn stream so giving it the same INSN_UID
2987 as I2 will not cause a problem. */
2989 i1 = gen_rtx_INSN (VOIDmode, NULL, i2, BLOCK_FOR_INSN (i2),
2990 XVECEXP (PATTERN (i2), 0, 1), INSN_LOCATION (i2),
2991 -1, NULL_RTX);
2992 INSN_UID (i1) = INSN_UID (i2);
2994 SUBST (PATTERN (i2), XVECEXP (PATTERN (i2), 0, 0));
2995 SUBST (XEXP (SET_SRC (PATTERN (i2)), 0),
2996 SET_DEST (PATTERN (i1)));
2997 unsigned int regno = REGNO (SET_DEST (PATTERN (i1)));
2998 SUBST_LINK (LOG_LINKS (i2),
2999 alloc_insn_link (i1, regno, LOG_LINKS (i2)));
3002 /* If I2 is a PARALLEL of two SETs of REGs (and perhaps some CLOBBERs),
3003 make those two SETs separate I1 and I2 insns, and make an I0 that is
3004 the original I1. */
3005 if (!HAVE_cc0 && i0 == 0
3006 && is_parallel_of_n_reg_sets (PATTERN (i2), 2)
3007 && can_split_parallel_of_n_reg_sets (i2, 2)
3008 && !reg_used_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 0)), i2, i3)
3009 && !reg_used_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 1)), i2, i3))
3011 /* If there is no I1, there is no I0 either. */
3012 i0 = i1;
3014 /* We make I1 with the same INSN_UID as I2. This gives it
3015 the same DF_INSN_LUID for value tracking. Our fake I1 will
3016 never appear in the insn stream so giving it the same INSN_UID
3017 as I2 will not cause a problem. */
3019 i1 = gen_rtx_INSN (VOIDmode, NULL, i2, BLOCK_FOR_INSN (i2),
3020 XVECEXP (PATTERN (i2), 0, 0), INSN_LOCATION (i2),
3021 -1, NULL_RTX);
3022 INSN_UID (i1) = INSN_UID (i2);
3024 SUBST (PATTERN (i2), XVECEXP (PATTERN (i2), 0, 1));
3027 /* Verify that I2 and I1 are valid for combining. */
3028 if (! can_combine_p (i2, i3, i0, i1, NULL, NULL, &i2dest, &i2src)
3029 || (i1 && ! can_combine_p (i1, i3, i0, NULL, i2, NULL,
3030 &i1dest, &i1src))
3031 || (i0 && ! can_combine_p (i0, i3, NULL, NULL, i1, i2,
3032 &i0dest, &i0src)))
3034 undo_all ();
3035 return 0;
3038 /* Record whether I2DEST is used in I2SRC and similarly for the other
3039 cases. Knowing this will help in register status updating below. */
3040 i2dest_in_i2src = reg_overlap_mentioned_p (i2dest, i2src);
3041 i1dest_in_i1src = i1 && reg_overlap_mentioned_p (i1dest, i1src);
3042 i2dest_in_i1src = i1 && reg_overlap_mentioned_p (i2dest, i1src);
3043 i0dest_in_i0src = i0 && reg_overlap_mentioned_p (i0dest, i0src);
3044 i1dest_in_i0src = i0 && reg_overlap_mentioned_p (i1dest, i0src);
3045 i2dest_in_i0src = i0 && reg_overlap_mentioned_p (i2dest, i0src);
3046 i2dest_killed = dead_or_set_p (i2, i2dest);
3047 i1dest_killed = i1 && dead_or_set_p (i1, i1dest);
3048 i0dest_killed = i0 && dead_or_set_p (i0, i0dest);
3050 /* For the earlier insns, determine which of the subsequent ones they
3051 feed. */
3052 i1_feeds_i2_n = i1 && insn_a_feeds_b (i1, i2);
3053 i0_feeds_i1_n = i0 && insn_a_feeds_b (i0, i1);
3054 i0_feeds_i2_n = (i0 && (!i0_feeds_i1_n ? insn_a_feeds_b (i0, i2)
3055 : (!reg_overlap_mentioned_p (i1dest, i0dest)
3056 && reg_overlap_mentioned_p (i0dest, i2src))));
3058 /* Ensure that I3's pattern can be the destination of combines. */
3059 if (! combinable_i3pat (i3, &PATTERN (i3), i2dest, i1dest, i0dest,
3060 i1 && i2dest_in_i1src && !i1_feeds_i2_n,
3061 i0 && ((i2dest_in_i0src && !i0_feeds_i2_n)
3062 || (i1dest_in_i0src && !i0_feeds_i1_n)),
3063 &i3dest_killed))
3065 undo_all ();
3066 return 0;
3069 /* See if any of the insns is a MULT operation. Unless one is, we will
3070 reject a combination that is, since it must be slower. Be conservative
3071 here. */
3072 if (GET_CODE (i2src) == MULT
3073 || (i1 != 0 && GET_CODE (i1src) == MULT)
3074 || (i0 != 0 && GET_CODE (i0src) == MULT)
3075 || (GET_CODE (PATTERN (i3)) == SET
3076 && GET_CODE (SET_SRC (PATTERN (i3))) == MULT))
3077 have_mult = 1;
3079 /* If I3 has an inc, then give up if I1 or I2 uses the reg that is inc'd.
3080 We used to do this EXCEPT in one case: I3 has a post-inc in an
3081 output operand. However, that exception can give rise to insns like
3082 mov r3,(r3)+
3083 which is a famous insn on the PDP-11 where the value of r3 used as the
3084 source was model-dependent. Avoid this sort of thing. */
3086 #if 0
3087 if (!(GET_CODE (PATTERN (i3)) == SET
3088 && REG_P (SET_SRC (PATTERN (i3)))
3089 && MEM_P (SET_DEST (PATTERN (i3)))
3090 && (GET_CODE (XEXP (SET_DEST (PATTERN (i3)), 0)) == POST_INC
3091 || GET_CODE (XEXP (SET_DEST (PATTERN (i3)), 0)) == POST_DEC)))
3092 /* It's not the exception. */
3093 #endif
3094 if (AUTO_INC_DEC)
3096 rtx link;
3097 for (link = REG_NOTES (i3); link; link = XEXP (link, 1))
3098 if (REG_NOTE_KIND (link) == REG_INC
3099 && (reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (i2))
3100 || (i1 != 0
3101 && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (i1)))))
3103 undo_all ();
3104 return 0;
3108 /* See if the SETs in I1 or I2 need to be kept around in the merged
3109 instruction: whenever the value set there is still needed past I3.
3110 For the SET in I2, this is easy: we see if I2DEST dies or is set in I3.
3112 For the SET in I1, we have two cases: if I1 and I2 independently feed
3113 into I3, the set in I1 needs to be kept around unless I1DEST dies
3114 or is set in I3. Otherwise (if I1 feeds I2 which feeds I3), the set
3115 in I1 needs to be kept around unless I1DEST dies or is set in either
3116 I2 or I3. The same considerations apply to I0. */
3118 added_sets_2 = !dead_or_set_p (i3, i2dest);
3120 if (i1)
3121 added_sets_1 = !(dead_or_set_p (i3, i1dest)
3122 || (i1_feeds_i2_n && dead_or_set_p (i2, i1dest)));
3123 else
3124 added_sets_1 = 0;
3126 if (i0)
3127 added_sets_0 = !(dead_or_set_p (i3, i0dest)
3128 || (i0_feeds_i1_n && dead_or_set_p (i1, i0dest))
3129 || ((i0_feeds_i2_n || (i0_feeds_i1_n && i1_feeds_i2_n))
3130 && dead_or_set_p (i2, i0dest)));
3131 else
3132 added_sets_0 = 0;
3134 /* We are about to copy insns for the case where they need to be kept
3135 around. Check that they can be copied in the merged instruction. */
3137 if (targetm.cannot_copy_insn_p
3138 && ((added_sets_2 && targetm.cannot_copy_insn_p (i2))
3139 || (i1 && added_sets_1 && targetm.cannot_copy_insn_p (i1))
3140 || (i0 && added_sets_0 && targetm.cannot_copy_insn_p (i0))))
3142 undo_all ();
3143 return 0;
3146 /* If the set in I2 needs to be kept around, we must make a copy of
3147 PATTERN (I2), so that when we substitute I1SRC for I1DEST in
3148 PATTERN (I2), we are only substituting for the original I1DEST, not into
3149 an already-substituted copy. This also prevents making self-referential
3150 rtx. If I2 is a PARALLEL, we just need the piece that assigns I2SRC to
3151 I2DEST. */
3153 if (added_sets_2)
3155 if (GET_CODE (PATTERN (i2)) == PARALLEL)
3156 i2pat = gen_rtx_SET (i2dest, copy_rtx (i2src));
3157 else
3158 i2pat = copy_rtx (PATTERN (i2));
3161 if (added_sets_1)
3163 if (GET_CODE (PATTERN (i1)) == PARALLEL)
3164 i1pat = gen_rtx_SET (i1dest, copy_rtx (i1src));
3165 else
3166 i1pat = copy_rtx (PATTERN (i1));
3169 if (added_sets_0)
3171 if (GET_CODE (PATTERN (i0)) == PARALLEL)
3172 i0pat = gen_rtx_SET (i0dest, copy_rtx (i0src));
3173 else
3174 i0pat = copy_rtx (PATTERN (i0));
3177 combine_merges++;
3179 /* Substitute in the latest insn for the regs set by the earlier ones. */
3181 maxreg = max_reg_num ();
3183 subst_insn = i3;
3185 /* Many machines that don't use CC0 have insns that can both perform an
3186 arithmetic operation and set the condition code. These operations will
3187 be represented as a PARALLEL with the first element of the vector
3188 being a COMPARE of an arithmetic operation with the constant zero.
3189 The second element of the vector will set some pseudo to the result
3190 of the same arithmetic operation. If we simplify the COMPARE, we won't
3191 match such a pattern and so will generate an extra insn. Here we test
3192 for this case, where both the comparison and the operation result are
3193 needed, and make the PARALLEL by just replacing I2DEST in I3SRC with
3194 I2SRC. Later we will make the PARALLEL that contains I2. */
3196 if (!HAVE_cc0 && i1 == 0 && added_sets_2 && GET_CODE (PATTERN (i3)) == SET
3197 && GET_CODE (SET_SRC (PATTERN (i3))) == COMPARE
3198 && CONST_INT_P (XEXP (SET_SRC (PATTERN (i3)), 1))
3199 && rtx_equal_p (XEXP (SET_SRC (PATTERN (i3)), 0), i2dest))
3201 rtx newpat_dest;
3202 rtx *cc_use_loc = NULL;
3203 rtx_insn *cc_use_insn = NULL;
3204 rtx op0 = i2src, op1 = XEXP (SET_SRC (PATTERN (i3)), 1);
3205 machine_mode compare_mode, orig_compare_mode;
3206 enum rtx_code compare_code = UNKNOWN, orig_compare_code = UNKNOWN;
3207 scalar_int_mode mode;
3209 newpat = PATTERN (i3);
3210 newpat_dest = SET_DEST (newpat);
3211 compare_mode = orig_compare_mode = GET_MODE (newpat_dest);
3213 if (undobuf.other_insn == 0
3214 && (cc_use_loc = find_single_use (SET_DEST (newpat), i3,
3215 &cc_use_insn)))
3217 compare_code = orig_compare_code = GET_CODE (*cc_use_loc);
3218 if (is_a <scalar_int_mode> (GET_MODE (i2dest), &mode))
3219 compare_code = simplify_compare_const (compare_code, mode,
3220 op0, &op1);
3221 target_canonicalize_comparison (&compare_code, &op0, &op1, 1);
3224 /* Do the rest only if op1 is const0_rtx, which may be the
3225 result of simplification. */
3226 if (op1 == const0_rtx)
3228 /* If a single use of the CC is found, prepare to modify it
3229 when SELECT_CC_MODE returns a new CC-class mode, or when
3230 the above simplify_compare_const() returned a new comparison
3231 operator. undobuf.other_insn is assigned the CC use insn
3232 when modifying it. */
3233 if (cc_use_loc)
3235 #ifdef SELECT_CC_MODE
3236 machine_mode new_mode
3237 = SELECT_CC_MODE (compare_code, op0, op1);
3238 if (new_mode != orig_compare_mode
3239 && can_change_dest_mode (SET_DEST (newpat),
3240 added_sets_2, new_mode))
3242 unsigned int regno = REGNO (newpat_dest);
3243 compare_mode = new_mode;
3244 if (regno < FIRST_PSEUDO_REGISTER)
3245 newpat_dest = gen_rtx_REG (compare_mode, regno);
3246 else
3248 SUBST_MODE (regno_reg_rtx[regno], compare_mode);
3249 newpat_dest = regno_reg_rtx[regno];
3252 #endif
3253 /* Cases for modifying the CC-using comparison. */
3254 if (compare_code != orig_compare_code
3255 /* ??? Do we need to verify the zero rtx? */
3256 && XEXP (*cc_use_loc, 1) == const0_rtx)
3258 /* Replace cc_use_loc with entire new RTX. */
3259 SUBST (*cc_use_loc,
3260 gen_rtx_fmt_ee (compare_code, compare_mode,
3261 newpat_dest, const0_rtx));
3262 undobuf.other_insn = cc_use_insn;
3264 else if (compare_mode != orig_compare_mode)
3266 /* Just replace the CC reg with a new mode. */
3267 SUBST (XEXP (*cc_use_loc, 0), newpat_dest);
3268 undobuf.other_insn = cc_use_insn;
3272 /* Now we modify the current newpat:
3273 First, SET_DEST(newpat) is updated if the CC mode has been
3274 altered. For targets without SELECT_CC_MODE, this should be
3275 optimized away. */
3276 if (compare_mode != orig_compare_mode)
3277 SUBST (SET_DEST (newpat), newpat_dest);
3278 /* This is always done to propagate i2src into newpat. */
3279 SUBST (SET_SRC (newpat),
3280 gen_rtx_COMPARE (compare_mode, op0, op1));
3281 /* Create new version of i2pat if needed; the below PARALLEL
3282 creation needs this to work correctly. */
3283 if (! rtx_equal_p (i2src, op0))
3284 i2pat = gen_rtx_SET (i2dest, op0);
3285 i2_is_used = 1;
3289 if (i2_is_used == 0)
3291 /* It is possible that the source of I2 or I1 may be performing
3292 an unneeded operation, such as a ZERO_EXTEND of something
3293 that is known to have the high part zero. Handle that case
3294 by letting subst look at the inner insns.
3296 Another way to do this would be to have a function that tries
3297 to simplify a single insn instead of merging two or more
3298 insns. We don't do this because of the potential of infinite
3299 loops and because of the potential extra memory required.
3300 However, doing it the way we are is a bit of a kludge and
3301 doesn't catch all cases.
3303 But only do this if -fexpensive-optimizations since it slows
3304 things down and doesn't usually win.
3306 This is not done in the COMPARE case above because the
3307 unmodified I2PAT is used in the PARALLEL and so a pattern
3308 with a modified I2SRC would not match. */
3310 if (flag_expensive_optimizations)
3312 /* Pass pc_rtx so no substitutions are done, just
3313 simplifications. */
3314 if (i1)
3316 subst_low_luid = DF_INSN_LUID (i1);
3317 i1src = subst (i1src, pc_rtx, pc_rtx, 0, 0, 0);
3320 subst_low_luid = DF_INSN_LUID (i2);
3321 i2src = subst (i2src, pc_rtx, pc_rtx, 0, 0, 0);
3324 n_occurrences = 0; /* `subst' counts here */
3325 subst_low_luid = DF_INSN_LUID (i2);
3327 /* If I1 feeds into I2 and I1DEST is in I1SRC, we need to make a unique
3328 copy of I2SRC each time we substitute it, in order to avoid creating
3329 self-referential RTL when we will be substituting I1SRC for I1DEST
3330 later. Likewise if I0 feeds into I2, either directly or indirectly
3331 through I1, and I0DEST is in I0SRC. */
3332 newpat = subst (PATTERN (i3), i2dest, i2src, 0, 0,
3333 (i1_feeds_i2_n && i1dest_in_i1src)
3334 || ((i0_feeds_i2_n || (i0_feeds_i1_n && i1_feeds_i2_n))
3335 && i0dest_in_i0src));
3336 substed_i2 = 1;
3338 /* Record whether I2's body now appears within I3's body. */
3339 i2_is_used = n_occurrences;
3342 /* If we already got a failure, don't try to do more. Otherwise, try to
3343 substitute I1 if we have it. */
3345 if (i1 && GET_CODE (newpat) != CLOBBER)
3347 /* Check that an autoincrement side-effect on I1 has not been lost.
3348 This happens if I1DEST is mentioned in I2 and dies there, and
3349 has disappeared from the new pattern. */
3350 if ((FIND_REG_INC_NOTE (i1, NULL_RTX) != 0
3351 && i1_feeds_i2_n
3352 && dead_or_set_p (i2, i1dest)
3353 && !reg_overlap_mentioned_p (i1dest, newpat))
3354 /* Before we can do this substitution, we must redo the test done
3355 above (see detailed comments there) that ensures I1DEST isn't
3356 mentioned in any SETs in NEWPAT that are field assignments. */
3357 || !combinable_i3pat (NULL, &newpat, i1dest, NULL_RTX, NULL_RTX,
3358 0, 0, 0))
3360 undo_all ();
3361 return 0;
3364 n_occurrences = 0;
3365 subst_low_luid = DF_INSN_LUID (i1);
3367 /* If the following substitution will modify I1SRC, make a copy of it
3368 for the case where it is substituted for I1DEST in I2PAT later. */
3369 if (added_sets_2 && i1_feeds_i2_n)
3370 i1src_copy = copy_rtx (i1src);
3372 /* If I0 feeds into I1 and I0DEST is in I0SRC, we need to make a unique
3373 copy of I1SRC each time we substitute it, in order to avoid creating
3374 self-referential RTL when we will be substituting I0SRC for I0DEST
3375 later. */
3376 newpat = subst (newpat, i1dest, i1src, 0, 0,
3377 i0_feeds_i1_n && i0dest_in_i0src);
3378 substed_i1 = 1;
3380 /* Record whether I1's body now appears within I3's body. */
3381 i1_is_used = n_occurrences;
3384 /* Likewise for I0 if we have it. */
3386 if (i0 && GET_CODE (newpat) != CLOBBER)
3388 if ((FIND_REG_INC_NOTE (i0, NULL_RTX) != 0
3389 && ((i0_feeds_i2_n && dead_or_set_p (i2, i0dest))
3390 || (i0_feeds_i1_n && dead_or_set_p (i1, i0dest)))
3391 && !reg_overlap_mentioned_p (i0dest, newpat))
3392 || !combinable_i3pat (NULL, &newpat, i0dest, NULL_RTX, NULL_RTX,
3393 0, 0, 0))
3395 undo_all ();
3396 return 0;
3399 /* If the following substitution will modify I0SRC, make a copy of it
3400 for the case where it is substituted for I0DEST in I1PAT later. */
3401 if (added_sets_1 && i0_feeds_i1_n)
3402 i0src_copy = copy_rtx (i0src);
3403 /* And a copy for I0DEST in I2PAT substitution. */
3404 if (added_sets_2 && ((i0_feeds_i1_n && i1_feeds_i2_n)
3405 || (i0_feeds_i2_n)))
3406 i0src_copy2 = copy_rtx (i0src);
3408 n_occurrences = 0;
3409 subst_low_luid = DF_INSN_LUID (i0);
3410 newpat = subst (newpat, i0dest, i0src, 0, 0, 0);
3411 substed_i0 = 1;
3414 /* Fail if an autoincrement side-effect has been duplicated. Be careful
3415 to count all the ways that I2SRC and I1SRC can be used. */
3416 if ((FIND_REG_INC_NOTE (i2, NULL_RTX) != 0
3417 && i2_is_used + added_sets_2 > 1)
3418 || (i1 != 0 && FIND_REG_INC_NOTE (i1, NULL_RTX) != 0
3419 && (i1_is_used + added_sets_1 + (added_sets_2 && i1_feeds_i2_n)
3420 > 1))
3421 || (i0 != 0 && FIND_REG_INC_NOTE (i0, NULL_RTX) != 0
3422 && (n_occurrences + added_sets_0
3423 + (added_sets_1 && i0_feeds_i1_n)
3424 + (added_sets_2 && i0_feeds_i2_n)
3425 > 1))
3426 /* Fail if we tried to make a new register. */
3427 || max_reg_num () != maxreg
3428 /* Fail if we couldn't do something and have a CLOBBER. */
3429 || GET_CODE (newpat) == CLOBBER
3430 /* Fail if this new pattern is a MULT and we didn't have one before
3431 at the outer level. */
3432 || (GET_CODE (newpat) == SET && GET_CODE (SET_SRC (newpat)) == MULT
3433 && ! have_mult))
3435 undo_all ();
3436 return 0;
3439 /* If the actions of the earlier insns must be kept
3440 in addition to substituting them into the latest one,
3441 we must make a new PARALLEL for the latest insn
3442 to hold the additional SETs. */
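/* Roughly, when only I2's SET must survive, NEWPAT becomes
   (parallel [<substituted I3 pattern> <I2PAT>]).  */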
3444 if (added_sets_0 || added_sets_1 || added_sets_2)
3446 int extra_sets = added_sets_0 + added_sets_1 + added_sets_2;
3447 combine_extras++;
3449 if (GET_CODE (newpat) == PARALLEL)
3451 rtvec old = XVEC (newpat, 0);
3452 total_sets = XVECLEN (newpat, 0) + extra_sets;
3453 newpat = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (total_sets));
3454 memcpy (XVEC (newpat, 0)->elem, &old->elem[0],
3455 sizeof (old->elem[0]) * old->num_elem);
3457 else
3459 rtx old = newpat;
3460 total_sets = 1 + extra_sets;
3461 newpat = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (total_sets));
3462 XVECEXP (newpat, 0, 0) = old;
3465 if (added_sets_0)
3466 XVECEXP (newpat, 0, --total_sets) = i0pat;
3468 if (added_sets_1)
3470 rtx t = i1pat;
3471 if (i0_feeds_i1_n)
3472 t = subst (t, i0dest, i0src_copy ? i0src_copy : i0src, 0, 0, 0);
3474 XVECEXP (newpat, 0, --total_sets) = t;
3476 if (added_sets_2)
3478 rtx t = i2pat;
3479 if (i1_feeds_i2_n)
3480 t = subst (t, i1dest, i1src_copy ? i1src_copy : i1src, 0, 0,
3481 i0_feeds_i1_n && i0dest_in_i0src);
3482 if ((i0_feeds_i1_n && i1_feeds_i2_n) || i0_feeds_i2_n)
3483 t = subst (t, i0dest, i0src_copy2 ? i0src_copy2 : i0src, 0, 0, 0);
3485 XVECEXP (newpat, 0, --total_sets) = t;
3489 validate_replacement:
3491 /* Note which hard regs this insn has as inputs. */
3492 mark_used_regs_combine (newpat);
3494 /* If recog_for_combine fails, it strips existing clobbers. If we'll
3495 consider splitting this pattern, we might need these clobbers. */
3496 if (i1 && GET_CODE (newpat) == PARALLEL
3497 && GET_CODE (XVECEXP (newpat, 0, XVECLEN (newpat, 0) - 1)) == CLOBBER)
3499 int len = XVECLEN (newpat, 0);
3501 newpat_vec_with_clobbers = rtvec_alloc (len);
3502 for (i = 0; i < len; i++)
3503 RTVEC_ELT (newpat_vec_with_clobbers, i) = XVECEXP (newpat, 0, i);
3506 /* We have recognized nothing yet. */
3507 insn_code_number = -1;
3509 /* See if this is a PARALLEL of two SETs where one SET's destination is
3510 a register that is unused and this isn't marked as an instruction that
3511 might trap in an EH region. In that case, we just need the other SET.
3512 We prefer this over the PARALLEL.
3514 This can occur when simplifying a divmod insn. We *must* test for this
3515 case here because the code below that splits two independent SETs doesn't
3516 handle this case correctly when it updates the register status.
3518 It's pointless doing this if we originally had two sets, one from
3519 i3, and one from i2. Combining then splitting the parallel results
3520 in the original i2 again plus an invalid insn (which we delete).
3521 The net effect is only to move instructions around, which makes
3522 debug info less accurate.
3524 If the remaining SET came from I2, its destination should not be used
3525 between I2 and I3. See PR82024. */
3527 if (!(added_sets_2 && i1 == 0)
3528 && is_parallel_of_n_reg_sets (newpat, 2)
3529 && asm_noperands (newpat) < 0)
3531 rtx set0 = XVECEXP (newpat, 0, 0);
3532 rtx set1 = XVECEXP (newpat, 0, 1);
3533 rtx oldpat = newpat;
3535 if (((REG_P (SET_DEST (set1))
3536 && find_reg_note (i3, REG_UNUSED, SET_DEST (set1)))
3537 || (GET_CODE (SET_DEST (set1)) == SUBREG
3538 && find_reg_note (i3, REG_UNUSED, SUBREG_REG (SET_DEST (set1)))))
3539 && insn_nothrow_p (i3)
3540 && !side_effects_p (SET_SRC (set1)))
3542 newpat = set0;
3543 insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
3546 else if (((REG_P (SET_DEST (set0))
3547 && find_reg_note (i3, REG_UNUSED, SET_DEST (set0)))
3548 || (GET_CODE (SET_DEST (set0)) == SUBREG
3549 && find_reg_note (i3, REG_UNUSED,
3550 SUBREG_REG (SET_DEST (set0)))))
3551 && insn_nothrow_p (i3)
3552 && !side_effects_p (SET_SRC (set0)))
3554 rtx dest = SET_DEST (set1);
3555 if (GET_CODE (dest) == SUBREG)
3556 dest = SUBREG_REG (dest);
3557 if (!reg_used_between_p (dest, i2, i3))
3559 newpat = set1;
3560 insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
3562 if (insn_code_number >= 0)
3563 changed_i3_dest = 1;
3567 if (insn_code_number < 0)
3568 newpat = oldpat;
3571 /* Is the result of combination a valid instruction? */
3572 if (insn_code_number < 0)
3573 insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
3575 /* If we were combining three insns and the result is a simple SET
3576 with no ASM_OPERANDS that wasn't recognized, try to split it into two
3577 insns. There are two ways to do this. It can be split using a
3578 machine-specific method (like when you have an addition of a large
3579 constant) or by combine in the function find_split_point. */
3581 if (i1 && insn_code_number < 0 && GET_CODE (newpat) == SET
3582 && asm_noperands (newpat) < 0)
3584 rtx parallel, *split;
3585 rtx_insn *m_split_insn;
3587 /* See if the MD file can split NEWPAT. If it can't, see if letting it
3588 use I2DEST as a scratch register will help. In the latter case,
3589 convert I2DEST to the mode of the source of NEWPAT if we can. */
3591 m_split_insn = combine_split_insns (newpat, i3);
3593 /* We can only use I2DEST as a scratch reg if it doesn't overlap any
3594 inputs of NEWPAT. */
3596 /* ??? If I2DEST is not safe, and I1DEST exists, then it would be
3597 possible to try that as a scratch reg. This would require adding
3598 more code to make it work though. */
3600 if (m_split_insn == 0 && ! reg_overlap_mentioned_p (i2dest, newpat))
3602 machine_mode new_mode = GET_MODE (SET_DEST (newpat));
3604 /* ??? Reusing i2dest without resetting the reg_stat entry for it
3605 (temporarily, until we are committed to this instruction
3606 combination) does not work: for example, any call to nonzero_bits
3607 on the register (from a splitter in the MD file, for example)
3608 will get the old information, which is invalid.
3610 Since nowadays we can create registers during combine just fine,
3611 we should just create a new one here, not reuse i2dest. */
3613 /* First try to split using the original register as a
3614 scratch register. */
3615 parallel = gen_rtx_PARALLEL (VOIDmode,
3616 gen_rtvec (2, newpat,
3617 gen_rtx_CLOBBER (VOIDmode,
3618 i2dest)));
3619 m_split_insn = combine_split_insns (parallel, i3);
3621 /* If that didn't work, try changing the mode of I2DEST if
3622 we can. */
3623 if (m_split_insn == 0
3624 && new_mode != GET_MODE (i2dest)
3625 && new_mode != VOIDmode
3626 && can_change_dest_mode (i2dest, added_sets_2, new_mode))
3628 machine_mode old_mode = GET_MODE (i2dest);
3629 rtx ni2dest;
3631 if (REGNO (i2dest) < FIRST_PSEUDO_REGISTER)
3632 ni2dest = gen_rtx_REG (new_mode, REGNO (i2dest));
3633 else
3635 SUBST_MODE (regno_reg_rtx[REGNO (i2dest)], new_mode);
3636 ni2dest = regno_reg_rtx[REGNO (i2dest)];
3639 parallel = (gen_rtx_PARALLEL
3640 (VOIDmode,
3641 gen_rtvec (2, newpat,
3642 gen_rtx_CLOBBER (VOIDmode,
3643 ni2dest))));
3644 m_split_insn = combine_split_insns (parallel, i3);
3646 if (m_split_insn == 0
3647 && REGNO (i2dest) >= FIRST_PSEUDO_REGISTER)
3649 struct undo *buf;
3651 adjust_reg_mode (regno_reg_rtx[REGNO (i2dest)], old_mode);
3652 buf = undobuf.undos;
3653 undobuf.undos = buf->next;
3654 buf->next = undobuf.frees;
3655 undobuf.frees = buf;
3659 i2scratch = m_split_insn != 0;
3662 /* If recog_for_combine has discarded clobbers, try to use them
3663 again for the split. */
3664 if (m_split_insn == 0 && newpat_vec_with_clobbers)
3666 parallel = gen_rtx_PARALLEL (VOIDmode, newpat_vec_with_clobbers);
3667 m_split_insn = combine_split_insns (parallel, i3);
3670 if (m_split_insn && NEXT_INSN (m_split_insn) == NULL_RTX)
3672 rtx m_split_pat = PATTERN (m_split_insn);
3673 insn_code_number = recog_for_combine (&m_split_pat, i3, &new_i3_notes);
3674 if (insn_code_number >= 0)
3675 newpat = m_split_pat;
3677 else if (m_split_insn && NEXT_INSN (NEXT_INSN (m_split_insn)) == NULL_RTX
3678 && (next_nonnote_nondebug_insn (i2) == i3
3679 || ! use_crosses_set_p (PATTERN (m_split_insn), DF_INSN_LUID (i2))))
3681 rtx i2set, i3set;
3682 rtx newi3pat = PATTERN (NEXT_INSN (m_split_insn));
3683 newi2pat = PATTERN (m_split_insn);
3685 i3set = single_set (NEXT_INSN (m_split_insn));
3686 i2set = single_set (m_split_insn);
3688 i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);
3690 /* If I2 or I3 has multiple SETs, we won't know how to track
3691 register status, so don't use these insns. If I2's destination
3692 is used between I2 and I3, we also can't use these insns. */
3694 if (i2_code_number >= 0 && i2set && i3set
3695 && (next_nonnote_nondebug_insn (i2) == i3
3696 || ! reg_used_between_p (SET_DEST (i2set), i2, i3)))
3697 insn_code_number = recog_for_combine (&newi3pat, i3,
3698 &new_i3_notes);
3699 if (insn_code_number >= 0)
3700 newpat = newi3pat;
3702 /* It is possible that both insns now set the destination of I3.
3703 If so, we must show an extra use of it. */
3705 if (insn_code_number >= 0)
3707 rtx new_i3_dest = SET_DEST (i3set);
3708 rtx new_i2_dest = SET_DEST (i2set);
3710 while (GET_CODE (new_i3_dest) == ZERO_EXTRACT
3711 || GET_CODE (new_i3_dest) == STRICT_LOW_PART
3712 || GET_CODE (new_i3_dest) == SUBREG)
3713 new_i3_dest = XEXP (new_i3_dest, 0);
3715 while (GET_CODE (new_i2_dest) == ZERO_EXTRACT
3716 || GET_CODE (new_i2_dest) == STRICT_LOW_PART
3717 || GET_CODE (new_i2_dest) == SUBREG)
3718 new_i2_dest = XEXP (new_i2_dest, 0);
3720 if (REG_P (new_i3_dest)
3721 && REG_P (new_i2_dest)
3722 && REGNO (new_i3_dest) == REGNO (new_i2_dest)
3723 && REGNO (new_i2_dest) < reg_n_sets_max)
3724 INC_REG_N_SETS (REGNO (new_i2_dest), 1);
3728 /* If we can split it and use I2DEST, go ahead and see if that
3729 helps things be recognized. Verify that none of the registers
3730 are set between I2 and I3. */
3731 if (insn_code_number < 0
3732 && (split = find_split_point (&newpat, i3, false)) != 0
3733 && (!HAVE_cc0 || REG_P (i2dest))
3734 /* We need I2DEST in the proper mode. If it is a hard register
3735 or the only use of a pseudo, we can change its mode.
3736 Make sure we don't change a hard register to have a mode that
3737 isn't valid for it, or change the number of registers. */
3738 && (GET_MODE (*split) == GET_MODE (i2dest)
3739 || GET_MODE (*split) == VOIDmode
3740 || can_change_dest_mode (i2dest, added_sets_2,
3741 GET_MODE (*split)))
3742 && (next_nonnote_nondebug_insn (i2) == i3
3743 || ! use_crosses_set_p (*split, DF_INSN_LUID (i2)))
3744 /* We can't overwrite I2DEST if its value is still used by
3745 NEWPAT. */
3746 && ! reg_referenced_p (i2dest, newpat))
3748 rtx newdest = i2dest;
3749 enum rtx_code split_code = GET_CODE (*split);
3750 machine_mode split_mode = GET_MODE (*split);
3751 bool subst_done = false;
3752 newi2pat = NULL_RTX;
3754 i2scratch = true;
3756 /* *SPLIT may be part of I2SRC, so make sure we have the
3757 original expression around for later debug processing.
3758 We should not need I2SRC any more in other cases. */
3759 if (MAY_HAVE_DEBUG_INSNS)
3760 i2src = copy_rtx (i2src);
3761 else
3762 i2src = NULL;
3764 /* Get NEWDEST as a register in the proper mode. We have already
3765 validated that we can do this. */
3766 if (GET_MODE (i2dest) != split_mode && split_mode != VOIDmode)
3768 if (REGNO (i2dest) < FIRST_PSEUDO_REGISTER)
3769 newdest = gen_rtx_REG (split_mode, REGNO (i2dest));
3770 else
3772 SUBST_MODE (regno_reg_rtx[REGNO (i2dest)], split_mode);
3773 newdest = regno_reg_rtx[REGNO (i2dest)];
3777 /* If *SPLIT is a (mult FOO (const_int pow2)), convert it to
3778 an ASHIFT. This can occur if it was inside a PLUS and hence
3779 appeared to be a memory address. This is a kludge. */
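/* For instance, (mult FOO (const_int 8)) is rewritten here as
   (ashift FOO (const_int 3)).  */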
3780 if (split_code == MULT
3781 && CONST_INT_P (XEXP (*split, 1))
3782 && INTVAL (XEXP (*split, 1)) > 0
3783 && (i = exact_log2 (UINTVAL (XEXP (*split, 1)))) >= 0)
3785 SUBST (*split, gen_rtx_ASHIFT (split_mode,
3786 XEXP (*split, 0), GEN_INT (i)));
3787 /* Update split_code because we may not have a multiply
3788 anymore. */
3789 split_code = GET_CODE (*split);
3792 /* Similarly for (plus (mult FOO (const_int pow2))). */
3793 if (split_code == PLUS
3794 && GET_CODE (XEXP (*split, 0)) == MULT
3795 && CONST_INT_P (XEXP (XEXP (*split, 0), 1))
3796 && INTVAL (XEXP (XEXP (*split, 0), 1)) > 0
3797 && (i = exact_log2 (UINTVAL (XEXP (XEXP (*split, 0), 1)))) >= 0)
3799 rtx nsplit = XEXP (*split, 0);
3800 SUBST (XEXP (*split, 0), gen_rtx_ASHIFT (GET_MODE (nsplit),
3801 XEXP (nsplit, 0), GEN_INT (i)));
3802 /* Update split_code because we may not have a multiply
3803 anymore. */
3804 split_code = GET_CODE (*split);
3807 #ifdef INSN_SCHEDULING
3808 /* If *SPLIT is a paradoxical SUBREG, when we split it, it should
3809 be written as a ZERO_EXTEND. */
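/* E.g. a paradoxical (subreg:SI (mem:QI ADDR) 0) is rewritten as
   (zero_extend:SI (mem:QI ADDR)), or as a SIGN_EXTEND when
   load_extend_op says that is what the load really does.  */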
3810 if (split_code == SUBREG && MEM_P (SUBREG_REG (*split)))
3812 /* Or as a SIGN_EXTEND if LOAD_EXTEND_OP says that that's
3813 what it really is. */
3814 if (load_extend_op (GET_MODE (SUBREG_REG (*split)))
3815 == SIGN_EXTEND)
3816 SUBST (*split, gen_rtx_SIGN_EXTEND (split_mode,
3817 SUBREG_REG (*split)));
3818 else
3819 SUBST (*split, gen_rtx_ZERO_EXTEND (split_mode,
3820 SUBREG_REG (*split)));
3822 #endif
3824 /* Attempt to split binary operators using arithmetic identities. */
3825 if (BINARY_P (SET_SRC (newpat))
3826 && split_mode == GET_MODE (SET_SRC (newpat))
3827 && ! side_effects_p (SET_SRC (newpat)))
3829 rtx setsrc = SET_SRC (newpat);
3830 machine_mode mode = GET_MODE (setsrc);
3831 enum rtx_code code = GET_CODE (setsrc);
3832 rtx src_op0 = XEXP (setsrc, 0);
3833 rtx src_op1 = XEXP (setsrc, 1);
3835 /* Split "X = Y op Y" as "Z = Y; X = Z op Z". */
3836 if (rtx_equal_p (src_op0, src_op1))
3838 newi2pat = gen_rtx_SET (newdest, src_op0);
3839 SUBST (XEXP (setsrc, 0), newdest);
3840 SUBST (XEXP (setsrc, 1), newdest);
3841 subst_done = true;
3843 /* Split "((P op Q) op R) op S" where op is PLUS or MULT. */
3844 else if ((code == PLUS || code == MULT)
3845 && GET_CODE (src_op0) == code
3846 && GET_CODE (XEXP (src_op0, 0)) == code
3847 && (INTEGRAL_MODE_P (mode)
3848 || (FLOAT_MODE_P (mode)
3849 && flag_unsafe_math_optimizations)))
3851 rtx p = XEXP (XEXP (src_op0, 0), 0);
3852 rtx q = XEXP (XEXP (src_op0, 0), 1);
3853 rtx r = XEXP (src_op0, 1);
3854 rtx s = src_op1;
3856 /* Split both "((X op Y) op X) op Y" and
3857 "((X op Y) op Y) op X" as "T op T" where T is
3858 "X op Y". */
3859 if ((rtx_equal_p (p,r) && rtx_equal_p (q,s))
3860 || (rtx_equal_p (p,s) && rtx_equal_p (q,r)))
3862 newi2pat = gen_rtx_SET (newdest, XEXP (src_op0, 0));
3863 SUBST (XEXP (setsrc, 0), newdest);
3864 SUBST (XEXP (setsrc, 1), newdest);
3865 subst_done = true;
3867 /* Split "((X op X) op Y) op Y" as "T op T" where
3868 T is "X op Y". */
3869 else if (rtx_equal_p (p,q) && rtx_equal_p (r,s))
3871 rtx tmp = simplify_gen_binary (code, mode, p, r);
3872 newi2pat = gen_rtx_SET (newdest, tmp);
3873 SUBST (XEXP (setsrc, 0), newdest);
3874 SUBST (XEXP (setsrc, 1), newdest);
3875 subst_done = true;
3880 if (!subst_done)
3882 newi2pat = gen_rtx_SET (newdest, *split);
3883 SUBST (*split, newdest);
3886 i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);
3888 /* recog_for_combine might have added CLOBBERs to newi2pat.
3889 Make sure NEWPAT does not depend on the clobbered regs. */
3890 if (GET_CODE (newi2pat) == PARALLEL)
3891 for (i = XVECLEN (newi2pat, 0) - 1; i >= 0; i--)
3892 if (GET_CODE (XVECEXP (newi2pat, 0, i)) == CLOBBER)
3894 rtx reg = XEXP (XVECEXP (newi2pat, 0, i), 0);
3895 if (reg_overlap_mentioned_p (reg, newpat))
3897 undo_all ();
3898 return 0;
3902 /* If the split point was a MULT and we didn't have one before,
3903 don't use one now. */
3904 if (i2_code_number >= 0 && ! (split_code == MULT && ! have_mult))
3905 insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
3909 /* Check for a case where we loaded from memory in a narrow mode and
3910 then sign extended it, but we need both registers. In that case,
3911 we have a PARALLEL with both loads from the same memory location.
3912 We can split this into a load from memory followed by a register-register
3913 copy. This saves at least one insn, more if register allocation can
3914 eliminate the copy.
3916 We cannot do this if the destination of the first assignment is a
3917 condition code register or cc0. We eliminate this case by making sure
3918 the SET_DEST and SET_SRC have the same mode.
3920 We cannot do this if the destination of the second assignment is
3921 a register that we have already assumed is zero-extended. Similarly
3922 for a SUBREG of such a register. */
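/* Concretely, a PARALLEL of (set A (sign_extend (mem M))) and
   (set B (mem M)) becomes an extending load in I2 that sets A,
   while I3 is turned into a copy of the low part of A into B.  */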
3924 else if (i1 && insn_code_number < 0 && asm_noperands (newpat) < 0
3925 && GET_CODE (newpat) == PARALLEL
3926 && XVECLEN (newpat, 0) == 2
3927 && GET_CODE (XVECEXP (newpat, 0, 0)) == SET
3928 && GET_CODE (SET_SRC (XVECEXP (newpat, 0, 0))) == SIGN_EXTEND
3929 && (GET_MODE (SET_DEST (XVECEXP (newpat, 0, 0)))
3930 == GET_MODE (SET_SRC (XVECEXP (newpat, 0, 0))))
3931 && GET_CODE (XVECEXP (newpat, 0, 1)) == SET
3932 && rtx_equal_p (SET_SRC (XVECEXP (newpat, 0, 1)),
3933 XEXP (SET_SRC (XVECEXP (newpat, 0, 0)), 0))
3934 && ! use_crosses_set_p (SET_SRC (XVECEXP (newpat, 0, 1)),
3935 DF_INSN_LUID (i2))
3936 && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != ZERO_EXTRACT
3937 && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != STRICT_LOW_PART
3938 && ! (temp_expr = SET_DEST (XVECEXP (newpat, 0, 1)),
3939 (REG_P (temp_expr)
3940 && reg_stat[REGNO (temp_expr)].nonzero_bits != 0
3941 && GET_MODE_PRECISION (GET_MODE (temp_expr)) < BITS_PER_WORD
3942 && GET_MODE_PRECISION (GET_MODE (temp_expr)) < HOST_BITS_PER_INT
3943 && (reg_stat[REGNO (temp_expr)].nonzero_bits
3944 != GET_MODE_MASK (word_mode))))
3945 && ! (GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) == SUBREG
3946 && (temp_expr = SUBREG_REG (SET_DEST (XVECEXP (newpat, 0, 1))),
3947 (REG_P (temp_expr)
3948 && reg_stat[REGNO (temp_expr)].nonzero_bits != 0
3949 && GET_MODE_PRECISION (GET_MODE (temp_expr)) < BITS_PER_WORD
3950 && GET_MODE_PRECISION (GET_MODE (temp_expr)) < HOST_BITS_PER_INT
3951 && (reg_stat[REGNO (temp_expr)].nonzero_bits
3952 != GET_MODE_MASK (word_mode)))))
3953 && ! reg_overlap_mentioned_p (SET_DEST (XVECEXP (newpat, 0, 1)),
3954 SET_SRC (XVECEXP (newpat, 0, 1)))
3955 && ! find_reg_note (i3, REG_UNUSED,
3956 SET_DEST (XVECEXP (newpat, 0, 0))))
3958 rtx ni2dest;
3960 newi2pat = XVECEXP (newpat, 0, 0);
3961 ni2dest = SET_DEST (XVECEXP (newpat, 0, 0));
3962 newpat = XVECEXP (newpat, 0, 1);
3963 SUBST (SET_SRC (newpat),
3964 gen_lowpart (GET_MODE (SET_SRC (newpat)), ni2dest));
3965 i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);
3967 if (i2_code_number >= 0)
3968 insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
3970 if (insn_code_number >= 0)
3971 swap_i2i3 = 1;
3974 /* Similarly, check for a case where we have a PARALLEL of two independent
3975 SETs but we started with three insns. In this case, we can do the sets
3976 as two separate insns. This case occurs when some SET allows two
3977 other insns to combine, but the destination of that SET is still live.
3979 Also do this if we started with two insns and (at least) one of the
3980 resulting sets is a noop; this noop will be deleted later. */
3982 else if (insn_code_number < 0 && asm_noperands (newpat) < 0
3983 && GET_CODE (newpat) == PARALLEL
3984 && XVECLEN (newpat, 0) == 2
3985 && GET_CODE (XVECEXP (newpat, 0, 0)) == SET
3986 && GET_CODE (XVECEXP (newpat, 0, 1)) == SET
3987 && (i1 || set_noop_p (XVECEXP (newpat, 0, 0))
3988 || set_noop_p (XVECEXP (newpat, 0, 1)))
3989 && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 0))) != ZERO_EXTRACT
3990 && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 0))) != STRICT_LOW_PART
3991 && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != ZERO_EXTRACT
3992 && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != STRICT_LOW_PART
3993 && ! reg_referenced_p (SET_DEST (XVECEXP (newpat, 0, 1)),
3994 XVECEXP (newpat, 0, 0))
3995 && ! reg_referenced_p (SET_DEST (XVECEXP (newpat, 0, 0)),
3996 XVECEXP (newpat, 0, 1))
3997 && ! (contains_muldiv (SET_SRC (XVECEXP (newpat, 0, 0)))
3998 && contains_muldiv (SET_SRC (XVECEXP (newpat, 0, 1)))))
4000 rtx set0 = XVECEXP (newpat, 0, 0);
4001 rtx set1 = XVECEXP (newpat, 0, 1);
4003 /* Normally, it doesn't matter which of the two is done first,
4004 but the one that references cc0 can't be the second, and
4005 one which uses any regs/memory set in between i2 and i3 can't
4006 be first. The PARALLEL might also have been pre-existing in i3,
4007 so we need to make sure that we won't wrongly hoist a SET to i2
4008 that would conflict with a death note present in there. */
4009 if (!use_crosses_set_p (SET_SRC (set1), DF_INSN_LUID (i2))
4010 && !(REG_P (SET_DEST (set1))
4011 && find_reg_note (i2, REG_DEAD, SET_DEST (set1)))
4012 && !(GET_CODE (SET_DEST (set1)) == SUBREG
4013 && find_reg_note (i2, REG_DEAD,
4014 SUBREG_REG (SET_DEST (set1))))
4015 && (!HAVE_cc0 || !reg_referenced_p (cc0_rtx, set0))
4016 /* If I3 is a jump, ensure that set0 is a jump so that
4017 we do not create invalid RTL. */
4018 && (!JUMP_P (i3) || SET_DEST (set0) == pc_rtx)
4021 newi2pat = set1;
4022 newpat = set0;
4024 else if (!use_crosses_set_p (SET_SRC (set0), DF_INSN_LUID (i2))
4025 && !(REG_P (SET_DEST (set0))
4026 && find_reg_note (i2, REG_DEAD, SET_DEST (set0)))
4027 && !(GET_CODE (SET_DEST (set0)) == SUBREG
4028 && find_reg_note (i2, REG_DEAD,
4029 SUBREG_REG (SET_DEST (set0))))
4030 && (!HAVE_cc0 || !reg_referenced_p (cc0_rtx, set1))
4031 /* If I3 is a jump, ensure that set1 is a jump so that
4032 we do not create invalid RTL. */
4033 && (!JUMP_P (i3) || SET_DEST (set1) == pc_rtx)
4036 newi2pat = set0;
4037 newpat = set1;
4039 else
4041 undo_all ();
4042 return 0;
4045 i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);
4047 if (i2_code_number >= 0)
4049 /* recog_for_combine might have added CLOBBERs to newi2pat.
4050 Make sure NEWPAT does not depend on the clobbered regs. */
4051 if (GET_CODE (newi2pat) == PARALLEL)
4053 for (i = XVECLEN (newi2pat, 0) - 1; i >= 0; i--)
4054 if (GET_CODE (XVECEXP (newi2pat, 0, i)) == CLOBBER)
4056 rtx reg = XEXP (XVECEXP (newi2pat, 0, i), 0);
4057 if (reg_overlap_mentioned_p (reg, newpat))
4059 undo_all ();
4060 return 0;
4065 insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
4069 /* If it still isn't recognized, fail and change things back the way they
4070 were. */
4071 if ((insn_code_number < 0
4072 /* Is the result a reasonable ASM_OPERANDS? */
4073 && (! check_asm_operands (newpat) || added_sets_1 || added_sets_2)))
4075 undo_all ();
4076 return 0;
4079 /* If we had to change another insn, make sure it is valid also. */
4080 if (undobuf.other_insn)
4082 CLEAR_HARD_REG_SET (newpat_used_regs);
4084 other_pat = PATTERN (undobuf.other_insn);
4085 other_code_number = recog_for_combine (&other_pat, undobuf.other_insn,
4086 &new_other_notes);
4088 if (other_code_number < 0 && ! check_asm_operands (other_pat))
4090 undo_all ();
4091 return 0;
4095 /* If I2 is the CC0 setter and I3 is the CC0 user, then check whether
4096 they are adjacent to each other or not. */
4097 if (HAVE_cc0)
4099 rtx_insn *p = prev_nonnote_insn (i3);
4100 if (p && p != i2 && NONJUMP_INSN_P (p) && newi2pat
4101 && sets_cc0_p (newi2pat))
4103 undo_all ();
4104 return 0;
4108 /* Only allow this combination if insn_cost reports that the
4109 replacement instructions are cheaper than the originals. */
4110 if (!combine_validate_cost (i0, i1, i2, i3, newpat, newi2pat, other_pat))
4112 undo_all ();
4113 return 0;
4116 if (MAY_HAVE_DEBUG_INSNS)
4118 struct undo *undo;
4120 for (undo = undobuf.undos; undo; undo = undo->next)
4121 if (undo->kind == UNDO_MODE)
4123 rtx reg = *undo->where.r;
4124 machine_mode new_mode = GET_MODE (reg);
4125 machine_mode old_mode = undo->old_contents.m;
4127 /* Temporarily revert mode back. */
4128 adjust_reg_mode (reg, old_mode);
4130 if (reg == i2dest && i2scratch)
4132 /* If we used i2dest as a scratch register with a
4133 different mode, substitute it for the original
4134 i2src while its original mode is temporarily
4135 restored, and then clear i2scratch so that we don't
4136 do it again later. */
4137 propagate_for_debug (i2, last_combined_insn, reg, i2src,
4138 this_basic_block);
4139 i2scratch = false;
4140 /* Put back the new mode. */
4141 adjust_reg_mode (reg, new_mode);
4143 else
4145 rtx tempreg = gen_raw_REG (old_mode, REGNO (reg));
4146 rtx_insn *first, *last;
4148 if (reg == i2dest)
4150 first = i2;
4151 last = last_combined_insn;
4153 else
4155 first = i3;
4156 last = undobuf.other_insn;
4157 gcc_assert (last);
4158 if (DF_INSN_LUID (last)
4159 < DF_INSN_LUID (last_combined_insn))
4160 last = last_combined_insn;
4163 /* We're dealing with a reg that changed mode but not
4164 meaning, so we want to turn it into a subreg for
4165 the new mode. However, because of REG sharing and
4166 because its mode had already changed, we have to do
4167 it in two steps. First, replace any debug uses of
4168 reg, with its original mode temporarily restored,
4169 with this copy we have created; then, replace the
4170 copy with the SUBREG of the original shared reg,
4171 once again changed to the new mode. */
4172 propagate_for_debug (first, last, reg, tempreg,
4173 this_basic_block);
4174 adjust_reg_mode (reg, new_mode);
4175 propagate_for_debug (first, last, tempreg,
4176 lowpart_subreg (old_mode, reg, new_mode),
4177 this_basic_block);
4182 /* If we will be able to accept this, we have made a
4183 change to the destination of I3. This requires us to
4184 do a few adjustments. */
4186 if (changed_i3_dest)
4188 PATTERN (i3) = newpat;
4189 adjust_for_new_dest (i3);
4192 /* We now know that we can do this combination. Merge the insns and
4193 update the status of registers and LOG_LINKS. */
4195 if (undobuf.other_insn)
4197 rtx note, next;
4199 PATTERN (undobuf.other_insn) = other_pat;
4201 /* If any of the notes in OTHER_INSN were REG_DEAD or REG_UNUSED,
4202 ensure that they are still valid. Then add any non-duplicate
4203 notes added by recog_for_combine. */
4204 for (note = REG_NOTES (undobuf.other_insn); note; note = next)
4206 next = XEXP (note, 1);
4208 if ((REG_NOTE_KIND (note) == REG_DEAD
4209 && !reg_referenced_p (XEXP (note, 0),
4210 PATTERN (undobuf.other_insn)))
4211 ||(REG_NOTE_KIND (note) == REG_UNUSED
4212 && !reg_set_p (XEXP (note, 0),
4213 PATTERN (undobuf.other_insn)))
4214 /* Simply drop any REG_EQUAL or REG_EQUIV note since it may no longer be valid
4215 for other_insn. It may be possible to record that CC
4216 register is changed and only discard those notes, but
4217 in practice it's unnecessary complication and doesn't
4218 give any meaningful improvement.
4220 See PR78559. */
4221 || REG_NOTE_KIND (note) == REG_EQUAL
4222 || REG_NOTE_KIND (note) == REG_EQUIV)
4223 remove_note (undobuf.other_insn, note);
4226 distribute_notes (new_other_notes, undobuf.other_insn,
4227 undobuf.other_insn, NULL, NULL_RTX, NULL_RTX,
4228 NULL_RTX);
4231 if (swap_i2i3)
4233 rtx_insn *insn;
4234 struct insn_link *link;
4235 rtx ni2dest;
4237 /* I3 now uses what used to be its destination and which is now
4238 I2's destination. This requires us to do a few adjustments. */
4239 PATTERN (i3) = newpat;
4240 adjust_for_new_dest (i3);
4242 /* We need a LOG_LINK from I3 to I2. But we used to have one,
4243 so we still will.
4245 However, some later insn might be using I2's dest and have
4246 a LOG_LINK pointing at I3. We must remove this link.
4247 The simplest way to remove the link is to point it at I1,
4248 which we know will be a NOTE. */
4250 /* newi2pat is usually a SET here; however, recog_for_combine might
4251 have added some clobbers. */
4252 if (GET_CODE (newi2pat) == PARALLEL)
4253 ni2dest = SET_DEST (XVECEXP (newi2pat, 0, 0));
4254 else
4255 ni2dest = SET_DEST (newi2pat);
4257 for (insn = NEXT_INSN (i3);
4258 insn && (this_basic_block->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
4259 || insn != BB_HEAD (this_basic_block->next_bb));
4260 insn = NEXT_INSN (insn))
4262 if (NONDEBUG_INSN_P (insn)
4263 && reg_referenced_p (ni2dest, PATTERN (insn)))
4265 FOR_EACH_LOG_LINK (link, insn)
4266 if (link->insn == i3)
4267 link->insn = i1;
4269 break;
4275 rtx i3notes, i2notes, i1notes = 0, i0notes = 0;
4276 struct insn_link *i3links, *i2links, *i1links = 0, *i0links = 0;
4277 rtx midnotes = 0;
4278 int from_luid;
4279 /* Compute which registers we expect to eliminate. newi2pat may be setting
4280 either i3dest or i2dest, so we must check it. */
4281 rtx elim_i2 = ((newi2pat && reg_set_p (i2dest, newi2pat))
4282 || i2dest_in_i2src || i2dest_in_i1src || i2dest_in_i0src
4283 || !i2dest_killed
4284 ? 0 : i2dest);
4285 /* For i1, we need to compute both local elimination and global
4286 elimination information with respect to newi2pat because i1dest
4287 may be the same as i3dest, in which case newi2pat may be setting
4288 i1dest. Global information is used when distributing REG_DEAD
4289 note for i2 and i3, in which case it does matter if newi2pat sets
4290 i1dest or not.
4292 Local information is used when distributing REG_DEAD note for i1,
4293 in which case it doesn't matter if newi2pat sets i1dest or not.
4294 See PR62151: if we have a four-insn combination:
4295 i0: r0 <- i0src
4296 i1: r1 <- i1src (using r0)
4297 REG_DEAD (r0)
4298 i2: r0 <- i2src (using r1)
4299 i3: r3 <- i3src (using r0)
4300 ix: using r0
4301 From i1's point of view, r0 is eliminated, no matter if it is set
4302 by newi2pat or not. In other words, REG_DEAD info for r0 in i1
4303 should be discarded.
4305 Note local information only affects cases in forms like "I1->I2->I3",
4306 "I0->I1->I2->I3" or "I0&I1->I2, I2->I3". For other cases like
4307 "I0->I1, I1&I2->I3" or "I1&I2->I3", newi2pat won't set i1dest or
4308 i0dest anyway. */
4309 rtx local_elim_i1 = (i1 == 0 || i1dest_in_i1src || i1dest_in_i0src
4310 || !i1dest_killed
4311 ? 0 : i1dest);
4312 rtx elim_i1 = (local_elim_i1 == 0
4313 || (newi2pat && reg_set_p (i1dest, newi2pat))
4314 ? 0 : i1dest);
4315 /* Same case as i1. */
4316 rtx local_elim_i0 = (i0 == 0 || i0dest_in_i0src || !i0dest_killed
4317 ? 0 : i0dest);
4318 rtx elim_i0 = (local_elim_i0 == 0
4319 || (newi2pat && reg_set_p (i0dest, newi2pat))
4320 ? 0 : i0dest);
4322 /* Get the old REG_NOTES and LOG_LINKS from all our insns and
4323 clear them. */
4324 i3notes = REG_NOTES (i3), i3links = LOG_LINKS (i3);
4325 i2notes = REG_NOTES (i2), i2links = LOG_LINKS (i2);
4326 if (i1)
4327 i1notes = REG_NOTES (i1), i1links = LOG_LINKS (i1);
4328 if (i0)
4329 i0notes = REG_NOTES (i0), i0links = LOG_LINKS (i0);
4331 /* Ensure that we do not have something that should not be shared but
4332 occurs multiple times in the new insns. Check this by first
4333 resetting all the `used' flags and then copying anything that is shared.
4335 reset_used_flags (i3notes);
4336 reset_used_flags (i2notes);
4337 reset_used_flags (i1notes);
4338 reset_used_flags (i0notes);
4339 reset_used_flags (newpat);
4340 reset_used_flags (newi2pat);
4341 if (undobuf.other_insn)
4342 reset_used_flags (PATTERN (undobuf.other_insn));
4344 i3notes = copy_rtx_if_shared (i3notes);
4345 i2notes = copy_rtx_if_shared (i2notes);
4346 i1notes = copy_rtx_if_shared (i1notes);
4347 i0notes = copy_rtx_if_shared (i0notes);
4348 newpat = copy_rtx_if_shared (newpat);
4349 newi2pat = copy_rtx_if_shared (newi2pat);
4350 if (undobuf.other_insn)
4351 reset_used_flags (PATTERN (undobuf.other_insn));
4353 INSN_CODE (i3) = insn_code_number;
4354 PATTERN (i3) = newpat;
4356 if (CALL_P (i3) && CALL_INSN_FUNCTION_USAGE (i3))
4358 for (rtx link = CALL_INSN_FUNCTION_USAGE (i3); link;
4359 link = XEXP (link, 1))
4361 if (substed_i2)
4363 /* I2SRC must still be meaningful at this point. Some
4364 splitting operations can invalidate I2SRC, but those
4365 operations do not apply to calls. */
4366 gcc_assert (i2src);
4367 XEXP (link, 0) = simplify_replace_rtx (XEXP (link, 0),
4368 i2dest, i2src);
4370 if (substed_i1)
4371 XEXP (link, 0) = simplify_replace_rtx (XEXP (link, 0),
4372 i1dest, i1src);
4373 if (substed_i0)
4374 XEXP (link, 0) = simplify_replace_rtx (XEXP (link, 0),
4375 i0dest, i0src);
4379 if (undobuf.other_insn)
4380 INSN_CODE (undobuf.other_insn) = other_code_number;
4382 /* We had one special case above where I2 had more than one set and
4383 we replaced a destination of one of those sets with the destination
4384 of I3. In that case, we have to update LOG_LINKS of insns later
4385 in this basic block. Note that this (expensive) case is rare.
4387 Also, in this case, we must pretend that all REG_NOTEs for I2
4388 actually came from I3, so that REG_UNUSED notes from I2 will be
4389 properly handled. */
4391 if (i3_subst_into_i2)
4393 for (i = 0; i < XVECLEN (PATTERN (i2), 0); i++)
4394 if ((GET_CODE (XVECEXP (PATTERN (i2), 0, i)) == SET
4395 || GET_CODE (XVECEXP (PATTERN (i2), 0, i)) == CLOBBER)
4396 && REG_P (SET_DEST (XVECEXP (PATTERN (i2), 0, i)))
4397 && SET_DEST (XVECEXP (PATTERN (i2), 0, i)) != i2dest
4398 && ! find_reg_note (i2, REG_UNUSED,
4399 SET_DEST (XVECEXP (PATTERN (i2), 0, i))))
4400 for (temp_insn = NEXT_INSN (i2);
4401 temp_insn
4402 && (this_basic_block->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
4403 || BB_HEAD (this_basic_block) != temp_insn);
4404 temp_insn = NEXT_INSN (temp_insn))
4405 if (temp_insn != i3 && NONDEBUG_INSN_P (temp_insn))
4406 FOR_EACH_LOG_LINK (link, temp_insn)
4407 if (link->insn == i2)
4408 link->insn = i3;
4410 if (i3notes)
4412 rtx link = i3notes;
4413 while (XEXP (link, 1))
4414 link = XEXP (link, 1);
4415 XEXP (link, 1) = i2notes;
4417 else
4418 i3notes = i2notes;
4419 i2notes = 0;
4422 LOG_LINKS (i3) = NULL;
4423 REG_NOTES (i3) = 0;
4424 LOG_LINKS (i2) = NULL;
4425 REG_NOTES (i2) = 0;
4427 if (newi2pat)
4429 if (MAY_HAVE_DEBUG_INSNS && i2scratch)
4430 propagate_for_debug (i2, last_combined_insn, i2dest, i2src,
4431 this_basic_block);
4432 INSN_CODE (i2) = i2_code_number;
4433 PATTERN (i2) = newi2pat;
4435 else
4437 if (MAY_HAVE_DEBUG_INSNS && i2src)
4438 propagate_for_debug (i2, last_combined_insn, i2dest, i2src,
4439 this_basic_block);
4440 SET_INSN_DELETED (i2);
4443 if (i1)
4445 LOG_LINKS (i1) = NULL;
4446 REG_NOTES (i1) = 0;
4447 if (MAY_HAVE_DEBUG_INSNS)
4448 propagate_for_debug (i1, last_combined_insn, i1dest, i1src,
4449 this_basic_block);
4450 SET_INSN_DELETED (i1);
4453 if (i0)
4455 LOG_LINKS (i0) = NULL;
4456 REG_NOTES (i0) = 0;
4457 if (MAY_HAVE_DEBUG_INSNS)
4458 propagate_for_debug (i0, last_combined_insn, i0dest, i0src,
4459 this_basic_block);
4460 SET_INSN_DELETED (i0);
4463 /* Get death notes for everything that is now used in either I3 or
4464 I2 and used to die in a previous insn. If we built two new
4465 patterns, move from I1 to I2 then I2 to I3 so that we get the
4466 proper movement on registers that I2 modifies. */
4468 if (i0)
4469 from_luid = DF_INSN_LUID (i0);
4470 else if (i1)
4471 from_luid = DF_INSN_LUID (i1);
4472 else
4473 from_luid = DF_INSN_LUID (i2);
4474 if (newi2pat)
4475 move_deaths (newi2pat, NULL_RTX, from_luid, i2, &midnotes);
4476 move_deaths (newpat, newi2pat, from_luid, i3, &midnotes);
4478 /* Distribute all the LOG_LINKS and REG_NOTES from I1, I2, and I3. */
4479 if (i3notes)
4480 distribute_notes (i3notes, i3, i3, newi2pat ? i2 : NULL,
4481 elim_i2, elim_i1, elim_i0);
4482 if (i2notes)
4483 distribute_notes (i2notes, i2, i3, newi2pat ? i2 : NULL,
4484 elim_i2, elim_i1, elim_i0);
4485 if (i1notes)
4486 distribute_notes (i1notes, i1, i3, newi2pat ? i2 : NULL,
4487 elim_i2, local_elim_i1, local_elim_i0);
4488 if (i0notes)
4489 distribute_notes (i0notes, i0, i3, newi2pat ? i2 : NULL,
4490 elim_i2, elim_i1, local_elim_i0);
4491 if (midnotes)
4492 distribute_notes (midnotes, NULL, i3, newi2pat ? i2 : NULL,
4493 elim_i2, elim_i1, elim_i0);
4495 /* Distribute any notes added to I2 or I3 by recog_for_combine. We
4496 know these are REG_UNUSED and want them to go to the desired insn,
4497 so we always pass it as i3. */
4499 if (newi2pat && new_i2_notes)
4500 distribute_notes (new_i2_notes, i2, i2, NULL, NULL_RTX, NULL_RTX,
4501 NULL_RTX);
4503 if (new_i3_notes)
4504 distribute_notes (new_i3_notes, i3, i3, NULL, NULL_RTX, NULL_RTX,
4505 NULL_RTX);
4507 /* If I3DEST was used in I3SRC, it really died in I3. We may need to
4508 put a REG_DEAD note for it somewhere. If NEWI2PAT exists and sets
4509 I3DEST, the death must be somewhere before I2, not I3. If we passed I3
4510 in that case, it might delete I2. Similarly for I2 and I1.
4511 Show an additional death due to the REG_DEAD note we make here. If
4512 we discard it in distribute_notes, we will decrement it again. */
4514 if (i3dest_killed)
4516 rtx new_note = alloc_reg_note (REG_DEAD, i3dest_killed, NULL_RTX);
4517 if (newi2pat && reg_set_p (i3dest_killed, newi2pat))
4518 distribute_notes (new_note, NULL, i2, NULL, elim_i2,
4519 elim_i1, elim_i0);
4520 else
4521 distribute_notes (new_note, NULL, i3, newi2pat ? i2 : NULL,
4522 elim_i2, elim_i1, elim_i0);
4525 if (i2dest_in_i2src)
4527 rtx new_note = alloc_reg_note (REG_DEAD, i2dest, NULL_RTX);
4528 if (newi2pat && reg_set_p (i2dest, newi2pat))
4529 distribute_notes (new_note, NULL, i2, NULL, NULL_RTX,
4530 NULL_RTX, NULL_RTX);
4531 else
4532 distribute_notes (new_note, NULL, i3, newi2pat ? i2 : NULL,
4533 NULL_RTX, NULL_RTX, NULL_RTX);
4536 if (i1dest_in_i1src)
4538 rtx new_note = alloc_reg_note (REG_DEAD, i1dest, NULL_RTX);
4539 if (newi2pat && reg_set_p (i1dest, newi2pat))
4540 distribute_notes (new_note, NULL, i2, NULL, NULL_RTX,
4541 NULL_RTX, NULL_RTX);
4542 else
4543 distribute_notes (new_note, NULL, i3, newi2pat ? i2 : NULL,
4544 NULL_RTX, NULL_RTX, NULL_RTX);
4547 if (i0dest_in_i0src)
4549 rtx new_note = alloc_reg_note (REG_DEAD, i0dest, NULL_RTX);
4550 if (newi2pat && reg_set_p (i0dest, newi2pat))
4551 distribute_notes (new_note, NULL, i2, NULL, NULL_RTX,
4552 NULL_RTX, NULL_RTX);
4553 else
4554 distribute_notes (new_note, NULL, i3, newi2pat ? i2 : NULL,
4555 NULL_RTX, NULL_RTX, NULL_RTX);
4558 distribute_links (i3links);
4559 distribute_links (i2links);
4560 distribute_links (i1links);
4561 distribute_links (i0links);
4563 if (REG_P (i2dest))
4565 struct insn_link *link;
4566 rtx_insn *i2_insn = 0;
4567 rtx i2_val = 0, set;
4569 /* The insn that used to set this register doesn't exist, and
4570 this life of the register may not exist either. See if one of
4571 I3's links points to an insn that sets I2DEST. If it does,
4572 that is now the last known value for I2DEST. If we don't update
4573 this and I2 set the register to a value that depended on its old
4574 contents, we will get confused. If this insn is used, things
4575 will be set correctly in combine_instructions. */
4576 FOR_EACH_LOG_LINK (link, i3)
4577 if ((set = single_set (link->insn)) != 0
4578 && rtx_equal_p (i2dest, SET_DEST (set)))
4579 i2_insn = link->insn, i2_val = SET_SRC (set);
4581 record_value_for_reg (i2dest, i2_insn, i2_val);
4583 /* If the reg formerly set in I2 died only once and that was in I3,
4584 zero its use count so it won't make `reload' do any work. */
4585 if (! added_sets_2
4586 && (newi2pat == 0 || ! reg_mentioned_p (i2dest, newi2pat))
4587 && ! i2dest_in_i2src
4588 && REGNO (i2dest) < reg_n_sets_max)
4589 INC_REG_N_SETS (REGNO (i2dest), -1);
4592 if (i1 && REG_P (i1dest))
4594 struct insn_link *link;
4595 rtx_insn *i1_insn = 0;
4596 rtx i1_val = 0, set;
4598 FOR_EACH_LOG_LINK (link, i3)
4599 if ((set = single_set (link->insn)) != 0
4600 && rtx_equal_p (i1dest, SET_DEST (set)))
4601 i1_insn = link->insn, i1_val = SET_SRC (set);
4603 record_value_for_reg (i1dest, i1_insn, i1_val);
4605 if (! added_sets_1
4606 && ! i1dest_in_i1src
4607 && REGNO (i1dest) < reg_n_sets_max)
4608 INC_REG_N_SETS (REGNO (i1dest), -1);
4611 if (i0 && REG_P (i0dest))
4613 struct insn_link *link;
4614 rtx_insn *i0_insn = 0;
4615 rtx i0_val = 0, set;
4617 FOR_EACH_LOG_LINK (link, i3)
4618 if ((set = single_set (link->insn)) != 0
4619 && rtx_equal_p (i0dest, SET_DEST (set)))
4620 i0_insn = link->insn, i0_val = SET_SRC (set);
4622 record_value_for_reg (i0dest, i0_insn, i0_val);
4624 if (! added_sets_0
4625 && ! i0dest_in_i0src
4626 && REGNO (i0dest) < reg_n_sets_max)
4627 INC_REG_N_SETS (REGNO (i0dest), -1);
4630 /* Update reg_stat[].nonzero_bits et al for any changes that may have
4631 been made to this insn. The order is important, because newi2pat
4632 can affect nonzero_bits of newpat. */
4633 if (newi2pat)
4634 note_stores (newi2pat, set_nonzero_bits_and_sign_copies, NULL);
4635 note_stores (newpat, set_nonzero_bits_and_sign_copies, NULL);
4638 if (undobuf.other_insn != NULL_RTX)
4640 if (dump_file)
4642 fprintf (dump_file, "modifying other_insn ");
4643 dump_insn_slim (dump_file, undobuf.other_insn);
4645 df_insn_rescan (undobuf.other_insn);
4648 if (i0 && !(NOTE_P (i0) && (NOTE_KIND (i0) == NOTE_INSN_DELETED)))
4650 if (dump_file)
4652 fprintf (dump_file, "modifying insn i0 ");
4653 dump_insn_slim (dump_file, i0);
4655 df_insn_rescan (i0);
4658 if (i1 && !(NOTE_P (i1) && (NOTE_KIND (i1) == NOTE_INSN_DELETED)))
4660 if (dump_file)
4662 fprintf (dump_file, "modifying insn i1 ");
4663 dump_insn_slim (dump_file, i1);
4665 df_insn_rescan (i1);
4668 if (i2 && !(NOTE_P (i2) && (NOTE_KIND (i2) == NOTE_INSN_DELETED)))
4670 if (dump_file)
4672 fprintf (dump_file, "modifying insn i2 ");
4673 dump_insn_slim (dump_file, i2);
4675 df_insn_rescan (i2);
4678 if (i3 && !(NOTE_P (i3) && (NOTE_KIND (i3) == NOTE_INSN_DELETED)))
4680 if (dump_file)
4682 fprintf (dump_file, "modifying insn i3 ");
4683 dump_insn_slim (dump_file, i3);
4685 df_insn_rescan (i3);
4688 /* Set new_direct_jump_p if a new return or simple jump instruction
4689 has been created. Adjust the CFG accordingly. */
4690 if (returnjump_p (i3) || any_uncondjump_p (i3))
4692 *new_direct_jump_p = 1;
4693 mark_jump_label (PATTERN (i3), i3, 0);
4694 update_cfg_for_uncondjump (i3);
4697 if (undobuf.other_insn != NULL_RTX
4698 && (returnjump_p (undobuf.other_insn)
4699 || any_uncondjump_p (undobuf.other_insn)))
4701 *new_direct_jump_p = 1;
4702 update_cfg_for_uncondjump (undobuf.other_insn);
4705 if (GET_CODE (PATTERN (i3)) == TRAP_IF
4706 && XEXP (PATTERN (i3), 0) == const1_rtx)
4708 basic_block bb = BLOCK_FOR_INSN (i3);
4709 gcc_assert (bb);
4710 remove_edge (split_block (bb, i3));
4711 emit_barrier_after_bb (bb);
4712 *new_direct_jump_p = 1;
4715 if (undobuf.other_insn
4716 && GET_CODE (PATTERN (undobuf.other_insn)) == TRAP_IF
4717 && XEXP (PATTERN (undobuf.other_insn), 0) == const1_rtx)
4719 basic_block bb = BLOCK_FOR_INSN (undobuf.other_insn);
4720 gcc_assert (bb);
4721 remove_edge (split_block (bb, undobuf.other_insn));
4722 emit_barrier_after_bb (bb);
4723 *new_direct_jump_p = 1;
4726 /* A noop might also require cleaning up the CFG, if it comes from the
4727 simplification of a jump. */
4728 if (JUMP_P (i3)
4729 && GET_CODE (newpat) == SET
4730 && SET_SRC (newpat) == pc_rtx
4731 && SET_DEST (newpat) == pc_rtx)
4733 *new_direct_jump_p = 1;
4734 update_cfg_for_uncondjump (i3);
4737 if (undobuf.other_insn != NULL_RTX
4738 && JUMP_P (undobuf.other_insn)
4739 && GET_CODE (PATTERN (undobuf.other_insn)) == SET
4740 && SET_SRC (PATTERN (undobuf.other_insn)) == pc_rtx
4741 && SET_DEST (PATTERN (undobuf.other_insn)) == pc_rtx)
4743 *new_direct_jump_p = 1;
4744 update_cfg_for_uncondjump (undobuf.other_insn);
4747 combine_successes++;
4748 undo_commit ();
4750 if (added_links_insn
4751 && (newi2pat == 0 || DF_INSN_LUID (added_links_insn) < DF_INSN_LUID (i2))
4752 && DF_INSN_LUID (added_links_insn) < DF_INSN_LUID (i3))
4753 return added_links_insn;
4754 else
4755 return newi2pat ? i2 : i3;
4758 /* Get a marker for undoing to the current state. */
4760 static void *
4761 get_undo_marker (void)
4763 return undobuf.undos;
4766 /* Undo the modifications up to the marker. */
4768 static void
4769 undo_to_marker (void *marker)
4771 struct undo *undo, *next;
4773 for (undo = undobuf.undos; undo != marker; undo = next)
4775 gcc_assert (undo);
4777 next = undo->next;
4778 switch (undo->kind)
4780 case UNDO_RTX:
4781 *undo->where.r = undo->old_contents.r;
4782 break;
4783 case UNDO_INT:
4784 *undo->where.i = undo->old_contents.i;
4785 break;
4786 case UNDO_MODE:
4787 adjust_reg_mode (*undo->where.r, undo->old_contents.m);
4788 break;
4789 case UNDO_LINKS:
4790 *undo->where.l = undo->old_contents.l;
4791 break;
4792 default:
4793 gcc_unreachable ();
4796 undo->next = undobuf.frees;
4797 undobuf.frees = undo;
4800 undobuf.undos = (struct undo *) marker;
4803 /* Undo all the modifications recorded in undobuf. */
4805 static void
4806 undo_all (void)
4808 undo_to_marker (0);
4811 /* We've committed to accepting the changes we made. Move all
4812 of the undos to the free list. */
4814 static void
4815 undo_commit (void)
4817 struct undo *undo, *next;
4819 for (undo = undobuf.undos; undo; undo = next)
4821 next = undo->next;
4822 undo->next = undobuf.frees;
4823 undobuf.frees = undo;
4825 undobuf.undos = 0;
4828 /* Find the innermost point within the rtx at LOC, possibly LOC itself,
4829 where we have an arithmetic expression and return that point. LOC will
4830 be inside INSN.
4832 try_combine will call this function to see if an insn can be split into
4833 two insns. */
4835 static rtx *
4836 find_split_point (rtx *loc, rtx_insn *insn, bool set_src)
4838 rtx x = *loc;
4839 enum rtx_code code = GET_CODE (x);
4840 rtx *split;
4841 unsigned HOST_WIDE_INT len = 0;
4842 HOST_WIDE_INT pos = 0;
4843 int unsignedp = 0;
4844 rtx inner = NULL_RTX;
4845 scalar_int_mode mode, inner_mode;
4847 /* First special-case some codes. */
4848 switch (code)
4850 case SUBREG:
4851 #ifdef INSN_SCHEDULING
4852 /* If we are making a paradoxical SUBREG invalid, it becomes a split
4853 point. */
4854 if (MEM_P (SUBREG_REG (x)))
4855 return loc;
4856 #endif
4857 return find_split_point (&SUBREG_REG (x), insn, false);
4859 case MEM:
4860 /* If we have (mem (const ..)) or (mem (symbol_ref ...)), split it
4861 using LO_SUM and HIGH. */
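/* E.g. the address (symbol_ref FOO) is rewritten as
   (lo_sum (high (symbol_ref FOO)) (symbol_ref FOO)), and the
   (high ...) half becomes the split point.  */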
4862 if (HAVE_lo_sum && (GET_CODE (XEXP (x, 0)) == CONST
4863 || GET_CODE (XEXP (x, 0)) == SYMBOL_REF))
4865 machine_mode address_mode = get_address_mode (x);
4867 SUBST (XEXP (x, 0),
4868 gen_rtx_LO_SUM (address_mode,
4869 gen_rtx_HIGH (address_mode, XEXP (x, 0)),
4870 XEXP (x, 0)));
4871 return &XEXP (XEXP (x, 0), 0);
4874 /* If we have a PLUS whose second operand is a constant and the
4875 address is not valid, perhaps we can split it up using
4876 the machine-specific way to split large constants. We use
4877 the first pseudo-reg (one of the virtual regs) as a placeholder;
4878 it will not remain in the result. */
4879 if (GET_CODE (XEXP (x, 0)) == PLUS
4880 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
4881 && ! memory_address_addr_space_p (GET_MODE (x), XEXP (x, 0),
4882 MEM_ADDR_SPACE (x)))
4884 rtx reg = regno_reg_rtx[FIRST_PSEUDO_REGISTER];
4885 rtx_insn *seq = combine_split_insns (gen_rtx_SET (reg, XEXP (x, 0)),
4886 subst_insn);
4888 /* This should have produced two insns, each of which sets our
4889 placeholder. If the source of the second is a valid address,
4890 we can put both sources together and make a split point
4891 in the middle. */
4893 if (seq
4894 && NEXT_INSN (seq) != NULL_RTX
4895 && NEXT_INSN (NEXT_INSN (seq)) == NULL_RTX
4896 && NONJUMP_INSN_P (seq)
4897 && GET_CODE (PATTERN (seq)) == SET
4898 && SET_DEST (PATTERN (seq)) == reg
4899 && ! reg_mentioned_p (reg,
4900 SET_SRC (PATTERN (seq)))
4901 && NONJUMP_INSN_P (NEXT_INSN (seq))
4902 && GET_CODE (PATTERN (NEXT_INSN (seq))) == SET
4903 && SET_DEST (PATTERN (NEXT_INSN (seq))) == reg
4904 && memory_address_addr_space_p
4905 (GET_MODE (x), SET_SRC (PATTERN (NEXT_INSN (seq))),
4906 MEM_ADDR_SPACE (x)))
4908 rtx src1 = SET_SRC (PATTERN (seq));
4909 rtx src2 = SET_SRC (PATTERN (NEXT_INSN (seq)));
4911 /* Replace the placeholder in SRC2 with SRC1. If we can
4912 find where in SRC2 it was placed, that can become our
4913 split point and we can replace this address with SRC2.
4914 Just try two obvious places. */
4916 src2 = replace_rtx (src2, reg, src1);
4917 split = 0;
4918 if (XEXP (src2, 0) == src1)
4919 split = &XEXP (src2, 0);
4920 else if (GET_RTX_FORMAT (GET_CODE (XEXP (src2, 0)))[0] == 'e'
4921 && XEXP (XEXP (src2, 0), 0) == src1)
4922 split = &XEXP (XEXP (src2, 0), 0);
4924 if (split)
4926 SUBST (XEXP (x, 0), src2);
4927 return split;
4931 /* If that didn't work, perhaps the first operand is complex and
4932 needs to be computed separately, so make a split point there.
4933 This will occur on machines that just support REG + CONST
4934 and have a constant moved through some previous computation. */
4936 else if (!OBJECT_P (XEXP (XEXP (x, 0), 0))
4937 && ! (GET_CODE (XEXP (XEXP (x, 0), 0)) == SUBREG
4938 && OBJECT_P (SUBREG_REG (XEXP (XEXP (x, 0), 0)))))
4939 return &XEXP (XEXP (x, 0), 0);
4942 /* If we have a PLUS whose first operand is complex, try computing it
4943 separately by making a split there. */
4944 if (GET_CODE (XEXP (x, 0)) == PLUS
4945 && ! memory_address_addr_space_p (GET_MODE (x), XEXP (x, 0),
4946 MEM_ADDR_SPACE (x))
4947 && ! OBJECT_P (XEXP (XEXP (x, 0), 0))
4948 && ! (GET_CODE (XEXP (XEXP (x, 0), 0)) == SUBREG
4949 && OBJECT_P (SUBREG_REG (XEXP (XEXP (x, 0), 0)))))
4950 return &XEXP (XEXP (x, 0), 0);
4951 break;
4953 case SET:
4954 /* If SET_DEST is CC0 and SET_SRC is not an operand, a COMPARE, or a
4955 ZERO_EXTRACT, the most likely reason why this doesn't match is that
4956 we need to put the operand into a register. So split at that
4957 point. */
4959 if (SET_DEST (x) == cc0_rtx
4960 && GET_CODE (SET_SRC (x)) != COMPARE
4961 && GET_CODE (SET_SRC (x)) != ZERO_EXTRACT
4962 && !OBJECT_P (SET_SRC (x))
4963 && ! (GET_CODE (SET_SRC (x)) == SUBREG
4964 && OBJECT_P (SUBREG_REG (SET_SRC (x)))))
4965 return &SET_SRC (x);
4967 /* See if we can split SET_SRC as it stands. */
4968 split = find_split_point (&SET_SRC (x), insn, true);
4969 if (split && split != &SET_SRC (x))
4970 return split;
4972 /* See if we can split SET_DEST as it stands. */
4973 split = find_split_point (&SET_DEST (x), insn, false);
4974 if (split && split != &SET_DEST (x))
4975 return split;
4977 /* See if this is a bitfield assignment with everything constant. If
4978 so, this is an IOR of an AND, so split it into that. */
4979 if (GET_CODE (SET_DEST (x)) == ZERO_EXTRACT
4980 && is_a <scalar_int_mode> (GET_MODE (XEXP (SET_DEST (x), 0)),
4981 &inner_mode)
4982 && HWI_COMPUTABLE_MODE_P (inner_mode)
4983 && CONST_INT_P (XEXP (SET_DEST (x), 1))
4984 && CONST_INT_P (XEXP (SET_DEST (x), 2))
4985 && CONST_INT_P (SET_SRC (x))
4986 && ((INTVAL (XEXP (SET_DEST (x), 1))
4987 + INTVAL (XEXP (SET_DEST (x), 2)))
4988 <= GET_MODE_PRECISION (inner_mode))
4989 && ! side_effects_p (XEXP (SET_DEST (x), 0)))
4991 HOST_WIDE_INT pos = INTVAL (XEXP (SET_DEST (x), 2));
4992 unsigned HOST_WIDE_INT len = INTVAL (XEXP (SET_DEST (x), 1));
4993 unsigned HOST_WIDE_INT src = INTVAL (SET_SRC (x));
4994 rtx dest = XEXP (SET_DEST (x), 0);
4995 unsigned HOST_WIDE_INT mask
4996 = (HOST_WIDE_INT_1U << len) - 1;
4997 rtx or_mask;
4999 if (BITS_BIG_ENDIAN)
5000 pos = GET_MODE_PRECISION (inner_mode) - len - pos;
5002 or_mask = gen_int_mode (src << pos, inner_mode);
5003 if (src == mask)
5004 SUBST (SET_SRC (x),
5005 simplify_gen_binary (IOR, inner_mode, dest, or_mask));
5006 else
5008 rtx negmask = gen_int_mode (~(mask << pos), inner_mode);
5009 SUBST (SET_SRC (x),
5010 simplify_gen_binary (IOR, inner_mode,
5011 simplify_gen_binary (AND, inner_mode,
5012 dest, negmask),
5013 or_mask));
5016 SUBST (SET_DEST (x), dest);
5018 split = find_split_point (&SET_SRC (x), insn, true);
5019 if (split && split != &SET_SRC (x))
5020 return split;
5023 /* Otherwise, see if this is an operation that we can split into two.
5024 If so, try to split that. */
5025 code = GET_CODE (SET_SRC (x));
5027 switch (code)
5029 case AND:
5030 /* If we are AND'ing with a large constant that is only a single
5031 bit and the result is only being used in a context where we
5032 need to know if it is zero or nonzero, replace it with a bit
5033 extraction. This will avoid the large constant, which might
5034 have taken more than one insn to make. If the constant were
5035 not a valid argument to the AND but took only one insn to make,
5036 this is no worse, but if it took more than one insn, it will
5037 be better. */
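/* E.g. (set Y (and X (const_int 1048576))) whose only use is a
   comparison of Y against zero is replaced by an extraction of
   just bit 20 of X.  */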
5039 if (CONST_INT_P (XEXP (SET_SRC (x), 1))
5040 && REG_P (XEXP (SET_SRC (x), 0))
5041 && (pos = exact_log2 (UINTVAL (XEXP (SET_SRC (x), 1)))) >= 7
5042 && REG_P (SET_DEST (x))
5043 && (split = find_single_use (SET_DEST (x), insn, NULL)) != 0
5044 && (GET_CODE (*split) == EQ || GET_CODE (*split) == NE)
5045 && XEXP (*split, 0) == SET_DEST (x)
5046 && XEXP (*split, 1) == const0_rtx)
5048 rtx extraction = make_extraction (GET_MODE (SET_DEST (x)),
5049 XEXP (SET_SRC (x), 0),
5050 pos, NULL_RTX, 1, 1, 0, 0);
5051 if (extraction != 0)
5053 SUBST (SET_SRC (x), extraction);
5054 return find_split_point (loc, insn, false);
5057 break;
5059 case NE:
5060 /* If STORE_FLAG_VALUE is -1, this is (NE X 0); if only one bit of X
5061 is known to be on, it can be converted into a NEG of a shift. */
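/* E.g. if only bit 3 of X can be nonzero, (ne X (const_int 0))
   becomes (neg (lshiftrt X (const_int 3))), i.e. 0 or -1.  */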
5062 if (STORE_FLAG_VALUE == -1 && XEXP (SET_SRC (x), 1) == const0_rtx
5063 && GET_MODE (SET_SRC (x)) == GET_MODE (XEXP (SET_SRC (x), 0))
5064 && 1 <= (pos = exact_log2
5065 (nonzero_bits (XEXP (SET_SRC (x), 0),
5066 GET_MODE (XEXP (SET_SRC (x), 0))))))
5068 machine_mode mode = GET_MODE (XEXP (SET_SRC (x), 0));
5070 SUBST (SET_SRC (x),
5071 gen_rtx_NEG (mode,
5072 gen_rtx_LSHIFTRT (mode,
5073 XEXP (SET_SRC (x), 0),
5074 GEN_INT (pos))));
5076 split = find_split_point (&SET_SRC (x), insn, true);
5077 if (split && split != &SET_SRC (x))
5078 return split;
5080 break;
5082 case SIGN_EXTEND:
5083 inner = XEXP (SET_SRC (x), 0);
5085 /* We can't optimize if either mode is a partial integer
5086 mode as we don't know how many bits are significant
5087 in those modes. */
5088 if (!is_int_mode (GET_MODE (inner), &inner_mode)
5089 || GET_MODE_CLASS (GET_MODE (SET_SRC (x))) == MODE_PARTIAL_INT)
5090 break;
5092 pos = 0;
5093 len = GET_MODE_PRECISION (inner_mode);
5094 unsignedp = 0;
5095 break;
5097 case SIGN_EXTRACT:
5098 case ZERO_EXTRACT:
5099 if (is_a <scalar_int_mode> (GET_MODE (XEXP (SET_SRC (x), 0)),
5100 &inner_mode)
5101 && CONST_INT_P (XEXP (SET_SRC (x), 1))
5102 && CONST_INT_P (XEXP (SET_SRC (x), 2)))
5104 inner = XEXP (SET_SRC (x), 0);
5105 len = INTVAL (XEXP (SET_SRC (x), 1));
5106 pos = INTVAL (XEXP (SET_SRC (x), 2));
5108 if (BITS_BIG_ENDIAN)
5109 pos = GET_MODE_PRECISION (inner_mode) - len - pos;
5110 unsignedp = (code == ZERO_EXTRACT);
5112 break;
5114 default:
5115 break;
5118 if (len && pos >= 0
5119 && pos + len <= GET_MODE_PRECISION (GET_MODE (inner))
5120 && is_a <scalar_int_mode> (GET_MODE (SET_SRC (x)), &mode))
5122 /* For unsigned, we have a choice of a shift followed by an
5123 AND or two shifts. Use two shifts for field sizes where the
5124 constant might be too large. We assume here that we can
5125 always at least get 8-bit constants in an AND insn, which is
5126 true for every current RISC. */
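/* E.g. an unsigned 8-bit field at bit 16 is rewritten as
   (and (lshiftrt X (const_int 16)) (const_int 255)), where X is the
   low part of INNER; wider or signed fields instead use an ASHIFT
   followed by a right shift.  */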
5128 if (unsignedp && len <= 8)
5130 unsigned HOST_WIDE_INT mask
5131 = (HOST_WIDE_INT_1U << len) - 1;
5132 SUBST (SET_SRC (x),
5133 gen_rtx_AND (mode,
5134 gen_rtx_LSHIFTRT
5135 (mode, gen_lowpart (mode, inner),
5136 GEN_INT (pos)),
5137 gen_int_mode (mask, mode)));
5139 split = find_split_point (&SET_SRC (x), insn, true);
5140 if (split && split != &SET_SRC (x))
5141 return split;
5143 else
5145 SUBST (SET_SRC (x),
5146 gen_rtx_fmt_ee
5147 (unsignedp ? LSHIFTRT : ASHIFTRT, mode,
5148 gen_rtx_ASHIFT (mode,
5149 gen_lowpart (mode, inner),
5150 GEN_INT (GET_MODE_PRECISION (mode)
5151 - len - pos)),
5152 GEN_INT (GET_MODE_PRECISION (mode) - len)));
5154 split = find_split_point (&SET_SRC (x), insn, true);
5155 if (split && split != &SET_SRC (x))
5156 return split;
5160 /* See if this is a simple operation with a constant as the second
5161 operand. It might be that this constant is out of range and hence
5162 could be used as a split point. */
5163 if (BINARY_P (SET_SRC (x))
5164 && CONSTANT_P (XEXP (SET_SRC (x), 1))
5165 && (OBJECT_P (XEXP (SET_SRC (x), 0))
5166 || (GET_CODE (XEXP (SET_SRC (x), 0)) == SUBREG
5167 && OBJECT_P (SUBREG_REG (XEXP (SET_SRC (x), 0))))))
5168 return &XEXP (SET_SRC (x), 1);
5170 /* Finally, see if this is a simple operation with its first operand
5171 not in a register. The operation might require this operand in a
5172 register, so return it as a split point. We can always do this
5173 because if the first operand were another operation, we would have
5174 already found it as a split point. */
5175 if ((BINARY_P (SET_SRC (x)) || UNARY_P (SET_SRC (x)))
5176 && ! register_operand (XEXP (SET_SRC (x), 0), VOIDmode))
5177 return &XEXP (SET_SRC (x), 0);
5179 return 0;
5181 case AND:
5182 case IOR:
5183 /* We write NOR as (and (not A) (not B)), but if we don't have a NOR,
5184 it is better to write this as (not (ior A B)) so we can split it.
5185 Similarly for IOR. */
5186 if (GET_CODE (XEXP (x, 0)) == NOT && GET_CODE (XEXP (x, 1)) == NOT)
5188 SUBST (*loc,
5189 gen_rtx_NOT (GET_MODE (x),
5190 gen_rtx_fmt_ee (code == IOR ? AND : IOR,
5191 GET_MODE (x),
5192 XEXP (XEXP (x, 0), 0),
5193 XEXP (XEXP (x, 1), 0))));
5194 return find_split_point (loc, insn, set_src);
5197 /* Many RISC machines have a large set of logical insns. If the
5198 second operand is a NOT, put it first so we will try to split the
5199 other operand first. */
5200 if (GET_CODE (XEXP (x, 1)) == NOT)
5202 rtx tem = XEXP (x, 0);
5203 SUBST (XEXP (x, 0), XEXP (x, 1));
5204 SUBST (XEXP (x, 1), tem);
5206 break;
5208 case PLUS:
5209 case MINUS:
5210 /* Canonicalization can produce (minus A (mult B C)), where C is a
5211 constant. It may be better to try splitting (plus (mult B -C) A)
5212 instead if this isn't a multiply by a power of two. */
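/* Illustrative sketch (hypothetical constant): with C == 5,
   (minus A (mult B (const_int 5))) is re-tried as
   (plus (mult B (const_int -5)) A), which may expose a
   multiply-add style split on some targets.  */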
5213 if (set_src && code == MINUS && GET_CODE (XEXP (x, 1)) == MULT
5214 && GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT
5215 && !pow2p_hwi (INTVAL (XEXP (XEXP (x, 1), 1))))
5217 machine_mode mode = GET_MODE (x);
5218 unsigned HOST_WIDE_INT this_int = INTVAL (XEXP (XEXP (x, 1), 1));
5219 HOST_WIDE_INT other_int = trunc_int_for_mode (-this_int, mode);
5220 SUBST (*loc, gen_rtx_PLUS (mode,
5221 gen_rtx_MULT (mode,
5222 XEXP (XEXP (x, 1), 0),
5223 gen_int_mode (other_int,
5224 mode)),
5225 XEXP (x, 0)));
5226 return find_split_point (loc, insn, set_src);
5229 /* Split at a multiply-accumulate instruction. However if this is
5230 the SET_SRC, we likely do not have such an instruction and it's
5231 worthless to try this split. */
5232 if (!set_src
5233 && (GET_CODE (XEXP (x, 0)) == MULT
5234 || (GET_CODE (XEXP (x, 0)) == ASHIFT
5235 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)))
5236 return loc;
5238 default:
5239 break;
5242 /* Otherwise, select our actions depending on our rtx class. */
5243 switch (GET_RTX_CLASS (code))
5245 case RTX_BITFIELD_OPS: /* This is ZERO_EXTRACT and SIGN_EXTRACT. */
5246 case RTX_TERNARY:
5247 split = find_split_point (&XEXP (x, 2), insn, false);
5248 if (split)
5249 return split;
5250 /* fall through */
5251 case RTX_BIN_ARITH:
5252 case RTX_COMM_ARITH:
5253 case RTX_COMPARE:
5254 case RTX_COMM_COMPARE:
5255 split = find_split_point (&XEXP (x, 1), insn, false);
5256 if (split)
5257 return split;
5258 /* fall through */
5259 case RTX_UNARY:
5260 /* Some machines have (and (shift ...) ...) insns. If X is not
5261 an AND, but XEXP (X, 0) is, use it as our split point. */
5262 if (GET_CODE (x) != AND && GET_CODE (XEXP (x, 0)) == AND)
5263 return &XEXP (x, 0);
5265 split = find_split_point (&XEXP (x, 0), insn, false);
5266 if (split)
5267 return split;
5268 return loc;
5270 default:
5271 /* Otherwise, we don't have a split point. */
5272 return 0;
5276 /* Throughout X, replace FROM with TO, and return the result.
5277 The result is TO if X is FROM;
5278 otherwise the result is X, but its contents may have been modified.
5279 If they were modified, a record was made in undobuf so that
5280 undo_all will (among other things) return X to its original state.
5282 If the number of changes necessary is too great to record for undoing,
5283 the excess changes are not made, so the result is invalid.
5284 The changes already made can still be undone.
5285 undobuf.num_undo is incremented for such changes, so by testing it
5286 the caller can tell whether the result is valid.
5288 `n_occurrences' is incremented each time FROM is replaced.
5290 IN_DEST is nonzero if we are processing the SET_DEST of a SET.
5292 IN_COND is nonzero if we are at the top level of a condition.
5294 UNIQUE_COPY is nonzero if each substitution must be unique. We do this
5295 by copying if `n_occurrences' is nonzero. */
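/* Illustrative sketch of a typical substitution (hypothetical
   registers): with FROM == (reg:SI 100) and
   TO == (plus:SI (reg:SI 101) (const_int 4)), applying subst to
   (set (reg:SI 102) (mult:SI (reg:SI 100) (reg:SI 103))) yields
   (set (reg:SI 102) (mult:SI (plus:SI (reg:SI 101) (const_int 4))
                              (reg:SI 103))),
   with every change recorded via SUBST so undo_all can revert it.  */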
5297 static rtx
5298 subst (rtx x, rtx from, rtx to, int in_dest, int in_cond, int unique_copy)
5300 enum rtx_code code = GET_CODE (x);
5301 machine_mode op0_mode = VOIDmode;
5302 const char *fmt;
5303 int len, i;
5304 rtx new_rtx;
5306 /* Two expressions are equal if they are identical copies of a shared
5307 RTX or if they are both registers with the same register number
5308 and mode. */
5310 #define COMBINE_RTX_EQUAL_P(X,Y) \
5311 ((X) == (Y) \
5312 || (REG_P (X) && REG_P (Y) \
5313 && REGNO (X) == REGNO (Y) && GET_MODE (X) == GET_MODE (Y)))
5315 /* Do not substitute into clobbers of regs -- this will never result in
5316 valid RTL. */
5317 if (GET_CODE (x) == CLOBBER && REG_P (XEXP (x, 0)))
5318 return x;
5320 if (! in_dest && COMBINE_RTX_EQUAL_P (x, from))
5322 n_occurrences++;
5323 return (unique_copy && n_occurrences > 1 ? copy_rtx (to) : to);
5326 /* If X and FROM are the same register but different modes, they
5327 will not have been seen as equal above. However, the log links code
5328 will make a LOG_LINKS entry for that case. If we do nothing, we
5329 will try to rerecognize our original insn and, when it succeeds,
5330 we will delete the feeding insn, which is incorrect.
5332 So force this insn not to match in this (rare) case. */
5333 if (! in_dest && code == REG && REG_P (from)
5334 && reg_overlap_mentioned_p (x, from))
5335 return gen_rtx_CLOBBER (GET_MODE (x), const0_rtx);
5337 /* If this is an object, we are done unless it is a MEM or LO_SUM, both
5338 of which may contain things that can be combined. */
5339 if (code != MEM && code != LO_SUM && OBJECT_P (x))
5340 return x;
5342 /* It is possible to have a subexpression appear twice in the insn.
5343 Suppose that FROM is a register that appears within TO.
5344 Then, after that subexpression has been scanned once by `subst',
5345 the second time it is scanned, TO may be found. If we were
5346 to scan TO here, we would find FROM within it and create a
5347 self-referential rtl structure, which is completely wrong. */
5348 if (COMBINE_RTX_EQUAL_P (x, to))
5349 return to;
5351 /* Parallel asm_operands need special attention because all of the
5352 inputs are shared across the arms. Furthermore, unsharing the
5353 rtl results in recognition failures. Failure to handle this case
5354 specially can result in circular rtl.
5356 Solve this by doing a normal pass across the first entry of the
5357 parallel, and only processing the SET_DESTs of the subsequent
5358 entries. Ug. */
5360 if (code == PARALLEL
5361 && GET_CODE (XVECEXP (x, 0, 0)) == SET
5362 && GET_CODE (SET_SRC (XVECEXP (x, 0, 0))) == ASM_OPERANDS)
5364 new_rtx = subst (XVECEXP (x, 0, 0), from, to, 0, 0, unique_copy);
5366 /* If this substitution failed, this whole thing fails. */
5367 if (GET_CODE (new_rtx) == CLOBBER
5368 && XEXP (new_rtx, 0) == const0_rtx)
5369 return new_rtx;
5371 SUBST (XVECEXP (x, 0, 0), new_rtx);
5373 for (i = XVECLEN (x, 0) - 1; i >= 1; i--)
5375 rtx dest = SET_DEST (XVECEXP (x, 0, i));
5377 if (!REG_P (dest)
5378 && GET_CODE (dest) != CC0
5379 && GET_CODE (dest) != PC)
5381 new_rtx = subst (dest, from, to, 0, 0, unique_copy);
5383 /* If this substitution failed, this whole thing fails. */
5384 if (GET_CODE (new_rtx) == CLOBBER
5385 && XEXP (new_rtx, 0) == const0_rtx)
5386 return new_rtx;
5388 SUBST (SET_DEST (XVECEXP (x, 0, i)), new_rtx);
5392 else
5394 len = GET_RTX_LENGTH (code);
5395 fmt = GET_RTX_FORMAT (code);
5397 /* We don't need to process a SET_DEST that is a register, CC0,
5398 or PC, so set up to skip this common case. All other cases
5399 where we want to suppress replacing something inside a
5400 SET_SRC are handled via the IN_DEST operand. */
5401 if (code == SET
5402 && (REG_P (SET_DEST (x))
5403 || GET_CODE (SET_DEST (x)) == CC0
5404 || GET_CODE (SET_DEST (x)) == PC))
5405 fmt = "ie";
5407 /* Trying to simplify the operands of a widening MULT is not likely
5408 to create RTL matching a machine insn. */
5409 if (code == MULT
5410 && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
5411 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND)
5412 && (GET_CODE (XEXP (x, 1)) == ZERO_EXTEND
5413 || GET_CODE (XEXP (x, 1)) == SIGN_EXTEND)
5414 && REG_P (XEXP (XEXP (x, 0), 0))
5415 && REG_P (XEXP (XEXP (x, 1), 0))
5416 && from == to)
5417 return x;
5420 /* Get the mode of operand 0 in case X is now a SIGN_EXTEND of a
5421 constant. */
5422 if (fmt[0] == 'e')
5423 op0_mode = GET_MODE (XEXP (x, 0));
5425 for (i = 0; i < len; i++)
5427 if (fmt[i] == 'E')
5429 int j;
5430 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
5432 if (COMBINE_RTX_EQUAL_P (XVECEXP (x, i, j), from))
5434 new_rtx = (unique_copy && n_occurrences
5435 ? copy_rtx (to) : to);
5436 n_occurrences++;
5438 else
5440 new_rtx = subst (XVECEXP (x, i, j), from, to, 0, 0,
5441 unique_copy);
5443 /* If this substitution failed, this whole thing
5444 fails. */
5445 if (GET_CODE (new_rtx) == CLOBBER
5446 && XEXP (new_rtx, 0) == const0_rtx)
5447 return new_rtx;
5450 SUBST (XVECEXP (x, i, j), new_rtx);
5453 else if (fmt[i] == 'e')
5455 /* If this is a register being set, ignore it. */
5456 new_rtx = XEXP (x, i);
5457 if (in_dest
5458 && i == 0
5459 && (((code == SUBREG || code == ZERO_EXTRACT)
5460 && REG_P (new_rtx))
5461 || code == STRICT_LOW_PART))
5464 else if (COMBINE_RTX_EQUAL_P (XEXP (x, i), from))
5466 /* In general, don't install a subreg involving two
5467 modes not tieable. It can worsen register
5468 allocation, and can even make invalid reload
5469 insns, since the reg inside may need to be copied
5470 from in the outside mode, and that may be invalid
5471 if it is an fp reg copied in integer mode.
5473 We allow two exceptions to this: It is valid if
5474 it is inside another SUBREG and the mode of that
5475 SUBREG and the mode of the inside of TO is
5476 tieable and it is valid if X is a SET that copies
5477 FROM to CC0. */
5479 if (GET_CODE (to) == SUBREG
5480 && !targetm.modes_tieable_p (GET_MODE (to),
5481 GET_MODE (SUBREG_REG (to)))
5482 && ! (code == SUBREG
5483 && (targetm.modes_tieable_p
5484 (GET_MODE (x), GET_MODE (SUBREG_REG (to)))))
5485 && (!HAVE_cc0
5486 || (! (code == SET
5487 && i == 1
5488 && XEXP (x, 0) == cc0_rtx))))
5489 return gen_rtx_CLOBBER (VOIDmode, const0_rtx);
5491 if (code == SUBREG
5492 && REG_P (to)
5493 && REGNO (to) < FIRST_PSEUDO_REGISTER
5494 && simplify_subreg_regno (REGNO (to), GET_MODE (to),
5495 SUBREG_BYTE (x),
5496 GET_MODE (x)) < 0)
5497 return gen_rtx_CLOBBER (VOIDmode, const0_rtx);
5499 new_rtx = (unique_copy && n_occurrences ? copy_rtx (to) : to);
5500 n_occurrences++;
5502 else
5503 /* If we are in a SET_DEST, suppress most cases unless we
5504 have gone inside a MEM, in which case we want to
5505 simplify the address. We assume here that things that
5506 are actually part of the destination have their inner
5507 parts in the first expression. This is true for SUBREG,
5508 STRICT_LOW_PART, and ZERO_EXTRACT, which are the only
5509 things aside from REG and MEM that should appear in a
5510 SET_DEST. */
5511 new_rtx = subst (XEXP (x, i), from, to,
5512 (((in_dest
5513 && (code == SUBREG || code == STRICT_LOW_PART
5514 || code == ZERO_EXTRACT))
5515 || code == SET)
5516 && i == 0),
5517 code == IF_THEN_ELSE && i == 0,
5518 unique_copy);
5520 /* If we found that we will have to reject this combination,
5521 indicate that by returning the CLOBBER ourselves, rather than
5522 an expression containing it. This will speed things up as
5523 well as prevent accidents where two CLOBBERs are considered
5524 to be equal, thus producing an incorrect simplification. */
5526 if (GET_CODE (new_rtx) == CLOBBER && XEXP (new_rtx, 0) == const0_rtx)
5527 return new_rtx;
5529 if (GET_CODE (x) == SUBREG && CONST_SCALAR_INT_P (new_rtx))
5531 machine_mode mode = GET_MODE (x);
5533 x = simplify_subreg (GET_MODE (x), new_rtx,
5534 GET_MODE (SUBREG_REG (x)),
5535 SUBREG_BYTE (x));
5536 if (! x)
5537 x = gen_rtx_CLOBBER (mode, const0_rtx);
5539 else if (CONST_SCALAR_INT_P (new_rtx)
5540 && GET_CODE (x) == ZERO_EXTEND)
5542 x = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
5543 new_rtx, GET_MODE (XEXP (x, 0)));
5544 gcc_assert (x);
5546 else
5547 SUBST (XEXP (x, i), new_rtx);
5552 /* Check if we are loading something from the constant pool via float
5553 extension; in this case we would undo compress_float_constant
5554 optimization and degenerate constant load to an immediate value. */
5555 if (GET_CODE (x) == FLOAT_EXTEND
5556 && MEM_P (XEXP (x, 0))
5557 && MEM_READONLY_P (XEXP (x, 0)))
5559 rtx tmp = avoid_constant_pool_reference (x);
5560 if (x != tmp)
5561 return x;
5564 /* Try to simplify X. If the simplification changed the code, it is likely
5565 that further simplification will help, so loop, but limit the number
5566 of repetitions that will be performed. */
5568 for (i = 0; i < 4; i++)
5570 /* If X is sufficiently simple, don't bother trying to do anything
5571 with it. */
5572 if (code != CONST_INT && code != REG && code != CLOBBER)
5573 x = combine_simplify_rtx (x, op0_mode, in_dest, in_cond);
5575 if (GET_CODE (x) == code)
5576 break;
5578 code = GET_CODE (x);
5580 /* We no longer know the original mode of operand 0 since we
5581 have changed the form of X.  */
5582 op0_mode = VOIDmode;
5585 return x;
5588 /* If X is a commutative operation whose operands are not in the canonical
5589 order, use substitutions to swap them. */
5591 static void
5592 maybe_swap_commutative_operands (rtx x)
5594 if (COMMUTATIVE_ARITH_P (x)
5595 && swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
5597 rtx temp = XEXP (x, 0);
5598 SUBST (XEXP (x, 0), XEXP (x, 1));
5599 SUBST (XEXP (x, 1), temp);
5603 /* Simplify X, a piece of RTL. We just operate on the expression at the
5604 outer level; call `subst' to simplify recursively. Return the new
5605 expression.
5607 OP0_MODE is the original mode of XEXP (x, 0). IN_DEST is nonzero
5608 if we are inside a SET_DEST. IN_COND is nonzero if we are at the top level
5609 of a condition. */
5611 static rtx
5612 combine_simplify_rtx (rtx x, machine_mode op0_mode, int in_dest,
5613 int in_cond)
5615 enum rtx_code code = GET_CODE (x);
5616 machine_mode mode = GET_MODE (x);
5617 scalar_int_mode int_mode;
5618 rtx temp;
5619 int i;
5621 /* If this is a commutative operation, put a constant last and a complex
5622 expression first. We don't need to do this for comparisons here. */
5623 maybe_swap_commutative_operands (x);
5625 /* Try to fold this expression in case we have constants that weren't
5626 present before. */
5627 temp = 0;
5628 switch (GET_RTX_CLASS (code))
5630 case RTX_UNARY:
5631 if (op0_mode == VOIDmode)
5632 op0_mode = GET_MODE (XEXP (x, 0));
5633 temp = simplify_unary_operation (code, mode, XEXP (x, 0), op0_mode);
5634 break;
5635 case RTX_COMPARE:
5636 case RTX_COMM_COMPARE:
5638 machine_mode cmp_mode = GET_MODE (XEXP (x, 0));
5639 if (cmp_mode == VOIDmode)
5641 cmp_mode = GET_MODE (XEXP (x, 1));
5642 if (cmp_mode == VOIDmode)
5643 cmp_mode = op0_mode;
5645 temp = simplify_relational_operation (code, mode, cmp_mode,
5646 XEXP (x, 0), XEXP (x, 1));
5648 break;
5649 case RTX_COMM_ARITH:
5650 case RTX_BIN_ARITH:
5651 temp = simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
5652 break;
5653 case RTX_BITFIELD_OPS:
5654 case RTX_TERNARY:
5655 temp = simplify_ternary_operation (code, mode, op0_mode, XEXP (x, 0),
5656 XEXP (x, 1), XEXP (x, 2));
5657 break;
5658 default:
5659 break;
5662 if (temp)
5664 x = temp;
5665 code = GET_CODE (temp);
5666 op0_mode = VOIDmode;
5667 mode = GET_MODE (temp);
5670 /* If this is a simple operation applied to an IF_THEN_ELSE, try
5671 applying it to the arms of the IF_THEN_ELSE. This often simplifies
5672 things. Check for cases where both arms are testing the same
5673 condition.
5675 Don't do anything if all operands are very simple. */
5677 if ((BINARY_P (x)
5678 && ((!OBJECT_P (XEXP (x, 0))
5679 && ! (GET_CODE (XEXP (x, 0)) == SUBREG
5680 && OBJECT_P (SUBREG_REG (XEXP (x, 0)))))
5681 || (!OBJECT_P (XEXP (x, 1))
5682 && ! (GET_CODE (XEXP (x, 1)) == SUBREG
5683 && OBJECT_P (SUBREG_REG (XEXP (x, 1)))))))
5684 || (UNARY_P (x)
5685 && (!OBJECT_P (XEXP (x, 0))
5686 && ! (GET_CODE (XEXP (x, 0)) == SUBREG
5687 && OBJECT_P (SUBREG_REG (XEXP (x, 0)))))))
5689 rtx cond, true_rtx, false_rtx;
5691 cond = if_then_else_cond (x, &true_rtx, &false_rtx);
5692 if (cond != 0
5693 /* If everything is a comparison, what we have is highly unlikely
5694 to be simpler, so don't use it. */
5695 && ! (COMPARISON_P (x)
5696 && (COMPARISON_P (true_rtx) || COMPARISON_P (false_rtx))))
5698 rtx cop1 = const0_rtx;
5699 enum rtx_code cond_code = simplify_comparison (NE, &cond, &cop1);
5701 if (cond_code == NE && COMPARISON_P (cond))
5702 return x;
5704 /* Simplify the alternative arms; this may collapse the true and
5705 false arms to store-flag values. Be careful to use copy_rtx
5706 here since true_rtx or false_rtx might share RTL with x as a
5707 result of the if_then_else_cond call above. */
5708 true_rtx = subst (copy_rtx (true_rtx), pc_rtx, pc_rtx, 0, 0, 0);
5709 false_rtx = subst (copy_rtx (false_rtx), pc_rtx, pc_rtx, 0, 0, 0);
5711 /* If true_rtx and false_rtx are not general_operands, an if_then_else
5712 is unlikely to be simpler. */
5713 if (general_operand (true_rtx, VOIDmode)
5714 && general_operand (false_rtx, VOIDmode))
5716 enum rtx_code reversed;
5718 /* Restarting if we generate a store-flag expression will cause
5719 us to loop. Just drop through in this case. */
5721 /* If the result values are STORE_FLAG_VALUE and zero, we can
5722 just make the comparison operation. */
5723 if (true_rtx == const_true_rtx && false_rtx == const0_rtx)
5724 x = simplify_gen_relational (cond_code, mode, VOIDmode,
5725 cond, cop1);
5726 else if (true_rtx == const0_rtx && false_rtx == const_true_rtx
5727 && ((reversed = reversed_comparison_code_parts
5728 (cond_code, cond, cop1, NULL))
5729 != UNKNOWN))
5730 x = simplify_gen_relational (reversed, mode, VOIDmode,
5731 cond, cop1);
5733 /* Likewise, we can make the negate of a comparison operation
5734 if the result values are - STORE_FLAG_VALUE and zero. */
5735 else if (CONST_INT_P (true_rtx)
5736 && INTVAL (true_rtx) == - STORE_FLAG_VALUE
5737 && false_rtx == const0_rtx)
5738 x = simplify_gen_unary (NEG, mode,
5739 simplify_gen_relational (cond_code,
5740 mode, VOIDmode,
5741 cond, cop1),
5742 mode);
5743 else if (CONST_INT_P (false_rtx)
5744 && INTVAL (false_rtx) == - STORE_FLAG_VALUE
5745 && true_rtx == const0_rtx
5746 && ((reversed = reversed_comparison_code_parts
5747 (cond_code, cond, cop1, NULL))
5748 != UNKNOWN))
5749 x = simplify_gen_unary (NEG, mode,
5750 simplify_gen_relational (reversed,
5751 mode, VOIDmode,
5752 cond, cop1),
5753 mode);
5754 else
5755 return gen_rtx_IF_THEN_ELSE (mode,
5756 simplify_gen_relational (cond_code,
5757 mode,
5758 VOIDmode,
5759 cond,
5760 cop1),
5761 true_rtx, false_rtx);
5763 code = GET_CODE (x);
5764 op0_mode = VOIDmode;
5769 /* First see if we can apply the inverse distributive law. */
5770 if (code == PLUS || code == MINUS
5771 || code == AND || code == IOR || code == XOR)
5773 x = apply_distributive_law (x);
5774 code = GET_CODE (x);
5775 op0_mode = VOIDmode;
5778 /* If CODE is an associative operation not otherwise handled, see if we
5779 can associate some operands. This can win if they are constants or
5780 if they are logically related (i.e. (a & b) & a). */
5781 if ((code == PLUS || code == MINUS || code == MULT || code == DIV
5782 || code == AND || code == IOR || code == XOR
5783 || code == SMAX || code == SMIN || code == UMAX || code == UMIN)
5784 && ((INTEGRAL_MODE_P (mode) && code != DIV)
5785 || (flag_associative_math && FLOAT_MODE_P (mode))))
5787 if (GET_CODE (XEXP (x, 0)) == code)
5789 rtx other = XEXP (XEXP (x, 0), 0);
5790 rtx inner_op0 = XEXP (XEXP (x, 0), 1);
5791 rtx inner_op1 = XEXP (x, 1);
5792 rtx inner;
5794 /* Make sure we pass the constant operand if any as the second
5795 one if this is a commutative operation. */
5796 if (CONSTANT_P (inner_op0) && COMMUTATIVE_ARITH_P (x))
5797 std::swap (inner_op0, inner_op1);
5798 inner = simplify_binary_operation (code == MINUS ? PLUS
5799 : code == DIV ? MULT
5800 : code,
5801 mode, inner_op0, inner_op1);
5803 /* For commutative operations, try the other pair if that one
5804 didn't simplify. */
5805 if (inner == 0 && COMMUTATIVE_ARITH_P (x))
5807 other = XEXP (XEXP (x, 0), 1);
5808 inner = simplify_binary_operation (code, mode,
5809 XEXP (XEXP (x, 0), 0),
5810 XEXP (x, 1));
5813 if (inner)
5814 return simplify_gen_binary (code, mode, other, inner);
5818 /* A little bit of algebraic simplification here. */
5819 switch (code)
5821 case MEM:
5822 /* Ensure that our address has any ASHIFTs converted to MULT in case
5823 address-recognizing predicates are called later. */
5824 temp = make_compound_operation (XEXP (x, 0), MEM);
5825 SUBST (XEXP (x, 0), temp);
5826 break;
5828 case SUBREG:
5829 if (op0_mode == VOIDmode)
5830 op0_mode = GET_MODE (SUBREG_REG (x));
5832 /* See if this can be moved to simplify_subreg. */
5833 if (CONSTANT_P (SUBREG_REG (x))
5834 && subreg_lowpart_offset (mode, op0_mode) == SUBREG_BYTE (x)
5835 /* Don't call gen_lowpart if the inner mode
5836 is VOIDmode and we cannot simplify it, as SUBREG without
5837 inner mode is invalid. */
5838 && (GET_MODE (SUBREG_REG (x)) != VOIDmode
5839 || gen_lowpart_common (mode, SUBREG_REG (x))))
5840 return gen_lowpart (mode, SUBREG_REG (x));
5842 if (GET_MODE_CLASS (GET_MODE (SUBREG_REG (x))) == MODE_CC)
5843 break;
5845 rtx temp;
5846 temp = simplify_subreg (mode, SUBREG_REG (x), op0_mode,
5847 SUBREG_BYTE (x));
5848 if (temp)
5849 return temp;
5851 /* If op is known to have all lower bits zero, the result is zero. */
5852 scalar_int_mode int_mode, int_op0_mode;
5853 if (!in_dest
5854 && is_a <scalar_int_mode> (mode, &int_mode)
5855 && is_a <scalar_int_mode> (op0_mode, &int_op0_mode)
5856 && (GET_MODE_PRECISION (int_mode)
5857 < GET_MODE_PRECISION (int_op0_mode))
5858 && (subreg_lowpart_offset (int_mode, int_op0_mode)
5859 == SUBREG_BYTE (x))
5860 && HWI_COMPUTABLE_MODE_P (int_op0_mode)
5861 && (nonzero_bits (SUBREG_REG (x), int_op0_mode)
5862 & GET_MODE_MASK (int_mode)) == 0)
5863 return CONST0_RTX (int_mode);
5866 /* Don't change the mode of the MEM if that would change the meaning
5867 of the address. */
5868 if (MEM_P (SUBREG_REG (x))
5869 && (MEM_VOLATILE_P (SUBREG_REG (x))
5870 || mode_dependent_address_p (XEXP (SUBREG_REG (x), 0),
5871 MEM_ADDR_SPACE (SUBREG_REG (x)))))
5872 return gen_rtx_CLOBBER (mode, const0_rtx);
5874 /* Note that we cannot do any narrowing for non-constants since
5875 we might have been counting on using the fact that some bits were
5876 zero. We now do this in the SET. */
5878 break;
5880 case NEG:
5881 temp = expand_compound_operation (XEXP (x, 0));
5883 /* For C equal to the width of MODE minus 1, (neg (ashiftrt X C)) can be
5884 replaced by (lshiftrt X C). This will convert
5885 (neg (sign_extract X 1 Y)) to (zero_extract X 1 Y). */
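/* Worked example (illustrative only): in SImode, C == 31, so
   (neg (ashiftrt X (const_int 31))) is 1 when X is negative and 0
   otherwise, which is exactly (lshiftrt X (const_int 31)).  */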
5887 if (GET_CODE (temp) == ASHIFTRT
5888 && CONST_INT_P (XEXP (temp, 1))
5889 && INTVAL (XEXP (temp, 1)) == GET_MODE_PRECISION (mode) - 1)
5890 return simplify_shift_const (NULL_RTX, LSHIFTRT, mode, XEXP (temp, 0),
5891 INTVAL (XEXP (temp, 1)));
5893 /* If X has only a single bit that might be nonzero, say, bit I, convert
5894 (neg X) to (ashiftrt (ashift X C-I) C-I) where C is the bitsize of
5895 MODE minus 1. This will convert (neg (zero_extract X 1 Y)) to
5896 (sign_extract X 1 Y). But only do this if TEMP isn't a register
5897 or a SUBREG of one since we'd be making the expression more
5898 complex if it was just a register. */
5900 if (!REG_P (temp)
5901 && ! (GET_CODE (temp) == SUBREG
5902 && REG_P (SUBREG_REG (temp)))
5903 && is_a <scalar_int_mode> (mode, &int_mode)
5904 && (i = exact_log2 (nonzero_bits (temp, int_mode))) >= 0)
5906 rtx temp1 = simplify_shift_const
5907 (NULL_RTX, ASHIFTRT, int_mode,
5908 simplify_shift_const (NULL_RTX, ASHIFT, int_mode, temp,
5909 GET_MODE_PRECISION (int_mode) - 1 - i),
5910 GET_MODE_PRECISION (int_mode) - 1 - i);
5912 /* If all we did was surround TEMP with the two shifts, we
5913 haven't improved anything, so don't use it. Otherwise,
5914 we are better off with TEMP1. */
5915 if (GET_CODE (temp1) != ASHIFTRT
5916 || GET_CODE (XEXP (temp1, 0)) != ASHIFT
5917 || XEXP (XEXP (temp1, 0), 0) != temp)
5918 return temp1;
5920 break;
5922 case TRUNCATE:
5923 /* We can't handle truncation to a partial integer mode here
5924 because we don't know the real bitsize of the partial
5925 integer mode. */
5926 if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
5927 break;
5929 if (HWI_COMPUTABLE_MODE_P (mode))
5930 SUBST (XEXP (x, 0),
5931 force_to_mode (XEXP (x, 0), GET_MODE (XEXP (x, 0)),
5932 GET_MODE_MASK (mode), 0));
5934 /* We can truncate a constant value and return it. */
5935 if (CONST_INT_P (XEXP (x, 0)))
5936 return gen_int_mode (INTVAL (XEXP (x, 0)), mode);
5938 /* Similarly to what we do in simplify-rtx.c, a truncate of a register
5939 whose value is a comparison can be replaced with a subreg if
5940 STORE_FLAG_VALUE permits. */
5941 if (HWI_COMPUTABLE_MODE_P (mode)
5942 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0
5943 && (temp = get_last_value (XEXP (x, 0)))
5944 && COMPARISON_P (temp))
5945 return gen_lowpart (mode, XEXP (x, 0));
5946 break;
5948 case CONST:
5949 /* (const (const X)) can become (const X). Do it this way rather than
5950 returning the inner CONST since CONST can be shared with a
5951 REG_EQUAL note. */
5952 if (GET_CODE (XEXP (x, 0)) == CONST)
5953 SUBST (XEXP (x, 0), XEXP (XEXP (x, 0), 0));
5954 break;
5956 case LO_SUM:
5957 /* Convert (lo_sum (high FOO) FOO) to FOO. This is necessary so we
5958 can add in an offset. find_split_point will split this address up
5959 again if it doesn't match. */
5960 if (HAVE_lo_sum && GET_CODE (XEXP (x, 0)) == HIGH
5961 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
5962 return XEXP (x, 1);
5963 break;
5965 case PLUS:
5966 /* (plus (xor (and <foo> (const_int pow2 - 1)) <c>) <-c>)
5967 when c is (const_int (pow2 + 1) / 2) is a sign extension of a
5968 bit-field and can be replaced by either a sign_extend or a
5969 sign_extract. The `and' may be a zero_extend and the two
5970 <c>, -<c> constants may be reversed. */
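/* Arithmetic sketch (illustrative, 8-bit field): with pow2 == 256 the
   pattern reads ((x & 255) ^ 128) - 128.  For the low byte b of x this
   is b when b < 128 and b - 256 when b >= 128, i.e. the sign extension
   of that byte, which is why a sign_extend or sign_extract can replace
   it.  */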
5971 if (GET_CODE (XEXP (x, 0)) == XOR
5972 && is_a <scalar_int_mode> (mode, &int_mode)
5973 && CONST_INT_P (XEXP (x, 1))
5974 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
5975 && INTVAL (XEXP (x, 1)) == -INTVAL (XEXP (XEXP (x, 0), 1))
5976 && ((i = exact_log2 (UINTVAL (XEXP (XEXP (x, 0), 1)))) >= 0
5977 || (i = exact_log2 (UINTVAL (XEXP (x, 1)))) >= 0)
5978 && HWI_COMPUTABLE_MODE_P (int_mode)
5979 && ((GET_CODE (XEXP (XEXP (x, 0), 0)) == AND
5980 && CONST_INT_P (XEXP (XEXP (XEXP (x, 0), 0), 1))
5981 && (UINTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1))
5982 == (HOST_WIDE_INT_1U << (i + 1)) - 1))
5983 || (GET_CODE (XEXP (XEXP (x, 0), 0)) == ZERO_EXTEND
5984 && (GET_MODE_PRECISION (GET_MODE (XEXP (XEXP (XEXP (x, 0), 0), 0)))
5985 == (unsigned int) i + 1))))
5986 return simplify_shift_const
5987 (NULL_RTX, ASHIFTRT, int_mode,
5988 simplify_shift_const (NULL_RTX, ASHIFT, int_mode,
5989 XEXP (XEXP (XEXP (x, 0), 0), 0),
5990 GET_MODE_PRECISION (int_mode) - (i + 1)),
5991 GET_MODE_PRECISION (int_mode) - (i + 1));
5993 /* If only the low-order bit of X is possibly nonzero, (plus x -1)
5994 can become (ashiftrt (ashift (xor x 1) C) C) where C is
5995 the bitsize of the mode - 1. This allows simplification of
5996 "a = (b & 8) == 0;" */
5997 if (XEXP (x, 1) == constm1_rtx
5998 && !REG_P (XEXP (x, 0))
5999 && ! (GET_CODE (XEXP (x, 0)) == SUBREG
6000 && REG_P (SUBREG_REG (XEXP (x, 0))))
6001 && is_a <scalar_int_mode> (mode, &int_mode)
6002 && nonzero_bits (XEXP (x, 0), int_mode) == 1)
6003 return simplify_shift_const
6004 (NULL_RTX, ASHIFTRT, int_mode,
6005 simplify_shift_const (NULL_RTX, ASHIFT, int_mode,
6006 gen_rtx_XOR (int_mode, XEXP (x, 0),
6007 const1_rtx),
6008 GET_MODE_PRECISION (int_mode) - 1),
6009 GET_MODE_PRECISION (int_mode) - 1);
6011 /* If we are adding two things that have no bits in common, convert
6012 the addition into an IOR. This will often be further simplified,
6013 for example in cases like ((a & 1) + (a & 2)), which can
6014 become a & 3. */
6016 if (HWI_COMPUTABLE_MODE_P (mode)
6017 && (nonzero_bits (XEXP (x, 0), mode)
6018 & nonzero_bits (XEXP (x, 1), mode)) == 0)
6020 /* Try to simplify the expression further. */
6021 rtx tor = simplify_gen_binary (IOR, mode, XEXP (x, 0), XEXP (x, 1));
6022 temp = combine_simplify_rtx (tor, VOIDmode, in_dest, 0);
6024 /* If we could, great. If not, do not go ahead with the IOR
6025 replacement, since PLUS appears in many special purpose
6026 address arithmetic instructions. */
6027 if (GET_CODE (temp) != CLOBBER
6028 && (GET_CODE (temp) != IOR
6029 || ((XEXP (temp, 0) != XEXP (x, 0)
6030 || XEXP (temp, 1) != XEXP (x, 1))
6031 && (XEXP (temp, 0) != XEXP (x, 1)
6032 || XEXP (temp, 1) != XEXP (x, 0)))))
6033 return temp;
6036 /* Canonicalize x + x into x << 1. */
6037 if (GET_MODE_CLASS (mode) == MODE_INT
6038 && rtx_equal_p (XEXP (x, 0), XEXP (x, 1))
6039 && !side_effects_p (XEXP (x, 0)))
6040 return simplify_gen_binary (ASHIFT, mode, XEXP (x, 0), const1_rtx);
6042 break;
6044 case MINUS:
6045 /* (minus <foo> (and <foo> (const_int -pow2))) becomes
6046 (and <foo> (const_int pow2-1)) */
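/* Worked example (illustrative): with pow2 == 8,
   (minus x (and x (const_int -8))) equals (and x (const_int 7));
   subtracting x rounded down to a multiple of 8 leaves the low three
   bits of x.  */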
6047 if (is_a <scalar_int_mode> (mode, &int_mode)
6048 && GET_CODE (XEXP (x, 1)) == AND
6049 && CONST_INT_P (XEXP (XEXP (x, 1), 1))
6050 && pow2p_hwi (-UINTVAL (XEXP (XEXP (x, 1), 1)))
6051 && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
6052 return simplify_and_const_int (NULL_RTX, int_mode, XEXP (x, 0),
6053 -INTVAL (XEXP (XEXP (x, 1), 1)) - 1);
6054 break;
6056 case MULT:
6057 /* If we have (mult (plus A B) C), apply the distributive law and then
6058 the inverse distributive law to see if things simplify. This
6059 occurs mostly in addresses, often when unrolling loops. */
6061 if (GET_CODE (XEXP (x, 0)) == PLUS)
6063 rtx result = distribute_and_simplify_rtx (x, 0);
6064 if (result)
6065 return result;
6068 /* Try to simplify a*(b/c) as (a*b)/c. */
6069 if (FLOAT_MODE_P (mode) && flag_associative_math
6070 && GET_CODE (XEXP (x, 0)) == DIV)
6072 rtx tem = simplify_binary_operation (MULT, mode,
6073 XEXP (XEXP (x, 0), 0),
6074 XEXP (x, 1));
6075 if (tem)
6076 return simplify_gen_binary (DIV, mode, tem, XEXP (XEXP (x, 0), 1));
6078 break;
6080 case UDIV:
6081 /* If this is a divide by a power of two, treat it as a shift if
6082 its first operand is a shift. */
6083 if (is_a <scalar_int_mode> (mode, &int_mode)
6084 && CONST_INT_P (XEXP (x, 1))
6085 && (i = exact_log2 (UINTVAL (XEXP (x, 1)))) >= 0
6086 && (GET_CODE (XEXP (x, 0)) == ASHIFT
6087 || GET_CODE (XEXP (x, 0)) == LSHIFTRT
6088 || GET_CODE (XEXP (x, 0)) == ASHIFTRT
6089 || GET_CODE (XEXP (x, 0)) == ROTATE
6090 || GET_CODE (XEXP (x, 0)) == ROTATERT))
6091 return simplify_shift_const (NULL_RTX, LSHIFTRT, int_mode,
6092 XEXP (x, 0), i);
6093 break;
6095 case EQ: case NE:
6096 case GT: case GTU: case GE: case GEU:
6097 case LT: case LTU: case LE: case LEU:
6098 case UNEQ: case LTGT:
6099 case UNGT: case UNGE:
6100 case UNLT: case UNLE:
6101 case UNORDERED: case ORDERED:
6102 /* If the first operand is a condition code, we can't do anything
6103 with it. */
6104 if (GET_CODE (XEXP (x, 0)) == COMPARE
6105 || (GET_MODE_CLASS (GET_MODE (XEXP (x, 0))) != MODE_CC
6106 && ! CC0_P (XEXP (x, 0))))
6108 rtx op0 = XEXP (x, 0);
6109 rtx op1 = XEXP (x, 1);
6110 enum rtx_code new_code;
6112 if (GET_CODE (op0) == COMPARE)
6113 op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);
6115 /* Simplify our comparison, if possible. */
6116 new_code = simplify_comparison (code, &op0, &op1);
6118 /* If STORE_FLAG_VALUE is 1, we can convert (ne x 0) to simply X
6119 if only the low-order bit is possibly nonzero in X (such as when
6120 X is a ZERO_EXTRACT of one bit). Similarly, we can convert EQ to
6121 (xor X 1) or (minus 1 X); we use the former. Finally, if X is
6122 known to be either 0 or -1, NE becomes a NEG and EQ becomes
6123 (plus X 1).
6125 Remove any ZERO_EXTRACT we made when thinking this was a
6126 comparison. It may now be simpler to use, e.g., an AND. If a
6127 ZERO_EXTRACT is indeed appropriate, it will be placed back by
6128 the call to make_compound_operation in the SET case.
6130 Don't apply these optimizations if the caller would
6131 prefer a comparison rather than a value.
6132 E.g., for the condition in an IF_THEN_ELSE most targets need
6133 an explicit comparison. */
6135 if (in_cond)
6138 else if (STORE_FLAG_VALUE == 1
6139 && new_code == NE
6140 && is_int_mode (mode, &int_mode)
6141 && op1 == const0_rtx
6142 && int_mode == GET_MODE (op0)
6143 && nonzero_bits (op0, int_mode) == 1)
6144 return gen_lowpart (int_mode,
6145 expand_compound_operation (op0));
6147 else if (STORE_FLAG_VALUE == 1
6148 && new_code == NE
6149 && is_int_mode (mode, &int_mode)
6150 && op1 == const0_rtx
6151 && int_mode == GET_MODE (op0)
6152 && (num_sign_bit_copies (op0, int_mode)
6153 == GET_MODE_PRECISION (int_mode)))
6155 op0 = expand_compound_operation (op0);
6156 return simplify_gen_unary (NEG, int_mode,
6157 gen_lowpart (int_mode, op0),
6158 int_mode);
6161 else if (STORE_FLAG_VALUE == 1
6162 && new_code == EQ
6163 && is_int_mode (mode, &int_mode)
6164 && op1 == const0_rtx
6165 && int_mode == GET_MODE (op0)
6166 && nonzero_bits (op0, int_mode) == 1)
6168 op0 = expand_compound_operation (op0);
6169 return simplify_gen_binary (XOR, int_mode,
6170 gen_lowpart (int_mode, op0),
6171 const1_rtx);
6174 else if (STORE_FLAG_VALUE == 1
6175 && new_code == EQ
6176 && is_int_mode (mode, &int_mode)
6177 && op1 == const0_rtx
6178 && int_mode == GET_MODE (op0)
6179 && (num_sign_bit_copies (op0, int_mode)
6180 == GET_MODE_PRECISION (int_mode)))
6182 op0 = expand_compound_operation (op0);
6183 return plus_constant (int_mode, gen_lowpart (int_mode, op0), 1);
6186 /* If STORE_FLAG_VALUE is -1, we have cases similar to
6187 those above. */
6188 if (in_cond)
6191 else if (STORE_FLAG_VALUE == -1
6192 && new_code == NE
6193 && is_int_mode (mode, &int_mode)
6194 && op1 == const0_rtx
6195 && int_mode == GET_MODE (op0)
6196 && (num_sign_bit_copies (op0, int_mode)
6197 == GET_MODE_PRECISION (int_mode)))
6198 return gen_lowpart (int_mode, expand_compound_operation (op0));
6200 else if (STORE_FLAG_VALUE == -1
6201 && new_code == NE
6202 && is_int_mode (mode, &int_mode)
6203 && op1 == const0_rtx
6204 && int_mode == GET_MODE (op0)
6205 && nonzero_bits (op0, int_mode) == 1)
6207 op0 = expand_compound_operation (op0);
6208 return simplify_gen_unary (NEG, int_mode,
6209 gen_lowpart (int_mode, op0),
6210 int_mode);
6213 else if (STORE_FLAG_VALUE == -1
6214 && new_code == EQ
6215 && is_int_mode (mode, &int_mode)
6216 && op1 == const0_rtx
6217 && int_mode == GET_MODE (op0)
6218 && (num_sign_bit_copies (op0, int_mode)
6219 == GET_MODE_PRECISION (int_mode)))
6221 op0 = expand_compound_operation (op0);
6222 return simplify_gen_unary (NOT, int_mode,
6223 gen_lowpart (int_mode, op0),
6224 int_mode);
6227 /* If X is 0/1, (eq X 0) is X-1. */
6228 else if (STORE_FLAG_VALUE == -1
6229 && new_code == EQ
6230 && is_int_mode (mode, &int_mode)
6231 && op1 == const0_rtx
6232 && int_mode == GET_MODE (op0)
6233 && nonzero_bits (op0, int_mode) == 1)
6235 op0 = expand_compound_operation (op0);
6236 return plus_constant (int_mode, gen_lowpart (int_mode, op0), -1);
6239 /* If STORE_FLAG_VALUE says to just test the sign bit and X has just
6240 one bit that might be nonzero, we can convert (ne x 0) to
6241 (ashift x c) where C puts the bit in the sign bit. Remove any
6242 AND with STORE_FLAG_VALUE when we are done, since we are only
6243 going to test the sign bit. */
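/* Illustrative sketch (hypothetical target): in SImode with
   STORE_FLAG_VALUE equal to the sign-bit constant 0x80000000, if the
   only possibly nonzero bit of x is bit 3, then (ne x 0) can be
   computed as (ashift x (const_int 28)), which moves that bit into
   the sign position.  */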
6244 if (new_code == NE
6245 && is_int_mode (mode, &int_mode)
6246 && HWI_COMPUTABLE_MODE_P (int_mode)
6247 && val_signbit_p (int_mode, STORE_FLAG_VALUE)
6248 && op1 == const0_rtx
6249 && int_mode == GET_MODE (op0)
6250 && (i = exact_log2 (nonzero_bits (op0, int_mode))) >= 0)
6252 x = simplify_shift_const (NULL_RTX, ASHIFT, int_mode,
6253 expand_compound_operation (op0),
6254 GET_MODE_PRECISION (int_mode) - 1 - i);
6255 if (GET_CODE (x) == AND && XEXP (x, 1) == const_true_rtx)
6256 return XEXP (x, 0);
6257 else
6258 return x;
6261 /* If the code changed, return a whole new comparison.
6262 We also need to avoid using SUBST in cases where
6263 simplify_comparison has widened a comparison with a CONST_INT,
6264 since in that case the wider CONST_INT may fail the sanity
6265 checks in do_SUBST. */
6266 if (new_code != code
6267 || (CONST_INT_P (op1)
6268 && GET_MODE (op0) != GET_MODE (XEXP (x, 0))
6269 && GET_MODE (op0) != GET_MODE (XEXP (x, 1))))
6270 return gen_rtx_fmt_ee (new_code, mode, op0, op1);
6272 /* Otherwise, keep this operation, but maybe change its operands.
6273 This also converts (ne (compare FOO BAR) 0) to (ne FOO BAR). */
6274 SUBST (XEXP (x, 0), op0);
6275 SUBST (XEXP (x, 1), op1);
6277 break;
6279 case IF_THEN_ELSE:
6280 return simplify_if_then_else (x);
6282 case ZERO_EXTRACT:
6283 case SIGN_EXTRACT:
6284 case ZERO_EXTEND:
6285 case SIGN_EXTEND:
6286 /* If we are processing SET_DEST, we are done. */
6287 if (in_dest)
6288 return x;
6290 return expand_compound_operation (x);
6292 case SET:
6293 return simplify_set (x);
6295 case AND:
6296 case IOR:
6297 return simplify_logical (x);
6299 case ASHIFT:
6300 case LSHIFTRT:
6301 case ASHIFTRT:
6302 case ROTATE:
6303 case ROTATERT:
6304 /* If this is a shift by a constant amount, simplify it. */
6305 if (CONST_INT_P (XEXP (x, 1)))
6306 return simplify_shift_const (x, code, mode, XEXP (x, 0),
6307 INTVAL (XEXP (x, 1)));
6309 else if (SHIFT_COUNT_TRUNCATED && !REG_P (XEXP (x, 1)))
6310 SUBST (XEXP (x, 1),
6311 force_to_mode (XEXP (x, 1), GET_MODE (XEXP (x, 1)),
6312 (HOST_WIDE_INT_1U
6313 << exact_log2 (GET_MODE_UNIT_BITSIZE
6314 (GET_MODE (x))))
6315 - 1,
6316 0));
6317 break;
6319 default:
6320 break;
6323 return x;
6326 /* Simplify X, an IF_THEN_ELSE expression. Return the new expression. */
6328 static rtx
6329 simplify_if_then_else (rtx x)
6331 machine_mode mode = GET_MODE (x);
6332 rtx cond = XEXP (x, 0);
6333 rtx true_rtx = XEXP (x, 1);
6334 rtx false_rtx = XEXP (x, 2);
6335 enum rtx_code true_code = GET_CODE (cond);
6336 int comparison_p = COMPARISON_P (cond);
6337 rtx temp;
6338 int i;
6339 enum rtx_code false_code;
6340 rtx reversed;
6341 scalar_int_mode int_mode, inner_mode;
6343 /* Simplify storing of the truth value. */
6344 if (comparison_p && true_rtx == const_true_rtx && false_rtx == const0_rtx)
6345 return simplify_gen_relational (true_code, mode, VOIDmode,
6346 XEXP (cond, 0), XEXP (cond, 1));
6348 /* Also when the truth value has to be reversed. */
6349 if (comparison_p
6350 && true_rtx == const0_rtx && false_rtx == const_true_rtx
6351 && (reversed = reversed_comparison (cond, mode)))
6352 return reversed;
6354 /* Sometimes we can simplify the arm of an IF_THEN_ELSE if a register used
6355 in it is being compared against certain values. Get the true and false
6356 comparisons and see if that says anything about the value of each arm. */
6358 if (comparison_p
6359 && ((false_code = reversed_comparison_code (cond, NULL))
6360 != UNKNOWN)
6361 && REG_P (XEXP (cond, 0)))
6363 HOST_WIDE_INT nzb;
6364 rtx from = XEXP (cond, 0);
6365 rtx true_val = XEXP (cond, 1);
6366 rtx false_val = true_val;
6367 int swapped = 0;
6369 /* If FALSE_CODE is EQ, swap the codes and arms. */
6371 if (false_code == EQ)
6373 swapped = 1, true_code = EQ, false_code = NE;
6374 std::swap (true_rtx, false_rtx);
6377 scalar_int_mode from_mode;
6378 if (is_a <scalar_int_mode> (GET_MODE (from), &from_mode))
6380 /* If we are comparing against zero and the expression being
6381 tested has only a single bit that might be nonzero, that is
6382 its value when it is not equal to zero. Similarly if it is
6383 known to be -1 or 0. */
6384 if (true_code == EQ
6385 && true_val == const0_rtx
6386 && pow2p_hwi (nzb = nonzero_bits (from, from_mode)))
6388 false_code = EQ;
6389 false_val = gen_int_mode (nzb, from_mode);
6391 else if (true_code == EQ
6392 && true_val == const0_rtx
6393 && (num_sign_bit_copies (from, from_mode)
6394 == GET_MODE_PRECISION (from_mode)))
6396 false_code = EQ;
6397 false_val = constm1_rtx;
6401 /* Now simplify an arm if we know the value of the register in the
6402 branch and it is used in the arm. Be careful due to the potential
6403 of locally-shared RTL. */
6405 if (reg_mentioned_p (from, true_rtx))
6406 true_rtx = subst (known_cond (copy_rtx (true_rtx), true_code,
6407 from, true_val),
6408 pc_rtx, pc_rtx, 0, 0, 0);
6409 if (reg_mentioned_p (from, false_rtx))
6410 false_rtx = subst (known_cond (copy_rtx (false_rtx), false_code,
6411 from, false_val),
6412 pc_rtx, pc_rtx, 0, 0, 0);
6414 SUBST (XEXP (x, 1), swapped ? false_rtx : true_rtx);
6415 SUBST (XEXP (x, 2), swapped ? true_rtx : false_rtx);
6417 true_rtx = XEXP (x, 1);
6418 false_rtx = XEXP (x, 2);
6419 true_code = GET_CODE (cond);
6422 /* If we have (if_then_else FOO (pc) (label_ref BAR)) and FOO can be
6423 reversed, do so to avoid needing two sets of patterns for
6424 subtract-and-branch insns. Similarly if we have a constant in the true
6425 arm, the false arm is the same as the first operand of the comparison, or
6426 the false arm is more complicated than the true arm. */
6428 if (comparison_p
6429 && reversed_comparison_code (cond, NULL) != UNKNOWN
6430 && (true_rtx == pc_rtx
6431 || (CONSTANT_P (true_rtx)
6432 && !CONST_INT_P (false_rtx) && false_rtx != pc_rtx)
6433 || true_rtx == const0_rtx
6434 || (OBJECT_P (true_rtx) && !OBJECT_P (false_rtx))
6435 || (GET_CODE (true_rtx) == SUBREG && OBJECT_P (SUBREG_REG (true_rtx))
6436 && !OBJECT_P (false_rtx))
6437 || reg_mentioned_p (true_rtx, false_rtx)
6438 || rtx_equal_p (false_rtx, XEXP (cond, 0))))
6440 true_code = reversed_comparison_code (cond, NULL);
6441 SUBST (XEXP (x, 0), reversed_comparison (cond, GET_MODE (cond)));
6442 SUBST (XEXP (x, 1), false_rtx);
6443 SUBST (XEXP (x, 2), true_rtx);
6445 std::swap (true_rtx, false_rtx);
6446 cond = XEXP (x, 0);
6448 /* It is possible that the conditional has been simplified out. */
6449 true_code = GET_CODE (cond);
6450 comparison_p = COMPARISON_P (cond);
6453 /* If the two arms are identical, we don't need the comparison. */
6455 if (rtx_equal_p (true_rtx, false_rtx) && ! side_effects_p (cond))
6456 return true_rtx;
6458 /* Convert a == b ? b : a to "a". */
6459 if (true_code == EQ && ! side_effects_p (cond)
6460 && !HONOR_NANS (mode)
6461 && rtx_equal_p (XEXP (cond, 0), false_rtx)
6462 && rtx_equal_p (XEXP (cond, 1), true_rtx))
6463 return false_rtx;
6464 else if (true_code == NE && ! side_effects_p (cond)
6465 && !HONOR_NANS (mode)
6466 && rtx_equal_p (XEXP (cond, 0), true_rtx)
6467 && rtx_equal_p (XEXP (cond, 1), false_rtx))
6468 return true_rtx;
6470 /* Look for cases where we have (abs x) or (neg (abs X)). */
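/* Illustrative sketch: (if_then_else (ge x (const_int 0)) x (neg x))
   becomes (abs x), while (if_then_else (lt x (const_int 0)) x (neg x))
   becomes (neg (abs x)).  */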
6472 if (GET_MODE_CLASS (mode) == MODE_INT
6473 && comparison_p
6474 && XEXP (cond, 1) == const0_rtx
6475 && GET_CODE (false_rtx) == NEG
6476 && rtx_equal_p (true_rtx, XEXP (false_rtx, 0))
6477 && rtx_equal_p (true_rtx, XEXP (cond, 0))
6478 && ! side_effects_p (true_rtx))
6479 switch (true_code)
6481 case GT:
6482 case GE:
6483 return simplify_gen_unary (ABS, mode, true_rtx, mode);
6484 case LT:
6485 case LE:
6486 return
6487 simplify_gen_unary (NEG, mode,
6488 simplify_gen_unary (ABS, mode, true_rtx, mode),
6489 mode);
6490 default:
6491 break;
6494 /* Look for MIN or MAX. */
6496 if ((! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
6497 && comparison_p
6498 && rtx_equal_p (XEXP (cond, 0), true_rtx)
6499 && rtx_equal_p (XEXP (cond, 1), false_rtx)
6500 && ! side_effects_p (cond))
6501 switch (true_code)
6503 case GE:
6504 case GT:
6505 return simplify_gen_binary (SMAX, mode, true_rtx, false_rtx);
6506 case LE:
6507 case LT:
6508 return simplify_gen_binary (SMIN, mode, true_rtx, false_rtx);
6509 case GEU:
6510 case GTU:
6511 return simplify_gen_binary (UMAX, mode, true_rtx, false_rtx);
6512 case LEU:
6513 case LTU:
6514 return simplify_gen_binary (UMIN, mode, true_rtx, false_rtx);
6515 default:
6516 break;
6519 /* If we have (if_then_else COND (OP Z C1) Z) and OP is an identity when its
6520 second operand is zero, this can be done as (OP Z (mult COND C2)) where
6521 C2 = C1 * STORE_FLAG_VALUE. Similarly if OP has an outer ZERO_EXTEND or
6522 SIGN_EXTEND as long as Z is already extended (so we don't destroy it).
6523 We can do this kind of thing in some cases when STORE_FLAG_VALUE is
6524 neither 1 nor -1, but it isn't worth checking for. */
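/* Illustrative sketch, assuming STORE_FLAG_VALUE == 1:
   (if_then_else (ne A 0) (plus Z (const_int 4)) Z) can be rewritten as
   (plus Z (mult (ne A 0) (const_int 4))), since the multiply is 4 when
   the condition holds and 0 otherwise, and adding 0 is an identity.  */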
6526 if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
6527 && comparison_p
6528 && is_int_mode (mode, &int_mode)
6529 && ! side_effects_p (x))
6531 rtx t = make_compound_operation (true_rtx, SET);
6532 rtx f = make_compound_operation (false_rtx, SET);
6533 rtx cond_op0 = XEXP (cond, 0);
6534 rtx cond_op1 = XEXP (cond, 1);
6535 enum rtx_code op = UNKNOWN, extend_op = UNKNOWN;
6536 scalar_int_mode m = int_mode;
6537 rtx z = 0, c1 = NULL_RTX;
6539 if ((GET_CODE (t) == PLUS || GET_CODE (t) == MINUS
6540 || GET_CODE (t) == IOR || GET_CODE (t) == XOR
6541 || GET_CODE (t) == ASHIFT
6542 || GET_CODE (t) == LSHIFTRT || GET_CODE (t) == ASHIFTRT)
6543 && rtx_equal_p (XEXP (t, 0), f))
6544 c1 = XEXP (t, 1), op = GET_CODE (t), z = f;
6546 /* If an identity-zero op is commutative, check whether there
6547 would be a match if we swapped the operands. */
6548 else if ((GET_CODE (t) == PLUS || GET_CODE (t) == IOR
6549 || GET_CODE (t) == XOR)
6550 && rtx_equal_p (XEXP (t, 1), f))
6551 c1 = XEXP (t, 0), op = GET_CODE (t), z = f;
6552 else if (GET_CODE (t) == SIGN_EXTEND
6553 && is_a <scalar_int_mode> (GET_MODE (XEXP (t, 0)), &inner_mode)
6554 && (GET_CODE (XEXP (t, 0)) == PLUS
6555 || GET_CODE (XEXP (t, 0)) == MINUS
6556 || GET_CODE (XEXP (t, 0)) == IOR
6557 || GET_CODE (XEXP (t, 0)) == XOR
6558 || GET_CODE (XEXP (t, 0)) == ASHIFT
6559 || GET_CODE (XEXP (t, 0)) == LSHIFTRT
6560 || GET_CODE (XEXP (t, 0)) == ASHIFTRT)
6561 && GET_CODE (XEXP (XEXP (t, 0), 0)) == SUBREG
6562 && subreg_lowpart_p (XEXP (XEXP (t, 0), 0))
6563 && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 0)), f)
6564 && (num_sign_bit_copies (f, GET_MODE (f))
6565 > (unsigned int)
6566 (GET_MODE_PRECISION (int_mode)
6567 - GET_MODE_PRECISION (inner_mode))))
6569 c1 = XEXP (XEXP (t, 0), 1); z = f; op = GET_CODE (XEXP (t, 0));
6570 extend_op = SIGN_EXTEND;
6571 m = inner_mode;
6573 else if (GET_CODE (t) == SIGN_EXTEND
6574 && is_a <scalar_int_mode> (GET_MODE (XEXP (t, 0)), &inner_mode)
6575 && (GET_CODE (XEXP (t, 0)) == PLUS
6576 || GET_CODE (XEXP (t, 0)) == IOR
6577 || GET_CODE (XEXP (t, 0)) == XOR)
6578 && GET_CODE (XEXP (XEXP (t, 0), 1)) == SUBREG
6579 && subreg_lowpart_p (XEXP (XEXP (t, 0), 1))
6580 && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 1)), f)
6581 && (num_sign_bit_copies (f, GET_MODE (f))
6582 > (unsigned int)
6583 (GET_MODE_PRECISION (int_mode)
6584 - GET_MODE_PRECISION (inner_mode))))
6586 c1 = XEXP (XEXP (t, 0), 0); z = f; op = GET_CODE (XEXP (t, 0));
6587 extend_op = SIGN_EXTEND;
6588 m = inner_mode;
6590 else if (GET_CODE (t) == ZERO_EXTEND
6591 && is_a <scalar_int_mode> (GET_MODE (XEXP (t, 0)), &inner_mode)
6592 && (GET_CODE (XEXP (t, 0)) == PLUS
6593 || GET_CODE (XEXP (t, 0)) == MINUS
6594 || GET_CODE (XEXP (t, 0)) == IOR
6595 || GET_CODE (XEXP (t, 0)) == XOR
6596 || GET_CODE (XEXP (t, 0)) == ASHIFT
6597 || GET_CODE (XEXP (t, 0)) == LSHIFTRT
6598 || GET_CODE (XEXP (t, 0)) == ASHIFTRT)
6599 && GET_CODE (XEXP (XEXP (t, 0), 0)) == SUBREG
6600 && HWI_COMPUTABLE_MODE_P (int_mode)
6601 && subreg_lowpart_p (XEXP (XEXP (t, 0), 0))
6602 && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 0)), f)
6603 && ((nonzero_bits (f, GET_MODE (f))
6604 & ~GET_MODE_MASK (inner_mode))
6605 == 0))
6607 c1 = XEXP (XEXP (t, 0), 1); z = f; op = GET_CODE (XEXP (t, 0));
6608 extend_op = ZERO_EXTEND;
6609 m = inner_mode;
6611 else if (GET_CODE (t) == ZERO_EXTEND
6612 && is_a <scalar_int_mode> (GET_MODE (XEXP (t, 0)), &inner_mode)
6613 && (GET_CODE (XEXP (t, 0)) == PLUS
6614 || GET_CODE (XEXP (t, 0)) == IOR
6615 || GET_CODE (XEXP (t, 0)) == XOR)
6616 && GET_CODE (XEXP (XEXP (t, 0), 1)) == SUBREG
6617 && HWI_COMPUTABLE_MODE_P (int_mode)
6618 && subreg_lowpart_p (XEXP (XEXP (t, 0), 1))
6619 && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 1)), f)
6620 && ((nonzero_bits (f, GET_MODE (f))
6621 & ~GET_MODE_MASK (inner_mode))
6622 == 0))
6624 c1 = XEXP (XEXP (t, 0), 0); z = f; op = GET_CODE (XEXP (t, 0));
6625 extend_op = ZERO_EXTEND;
6626 m = inner_mode;
6629 if (z)
6631 temp = subst (simplify_gen_relational (true_code, m, VOIDmode,
6632 cond_op0, cond_op1),
6633 pc_rtx, pc_rtx, 0, 0, 0);
6634 temp = simplify_gen_binary (MULT, m, temp,
6635 simplify_gen_binary (MULT, m, c1,
6636 const_true_rtx));
6637 temp = subst (temp, pc_rtx, pc_rtx, 0, 0, 0);
6638 temp = simplify_gen_binary (op, m, gen_lowpart (m, z), temp);
6640 if (extend_op != UNKNOWN)
6641 temp = simplify_gen_unary (extend_op, int_mode, temp, m);
6643 return temp;
6647 /* If we have (if_then_else (ne A 0) C1 0) and either A is known to be 0 or
6648 1 and C1 is a single bit or A is known to be 0 or -1 and C1 is the
6649 negation of a single bit, we can convert this operation to a shift. We
6650 can actually do this more generally, but it doesn't seem worth it. */
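/* Illustrative sketch: if A is known to be 0 or 1 and C1 == 32
   (so i == 5), then (if_then_else (ne A 0) (const_int 32) (const_int 0))
   is simply (ashift A (const_int 5)).  */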
6652 if (true_code == NE
6653 && is_a <scalar_int_mode> (mode, &int_mode)
6654 && XEXP (cond, 1) == const0_rtx
6655 && false_rtx == const0_rtx
6656 && CONST_INT_P (true_rtx)
6657 && ((1 == nonzero_bits (XEXP (cond, 0), int_mode)
6658 && (i = exact_log2 (UINTVAL (true_rtx))) >= 0)
6659 || ((num_sign_bit_copies (XEXP (cond, 0), int_mode)
6660 == GET_MODE_PRECISION (int_mode))
6661 && (i = exact_log2 (-UINTVAL (true_rtx))) >= 0)))
6662 return
6663 simplify_shift_const (NULL_RTX, ASHIFT, int_mode,
6664 gen_lowpart (int_mode, XEXP (cond, 0)), i);
6666 /* (IF_THEN_ELSE (NE A 0) C1 0) is A or a zero-extend of A if the only
6667 non-zero bit in A is C1. */
6668 if (true_code == NE && XEXP (cond, 1) == const0_rtx
6669 && false_rtx == const0_rtx && CONST_INT_P (true_rtx)
6670 && is_a <scalar_int_mode> (mode, &int_mode)
6671 && is_a <scalar_int_mode> (GET_MODE (XEXP (cond, 0)), &inner_mode)
6672 && (UINTVAL (true_rtx) & GET_MODE_MASK (int_mode))
6673 == nonzero_bits (XEXP (cond, 0), inner_mode)
6674 && (i = exact_log2 (UINTVAL (true_rtx) & GET_MODE_MASK (int_mode))) >= 0)
6676 rtx val = XEXP (cond, 0);
6677 if (inner_mode == int_mode)
6678 return val;
6679 else if (GET_MODE_PRECISION (inner_mode) < GET_MODE_PRECISION (int_mode))
6680 return simplify_gen_unary (ZERO_EXTEND, int_mode, val, inner_mode);
6683 return x;
6686 /* Simplify X, a SET expression. Return the new expression. */
6688 static rtx
6689 simplify_set (rtx x)
6691 rtx src = SET_SRC (x);
6692 rtx dest = SET_DEST (x);
6693 machine_mode mode
6694 = GET_MODE (src) != VOIDmode ? GET_MODE (src) : GET_MODE (dest);
6695 rtx_insn *other_insn;
6696 rtx *cc_use;
6697 scalar_int_mode int_mode;
6699 /* (set (pc) (return)) gets written as (return). */
6700 if (GET_CODE (dest) == PC && ANY_RETURN_P (src))
6701 return src;
6703 /* Now that we know for sure which bits of SRC we are using, see if we can
6704 simplify the expression for the object knowing that we only need the
6705 low-order bits. */
6707 if (GET_MODE_CLASS (mode) == MODE_INT && HWI_COMPUTABLE_MODE_P (mode))
6709 src = force_to_mode (src, mode, HOST_WIDE_INT_M1U, 0);
6710 SUBST (SET_SRC (x), src);
6713 /* If we are setting CC0 or if the source is a COMPARE, look for the use of
6714 the comparison result and try to simplify it unless we already have used
6715 undobuf.other_insn. */
6716 if ((GET_MODE_CLASS (mode) == MODE_CC
6717 || GET_CODE (src) == COMPARE
6718 || CC0_P (dest))
6719 && (cc_use = find_single_use (dest, subst_insn, &other_insn)) != 0
6720 && (undobuf.other_insn == 0 || other_insn == undobuf.other_insn)
6721 && COMPARISON_P (*cc_use)
6722 && rtx_equal_p (XEXP (*cc_use, 0), dest))
6724 enum rtx_code old_code = GET_CODE (*cc_use);
6725 enum rtx_code new_code;
6726 rtx op0, op1, tmp;
6727 int other_changed = 0;
6728 rtx inner_compare = NULL_RTX;
6729 machine_mode compare_mode = GET_MODE (dest);
6731 if (GET_CODE (src) == COMPARE)
6733 op0 = XEXP (src, 0), op1 = XEXP (src, 1);
6734 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
6736 inner_compare = op0;
6737 op0 = XEXP (inner_compare, 0), op1 = XEXP (inner_compare, 1);
6740 else
6741 op0 = src, op1 = CONST0_RTX (GET_MODE (src));
6743 tmp = simplify_relational_operation (old_code, compare_mode, VOIDmode,
6744 op0, op1);
6745 if (!tmp)
6746 new_code = old_code;
6747 else if (!CONSTANT_P (tmp))
6749 new_code = GET_CODE (tmp);
6750 op0 = XEXP (tmp, 0);
6751 op1 = XEXP (tmp, 1);
6753 else
6755 rtx pat = PATTERN (other_insn);
6756 undobuf.other_insn = other_insn;
6757 SUBST (*cc_use, tmp);
6759 /* Attempt to simplify CC user. */
6760 if (GET_CODE (pat) == SET)
6762 rtx new_rtx = simplify_rtx (SET_SRC (pat));
6763 if (new_rtx != NULL_RTX)
6764 SUBST (SET_SRC (pat), new_rtx);
6767 /* Convert X into a no-op move. */
6768 SUBST (SET_DEST (x), pc_rtx);
6769 SUBST (SET_SRC (x), pc_rtx);
6770 return x;
6773 /* Simplify our comparison, if possible. */
6774 new_code = simplify_comparison (new_code, &op0, &op1);
6776 #ifdef SELECT_CC_MODE
6777 /* If this machine has CC modes other than CCmode, check to see if we
6778 need to use a different CC mode here. */
6779 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
6780 compare_mode = GET_MODE (op0);
6781 else if (inner_compare
6782 && GET_MODE_CLASS (GET_MODE (inner_compare)) == MODE_CC
6783 && new_code == old_code
6784 && op0 == XEXP (inner_compare, 0)
6785 && op1 == XEXP (inner_compare, 1))
6786 compare_mode = GET_MODE (inner_compare);
6787 else
6788 compare_mode = SELECT_CC_MODE (new_code, op0, op1);
6790 /* If the mode changed, we have to change SET_DEST, the mode in the
6791 compare, and the mode in the place SET_DEST is used. If SET_DEST is
6792 a hard register, just build new versions with the proper mode. If it
6793 is a pseudo, we lose unless it is the only time we set the pseudo, in
6794 which case we can safely change its mode. */
6795 if (!HAVE_cc0 && compare_mode != GET_MODE (dest))
6797 if (can_change_dest_mode (dest, 0, compare_mode))
6799 unsigned int regno = REGNO (dest);
6800 rtx new_dest;
6802 if (regno < FIRST_PSEUDO_REGISTER)
6803 new_dest = gen_rtx_REG (compare_mode, regno);
6804 else
6806 SUBST_MODE (regno_reg_rtx[regno], compare_mode);
6807 new_dest = regno_reg_rtx[regno];
6810 SUBST (SET_DEST (x), new_dest);
6811 SUBST (XEXP (*cc_use, 0), new_dest);
6812 other_changed = 1;
6814 dest = new_dest;
6817 #endif /* SELECT_CC_MODE */
6819 /* If the code changed, we have to build a new comparison in
6820 undobuf.other_insn. */
6821 if (new_code != old_code)
6823 int other_changed_previously = other_changed;
6824 unsigned HOST_WIDE_INT mask;
6825 rtx old_cc_use = *cc_use;
6827 SUBST (*cc_use, gen_rtx_fmt_ee (new_code, GET_MODE (*cc_use),
6828 dest, const0_rtx));
6829 other_changed = 1;
6831 /* If the only change we made was to change an EQ into an NE or
6832 vice versa, OP0 has only one bit that might be nonzero, and OP1
6833 is zero, check if changing the user of the condition code will
6834 produce a valid insn. If it won't, we can keep the original code
6835 in that insn by surrounding our operation with an XOR. */
6837 if (((old_code == NE && new_code == EQ)
6838 || (old_code == EQ && new_code == NE))
6839 && ! other_changed_previously && op1 == const0_rtx
6840 && HWI_COMPUTABLE_MODE_P (GET_MODE (op0))
6841 && pow2p_hwi (mask = nonzero_bits (op0, GET_MODE (op0))))
6843 rtx pat = PATTERN (other_insn), note = 0;
6845 if ((recog_for_combine (&pat, other_insn, &note) < 0
6846 && ! check_asm_operands (pat)))
6848 *cc_use = old_cc_use;
6849 other_changed = 0;
6851 op0 = simplify_gen_binary (XOR, GET_MODE (op0), op0,
6852 gen_int_mode (mask,
6853 GET_MODE (op0)));
6858 if (other_changed)
6859 undobuf.other_insn = other_insn;
6861 /* Don't generate a compare of a CC with 0, just use that CC. */
6862 if (GET_MODE (op0) == compare_mode && op1 == const0_rtx)
6864 SUBST (SET_SRC (x), op0);
6865 src = SET_SRC (x);
6867 /* Otherwise, if we didn't previously have the same COMPARE we
6868 want, create it from scratch. */
6869 else if (GET_CODE (src) != COMPARE || GET_MODE (src) != compare_mode
6870 || XEXP (src, 0) != op0 || XEXP (src, 1) != op1)
6872 SUBST (SET_SRC (x), gen_rtx_COMPARE (compare_mode, op0, op1));
6873 src = SET_SRC (x);
6876 else
6878 /* Get SET_SRC in a form where we have placed back any
6879 compound expressions. Then do the checks below. */
6880 src = make_compound_operation (src, SET);
6881 SUBST (SET_SRC (x), src);
6884 /* If we have (set x (subreg:m1 (op:m2 ...) 0)) with OP being some operation,
6885 and X being a REG or (subreg (reg)), we may be able to convert this to
6886 (set (subreg:m2 x) (op)).
6888 We can always do this if M1 is narrower than M2 because that means that
6889 we only care about the low bits of the result.
6891 However, on machines without WORD_REGISTER_OPERATIONS defined, we cannot
6892 perform a narrower operation than requested since the high-order bits will
6893 be undefined. On machines where it is defined, this transformation is safe
6894 as long as M1 and M2 have the same number of words. */
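/* A minimal illustration (hypothetical operands, assuming both modes fit
   in one word): (set x:QI (subreg:QI (plus:SI a b) 0)) can become
   (set (subreg:SI x 0) (plus:SI a b)), since only the low QImode bits of
   the result are ever used.  */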
6896 if (GET_CODE (src) == SUBREG && subreg_lowpart_p (src)
6897 && !OBJECT_P (SUBREG_REG (src))
6898 && (((GET_MODE_SIZE (GET_MODE (src)) + (UNITS_PER_WORD - 1))
6899 / UNITS_PER_WORD)
6900 == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (src)))
6901 + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD))
6902 && (WORD_REGISTER_OPERATIONS || !paradoxical_subreg_p (src))
6903 && ! (REG_P (dest) && REGNO (dest) < FIRST_PSEUDO_REGISTER
6904 && !REG_CAN_CHANGE_MODE_P (REGNO (dest),
6905 GET_MODE (SUBREG_REG (src)),
6906 GET_MODE (src)))
6907 && (REG_P (dest)
6908 || (GET_CODE (dest) == SUBREG
6909 && REG_P (SUBREG_REG (dest)))))
6911 SUBST (SET_DEST (x),
6912 gen_lowpart (GET_MODE (SUBREG_REG (src)),
6913 dest));
6914 SUBST (SET_SRC (x), SUBREG_REG (src));
6916 src = SET_SRC (x), dest = SET_DEST (x);
6919 /* If we have (set (cc0) (subreg ...)), we try to remove the subreg
6920 in SRC. */
6921 if (dest == cc0_rtx
6922 && partial_subreg_p (src)
6923 && subreg_lowpart_p (src))
6925 rtx inner = SUBREG_REG (src);
6926 machine_mode inner_mode = GET_MODE (inner);
6928 /* Here we make sure that we don't have a sign bit on. */
6929 if (val_signbit_known_clear_p (GET_MODE (src),
6930 nonzero_bits (inner, inner_mode)))
6932 SUBST (SET_SRC (x), inner);
6933 src = SET_SRC (x);
6937 /* If we have (set FOO (subreg:M (mem:N BAR) 0)) with M wider than N, this
6938 would require a paradoxical subreg. Replace the subreg with the load
6939 extension (per LOAD_EXTEND_OP) to avoid the reload that would otherwise be required. */
6941 enum rtx_code extend_op;
6942 if (paradoxical_subreg_p (src)
6943 && MEM_P (SUBREG_REG (src))
6944 && (extend_op = load_extend_op (GET_MODE (SUBREG_REG (src)))) != UNKNOWN)
6946 SUBST (SET_SRC (x),
6947 gen_rtx_fmt_e (extend_op, GET_MODE (src), SUBREG_REG (src)));
6949 src = SET_SRC (x);
6952 /* If we don't have a conditional move, SET_SRC is an IF_THEN_ELSE, and we
6953 are comparing an item known to be 0 or -1 against 0, use a logical
6954 operation instead. Check for one of the arms being an IOR of the other
6955 arm with some value. We compute three terms to be IOR'ed together. In
6956 practice, at most two will be nonzero. Then we do the IOR's. */
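/* Worked example (illustrative only): with A known to be 0 or -1,
   (if_then_else (ne A (const_int 0)) (ior F C) F) gives
   TERM1 = F, TERM2 = (and A C), TERM3 = (and (not A) (const_int 0)),
   so the replacement source is (ior F (and A C)).  */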
6958 if (GET_CODE (dest) != PC
6959 && GET_CODE (src) == IF_THEN_ELSE
6960 && is_int_mode (GET_MODE (src), &int_mode)
6961 && (GET_CODE (XEXP (src, 0)) == EQ || GET_CODE (XEXP (src, 0)) == NE)
6962 && XEXP (XEXP (src, 0), 1) == const0_rtx
6963 && int_mode == GET_MODE (XEXP (XEXP (src, 0), 0))
6964 && (!HAVE_conditional_move
6965 || ! can_conditionally_move_p (int_mode))
6966 && (num_sign_bit_copies (XEXP (XEXP (src, 0), 0), int_mode)
6967 == GET_MODE_PRECISION (int_mode))
6968 && ! side_effects_p (src))
6970 rtx true_rtx = (GET_CODE (XEXP (src, 0)) == NE
6971 ? XEXP (src, 1) : XEXP (src, 2));
6972 rtx false_rtx = (GET_CODE (XEXP (src, 0)) == NE
6973 ? XEXP (src, 2) : XEXP (src, 1));
6974 rtx term1 = const0_rtx, term2, term3;
6976 if (GET_CODE (true_rtx) == IOR
6977 && rtx_equal_p (XEXP (true_rtx, 0), false_rtx))
6978 term1 = false_rtx, true_rtx = XEXP (true_rtx, 1), false_rtx = const0_rtx;
6979 else if (GET_CODE (true_rtx) == IOR
6980 && rtx_equal_p (XEXP (true_rtx, 1), false_rtx))
6981 term1 = false_rtx, true_rtx = XEXP (true_rtx, 0), false_rtx = const0_rtx;
6982 else if (GET_CODE (false_rtx) == IOR
6983 && rtx_equal_p (XEXP (false_rtx, 0), true_rtx))
6984 term1 = true_rtx, false_rtx = XEXP (false_rtx, 1), true_rtx = const0_rtx;
6985 else if (GET_CODE (false_rtx) == IOR
6986 && rtx_equal_p (XEXP (false_rtx, 1), true_rtx))
6987 term1 = true_rtx, false_rtx = XEXP (false_rtx, 0), true_rtx = const0_rtx;
6989 term2 = simplify_gen_binary (AND, int_mode,
6990 XEXP (XEXP (src, 0), 0), true_rtx);
6991 term3 = simplify_gen_binary (AND, int_mode,
6992 simplify_gen_unary (NOT, int_mode,
6993 XEXP (XEXP (src, 0), 0),
6994 int_mode),
6995 false_rtx);
6997 SUBST (SET_SRC (x),
6998 simplify_gen_binary (IOR, int_mode,
6999 simplify_gen_binary (IOR, int_mode,
7000 term1, term2),
7001 term3));
7003 src = SET_SRC (x);
7006 /* If either SRC or DEST is a CLOBBER of (const_int 0), make this
7007 whole thing fail. */
7008 if (GET_CODE (src) == CLOBBER && XEXP (src, 0) == const0_rtx)
7009 return src;
7010 else if (GET_CODE (dest) == CLOBBER && XEXP (dest, 0) == const0_rtx)
7011 return dest;
7012 else
7013 /* Convert this into a field assignment operation, if possible. */
7014 return make_field_assignment (x);
7017 /* Simplify X, an AND, IOR, or XOR operation, and return the simplified
7018 result. */
7020 static rtx
7021 simplify_logical (rtx x)
7023 rtx op0 = XEXP (x, 0);
7024 rtx op1 = XEXP (x, 1);
7025 scalar_int_mode mode;
7027 switch (GET_CODE (x))
7029 case AND:
7030 /* We can call simplify_and_const_int only if we don't lose
7031 any (sign) bits when converting INTVAL (op1) to
7032 "unsigned HOST_WIDE_INT". */
7033 if (is_a <scalar_int_mode> (GET_MODE (x), &mode)
7034 && CONST_INT_P (op1)
7035 && (HWI_COMPUTABLE_MODE_P (mode)
7036 || INTVAL (op1) > 0))
7038 x = simplify_and_const_int (x, mode, op0, INTVAL (op1));
7039 if (GET_CODE (x) != AND)
7040 return x;
7042 op0 = XEXP (x, 0);
7043 op1 = XEXP (x, 1);
7046 /* If we have any of (and (ior A B) C) or (and (xor A B) C),
7047 apply the distributive law and then the inverse distributive
7048 law to see if things simplify. */
7049 if (GET_CODE (op0) == IOR || GET_CODE (op0) == XOR)
7051 rtx result = distribute_and_simplify_rtx (x, 0);
7052 if (result)
7053 return result;
7055 if (GET_CODE (op1) == IOR || GET_CODE (op1) == XOR)
7057 rtx result = distribute_and_simplify_rtx (x, 1);
7058 if (result)
7059 return result;
7061 break;
7063 case IOR:
7064 /* If we have (ior (and A B) C), apply the distributive law and then
7065 the inverse distributive law to see if things simplify. */
7067 if (GET_CODE (op0) == AND)
7069 rtx result = distribute_and_simplify_rtx (x, 0);
7070 if (result)
7071 return result;
7074 if (GET_CODE (op1) == AND)
7076 rtx result = distribute_and_simplify_rtx (x, 1);
7077 if (result)
7078 return result;
7080 break;
7082 default:
7083 gcc_unreachable ();
7086 return x;
7089 /* We consider ZERO_EXTRACT, SIGN_EXTRACT, and SIGN_EXTEND as "compound
7090 operations" because they can be replaced with two more basic operations.
7091 ZERO_EXTEND is also considered "compound" because it can be replaced with
7092 an AND operation, which is simpler, though only one operation.
7094 The function expand_compound_operation is called with an rtx expression
7095 and will convert it to the appropriate shifts and AND operations,
7096 simplifying at each stage.
7098 The function make_compound_operation is called to convert an expression
7099 consisting of shifts and ANDs into the equivalent compound expression.
7100 It is the inverse of this function, loosely speaking. */
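/* For illustration (hypothetical registers, 32-bit SImode, 8-bit QImode):
   expand_compound_operation rewrites (zero_extend:SI (reg:QI R)) as,
   roughly, (and:SI (subreg:SI (reg:QI R) 0) (const_int 255)), possibly
   going through the pair of shifts described below, and
   make_compound_operation performs the reverse rewriting.  */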
7102 static rtx
7103 expand_compound_operation (rtx x)
7105 unsigned HOST_WIDE_INT pos = 0, len;
7106 int unsignedp = 0;
7107 unsigned int modewidth;
7108 rtx tem;
7109 scalar_int_mode inner_mode;
7111 switch (GET_CODE (x))
7113 case ZERO_EXTEND:
7114 unsignedp = 1;
7115 /* FALLTHRU */
7116 case SIGN_EXTEND:
7117 /* We can't necessarily use a const_int for a multiword mode;
7118 it depends on implicitly extending the value.
7119 Since we don't know the right way to extend it,
7120 we can't tell whether the implicit way is right.
7122 Even for a mode that is no wider than a const_int,
7123 we can't win, because we need to sign extend one of its bits through
7124 the rest of it, and we don't know which bit. */
7125 if (CONST_INT_P (XEXP (x, 0)))
7126 return x;
7128 /* Reject modes that aren't scalar integers because turning vector
7129 or complex modes into shifts causes problems. */
7130 if (!is_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)), &inner_mode))
7131 return x;
7133 /* Return if (subreg:MODE FROM 0) is not a safe replacement for
7134 (zero_extend:MODE FROM) or (sign_extend:MODE FROM). It is safe for any MEM
7135 because (SUBREG (MEM...)) is guaranteed to cause the MEM to be
7136 reloaded. If not for that, MEM's would very rarely be safe.
7138 Reject modes bigger than a word, because we might not be able
7139 to reference a two-register group starting with an arbitrary register
7140 (and currently gen_lowpart might crash for a SUBREG). */
7142 if (GET_MODE_SIZE (inner_mode) > UNITS_PER_WORD)
7143 return x;
7145 len = GET_MODE_PRECISION (inner_mode);
7146 /* If the inner object has VOIDmode (the only way this can happen
7147 is if it is an ASM_OPERANDS), we can't do anything since we don't
7148 know how much masking to do. */
7149 if (len == 0)
7150 return x;
7152 break;
7154 case ZERO_EXTRACT:
7155 unsignedp = 1;
7157 /* fall through */
7159 case SIGN_EXTRACT:
7160 /* If the operand is a CLOBBER, just return it. */
7161 if (GET_CODE (XEXP (x, 0)) == CLOBBER)
7162 return XEXP (x, 0);
7164 if (!CONST_INT_P (XEXP (x, 1))
7165 || !CONST_INT_P (XEXP (x, 2)))
7166 return x;
7168 /* Reject modes that aren't scalar integers because turning vector
7169 or complex modes into shifts causes problems. */
7170 if (!is_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)), &inner_mode))
7171 return x;
7173 len = INTVAL (XEXP (x, 1));
7174 pos = INTVAL (XEXP (x, 2));
7176 /* This should stay within the object being extracted, fail otherwise. */
7177 if (len + pos > GET_MODE_PRECISION (inner_mode))
7178 return x;
7180 if (BITS_BIG_ENDIAN)
7181 pos = GET_MODE_PRECISION (inner_mode) - len - pos;
7183 break;
7185 default:
7186 return x;
7189 /* We've rejected non-scalar operations by now. */
7190 scalar_int_mode mode = as_a <scalar_int_mode> (GET_MODE (x));
7192 /* Convert sign extension to zero extension, if we know that the high
7193 bit is not set, as this is easier to optimize. It will be converted
7194 back to a cheaper alternative in make_extraction. */
7195 if (GET_CODE (x) == SIGN_EXTEND
7196 && HWI_COMPUTABLE_MODE_P (mode)
7197 && ((nonzero_bits (XEXP (x, 0), inner_mode)
7198 & ~(((unsigned HOST_WIDE_INT) GET_MODE_MASK (inner_mode)) >> 1))
7199 == 0))
7201 rtx temp = gen_rtx_ZERO_EXTEND (mode, XEXP (x, 0));
7202 rtx temp2 = expand_compound_operation (temp);
7204 /* Make sure this is a profitable operation. */
7205 if (set_src_cost (x, mode, optimize_this_for_speed_p)
7206 > set_src_cost (temp2, mode, optimize_this_for_speed_p))
7207 return temp2;
7208 else if (set_src_cost (x, mode, optimize_this_for_speed_p)
7209 > set_src_cost (temp, mode, optimize_this_for_speed_p))
7210 return temp;
7211 else
7212 return x;
7215 /* We can optimize some special cases of ZERO_EXTEND. */
7216 if (GET_CODE (x) == ZERO_EXTEND)
7218 /* (zero_extend:DI (truncate:SI foo:DI)) is just foo:DI if we
7219 know that the last value didn't have any inappropriate bits
7220 set. */
7221 if (GET_CODE (XEXP (x, 0)) == TRUNCATE
7222 && GET_MODE (XEXP (XEXP (x, 0), 0)) == mode
7223 && HWI_COMPUTABLE_MODE_P (mode)
7224 && (nonzero_bits (XEXP (XEXP (x, 0), 0), mode)
7225 & ~GET_MODE_MASK (inner_mode)) == 0)
7226 return XEXP (XEXP (x, 0), 0);
7228 /* Likewise for (zero_extend:DI (subreg:SI foo:DI 0)). */
7229 if (GET_CODE (XEXP (x, 0)) == SUBREG
7230 && GET_MODE (SUBREG_REG (XEXP (x, 0))) == mode
7231 && subreg_lowpart_p (XEXP (x, 0))
7232 && HWI_COMPUTABLE_MODE_P (mode)
7233 && (nonzero_bits (SUBREG_REG (XEXP (x, 0)), mode)
7234 & ~GET_MODE_MASK (inner_mode)) == 0)
7235 return SUBREG_REG (XEXP (x, 0));
7237 /* (zero_extend:DI (truncate:SI foo:DI)) is just foo:DI when foo
7238 is a comparison and STORE_FLAG_VALUE permits. This is like
7239 the first case, but it works even when MODE is larger
7240 than HOST_WIDE_INT. */
7241 if (GET_CODE (XEXP (x, 0)) == TRUNCATE
7242 && GET_MODE (XEXP (XEXP (x, 0), 0)) == mode
7243 && COMPARISON_P (XEXP (XEXP (x, 0), 0))
7244 && GET_MODE_PRECISION (inner_mode) <= HOST_BITS_PER_WIDE_INT
7245 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (inner_mode)) == 0)
7246 return XEXP (XEXP (x, 0), 0);
7248 /* Likewise for (zero_extend:DI (subreg:SI foo:DI 0)). */
7249 if (GET_CODE (XEXP (x, 0)) == SUBREG
7250 && GET_MODE (SUBREG_REG (XEXP (x, 0))) == mode
7251 && subreg_lowpart_p (XEXP (x, 0))
7252 && COMPARISON_P (SUBREG_REG (XEXP (x, 0)))
7253 && GET_MODE_PRECISION (inner_mode) <= HOST_BITS_PER_WIDE_INT
7254 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (inner_mode)) == 0)
7255 return SUBREG_REG (XEXP (x, 0));
7259 /* If we reach here, we want to return a pair of shifts. The inner
7260 shift is a left shift of BITSIZE - POS - LEN bits. The outer
7261 shift is a right shift of BITSIZE - LEN bits. It is arithmetic or
7262 logical depending on the value of UNSIGNEDP.
7264 If this was a ZERO_EXTEND or ZERO_EXTRACT, this pair of shifts will be
7265 converted into an AND of a shift.
7267 We must check for the case where the left shift would have a negative
7268 count. This can happen in a case like (x >> 31) & 255 on machines
7269 that can't shift by a constant. On those machines, we would first
7270 combine the shift with the AND to produce a variable-position
7271 extraction. Then the constant of 31 would be substituted in
7272 to produce such a position. */
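/* Worked example (illustrative numbers): extracting LEN = 8 bits at
   POS = 4 from a 32-bit value gives BITSIZE - POS - LEN = 20 and
   BITSIZE - LEN = 24, so
     (sign_extract:SI X (const_int 8) (const_int 4))
       -> (ashiftrt:SI (ashift:SI X (const_int 20)) (const_int 24));
   the zero_extract form uses lshiftrt instead and then folds into an AND
   of a shift.  */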
7274 modewidth = GET_MODE_PRECISION (mode);
7275 if (modewidth >= pos + len)
7277 tem = gen_lowpart (mode, XEXP (x, 0));
7278 if (!tem || GET_CODE (tem) == CLOBBER)
7279 return x;
7280 tem = simplify_shift_const (NULL_RTX, ASHIFT, mode,
7281 tem, modewidth - pos - len);
7282 tem = simplify_shift_const (NULL_RTX, unsignedp ? LSHIFTRT : ASHIFTRT,
7283 mode, tem, modewidth - len);
7285 else if (unsignedp && len < HOST_BITS_PER_WIDE_INT)
7286 tem = simplify_and_const_int (NULL_RTX, mode,
7287 simplify_shift_const (NULL_RTX, LSHIFTRT,
7288 mode, XEXP (x, 0),
7289 pos),
7290 (HOST_WIDE_INT_1U << len) - 1);
7291 else
7292 /* Any other cases we can't handle. */
7293 return x;
7295 /* If we couldn't do this for some reason, return the original
7296 expression. */
7297 if (GET_CODE (tem) == CLOBBER)
7298 return x;
7300 return tem;
7303 /* X is a SET which contains an assignment of one object into
7304 a part of another (such as a bit-field assignment, STRICT_LOW_PART,
7305 or certain SUBREGS). If possible, convert it into a series of
7306 logical operations.
7308 We half-heartedly support variable positions, but do not at all
7309 support variable lengths. */
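/* Sketch of the rewrite performed below (illustrative operands, with
   BITS_BIG_ENDIAN clear): storing VAL into a 4-bit field at bit 8 of R,
     (set (zero_extract:SI R (const_int 4) (const_int 8)) VAL)
   becomes
     (set R (ior:SI (and:SI (not:SI (ashift:SI (const_int 15) (const_int 8))) R)
                    (ashift:SI (and:SI VAL (const_int 15)) (const_int 8)))).  */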
7311 static const_rtx
7312 expand_field_assignment (const_rtx x)
7314 rtx inner;
7315 rtx pos; /* Always counts from low bit. */
7316 int len;
7317 rtx mask, cleared, masked;
7318 scalar_int_mode compute_mode;
7320 /* Loop until we find something we can't simplify. */
7321 while (1)
7323 if (GET_CODE (SET_DEST (x)) == STRICT_LOW_PART
7324 && GET_CODE (XEXP (SET_DEST (x), 0)) == SUBREG)
7326 inner = SUBREG_REG (XEXP (SET_DEST (x), 0));
7327 len = GET_MODE_PRECISION (GET_MODE (XEXP (SET_DEST (x), 0)));
7328 pos = GEN_INT (subreg_lsb (XEXP (SET_DEST (x), 0)));
7330 else if (GET_CODE (SET_DEST (x)) == ZERO_EXTRACT
7331 && CONST_INT_P (XEXP (SET_DEST (x), 1)))
7333 inner = XEXP (SET_DEST (x), 0);
7334 len = INTVAL (XEXP (SET_DEST (x), 1));
7335 pos = XEXP (SET_DEST (x), 2);
7337 /* A constant position should stay within the width of INNER. */
7338 if (CONST_INT_P (pos)
7339 && INTVAL (pos) + len > GET_MODE_PRECISION (GET_MODE (inner)))
7340 break;
7342 if (BITS_BIG_ENDIAN)
7344 if (CONST_INT_P (pos))
7345 pos = GEN_INT (GET_MODE_PRECISION (GET_MODE (inner)) - len
7346 - INTVAL (pos));
7347 else if (GET_CODE (pos) == MINUS
7348 && CONST_INT_P (XEXP (pos, 1))
7349 && (INTVAL (XEXP (pos, 1))
7350 == GET_MODE_PRECISION (GET_MODE (inner)) - len))
7351 /* If position is ADJUST - X, new position is X. */
7352 pos = XEXP (pos, 0);
7353 else
7355 HOST_WIDE_INT prec = GET_MODE_PRECISION (GET_MODE (inner));
7356 pos = simplify_gen_binary (MINUS, GET_MODE (pos),
7357 gen_int_mode (prec - len,
7358 GET_MODE (pos)),
7359 pos);
7364 /* A SUBREG between two modes that occupy the same numbers of words
7365 can be done by moving the SUBREG to the source. */
7366 else if (GET_CODE (SET_DEST (x)) == SUBREG
7367 /* We need SUBREGs to compute nonzero_bits properly. */
7368 && nonzero_sign_valid
7369 && (((GET_MODE_SIZE (GET_MODE (SET_DEST (x)))
7370 + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)
7371 == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (SET_DEST (x))))
7372 + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)))
7374 x = gen_rtx_SET (SUBREG_REG (SET_DEST (x)),
7375 gen_lowpart
7376 (GET_MODE (SUBREG_REG (SET_DEST (x))),
7377 SET_SRC (x)));
7378 continue;
7380 else
7381 break;
7383 while (GET_CODE (inner) == SUBREG && subreg_lowpart_p (inner))
7384 inner = SUBREG_REG (inner);
7386 /* Don't attempt bitwise arithmetic on non-scalar integer modes. */
7387 if (!is_a <scalar_int_mode> (GET_MODE (inner), &compute_mode))
7389 /* Don't do anything for vector or complex integral types. */
7390 if (! FLOAT_MODE_P (GET_MODE (inner)))
7391 break;
7393 /* Try to find an integral mode to pun with. */
7394 if (!int_mode_for_size (GET_MODE_BITSIZE (GET_MODE (inner)), 0)
7395 .exists (&compute_mode))
7396 break;
7398 inner = gen_lowpart (compute_mode, inner);
7401 /* Compute a mask of LEN bits, if we can do this on the host machine. */
7402 if (len >= HOST_BITS_PER_WIDE_INT)
7403 break;
7405 /* Don't try to compute in too wide unsupported modes. */
7406 if (!targetm.scalar_mode_supported_p (compute_mode))
7407 break;
7409 /* Now compute the equivalent expression. Make a copy of INNER
7410 for the SET_DEST in case it is a MEM into which we will substitute;
7411 we don't want shared RTL in that case. */
7412 mask = gen_int_mode ((HOST_WIDE_INT_1U << len) - 1,
7413 compute_mode);
7414 cleared = simplify_gen_binary (AND, compute_mode,
7415 simplify_gen_unary (NOT, compute_mode,
7416 simplify_gen_binary (ASHIFT,
7417 compute_mode,
7418 mask, pos),
7419 compute_mode),
7420 inner);
7421 masked = simplify_gen_binary (ASHIFT, compute_mode,
7422 simplify_gen_binary (
7423 AND, compute_mode,
7424 gen_lowpart (compute_mode, SET_SRC (x)),
7425 mask),
7426 pos);
7428 x = gen_rtx_SET (copy_rtx (inner),
7429 simplify_gen_binary (IOR, compute_mode,
7430 cleared, masked));
7433 return x;
7436 /* Return an RTX for a reference to LEN bits of INNER. If POS_RTX is nonzero,
7437 it is an RTX that represents the (variable) starting position; otherwise,
7438 POS is the (constant) starting bit position. Both are counted from the LSB.
7440 UNSIGNEDP is nonzero for an unsigned reference and zero for a signed one.
7442 IN_DEST is nonzero if this is a reference in the destination of a SET.
7443 This is used when a ZERO_ or SIGN_EXTRACT isn't needed. If nonzero,
7444 a STRICT_LOW_PART will be used, if zero, ZERO_EXTEND or SIGN_EXTEND will
7445 be used.
7447 IN_COMPARE is nonzero if we are in a COMPARE. This means that a
7448 ZERO_EXTRACT should be built even for bits starting at bit 0.
7450 MODE is the desired mode of the result (if IN_DEST == 0).
7452 The result is an RTX for the extraction or NULL_RTX if the target
7453 can't handle it. */
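/* Example of a typical call (illustrative and target-dependent): asking
   for the low 8 bits of a 32-bit register R as a signed SImode value,
     make_extraction (SImode, R, 0, NULL_RTX, 8, 0, 0, 0)
   usually yields (sign_extend:SI (subreg:QI R 0)) rather than an explicit
   SIGN_EXTRACT, because the field width matches QImode.  */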
7455 static rtx
7456 make_extraction (machine_mode mode, rtx inner, HOST_WIDE_INT pos,
7457 rtx pos_rtx, unsigned HOST_WIDE_INT len, int unsignedp,
7458 int in_dest, int in_compare)
7460 /* This mode describes the size of the storage area
7461 to fetch the overall value from. Within that, we
7462 ignore the POS lowest bits, etc. */
7463 machine_mode is_mode = GET_MODE (inner);
7464 machine_mode inner_mode;
7465 scalar_int_mode wanted_inner_mode;
7466 scalar_int_mode wanted_inner_reg_mode = word_mode;
7467 scalar_int_mode pos_mode = word_mode;
7468 machine_mode extraction_mode = word_mode;
7469 rtx new_rtx = 0;
7470 rtx orig_pos_rtx = pos_rtx;
7471 HOST_WIDE_INT orig_pos;
7473 if (pos_rtx && CONST_INT_P (pos_rtx))
7474 pos = INTVAL (pos_rtx), pos_rtx = 0;
7476 if (GET_CODE (inner) == SUBREG
7477 && subreg_lowpart_p (inner)
7478 && (paradoxical_subreg_p (inner)
7479 /* If trying or potentially trying to extract
7480 bits outside of is_mode, don't look through
7481 non-paradoxical SUBREGs. See PR82192. */
7482 || (pos_rtx == NULL_RTX
7483 && pos + len <= GET_MODE_PRECISION (is_mode))))
7485 /* If going from (subreg:SI (mem:QI ...)) to (mem:QI ...),
7486 consider just the QI as the memory to extract from.
7487 The subreg adds or removes high bits; its mode is
7488 irrelevant to the meaning of this extraction,
7489 since POS and LEN count from the lsb. */
7490 if (MEM_P (SUBREG_REG (inner)))
7491 is_mode = GET_MODE (SUBREG_REG (inner));
7492 inner = SUBREG_REG (inner);
7494 else if (GET_CODE (inner) == ASHIFT
7495 && CONST_INT_P (XEXP (inner, 1))
7496 && pos_rtx == 0 && pos == 0
7497 && len > UINTVAL (XEXP (inner, 1)))
7499 /* We're extracting the least significant bits of an rtx
7500 (ashift X (const_int C)), where LEN > C. Extract the
7501 least significant (LEN - C) bits of X, giving an rtx
7502 whose mode is MODE, then shift it left C times. */
7503 new_rtx = make_extraction (mode, XEXP (inner, 0),
7504 0, 0, len - INTVAL (XEXP (inner, 1)),
7505 unsignedp, in_dest, in_compare);
7506 if (new_rtx != 0)
7507 return gen_rtx_ASHIFT (mode, new_rtx, XEXP (inner, 1));
7509 else if (GET_CODE (inner) == TRUNCATE
7510 /* If trying or potentially trying to extract
7511 bits outside of is_mode, don't look through
7512 TRUNCATE. See PR82192. */
7513 && pos_rtx == NULL_RTX
7514 && pos + len <= GET_MODE_PRECISION (is_mode))
7515 inner = XEXP (inner, 0);
7517 inner_mode = GET_MODE (inner);
7519 /* See if this can be done without an extraction. We never can if the
7520 width of the field is not the same as that of some integer mode. For
7521 registers, we can only avoid the extraction if the position is at the
7522 low-order bit and this is either not in the destination or we have the
7523 appropriate STRICT_LOW_PART operation available.
7525 For MEM, we can avoid an extract if the field starts on an appropriate
7526 boundary and we can change the mode of the memory reference. */
7528 scalar_int_mode tmode;
7529 if (int_mode_for_size (len, 1).exists (&tmode)
7530 && ((pos_rtx == 0 && (pos % BITS_PER_WORD) == 0
7531 && !MEM_P (inner)
7532 && (pos == 0 || REG_P (inner))
7533 && (inner_mode == tmode
7534 || !REG_P (inner)
7535 || TRULY_NOOP_TRUNCATION_MODES_P (tmode, inner_mode)
7536 || reg_truncated_to_mode (tmode, inner))
7537 && (! in_dest
7538 || (REG_P (inner)
7539 && have_insn_for (STRICT_LOW_PART, tmode))))
7540 || (MEM_P (inner) && pos_rtx == 0
7541 && (pos
7542 % (STRICT_ALIGNMENT ? GET_MODE_ALIGNMENT (tmode)
7543 : BITS_PER_UNIT)) == 0
7544 /* We can't do this if we are widening INNER_MODE (it
7545 may not be aligned, for one thing). */
7546 && !paradoxical_subreg_p (tmode, inner_mode)
7547 && (inner_mode == tmode
7548 || (! mode_dependent_address_p (XEXP (inner, 0),
7549 MEM_ADDR_SPACE (inner))
7550 && ! MEM_VOLATILE_P (inner))))))
7552 /* If INNER is a MEM, make a new MEM that encompasses just the desired
7553 field. If the original and current mode are the same, we need not
7554 adjust the offset. Otherwise, we do if bytes big endian.
7556 If INNER is not a MEM, get a piece consisting of just the field
7557 of interest (in this case POS % BITS_PER_WORD must be 0). */
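/* For instance (illustrative numbers): extracting LEN = 8 bits at POS = 8
   from a 32-bit MEM uses byte offset POS / BITS_PER_UNIT = 1 when bytes
   are little endian, and (32 - 8 - 8) / BITS_PER_UNIT = 2 when
   BYTES_BIG_ENDIAN.  */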
7559 if (MEM_P (inner))
7561 HOST_WIDE_INT offset;
7563 /* POS counts from lsb, but make OFFSET count in memory order. */
7564 if (BYTES_BIG_ENDIAN)
7565 offset = (GET_MODE_PRECISION (is_mode) - len - pos) / BITS_PER_UNIT;
7566 else
7567 offset = pos / BITS_PER_UNIT;
7569 new_rtx = adjust_address_nv (inner, tmode, offset);
7571 else if (REG_P (inner))
7573 if (tmode != inner_mode)
7575 /* We can't call gen_lowpart in a DEST since we
7576 always want a SUBREG (see below) and it would sometimes
7577 return a new hard register. */
7578 if (pos || in_dest)
7580 unsigned int offset
7581 = subreg_offset_from_lsb (tmode, inner_mode, pos);
7583 /* Avoid creating invalid subregs, for example when
7584 simplifying (x>>32)&255. */
7585 if (!validate_subreg (tmode, inner_mode, inner, offset))
7586 return NULL_RTX;
7588 new_rtx = gen_rtx_SUBREG (tmode, inner, offset);
7590 else
7591 new_rtx = gen_lowpart (tmode, inner);
7593 else
7594 new_rtx = inner;
7596 else
7597 new_rtx = force_to_mode (inner, tmode,
7598 len >= HOST_BITS_PER_WIDE_INT
7599 ? HOST_WIDE_INT_M1U
7600 : (HOST_WIDE_INT_1U << len) - 1, 0);
7602 /* If this extraction is going into the destination of a SET,
7603 make a STRICT_LOW_PART unless we made a MEM. */
7605 if (in_dest)
7606 return (MEM_P (new_rtx) ? new_rtx
7607 : (GET_CODE (new_rtx) != SUBREG
7608 ? gen_rtx_CLOBBER (tmode, const0_rtx)
7609 : gen_rtx_STRICT_LOW_PART (VOIDmode, new_rtx)));
7611 if (mode == tmode)
7612 return new_rtx;
7614 if (CONST_SCALAR_INT_P (new_rtx))
7615 return simplify_unary_operation (unsignedp ? ZERO_EXTEND : SIGN_EXTEND,
7616 mode, new_rtx, tmode);
7618 /* If we know that no extraneous bits are set, and that the high
7619 bit is not set, convert the extraction to the cheaper of
7620 sign and zero extension, which are equivalent in these cases. */
7621 if (flag_expensive_optimizations
7622 && (HWI_COMPUTABLE_MODE_P (tmode)
7623 && ((nonzero_bits (new_rtx, tmode)
7624 & ~(((unsigned HOST_WIDE_INT)GET_MODE_MASK (tmode)) >> 1))
7625 == 0)))
7627 rtx temp = gen_rtx_ZERO_EXTEND (mode, new_rtx);
7628 rtx temp1 = gen_rtx_SIGN_EXTEND (mode, new_rtx);
7630 /* Prefer ZERO_EXTENSION, since it gives more information to
7631 backends. */
7632 if (set_src_cost (temp, mode, optimize_this_for_speed_p)
7633 <= set_src_cost (temp1, mode, optimize_this_for_speed_p))
7634 return temp;
7635 return temp1;
7638 /* Otherwise, sign- or zero-extend unless we already are in the
7639 proper mode. */
7641 return (gen_rtx_fmt_e (unsignedp ? ZERO_EXTEND : SIGN_EXTEND,
7642 mode, new_rtx));
7645 /* Unless this is a COMPARE or we have a funny memory reference,
7646 don't do anything with zero-extending field extracts starting at
7647 the low-order bit since they are simple AND operations. */
7648 if (pos_rtx == 0 && pos == 0 && ! in_dest
7649 && ! in_compare && unsignedp)
7650 return 0;
7652 /* If INNER is a MEM, reject this if we would be spanning bytes or
7653 if the position is not a constant and the length is not 1. In all
7654 other cases, we would only be going outside our object in cases when
7655 an original shift would have been undefined. */
7656 if (MEM_P (inner)
7657 && ((pos_rtx == 0 && pos + len > GET_MODE_PRECISION (is_mode))
7658 || (pos_rtx != 0 && len != 1)))
7659 return 0;
7661 enum extraction_pattern pattern = (in_dest ? EP_insv
7662 : unsignedp ? EP_extzv : EP_extv);
7664 /* If INNER is not from memory, we want it to have the mode of a register
7665 extraction pattern's structure operand, or word_mode if there is no
7666 such pattern. The same applies to extraction_mode and pos_mode
7667 and their respective operands.
7669 For memory, assume that the desired extraction_mode and pos_mode
7670 are the same as for a register operation, since at present we don't
7671 have named patterns for aligned memory structures. */
7672 struct extraction_insn insn;
7673 if (get_best_reg_extraction_insn (&insn, pattern,
7674 GET_MODE_BITSIZE (inner_mode), mode))
7676 wanted_inner_reg_mode = insn.struct_mode.require ();
7677 pos_mode = insn.pos_mode;
7678 extraction_mode = insn.field_mode;
7681 /* Never narrow an object, since that might not be safe. */
7683 if (mode != VOIDmode
7684 && partial_subreg_p (extraction_mode, mode))
7685 extraction_mode = mode;
7687 if (!MEM_P (inner))
7688 wanted_inner_mode = wanted_inner_reg_mode;
7689 else
7691 /* Be careful not to go beyond the extracted object and maintain the
7692 natural alignment of the memory. */
7693 wanted_inner_mode = smallest_int_mode_for_size (len);
7694 while (pos % GET_MODE_BITSIZE (wanted_inner_mode) + len
7695 > GET_MODE_BITSIZE (wanted_inner_mode))
7696 wanted_inner_mode = GET_MODE_WIDER_MODE (wanted_inner_mode).require ();
7699 orig_pos = pos;
7701 if (BITS_BIG_ENDIAN)
7703 /* POS is passed as if BITS_BIG_ENDIAN == 0, so we need to convert it to
7704 BITS_BIG_ENDIAN style. If position is constant, compute new
7705 position. Otherwise, build subtraction.
7706 Note that POS is relative to the mode of the original argument.
7707 If it's a MEM we need to recompute POS relative to that.
7708 However, if we're extracting from (or inserting into) a register,
7709 we want to recompute POS relative to wanted_inner_mode. */
7710 int width = (MEM_P (inner)
7711 ? GET_MODE_BITSIZE (is_mode)
7712 : GET_MODE_BITSIZE (wanted_inner_mode));
7714 if (pos_rtx == 0)
7715 pos = width - len - pos;
7716 else
7717 pos_rtx
7718 = gen_rtx_MINUS (GET_MODE (pos_rtx),
7719 gen_int_mode (width - len, GET_MODE (pos_rtx)),
7720 pos_rtx);
7721 /* POS may be less than 0 now, but we check for that below.
7722 Note that it can only be less than 0 if !MEM_P (inner). */
7725 /* If INNER has a wider mode, and this is a constant extraction, try to
7726 make it smaller and adjust the byte to point to the byte containing
7727 the value. */
7728 if (wanted_inner_mode != VOIDmode
7729 && inner_mode != wanted_inner_mode
7730 && ! pos_rtx
7731 && partial_subreg_p (wanted_inner_mode, is_mode)
7732 && MEM_P (inner)
7733 && ! mode_dependent_address_p (XEXP (inner, 0), MEM_ADDR_SPACE (inner))
7734 && ! MEM_VOLATILE_P (inner))
7736 int offset = 0;
7738 /* The computations below will be correct if the machine is big
7739 endian in both bits and bytes or little endian in bits and bytes.
7740 If it is mixed, we must adjust. */
7742 /* If bytes are big endian and we had a paradoxical SUBREG, we must
7743 adjust OFFSET to compensate. */
7744 if (BYTES_BIG_ENDIAN
7745 && paradoxical_subreg_p (is_mode, inner_mode))
7746 offset -= GET_MODE_SIZE (is_mode) - GET_MODE_SIZE (inner_mode);
7748 /* We can now move to the desired byte. */
7749 offset += (pos / GET_MODE_BITSIZE (wanted_inner_mode))
7750 * GET_MODE_SIZE (wanted_inner_mode);
7751 pos %= GET_MODE_BITSIZE (wanted_inner_mode);
7753 if (BYTES_BIG_ENDIAN != BITS_BIG_ENDIAN
7754 && is_mode != wanted_inner_mode)
7755 offset = (GET_MODE_SIZE (is_mode)
7756 - GET_MODE_SIZE (wanted_inner_mode) - offset);
7758 inner = adjust_address_nv (inner, wanted_inner_mode, offset);
7761 /* If INNER is not memory, get it into the proper mode. If we are changing
7762 its mode, POS must be a constant and smaller than the size of the new
7763 mode. */
7764 else if (!MEM_P (inner))
7766 /* On the LHS, don't create paradoxical subregs implicitly truncating
7767 the register unless TARGET_TRULY_NOOP_TRUNCATION. */
7768 if (in_dest
7769 && !TRULY_NOOP_TRUNCATION_MODES_P (GET_MODE (inner),
7770 wanted_inner_mode))
7771 return NULL_RTX;
7773 if (GET_MODE (inner) != wanted_inner_mode
7774 && (pos_rtx != 0
7775 || orig_pos + len > GET_MODE_BITSIZE (wanted_inner_mode)))
7776 return NULL_RTX;
7778 if (orig_pos < 0)
7779 return NULL_RTX;
7781 inner = force_to_mode (inner, wanted_inner_mode,
7782 pos_rtx
7783 || len + orig_pos >= HOST_BITS_PER_WIDE_INT
7784 ? HOST_WIDE_INT_M1U
7785 : (((HOST_WIDE_INT_1U << len) - 1)
7786 << orig_pos),
7787 0);
7790 /* Adjust mode of POS_RTX, if needed. If we want a wider mode, we
7791 have to zero extend. Otherwise, we can just use a SUBREG.
7793 We dealt with constant rtxes earlier, so pos_rtx cannot
7794 have VOIDmode at this point. */
7795 if (pos_rtx != 0
7796 && (GET_MODE_SIZE (pos_mode)
7797 > GET_MODE_SIZE (as_a <scalar_int_mode> (GET_MODE (pos_rtx)))))
7799 rtx temp = simplify_gen_unary (ZERO_EXTEND, pos_mode, pos_rtx,
7800 GET_MODE (pos_rtx));
7802 /* If we know that no extraneous bits are set, and that the high
7803 bit is not set, convert the extraction to the cheaper one - either
7804 SIGN_EXTENSION or ZERO_EXTENSION, which are equivalent in these
7805 cases. */
7806 if (flag_expensive_optimizations
7807 && (HWI_COMPUTABLE_MODE_P (GET_MODE (pos_rtx))
7808 && ((nonzero_bits (pos_rtx, GET_MODE (pos_rtx))
7809 & ~(((unsigned HOST_WIDE_INT)
7810 GET_MODE_MASK (GET_MODE (pos_rtx)))
7811 >> 1))
7812 == 0)))
7814 rtx temp1 = simplify_gen_unary (SIGN_EXTEND, pos_mode, pos_rtx,
7815 GET_MODE (pos_rtx));
7817 /* Prefer ZERO_EXTENSION, since it gives more information to
7818 backends. */
7819 if (set_src_cost (temp1, pos_mode, optimize_this_for_speed_p)
7820 < set_src_cost (temp, pos_mode, optimize_this_for_speed_p))
7821 temp = temp1;
7823 pos_rtx = temp;
7826 /* Make POS_RTX unless we already have it and it is correct. If we don't
7827 have a POS_RTX but we do have an ORIG_POS_RTX, the latter must
7828 be a CONST_INT. */
7829 if (pos_rtx == 0 && orig_pos_rtx != 0 && INTVAL (orig_pos_rtx) == pos)
7830 pos_rtx = orig_pos_rtx;
7832 else if (pos_rtx == 0)
7833 pos_rtx = GEN_INT (pos);
7835 /* Make the required operation. See if we can use an existing rtx. */
7836 new_rtx = gen_rtx_fmt_eee (unsignedp ? ZERO_EXTRACT : SIGN_EXTRACT,
7837 extraction_mode, inner, GEN_INT (len), pos_rtx);
7838 if (! in_dest)
7839 new_rtx = gen_lowpart (mode, new_rtx);
7841 return new_rtx;
7844 /* See if X (of mode MODE) contains an ASHIFT of COUNT or more bits that
7845 can be commuted with any other operations in X. Return X without
7846 that shift if so. */
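/* Illustrative example: with COUNT = 3,
   (plus:SI (ashift:SI A (const_int 3)) (const_int 8)) is returned as
   (plus:SI A (const_int 1)), i.e. the original value shifted right by
   COUNT; the low COUNT bits of the constant must be zero for this to be
   valid, which the cases below check.  */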
7848 static rtx
7849 extract_left_shift (scalar_int_mode mode, rtx x, int count)
7851 enum rtx_code code = GET_CODE (x);
7852 rtx tem;
7854 switch (code)
7856 case ASHIFT:
7857 /* This is the shift itself. If it is wide enough, we will return
7858 either the value being shifted if the shift count is equal to
7859 COUNT or a shift for the difference. */
7860 if (CONST_INT_P (XEXP (x, 1))
7861 && INTVAL (XEXP (x, 1)) >= count)
7862 return simplify_shift_const (NULL_RTX, ASHIFT, mode, XEXP (x, 0),
7863 INTVAL (XEXP (x, 1)) - count);
7864 break;
7866 case NEG: case NOT:
7867 if ((tem = extract_left_shift (mode, XEXP (x, 0), count)) != 0)
7868 return simplify_gen_unary (code, mode, tem, mode);
7870 break;
7872 case PLUS: case IOR: case XOR: case AND:
7873 /* If we can safely shift this constant and we find the inner shift,
7874 make a new operation. */
7875 if (CONST_INT_P (XEXP (x, 1))
7876 && (UINTVAL (XEXP (x, 1))
7877 & (((HOST_WIDE_INT_1U << count)) - 1)) == 0
7878 && (tem = extract_left_shift (mode, XEXP (x, 0), count)) != 0)
7880 HOST_WIDE_INT val = INTVAL (XEXP (x, 1)) >> count;
7881 return simplify_gen_binary (code, mode, tem,
7882 gen_int_mode (val, mode));
7884 break;
7886 default:
7887 break;
7890 return 0;
7893 /* Subroutine of make_compound_operation. *X_PTR is the rtx at the current
7894 level of the expression and MODE is its mode. IN_CODE is as for
7895 make_compound_operation. *NEXT_CODE_PTR is the value of IN_CODE
7896 that should be used when recursing on operands of *X_PTR.
7898 There are two possible actions:
7900 - Return null. This tells the caller to recurse on *X_PTR with IN_CODE
7901 equal to *NEXT_CODE_PTR, after which *X_PTR holds the final value.
7903 - Return a new rtx, which the caller returns directly. */
7905 static rtx
7906 make_compound_operation_int (scalar_int_mode mode, rtx *x_ptr,
7907 enum rtx_code in_code,
7908 enum rtx_code *next_code_ptr)
7910 rtx x = *x_ptr;
7911 enum rtx_code next_code = *next_code_ptr;
7912 enum rtx_code code = GET_CODE (x);
7913 int mode_width = GET_MODE_PRECISION (mode);
7914 rtx rhs, lhs;
7915 rtx new_rtx = 0;
7916 int i;
7917 rtx tem;
7918 scalar_int_mode inner_mode;
7919 bool equality_comparison = false;
7921 if (in_code == EQ)
7923 equality_comparison = true;
7924 in_code = COMPARE;
7927 /* Process depending on the code of this operation. If NEW_RTX is set
7928 nonzero, it will be returned. */
7930 switch (code)
7932 case ASHIFT:
7933 /* Convert shifts by constants into multiplications if inside
7934 an address. */
7935 if (in_code == MEM && CONST_INT_P (XEXP (x, 1))
7936 && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT
7937 && INTVAL (XEXP (x, 1)) >= 0)
7939 HOST_WIDE_INT count = INTVAL (XEXP (x, 1));
7940 HOST_WIDE_INT multval = HOST_WIDE_INT_1 << count;
7942 new_rtx = make_compound_operation (XEXP (x, 0), next_code);
7943 if (GET_CODE (new_rtx) == NEG)
7945 new_rtx = XEXP (new_rtx, 0);
7946 multval = -multval;
7948 multval = trunc_int_for_mode (multval, mode);
7949 new_rtx = gen_rtx_MULT (mode, new_rtx, gen_int_mode (multval, mode));
7951 break;
7953 case PLUS:
7954 lhs = XEXP (x, 0);
7955 rhs = XEXP (x, 1);
7956 lhs = make_compound_operation (lhs, next_code);
7957 rhs = make_compound_operation (rhs, next_code);
7958 if (GET_CODE (lhs) == MULT && GET_CODE (XEXP (lhs, 0)) == NEG)
7960 tem = simplify_gen_binary (MULT, mode, XEXP (XEXP (lhs, 0), 0),
7961 XEXP (lhs, 1));
7962 new_rtx = simplify_gen_binary (MINUS, mode, rhs, tem);
7964 else if (GET_CODE (lhs) == MULT
7965 && (CONST_INT_P (XEXP (lhs, 1)) && INTVAL (XEXP (lhs, 1)) < 0))
7967 tem = simplify_gen_binary (MULT, mode, XEXP (lhs, 0),
7968 simplify_gen_unary (NEG, mode,
7969 XEXP (lhs, 1),
7970 mode));
7971 new_rtx = simplify_gen_binary (MINUS, mode, rhs, tem);
7973 else
7975 SUBST (XEXP (x, 0), lhs);
7976 SUBST (XEXP (x, 1), rhs);
7978 maybe_swap_commutative_operands (x);
7979 return x;
7981 case MINUS:
7982 lhs = XEXP (x, 0);
7983 rhs = XEXP (x, 1);
7984 lhs = make_compound_operation (lhs, next_code);
7985 rhs = make_compound_operation (rhs, next_code);
7986 if (GET_CODE (rhs) == MULT && GET_CODE (XEXP (rhs, 0)) == NEG)
7988 tem = simplify_gen_binary (MULT, mode, XEXP (XEXP (rhs, 0), 0),
7989 XEXP (rhs, 1));
7990 return simplify_gen_binary (PLUS, mode, tem, lhs);
7992 else if (GET_CODE (rhs) == MULT
7993 && (CONST_INT_P (XEXP (rhs, 1)) && INTVAL (XEXP (rhs, 1)) < 0))
7995 tem = simplify_gen_binary (MULT, mode, XEXP (rhs, 0),
7996 simplify_gen_unary (NEG, mode,
7997 XEXP (rhs, 1),
7998 mode));
7999 return simplify_gen_binary (PLUS, mode, tem, lhs);
8001 else
8003 SUBST (XEXP (x, 0), lhs);
8004 SUBST (XEXP (x, 1), rhs);
8005 return x;
8008 case AND:
8009 /* If the second operand is not a constant, we can't do anything
8010 with it. */
8011 if (!CONST_INT_P (XEXP (x, 1)))
8012 break;
8014 /* If the constant is a power of two minus one and the first operand
8015 is a logical right shift, make an extraction. */
8016 if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
8017 && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
8019 new_rtx = make_compound_operation (XEXP (XEXP (x, 0), 0), next_code);
8020 new_rtx = make_extraction (mode, new_rtx, 0, XEXP (XEXP (x, 0), 1),
8021 i, 1, 0, in_code == COMPARE);
8024 /* Same as previous, but for (subreg (lshiftrt ...)) in first op. */
8025 else if (GET_CODE (XEXP (x, 0)) == SUBREG
8026 && subreg_lowpart_p (XEXP (x, 0))
8027 && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (XEXP (x, 0))),
8028 &inner_mode)
8029 && GET_CODE (SUBREG_REG (XEXP (x, 0))) == LSHIFTRT
8030 && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
8032 rtx inner_x0 = SUBREG_REG (XEXP (x, 0));
8033 new_rtx = make_compound_operation (XEXP (inner_x0, 0), next_code);
8034 new_rtx = make_extraction (inner_mode, new_rtx, 0,
8035 XEXP (inner_x0, 1),
8036 i, 1, 0, in_code == COMPARE);
8038 /* If we narrowed the mode when dropping the subreg, then we lose. */
8039 if (GET_MODE_SIZE (inner_mode) < GET_MODE_SIZE (mode))
8040 new_rtx = NULL;
8042 /* If that didn't give anything, see if the AND simplifies on
8043 its own. */
8044 if (!new_rtx && i >= 0)
8046 new_rtx = make_compound_operation (XEXP (x, 0), next_code);
8047 new_rtx = make_extraction (mode, new_rtx, 0, NULL_RTX, i, 1,
8048 0, in_code == COMPARE);
8051 /* Same as previous, but for (xor/ior (lshiftrt...) (lshiftrt...)). */
8052 else if ((GET_CODE (XEXP (x, 0)) == XOR
8053 || GET_CODE (XEXP (x, 0)) == IOR)
8054 && GET_CODE (XEXP (XEXP (x, 0), 0)) == LSHIFTRT
8055 && GET_CODE (XEXP (XEXP (x, 0), 1)) == LSHIFTRT
8056 && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
8058 /* Apply the distributive law, and then try to make extractions. */
8059 new_rtx = gen_rtx_fmt_ee (GET_CODE (XEXP (x, 0)), mode,
8060 gen_rtx_AND (mode, XEXP (XEXP (x, 0), 0),
8061 XEXP (x, 1)),
8062 gen_rtx_AND (mode, XEXP (XEXP (x, 0), 1),
8063 XEXP (x, 1)));
8064 new_rtx = make_compound_operation (new_rtx, in_code);
8067 /* If we have (and (rotate X C) M) and C is larger than the number
8068 of bits in M, this is an extraction. */
8070 else if (GET_CODE (XEXP (x, 0)) == ROTATE
8071 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
8072 && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0
8073 && i <= INTVAL (XEXP (XEXP (x, 0), 1)))
8075 new_rtx = make_compound_operation (XEXP (XEXP (x, 0), 0), next_code);
8076 new_rtx = make_extraction (mode, new_rtx,
8077 (GET_MODE_PRECISION (mode)
8078 - INTVAL (XEXP (XEXP (x, 0), 1))),
8079 NULL_RTX, i, 1, 0, in_code == COMPARE);
8082 /* On machines without logical shifts, if the operand of the AND is
8083 a logical shift and our mask turns off all the propagated sign
8084 bits, we can replace the logical shift with an arithmetic shift. */
8085 else if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
8086 && !have_insn_for (LSHIFTRT, mode)
8087 && have_insn_for (ASHIFTRT, mode)
8088 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
8089 && INTVAL (XEXP (XEXP (x, 0), 1)) >= 0
8090 && INTVAL (XEXP (XEXP (x, 0), 1)) < HOST_BITS_PER_WIDE_INT
8091 && mode_width <= HOST_BITS_PER_WIDE_INT)
8093 unsigned HOST_WIDE_INT mask = GET_MODE_MASK (mode);
8095 mask >>= INTVAL (XEXP (XEXP (x, 0), 1));
8096 if ((INTVAL (XEXP (x, 1)) & ~mask) == 0)
8097 SUBST (XEXP (x, 0),
8098 gen_rtx_ASHIFTRT (mode,
8099 make_compound_operation (XEXP (XEXP (x,
8100 0),
8101 0),
8102 next_code),
8103 XEXP (XEXP (x, 0), 1)));
8106 /* If the constant is one less than a power of two, this might be
8107 representable by an extraction even if no shift is present.
8108 If it doesn't end up being a ZERO_EXTEND, we will ignore it unless
8109 we are in a COMPARE. */
8110 else if ((i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
8111 new_rtx = make_extraction (mode,
8112 make_compound_operation (XEXP (x, 0),
8113 next_code),
8114 0, NULL_RTX, i, 1, 0, in_code == COMPARE);
8116 /* If we are in a comparison and this is an AND with a power of two,
8117 convert this into the appropriate bit extract. */
8118 else if (in_code == COMPARE
8119 && (i = exact_log2 (UINTVAL (XEXP (x, 1)))) >= 0
8120 && (equality_comparison || i < GET_MODE_PRECISION (mode) - 1))
8121 new_rtx = make_extraction (mode,
8122 make_compound_operation (XEXP (x, 0),
8123 next_code),
8124 i, NULL_RTX, 1, 1, 0, 1);
8126 /* If one operand is a paradoxical subreg of a register or memory and
8127 the constant (limited to the smaller mode) has only zero bits where
8128 the sub expression has known zero bits, this can be expressed as
8129 a zero_extend. */
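/* E.g. (illustrative): (and:SI (subreg:SI (reg:QI R) 0) (const_int 255))
   keeps every bit the QImode value can possibly have set, so it can be
   rewritten as (zero_extend:SI (reg:QI R)).  */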
8130 else if (GET_CODE (XEXP (x, 0)) == SUBREG)
8132 rtx sub;
8134 sub = XEXP (XEXP (x, 0), 0);
8135 machine_mode sub_mode = GET_MODE (sub);
8136 if ((REG_P (sub) || MEM_P (sub))
8137 && GET_MODE_PRECISION (sub_mode) < mode_width)
8139 unsigned HOST_WIDE_INT mode_mask = GET_MODE_MASK (sub_mode);
8140 unsigned HOST_WIDE_INT mask;
8142 /* Original AND constant with all the known zero bits set. */
8143 mask = UINTVAL (XEXP (x, 1)) | (~nonzero_bits (sub, sub_mode));
8144 if ((mask & mode_mask) == mode_mask)
8146 new_rtx = make_compound_operation (sub, next_code);
8147 new_rtx = make_extraction (mode, new_rtx, 0, 0,
8148 GET_MODE_PRECISION (sub_mode),
8149 1, 0, in_code == COMPARE);
8154 break;
8156 case LSHIFTRT:
8157 /* If the sign bit is known to be zero, replace this with an
8158 arithmetic shift. */
8159 if (have_insn_for (ASHIFTRT, mode)
8160 && ! have_insn_for (LSHIFTRT, mode)
8161 && mode_width <= HOST_BITS_PER_WIDE_INT
8162 && (nonzero_bits (XEXP (x, 0), mode) & (1 << (mode_width - 1))) == 0)
8164 new_rtx = gen_rtx_ASHIFTRT (mode,
8165 make_compound_operation (XEXP (x, 0),
8166 next_code),
8167 XEXP (x, 1));
8168 break;
8171 /* fall through */
8173 case ASHIFTRT:
8174 lhs = XEXP (x, 0);
8175 rhs = XEXP (x, 1);
8177 /* If we have (ashiftrt (ashift foo C1) C2) with C2 >= C1,
8178 this is a SIGN_EXTRACT. */
8179 if (CONST_INT_P (rhs)
8180 && GET_CODE (lhs) == ASHIFT
8181 && CONST_INT_P (XEXP (lhs, 1))
8182 && INTVAL (rhs) >= INTVAL (XEXP (lhs, 1))
8183 && INTVAL (XEXP (lhs, 1)) >= 0
8184 && INTVAL (rhs) < mode_width)
8186 new_rtx = make_compound_operation (XEXP (lhs, 0), next_code);
8187 new_rtx = make_extraction (mode, new_rtx,
8188 INTVAL (rhs) - INTVAL (XEXP (lhs, 1)),
8189 NULL_RTX, mode_width - INTVAL (rhs),
8190 code == LSHIFTRT, 0, in_code == COMPARE);
8191 break;
8194 /* See if we have operations between an ASHIFTRT and an ASHIFT.
8195 If so, try to merge the shifts into a SIGN_EXTEND. We could
8196 also do this for some cases of SIGN_EXTRACT, but it doesn't
8197 seem worth the effort; the case checked for occurs on Alpha. */
8199 if (!OBJECT_P (lhs)
8200 && ! (GET_CODE (lhs) == SUBREG
8201 && (OBJECT_P (SUBREG_REG (lhs))))
8202 && CONST_INT_P (rhs)
8203 && INTVAL (rhs) >= 0
8204 && INTVAL (rhs) < HOST_BITS_PER_WIDE_INT
8205 && INTVAL (rhs) < mode_width
8206 && (new_rtx = extract_left_shift (mode, lhs, INTVAL (rhs))) != 0)
8207 new_rtx = make_extraction (mode, make_compound_operation (new_rtx,
8208 next_code),
8209 0, NULL_RTX, mode_width - INTVAL (rhs),
8210 code == LSHIFTRT, 0, in_code == COMPARE);
8212 break;
8214 case SUBREG:
8215 /* Call ourselves recursively on the inner expression. If we are
8216 narrowing the object and it has a different RTL code from
8217 what it originally did, do this SUBREG as a force_to_mode. */
8219 rtx inner = SUBREG_REG (x), simplified;
8220 enum rtx_code subreg_code = in_code;
8222 /* If the SUBREG is masking of a logical right shift,
8223 make an extraction. */
8224 if (GET_CODE (inner) == LSHIFTRT
8225 && is_a <scalar_int_mode> (GET_MODE (inner), &inner_mode)
8226 && GET_MODE_SIZE (mode) < GET_MODE_SIZE (inner_mode)
8227 && CONST_INT_P (XEXP (inner, 1))
8228 && UINTVAL (XEXP (inner, 1)) < GET_MODE_PRECISION (inner_mode)
8229 && subreg_lowpart_p (x))
8231 new_rtx = make_compound_operation (XEXP (inner, 0), next_code);
8232 int width = GET_MODE_PRECISION (inner_mode)
8233 - INTVAL (XEXP (inner, 1));
8234 if (width > mode_width)
8235 width = mode_width;
8236 new_rtx = make_extraction (mode, new_rtx, 0, XEXP (inner, 1),
8237 width, 1, 0, in_code == COMPARE);
8238 break;
8241 /* If in_code is COMPARE, it isn't always safe to pass it through
8242 to the recursive make_compound_operation call. */
8243 if (subreg_code == COMPARE
8244 && (!subreg_lowpart_p (x)
8245 || GET_CODE (inner) == SUBREG
8246 /* (subreg:SI (and:DI (reg:DI) (const_int 0x800000000)) 0)
8247 is (const_int 0), rather than
8248 (subreg:SI (lshiftrt:DI (reg:DI) (const_int 35)) 0).
8249 Similarly (subreg:QI (and:SI (reg:SI) (const_int 0x80)) 0)
8250 for non-equality comparisons against 0 is not equivalent
8251 to (subreg:QI (lshiftrt:SI (reg:SI) (const_int 7)) 0). */
8252 || (GET_CODE (inner) == AND
8253 && CONST_INT_P (XEXP (inner, 1))
8254 && partial_subreg_p (x)
8255 && exact_log2 (UINTVAL (XEXP (inner, 1)))
8256 >= GET_MODE_BITSIZE (mode) - 1)))
8257 subreg_code = SET;
8259 tem = make_compound_operation (inner, subreg_code);
8261 simplified
8262 = simplify_subreg (mode, tem, GET_MODE (inner), SUBREG_BYTE (x));
8263 if (simplified)
8264 tem = simplified;
8266 if (GET_CODE (tem) != GET_CODE (inner)
8267 && partial_subreg_p (x)
8268 && subreg_lowpart_p (x))
8270 rtx newer
8271 = force_to_mode (tem, mode, HOST_WIDE_INT_M1U, 0);
8273 /* If we have something other than a SUBREG, we might have
8274 done an expansion, so rerun ourselves. */
8275 if (GET_CODE (newer) != SUBREG)
8276 newer = make_compound_operation (newer, in_code);
8278 /* force_to_mode can expand compounds. If it just re-expanded
8279 the compound, use gen_lowpart to convert to the desired
8280 mode. */
8281 if (rtx_equal_p (newer, x)
8282 /* Likewise if it re-expanded the compound only partially.
8283 This happens for SUBREG of ZERO_EXTRACT if they extract
8284 the same number of bits. */
8285 || (GET_CODE (newer) == SUBREG
8286 && (GET_CODE (SUBREG_REG (newer)) == LSHIFTRT
8287 || GET_CODE (SUBREG_REG (newer)) == ASHIFTRT)
8288 && GET_CODE (inner) == AND
8289 && rtx_equal_p (SUBREG_REG (newer), XEXP (inner, 0))))
8290 return gen_lowpart (GET_MODE (x), tem);
8292 return newer;
8295 if (simplified)
8296 return tem;
8298 break;
8300 default:
8301 break;
8304 if (new_rtx)
8305 *x_ptr = gen_lowpart (mode, new_rtx);
8306 *next_code_ptr = next_code;
8307 return NULL_RTX;
8310 /* Look at the expression rooted at X. Look for expressions
8311 equivalent to ZERO_EXTRACT, SIGN_EXTRACT, ZERO_EXTEND, SIGN_EXTEND.
8312 Form these expressions.
8314 Return the new rtx, usually just X.
8316 Also, for machines like the VAX that don't have logical shift insns,
8317 try to convert logical to arithmetic shift operations in cases where
8318 they are equivalent. This undoes the canonicalizations to logical
8319 shifts done elsewhere.
8321 We try, as much as possible, to re-use rtl expressions to save memory.
8323 IN_CODE says what kind of expression we are processing. Normally, it is
8324 SET. In a memory address it is MEM. When processing the arguments of
8325 a comparison or a COMPARE against zero, it is COMPARE, or EQ if more
8326 precisely it is an equality comparison against zero. */
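/* Two illustrative rewrites (hypothetical operands): inside a MEM address,
   (ashift:SI R (const_int 2)) becomes (mult:SI R (const_int 4)); and
   (and:SI (lshiftrt:SI X (const_int 8)) (const_int 255)) becomes an
   extraction of the 8 bits starting at bit 8, typically
   (zero_extract:SI X (const_int 8) (const_int 8)).  */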
8329 make_compound_operation (rtx x, enum rtx_code in_code)
8331 enum rtx_code code = GET_CODE (x);
8332 const char *fmt;
8333 int i, j;
8334 enum rtx_code next_code;
8335 rtx new_rtx, tem;
8337 /* Select the code to be used in recursive calls. Once we are inside an
8338 address, we stay there. If we have a comparison, set to COMPARE,
8339 but once inside, go back to our default of SET. */
8341 next_code = (code == MEM ? MEM
8342 : ((code == COMPARE || COMPARISON_P (x))
8343 && XEXP (x, 1) == const0_rtx) ? COMPARE
8344 : in_code == COMPARE || in_code == EQ ? SET : in_code);
8346 scalar_int_mode mode;
8347 if (is_a <scalar_int_mode> (GET_MODE (x), &mode))
8349 rtx new_rtx = make_compound_operation_int (mode, &x, in_code,
8350 &next_code);
8351 if (new_rtx)
8352 return new_rtx;
8353 code = GET_CODE (x);
8356 /* Now recursively process each operand of this operation. We need to
8357 handle ZERO_EXTEND specially so that we don't lose track of the
8358 inner mode. */
8359 if (code == ZERO_EXTEND)
8361 new_rtx = make_compound_operation (XEXP (x, 0), next_code);
8362 tem = simplify_const_unary_operation (ZERO_EXTEND, GET_MODE (x),
8363 new_rtx, GET_MODE (XEXP (x, 0)));
8364 if (tem)
8365 return tem;
8366 SUBST (XEXP (x, 0), new_rtx);
8367 return x;
8370 fmt = GET_RTX_FORMAT (code);
8371 for (i = 0; i < GET_RTX_LENGTH (code); i++)
8372 if (fmt[i] == 'e')
8374 new_rtx = make_compound_operation (XEXP (x, i), next_code);
8375 SUBST (XEXP (x, i), new_rtx);
8377 else if (fmt[i] == 'E')
8378 for (j = 0; j < XVECLEN (x, i); j++)
8380 new_rtx = make_compound_operation (XVECEXP (x, i, j), next_code);
8381 SUBST (XVECEXP (x, i, j), new_rtx);
8384 maybe_swap_commutative_operands (x);
8385 return x;
8388 /* Given M see if it is a value that would select a field of bits
8389 within an item, but not the entire word. Return -1 if not.
8390 Otherwise, return the starting position of the field, where 0 is the
8391 low-order bit.
8393 *PLEN is set to the length of the field. */
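/* Examples (illustrative): M = 0x78 (binary 1111000) selects a 4-bit
   field starting at bit 3, so the function returns 3 with *PLEN == 4;
   a non-contiguous value such as M = 0x0a makes it return -1.  */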
8395 static int
8396 get_pos_from_mask (unsigned HOST_WIDE_INT m, unsigned HOST_WIDE_INT *plen)
8398 /* Get the bit number of the first 1 bit from the right, -1 if none. */
8399 int pos = m ? ctz_hwi (m) : -1;
8400 int len = 0;
8402 if (pos >= 0)
8403 /* Now shift off the low-order zero bits and see if we have a
8404 power of two minus 1. */
8405 len = exact_log2 ((m >> pos) + 1);
8407 if (len <= 0)
8408 pos = -1;
8410 *plen = len;
8411 return pos;
8414 /* If X refers to a register that equals REG in value, replace these
8415 references with REG. */
8416 static rtx
8417 canon_reg_for_combine (rtx x, rtx reg)
8419 rtx op0, op1, op2;
8420 const char *fmt;
8421 int i;
8422 bool copied;
8424 enum rtx_code code = GET_CODE (x);
8425 switch (GET_RTX_CLASS (code))
8427 case RTX_UNARY:
8428 op0 = canon_reg_for_combine (XEXP (x, 0), reg);
8429 if (op0 != XEXP (x, 0))
8430 return simplify_gen_unary (GET_CODE (x), GET_MODE (x), op0,
8431 GET_MODE (reg));
8432 break;
8434 case RTX_BIN_ARITH:
8435 case RTX_COMM_ARITH:
8436 op0 = canon_reg_for_combine (XEXP (x, 0), reg);
8437 op1 = canon_reg_for_combine (XEXP (x, 1), reg);
8438 if (op0 != XEXP (x, 0) || op1 != XEXP (x, 1))
8439 return simplify_gen_binary (GET_CODE (x), GET_MODE (x), op0, op1);
8440 break;
8442 case RTX_COMPARE:
8443 case RTX_COMM_COMPARE:
8444 op0 = canon_reg_for_combine (XEXP (x, 0), reg);
8445 op1 = canon_reg_for_combine (XEXP (x, 1), reg);
8446 if (op0 != XEXP (x, 0) || op1 != XEXP (x, 1))
8447 return simplify_gen_relational (GET_CODE (x), GET_MODE (x),
8448 GET_MODE (op0), op0, op1);
8449 break;
8451 case RTX_TERNARY:
8452 case RTX_BITFIELD_OPS:
8453 op0 = canon_reg_for_combine (XEXP (x, 0), reg);
8454 op1 = canon_reg_for_combine (XEXP (x, 1), reg);
8455 op2 = canon_reg_for_combine (XEXP (x, 2), reg);
8456 if (op0 != XEXP (x, 0) || op1 != XEXP (x, 1) || op2 != XEXP (x, 2))
8457 return simplify_gen_ternary (GET_CODE (x), GET_MODE (x),
8458 GET_MODE (op0), op0, op1, op2);
8459 /* FALLTHRU */
8461 case RTX_OBJ:
8462 if (REG_P (x))
8464 if (rtx_equal_p (get_last_value (reg), x)
8465 || rtx_equal_p (reg, get_last_value (x)))
8466 return reg;
8467 else
8468 break;
8471 /* fall through */
8473 default:
8474 fmt = GET_RTX_FORMAT (code);
8475 copied = false;
8476 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
8477 if (fmt[i] == 'e')
8479 rtx op = canon_reg_for_combine (XEXP (x, i), reg);
8480 if (op != XEXP (x, i))
8482 if (!copied)
8484 copied = true;
8485 x = copy_rtx (x);
8487 XEXP (x, i) = op;
8490 else if (fmt[i] == 'E')
8492 int j;
8493 for (j = 0; j < XVECLEN (x, i); j++)
8495 rtx op = canon_reg_for_combine (XVECEXP (x, i, j), reg);
8496 if (op != XVECEXP (x, i, j))
8498 if (!copied)
8500 copied = true;
8501 x = copy_rtx (x);
8503 XVECEXP (x, i, j) = op;
8508 break;
8511 return x;
8514 /* Return X converted to MODE. If the value is already truncated to
8515 MODE we can just return a subreg even though in the general case we
8516 would need an explicit truncation. */
8518 static rtx
8519 gen_lowpart_or_truncate (machine_mode mode, rtx x)
8521 if (!CONST_INT_P (x)
8522 && partial_subreg_p (mode, GET_MODE (x))
8523 && !TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (x))
8524 && !(REG_P (x) && reg_truncated_to_mode (mode, x)))
8526 /* Bit-cast X into an integer mode. */
8527 if (!SCALAR_INT_MODE_P (GET_MODE (x)))
8528 x = gen_lowpart (int_mode_for_mode (GET_MODE (x)).require (), x);
8529 x = simplify_gen_unary (TRUNCATE, int_mode_for_mode (mode).require (),
8530 x, GET_MODE (x));
8533 return gen_lowpart (mode, x);
8536 /* See if X can be simplified knowing that we will only refer to it in
8537 MODE and will only refer to those bits that are nonzero in MASK.
8538 If other bits are being computed or if masking operations are done
8539 that select a superset of the bits in MASK, they can sometimes be
8540 ignored.
8542 Return a possibly simplified expression, but always convert X to
8543 MODE. If X is a CONST_INT, AND the CONST_INT with MASK.
8545 If JUST_SELECT is nonzero, don't optimize by noticing that bits in MASK
8546 are all off in X. This is used when X will be complemented, by either
8547 NOT, NEG, or XOR. */
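/* Simple illustrations (not exhaustive): a CONST_INT is simply masked,
   e.g. MODE == QImode and MASK == 0xff turn (const_int 0x1234) into
   (const_int 0x34); and if MASK selects no bit that can be nonzero in X,
   the result is (const_int 0), barring side effects.  */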
8549 static rtx
8550 force_to_mode (rtx x, machine_mode mode, unsigned HOST_WIDE_INT mask,
8551 int just_select)
8553 enum rtx_code code = GET_CODE (x);
8554 int next_select = just_select || code == XOR || code == NOT || code == NEG;
8555 machine_mode op_mode;
8556 unsigned HOST_WIDE_INT nonzero;
8558 /* If this is a CALL or ASM_OPERANDS, don't do anything. Some of the
8559 code below will do the wrong thing since the mode of such an
8560 expression is VOIDmode.
8562 Also do nothing if X is a CLOBBER; this can happen if X was
8563 the return value from a call to gen_lowpart. */
8564 if (code == CALL || code == ASM_OPERANDS || code == CLOBBER)
8565 return x;
8567 /* We want to perform the operation in its present mode unless we know
8568 that the operation is valid in MODE, in which case we do the operation
8569 in MODE. */
8570 op_mode = ((GET_MODE_CLASS (mode) == GET_MODE_CLASS (GET_MODE (x))
8571 && have_insn_for (code, mode))
8572 ? mode : GET_MODE (x));
8574 /* It is not valid to do a right-shift in a narrower mode
8575 than the one it came in with. */
8576 if ((code == LSHIFTRT || code == ASHIFTRT)
8577 && partial_subreg_p (mode, GET_MODE (x)))
8578 op_mode = GET_MODE (x);
8580 /* Truncate MASK to fit OP_MODE. */
8581 if (op_mode)
8582 mask &= GET_MODE_MASK (op_mode);
8584 /* Determine what bits of X are guaranteed to be (non)zero. */
8585 nonzero = nonzero_bits (x, mode);
8587 /* If none of the bits in X are needed, return a zero. */
8588 if (!just_select && (nonzero & mask) == 0 && !side_effects_p (x))
8589 x = const0_rtx;
8591 /* If X is a CONST_INT, return a new one. Do this here since the
8592 test below will fail. */
8593 if (CONST_INT_P (x))
8595 if (SCALAR_INT_MODE_P (mode))
8596 return gen_int_mode (INTVAL (x) & mask, mode);
8597 else
8599 x = GEN_INT (INTVAL (x) & mask);
8600 return gen_lowpart_common (mode, x);
8604 /* If X is narrower than MODE and we want all the bits in X's mode, just
8605 get X in the proper mode. */
8606 if (paradoxical_subreg_p (mode, GET_MODE (x))
8607 && (GET_MODE_MASK (GET_MODE (x)) & ~mask) == 0)
8608 return gen_lowpart (mode, x);
8610 /* We can ignore the effect of a SUBREG if it narrows the mode or
8611 if the constant masks to zero all the bits the mode doesn't have. */
8612 if (GET_CODE (x) == SUBREG
8613 && subreg_lowpart_p (x)
8614 && (partial_subreg_p (x)
8615 || (0 == (mask
8616 & GET_MODE_MASK (GET_MODE (x))
8617 & ~GET_MODE_MASK (GET_MODE (SUBREG_REG (x)))))))
8618 return force_to_mode (SUBREG_REG (x), mode, mask, next_select);
8620 scalar_int_mode int_mode, xmode;
8621 if (is_a <scalar_int_mode> (mode, &int_mode)
8622 && is_a <scalar_int_mode> (GET_MODE (x), &xmode))
8623 /* OP_MODE is either MODE or XMODE, so it must be a scalar
8624 integer too. */
8625 return force_int_to_mode (x, int_mode, xmode,
8626 as_a <scalar_int_mode> (op_mode),
8627 mask, just_select);
8629 return gen_lowpart_or_truncate (mode, x);
8632 /* Subroutine of force_to_mode that handles cases in which both X and
8633 the result are scalar integers. MODE is the mode of the result,
8634 XMODE is the mode of X, and OP_MODE says which of MODE or XMODE
8635 is preferred for simplified versions of X. The other arguments
8636 are as for force_to_mode. */
8638 static rtx
8639 force_int_to_mode (rtx x, scalar_int_mode mode, scalar_int_mode xmode,
8640 scalar_int_mode op_mode, unsigned HOST_WIDE_INT mask,
8641 int just_select)
8643 enum rtx_code code = GET_CODE (x);
8644 int next_select = just_select || code == XOR || code == NOT || code == NEG;
8645 unsigned HOST_WIDE_INT fuller_mask;
8646 rtx op0, op1, temp;
8648 /* When we have an arithmetic operation, or a shift whose count we
8649 do not know, we need to assume that all bits up to the highest-order
8650 bit in MASK will be needed. This is how we form such a mask. */
8651 if (mask & (HOST_WIDE_INT_1U << (HOST_BITS_PER_WIDE_INT - 1)))
8652 fuller_mask = HOST_WIDE_INT_M1U;
8653 else
8654 fuller_mask = ((HOST_WIDE_INT_1U << (floor_log2 (mask) + 1))
8655 - 1);
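/* For example, if MASK is 0x14 (binary 10100), floor_log2 gives 4 and
   FULLER_MASK becomes 0x1f: every bit up to and including the highest bit
   set in MASK.  If MASK already has the most significant HOST_WIDE_INT bit
   set, FULLER_MASK is simply all ones.  */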
8657 switch (code)
8659 case CLOBBER:
8660 /* If X is a (clobber (const_int)), return it since we know we are
8661 generating something that won't match. */
8662 return x;
8664 case SIGN_EXTEND:
8665 case ZERO_EXTEND:
8666 case ZERO_EXTRACT:
8667 case SIGN_EXTRACT:
8668 x = expand_compound_operation (x);
8669 if (GET_CODE (x) != code)
8670 return force_to_mode (x, mode, mask, next_select);
8671 break;
8673 case TRUNCATE:
8674 /* Similarly for a truncate. */
8675 return force_to_mode (XEXP (x, 0), mode, mask, next_select);
8677 case AND:
8678 /* If this is an AND with a constant, convert it into an AND
8679 whose constant is the AND of that constant with MASK. If it
8680 remains an AND of MASK, delete it since it is redundant. */
8682 if (CONST_INT_P (XEXP (x, 1)))
8684 x = simplify_and_const_int (x, op_mode, XEXP (x, 0),
8685 mask & INTVAL (XEXP (x, 1)));
8686 xmode = op_mode;
8688 /* If X is still an AND, see if it is an AND with a mask that
8689 is just some low-order bits. If so, and it is MASK, we don't
8690 need it. */
8692 if (GET_CODE (x) == AND && CONST_INT_P (XEXP (x, 1))
8693 && (INTVAL (XEXP (x, 1)) & GET_MODE_MASK (xmode)) == mask)
8694 x = XEXP (x, 0);
8696 /* If it remains an AND, try making another AND with the bits
8697 in the mode mask that aren't in MASK turned on. If the
8698 constant in the AND is wide enough, this might make a
8699 cheaper constant. */
8701 if (GET_CODE (x) == AND && CONST_INT_P (XEXP (x, 1))
8702 && GET_MODE_MASK (xmode) != mask
8703 && HWI_COMPUTABLE_MODE_P (xmode))
8705 unsigned HOST_WIDE_INT cval
8706 = UINTVAL (XEXP (x, 1)) | (GET_MODE_MASK (xmode) & ~mask);
8707 rtx y;
8709 y = simplify_gen_binary (AND, xmode, XEXP (x, 0),
8710 gen_int_mode (cval, xmode));
8711 if (set_src_cost (y, xmode, optimize_this_for_speed_p)
8712 < set_src_cost (x, xmode, optimize_this_for_speed_p))
8713 x = y;
8716 break;
8719 goto binop;
8721 case PLUS:
8722 /* In (and (plus FOO C1) M), if M is a mask that just turns off
8723 low-order bits (as in an alignment operation) and FOO is already
8724 aligned to that boundary, mask C1 to that boundary as well.
8725 This may eliminate that PLUS and, later, the AND. */
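/* For example, in (and (plus FOO (const_int 11)) (const_int -8)), if FOO is
   known to have its low three bits clear, the constant 11 can be masked down
   to 8: adding 11 and adding 8 differ only in the low three bits, which the
   -8 mask discards and which generate no carry into the higher bits.  */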
8728 unsigned int width = GET_MODE_PRECISION (mode);
8729 unsigned HOST_WIDE_INT smask = mask;
8731 /* If MODE is narrower than HOST_WIDE_INT and mask is a negative
8732 number, sign extend it. */
8734 if (width < HOST_BITS_PER_WIDE_INT
8735 && (smask & (HOST_WIDE_INT_1U << (width - 1))) != 0)
8736 smask |= HOST_WIDE_INT_M1U << width;
8738 if (CONST_INT_P (XEXP (x, 1))
8739 && pow2p_hwi (- smask)
8740 && (nonzero_bits (XEXP (x, 0), mode) & ~smask) == 0
8741 && (INTVAL (XEXP (x, 1)) & ~smask) != 0)
8742 return force_to_mode (plus_constant (xmode, XEXP (x, 0),
8743 (INTVAL (XEXP (x, 1)) & smask)),
8744 mode, smask, next_select);
8747 /* fall through */
8749 case MULT:
8750 /* Substituting into the operands of a widening MULT is not likely to
8751 create RTL matching a machine insn. */
8752 if (code == MULT
8753 && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
8754 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND)
8755 && (GET_CODE (XEXP (x, 1)) == ZERO_EXTEND
8756 || GET_CODE (XEXP (x, 1)) == SIGN_EXTEND)
8757 && REG_P (XEXP (XEXP (x, 0), 0))
8758 && REG_P (XEXP (XEXP (x, 1), 0)))
8759 return gen_lowpart_or_truncate (mode, x);
8761 /* For PLUS, MINUS and MULT, we need any bits less significant than the
8762 most significant bit in MASK since carries from those bits will
8763 affect the bits we are interested in. */
8764 mask = fuller_mask;
8765 goto binop;
8767 case MINUS:
8768 /* If X is (minus C Y) where C's least set bit is larger than any bit
8769 in the mask, then we may replace with (neg Y). */
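/* For example, with MASK = 0xf, (minus (const_int 256) Y) can become
   (neg Y): 256 is a multiple of 16, so the low four bits of 256 - Y and
   of 0 - Y are identical.  */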
8770 if (CONST_INT_P (XEXP (x, 0))
8771 && least_bit_hwi (UINTVAL (XEXP (x, 0))) > mask)
8773 x = simplify_gen_unary (NEG, xmode, XEXP (x, 1), xmode);
8774 return force_to_mode (x, mode, mask, next_select);
8777 /* Similarly, if C contains every bit in the fuller_mask, then we may
8778 replace with (not Y). */
8779 if (CONST_INT_P (XEXP (x, 0))
8780 && ((UINTVAL (XEXP (x, 0)) | fuller_mask) == UINTVAL (XEXP (x, 0))))
8782 x = simplify_gen_unary (NOT, xmode, XEXP (x, 1), xmode);
8783 return force_to_mode (x, mode, mask, next_select);
8786 mask = fuller_mask;
8787 goto binop;
8789 case IOR:
8790 case XOR:
8791 /* If X is (ior (lshiftrt FOO C1) C2), try to commute the IOR and
8792 LSHIFTRT so we end up with an (and (lshiftrt (ior ...) ...) ...)
8793 operation which may be a bitfield extraction. Ensure that the
8794 constant we form is not wider than the mode of X. */
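/* For example, (ior (lshiftrt FOO (const_int 8)) (const_int 255)) becomes
   (lshiftrt (ior FOO (const_int 65280)) (const_int 8)): the constant is
   shifted left by C1 and the IOR is moved inside the logical shift, which
   may then be recognizable as a bitfield extraction.  */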
8796 if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
8797 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
8798 && INTVAL (XEXP (XEXP (x, 0), 1)) >= 0
8799 && INTVAL (XEXP (XEXP (x, 0), 1)) < HOST_BITS_PER_WIDE_INT
8800 && CONST_INT_P (XEXP (x, 1))
8801 && ((INTVAL (XEXP (XEXP (x, 0), 1))
8802 + floor_log2 (INTVAL (XEXP (x, 1))))
8803 < GET_MODE_PRECISION (xmode))
8804 && (UINTVAL (XEXP (x, 1))
8805 & ~nonzero_bits (XEXP (x, 0), xmode)) == 0)
8807 temp = gen_int_mode ((INTVAL (XEXP (x, 1)) & mask)
8808 << INTVAL (XEXP (XEXP (x, 0), 1)),
8809 xmode);
8810 temp = simplify_gen_binary (GET_CODE (x), xmode,
8811 XEXP (XEXP (x, 0), 0), temp);
8812 x = simplify_gen_binary (LSHIFTRT, xmode, temp,
8813 XEXP (XEXP (x, 0), 1));
8814 return force_to_mode (x, mode, mask, next_select);
8817 binop:
8818 /* For most binary operations, just propagate into the operation and
8819 change the mode if we have an operation of that mode. */
8821 op0 = force_to_mode (XEXP (x, 0), mode, mask, next_select);
8822 op1 = force_to_mode (XEXP (x, 1), mode, mask, next_select);
8824 /* If we ended up truncating both operands, truncate the result of the
8825 operation instead. */
8826 if (GET_CODE (op0) == TRUNCATE
8827 && GET_CODE (op1) == TRUNCATE)
8829 op0 = XEXP (op0, 0);
8830 op1 = XEXP (op1, 0);
8833 op0 = gen_lowpart_or_truncate (op_mode, op0);
8834 op1 = gen_lowpart_or_truncate (op_mode, op1);
8836 if (op_mode != xmode || op0 != XEXP (x, 0) || op1 != XEXP (x, 1))
8838 x = simplify_gen_binary (code, op_mode, op0, op1);
8839 xmode = op_mode;
8841 break;
8843 case ASHIFT:
8844 /* For left shifts, do the same, but just for the first operand.
8845 However, we cannot do anything with shifts where we cannot
8846 guarantee that the counts are smaller than the size of the mode
8847 because such a count will have a different meaning in a
8848 wider mode. */
8850 if (! (CONST_INT_P (XEXP (x, 1))
8851 && INTVAL (XEXP (x, 1)) >= 0
8852 && INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (mode))
8853 && ! (GET_MODE (XEXP (x, 1)) != VOIDmode
8854 && (nonzero_bits (XEXP (x, 1), GET_MODE (XEXP (x, 1)))
8855 < (unsigned HOST_WIDE_INT) GET_MODE_PRECISION (mode))))
8856 break;
8858 /* If the shift count is a constant and we can do arithmetic in
8859 the mode of the shift, refine which bits we need. Otherwise, use the
8860 conservative form of the mask. */
8861 if (CONST_INT_P (XEXP (x, 1))
8862 && INTVAL (XEXP (x, 1)) >= 0
8863 && INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (op_mode)
8864 && HWI_COMPUTABLE_MODE_P (op_mode))
8865 mask >>= INTVAL (XEXP (x, 1));
8866 else
8867 mask = fuller_mask;
8869 op0 = gen_lowpart_or_truncate (op_mode,
8870 force_to_mode (XEXP (x, 0), op_mode,
8871 mask, next_select));
8873 if (op_mode != xmode || op0 != XEXP (x, 0))
8875 x = simplify_gen_binary (code, op_mode, op0, XEXP (x, 1));
8876 xmode = op_mode;
8878 break;
8880 case LSHIFTRT:
8881 /* Here we can only do something if the shift count is a constant,
8882 this shift constant is valid for the host, and we can do arithmetic
8883 in OP_MODE. */
8885 if (CONST_INT_P (XEXP (x, 1))
8886 && INTVAL (XEXP (x, 1)) >= 0
8887 && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT
8888 && HWI_COMPUTABLE_MODE_P (op_mode))
8890 rtx inner = XEXP (x, 0);
8891 unsigned HOST_WIDE_INT inner_mask;
8893 /* Select the mask of the bits we need for the shift operand. */
8894 inner_mask = mask << INTVAL (XEXP (x, 1));
8896 /* We can only change the mode of the shift if we can do arithmetic
8897 in the mode of the shift and INNER_MASK is no wider than the
8898 width of X's mode. */
8899 if ((inner_mask & ~GET_MODE_MASK (xmode)) != 0)
8900 op_mode = xmode;
8902 inner = force_to_mode (inner, op_mode, inner_mask, next_select);
8904 if (xmode != op_mode || inner != XEXP (x, 0))
8906 x = simplify_gen_binary (LSHIFTRT, op_mode, inner, XEXP (x, 1));
8907 xmode = op_mode;
8911 /* If we have (and (lshiftrt FOO C1) C2) where the combination of the
8912 shift and AND produces only copies of the sign bit (C2 is one less
8913 than a power of two), we can do this with just a shift. */
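/* For example, in SImode, if the top 30 bits of FOO are all copies of its
   sign bit, then under MASK = 3 the value (lshiftrt FOO (const_int 4)) is
   equivalent to (lshiftrt FOO (const_int 30)): both leave two sign-bit
   copies in the low two bits, giving 3 for negative FOO and 0 otherwise.  */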
8915 if (GET_CODE (x) == LSHIFTRT
8916 && CONST_INT_P (XEXP (x, 1))
8917 /* The shift puts one of the sign bit copies in the least significant
8918 bit. */
8919 && ((INTVAL (XEXP (x, 1))
8920 + num_sign_bit_copies (XEXP (x, 0), GET_MODE (XEXP (x, 0))))
8921 >= GET_MODE_PRECISION (xmode))
8922 && pow2p_hwi (mask + 1)
8923 /* Number of bits left after the shift must be more than the mask
8924 needs. */
8925 && ((INTVAL (XEXP (x, 1)) + exact_log2 (mask + 1))
8926 <= GET_MODE_PRECISION (xmode))
8927 /* Must be more sign bit copies than the mask needs. */
8928 && ((int) num_sign_bit_copies (XEXP (x, 0), GET_MODE (XEXP (x, 0)))
8929 >= exact_log2 (mask + 1)))
8930 x = simplify_gen_binary (LSHIFTRT, xmode, XEXP (x, 0),
8931 GEN_INT (GET_MODE_PRECISION (xmode)
8932 - exact_log2 (mask + 1)));
8934 goto shiftrt;
8936 case ASHIFTRT:
8937 /* If we are just looking for the sign bit, we don't need this shift at
8938 all, even if it has a variable count. */
8939 if (val_signbit_p (xmode, mask))
8940 return force_to_mode (XEXP (x, 0), mode, mask, next_select);
8942 /* If this is a shift by a constant, get a mask that contains those bits
8943 that are not copies of the sign bit. We then have two cases: If
8944 MASK only includes those bits, this can be a logical shift, which may
8945 allow simplifications. If MASK is a single-bit field not within
8946 those bits, we are requesting a copy of the sign bit and hence can
8947 shift the sign bit to the appropriate location. */
8949 if (CONST_INT_P (XEXP (x, 1)) && INTVAL (XEXP (x, 1)) >= 0
8950 && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT)
8952 unsigned HOST_WIDE_INT nonzero;
8953 int i;
8955 /* If the considered data is wider than HOST_WIDE_INT, we can't
8956 represent a mask for all its bits in a single scalar.
8957 But we only care about the lower bits, so calculate these. */
8959 if (GET_MODE_PRECISION (xmode) > HOST_BITS_PER_WIDE_INT)
8961 nonzero = HOST_WIDE_INT_M1U;
8963 /* GET_MODE_PRECISION (GET_MODE (x)) - INTVAL (XEXP (x, 1))
8964 is the number of bits a full-width mask would have set.
8965 We need only shift if these are fewer than nonzero can
8966 hold. If not, we must keep all bits set in nonzero. */
8968 if (GET_MODE_PRECISION (xmode) - INTVAL (XEXP (x, 1))
8969 < HOST_BITS_PER_WIDE_INT)
8970 nonzero >>= INTVAL (XEXP (x, 1))
8971 + HOST_BITS_PER_WIDE_INT
8972 - GET_MODE_PRECISION (xmode);
8974 else
8976 nonzero = GET_MODE_MASK (xmode);
8977 nonzero >>= INTVAL (XEXP (x, 1));
8980 if ((mask & ~nonzero) == 0)
8982 x = simplify_shift_const (NULL_RTX, LSHIFTRT, xmode,
8983 XEXP (x, 0), INTVAL (XEXP (x, 1)));
8984 if (GET_CODE (x) != ASHIFTRT)
8985 return force_to_mode (x, mode, mask, next_select);
8988 else if ((i = exact_log2 (mask)) >= 0)
8990 x = simplify_shift_const
8991 (NULL_RTX, LSHIFTRT, xmode, XEXP (x, 0),
8992 GET_MODE_PRECISION (xmode) - 1 - i);
8994 if (GET_CODE (x) != ASHIFTRT)
8995 return force_to_mode (x, mode, mask, next_select);
8999 /* If MASK is 1, convert this to an LSHIFTRT. This can be done
9000 even if the shift count isn't a constant. */
9001 if (mask == 1)
9002 x = simplify_gen_binary (LSHIFTRT, xmode, XEXP (x, 0), XEXP (x, 1));
9004 shiftrt:
9006 /* If this is a zero- or sign-extension operation that just affects bits
9007 we don't care about, remove it. Be sure the call above returned
9008 something that is still a shift. */
9010 if ((GET_CODE (x) == LSHIFTRT || GET_CODE (x) == ASHIFTRT)
9011 && CONST_INT_P (XEXP (x, 1))
9012 && INTVAL (XEXP (x, 1)) >= 0
9013 && (INTVAL (XEXP (x, 1))
9014 <= GET_MODE_PRECISION (xmode) - (floor_log2 (mask) + 1))
9015 && GET_CODE (XEXP (x, 0)) == ASHIFT
9016 && XEXP (XEXP (x, 0), 1) == XEXP (x, 1))
9017 return force_to_mode (XEXP (XEXP (x, 0), 0), mode, mask,
9018 next_select);
9020 break;
9022 case ROTATE:
9023 case ROTATERT:
9024 /* If the shift count is constant and we can do computations
9025 in the mode of X, compute where the bits we care about are.
9026 Otherwise, we can't do anything. Don't change the mode of
9027 the shift or propagate MODE into the shift, though. */
9028 if (CONST_INT_P (XEXP (x, 1))
9029 && INTVAL (XEXP (x, 1)) >= 0)
9031 temp = simplify_binary_operation (code == ROTATE ? ROTATERT : ROTATE,
9032 xmode, gen_int_mode (mask, xmode),
9033 XEXP (x, 1));
9034 if (temp && CONST_INT_P (temp))
9035 x = simplify_gen_binary (code, xmode,
9036 force_to_mode (XEXP (x, 0), xmode,
9037 INTVAL (temp), next_select),
9038 XEXP (x, 1));
9040 break;
9042 case NEG:
9043 /* If we just want the low-order bit, the NEG isn't needed since it
9044 won't change the low-order bit. */
9045 if (mask == 1)
9046 return force_to_mode (XEXP (x, 0), mode, mask, just_select);
9048 /* We need any bits less significant than the most significant bit in
9049 MASK since carries from those bits will affect the bits we are
9050 interested in. */
9051 mask = fuller_mask;
9052 goto unop;
9054 case NOT:
9055 /* (not FOO) is (xor FOO CONST), so if FOO is an LSHIFTRT, we can do the
9056 same as the XOR case above. Ensure that the constant we form is not
9057 wider than the mode of X. */
9059 if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
9060 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
9061 && INTVAL (XEXP (XEXP (x, 0), 1)) >= 0
9062 && (INTVAL (XEXP (XEXP (x, 0), 1)) + floor_log2 (mask)
9063 < GET_MODE_PRECISION (xmode))
9064 && INTVAL (XEXP (XEXP (x, 0), 1)) < HOST_BITS_PER_WIDE_INT)
9066 temp = gen_int_mode (mask << INTVAL (XEXP (XEXP (x, 0), 1)), xmode);
9067 temp = simplify_gen_binary (XOR, xmode, XEXP (XEXP (x, 0), 0), temp);
9068 x = simplify_gen_binary (LSHIFTRT, xmode,
9069 temp, XEXP (XEXP (x, 0), 1));
9071 return force_to_mode (x, mode, mask, next_select);
9074 /* (and (not FOO) CONST) is (not (or FOO (not CONST))), so we must
9075 use the full mask inside the NOT. */
9076 mask = fuller_mask;
9078 unop:
9079 op0 = gen_lowpart_or_truncate (op_mode,
9080 force_to_mode (XEXP (x, 0), mode, mask,
9081 next_select));
9082 if (op_mode != xmode || op0 != XEXP (x, 0))
9084 x = simplify_gen_unary (code, op_mode, op0, op_mode);
9085 xmode = op_mode;
9087 break;
9089 case NE:
9090 /* (and (ne FOO 0) CONST) can be (and FOO CONST) if CONST is included
9091 in STORE_FLAG_VALUE and FOO has a single bit that might be nonzero,
9092 which is equal to STORE_FLAG_VALUE. */
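/* For example, with STORE_FLAG_VALUE == 1 and MASK = 1, if the only bit of
   FOO that can be nonzero is bit 0, then (ne FOO (const_int 0)) is 1 exactly
   when FOO is 1 and 0 when FOO is 0, so under the mask it is just FOO.  */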
9093 if ((mask & ~STORE_FLAG_VALUE) == 0
9094 && XEXP (x, 1) == const0_rtx
9095 && GET_MODE (XEXP (x, 0)) == mode
9096 && pow2p_hwi (nonzero_bits (XEXP (x, 0), mode))
9097 && (nonzero_bits (XEXP (x, 0), mode)
9098 == (unsigned HOST_WIDE_INT) STORE_FLAG_VALUE))
9099 return force_to_mode (XEXP (x, 0), mode, mask, next_select);
9101 break;
9103 case IF_THEN_ELSE:
9104 /* We have no way of knowing if the IF_THEN_ELSE can itself be
9105 written in a narrower mode. We play it safe and do not do so. */
9107 op0 = gen_lowpart_or_truncate (xmode,
9108 force_to_mode (XEXP (x, 1), mode,
9109 mask, next_select));
9110 op1 = gen_lowpart_or_truncate (xmode,
9111 force_to_mode (XEXP (x, 2), mode,
9112 mask, next_select));
9113 if (op0 != XEXP (x, 1) || op1 != XEXP (x, 2))
9114 x = simplify_gen_ternary (IF_THEN_ELSE, xmode,
9115 GET_MODE (XEXP (x, 0)), XEXP (x, 0),
9116 op0, op1);
9117 break;
9119 default:
9120 break;
9123 /* Ensure we return a value of the proper mode. */
9124 return gen_lowpart_or_truncate (mode, x);
9127 /* Return nonzero if X is an expression that has one of two values depending on
9128 whether some other value is zero or nonzero. In that case, we return the
9129 value that is being tested, *PTRUE is set to the value if the rtx being
9130 returned has a nonzero value, and *PFALSE is set to the other alternative.
9132 If we return zero, we set *PTRUE and *PFALSE to X. */
9134 static rtx
9135 if_then_else_cond (rtx x, rtx *ptrue, rtx *pfalse)
9137 machine_mode mode = GET_MODE (x);
9138 enum rtx_code code = GET_CODE (x);
9139 rtx cond0, cond1, true0, true1, false0, false1;
9140 unsigned HOST_WIDE_INT nz;
9141 scalar_int_mode int_mode;
9143 /* If we are comparing a value against zero, we are done. */
9144 if ((code == NE || code == EQ)
9145 && XEXP (x, 1) == const0_rtx)
9147 *ptrue = (code == NE) ? const_true_rtx : const0_rtx;
9148 *pfalse = (code == NE) ? const0_rtx : const_true_rtx;
9149 return XEXP (x, 0);
9152 /* If this is a unary operation whose operand has one of two values, apply
9153 our opcode to compute those values. */
9154 else if (UNARY_P (x)
9155 && (cond0 = if_then_else_cond (XEXP (x, 0), &true0, &false0)) != 0)
9157 *ptrue = simplify_gen_unary (code, mode, true0, GET_MODE (XEXP (x, 0)));
9158 *pfalse = simplify_gen_unary (code, mode, false0,
9159 GET_MODE (XEXP (x, 0)));
9160 return cond0;
9163 /* If this is a COMPARE, do nothing, since the IF_THEN_ELSE we would
9164 make can't possibly match and would suppress other optimizations. */
9165 else if (code == COMPARE)
9168 /* If this is a binary operation, see if either side has only one of two
9169 values. If either one does or if both do and they are conditional on
9170 the same value, compute the new true and false values. */
9171 else if (BINARY_P (x))
9173 rtx op0 = XEXP (x, 0);
9174 rtx op1 = XEXP (x, 1);
9175 cond0 = if_then_else_cond (op0, &true0, &false0);
9176 cond1 = if_then_else_cond (op1, &true1, &false1);
9178 if ((cond0 != 0 && cond1 != 0 && !rtx_equal_p (cond0, cond1))
9179 && (REG_P (op0) || REG_P (op1)))
9181 /* Try to enable a simplification by undoing work done by
9182 if_then_else_cond if it converted a REG into something more
9183 complex. */
9184 if (REG_P (op0))
9186 cond0 = 0;
9187 true0 = false0 = op0;
9189 else
9191 cond1 = 0;
9192 true1 = false1 = op1;
9196 if ((cond0 != 0 || cond1 != 0)
9197 && ! (cond0 != 0 && cond1 != 0 && !rtx_equal_p (cond0, cond1)))
9199 /* If if_then_else_cond returned zero, then true/false are the
9200 same rtl. We must copy one of them to prevent invalid rtl
9201 sharing. */
9202 if (cond0 == 0)
9203 true0 = copy_rtx (true0);
9204 else if (cond1 == 0)
9205 true1 = copy_rtx (true1);
9207 if (COMPARISON_P (x))
9209 *ptrue = simplify_gen_relational (code, mode, VOIDmode,
9210 true0, true1);
9211 *pfalse = simplify_gen_relational (code, mode, VOIDmode,
9212 false0, false1);
9214 else
9216 *ptrue = simplify_gen_binary (code, mode, true0, true1);
9217 *pfalse = simplify_gen_binary (code, mode, false0, false1);
9220 return cond0 ? cond0 : cond1;
9223 /* See if we have PLUS, IOR, XOR, MINUS or UMAX, where one of the
9224 operands is zero when the other is nonzero, and vice-versa,
9225 and STORE_FLAG_VALUE is 1 or -1. */
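/* For example, with STORE_FLAG_VALUE == 1,
   (plus (mult (eq A B) (const_int 6)) (mult (ne A B) (const_int 9)))
   is 6 when A == B and 9 otherwise, so we return the condition (eq A B)
   with *PTRUE = 6 and *PFALSE = 9.  */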
9227 if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
9228 && (code == PLUS || code == IOR || code == XOR || code == MINUS
9229 || code == UMAX)
9230 && GET_CODE (XEXP (x, 0)) == MULT && GET_CODE (XEXP (x, 1)) == MULT)
9232 rtx op0 = XEXP (XEXP (x, 0), 1);
9233 rtx op1 = XEXP (XEXP (x, 1), 1);
9235 cond0 = XEXP (XEXP (x, 0), 0);
9236 cond1 = XEXP (XEXP (x, 1), 0);
9238 if (COMPARISON_P (cond0)
9239 && COMPARISON_P (cond1)
9240 && ((GET_CODE (cond0) == reversed_comparison_code (cond1, NULL)
9241 && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 0))
9242 && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 1)))
9243 || ((swap_condition (GET_CODE (cond0))
9244 == reversed_comparison_code (cond1, NULL))
9245 && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 1))
9246 && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 0))))
9247 && ! side_effects_p (x))
9249 *ptrue = simplify_gen_binary (MULT, mode, op0, const_true_rtx);
9250 *pfalse = simplify_gen_binary (MULT, mode,
9251 (code == MINUS
9252 ? simplify_gen_unary (NEG, mode,
9253 op1, mode)
9254 : op1),
9255 const_true_rtx);
9256 return cond0;
9260 /* Similarly for MULT, AND and UMIN, except that for these the result
9261 is always zero. */
9262 if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
9263 && (code == MULT || code == AND || code == UMIN)
9264 && GET_CODE (XEXP (x, 0)) == MULT && GET_CODE (XEXP (x, 1)) == MULT)
9266 cond0 = XEXP (XEXP (x, 0), 0);
9267 cond1 = XEXP (XEXP (x, 1), 0);
9269 if (COMPARISON_P (cond0)
9270 && COMPARISON_P (cond1)
9271 && ((GET_CODE (cond0) == reversed_comparison_code (cond1, NULL)
9272 && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 0))
9273 && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 1)))
9274 || ((swap_condition (GET_CODE (cond0))
9275 == reversed_comparison_code (cond1, NULL))
9276 && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 1))
9277 && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 0))))
9278 && ! side_effects_p (x))
9280 *ptrue = *pfalse = const0_rtx;
9281 return cond0;
9286 else if (code == IF_THEN_ELSE)
9288 /* If we have IF_THEN_ELSE already, extract the condition and
9289 canonicalize it if it is NE or EQ. */
9290 cond0 = XEXP (x, 0);
9291 *ptrue = XEXP (x, 1), *pfalse = XEXP (x, 2);
9292 if (GET_CODE (cond0) == NE && XEXP (cond0, 1) == const0_rtx)
9293 return XEXP (cond0, 0);
9294 else if (GET_CODE (cond0) == EQ && XEXP (cond0, 1) == const0_rtx)
9296 *ptrue = XEXP (x, 2), *pfalse = XEXP (x, 1);
9297 return XEXP (cond0, 0);
9299 else
9300 return cond0;
9303 /* If X is a SUBREG, we can narrow both the true and false values
9304 of the inner expression, if there is a condition. */
9305 else if (code == SUBREG
9306 && 0 != (cond0 = if_then_else_cond (SUBREG_REG (x),
9307 &true0, &false0)))
9309 true0 = simplify_gen_subreg (mode, true0,
9310 GET_MODE (SUBREG_REG (x)), SUBREG_BYTE (x));
9311 false0 = simplify_gen_subreg (mode, false0,
9312 GET_MODE (SUBREG_REG (x)), SUBREG_BYTE (x));
9313 if (true0 && false0)
9315 *ptrue = true0;
9316 *pfalse = false0;
9317 return cond0;
9321 /* If X is a constant, this isn't special and will cause confusion
9322 if we treat it as such. Likewise if it is equivalent to a constant. */
9323 else if (CONSTANT_P (x)
9324 || ((cond0 = get_last_value (x)) != 0 && CONSTANT_P (cond0)))
9327 /* If we're in BImode, canonicalize on 0 and STORE_FLAG_VALUE, as that
9328 will be least confusing to the rest of the compiler. */
9329 else if (mode == BImode)
9331 *ptrue = GEN_INT (STORE_FLAG_VALUE), *pfalse = const0_rtx;
9332 return x;
9335 /* If X is known to be either 0 or -1, those are the true and
9336 false values when testing X. */
9337 else if (x == constm1_rtx || x == const0_rtx
9338 || (is_a <scalar_int_mode> (mode, &int_mode)
9339 && (num_sign_bit_copies (x, int_mode)
9340 == GET_MODE_PRECISION (int_mode))))
9342 *ptrue = constm1_rtx, *pfalse = const0_rtx;
9343 return x;
9346 /* Likewise for 0 or a single bit. */
9347 else if (HWI_COMPUTABLE_MODE_P (mode)
9348 && pow2p_hwi (nz = nonzero_bits (x, mode)))
9350 *ptrue = gen_int_mode (nz, mode), *pfalse = const0_rtx;
9351 return x;
9354 /* Otherwise fail; show no condition with true and false values the same. */
9355 *ptrue = *pfalse = x;
9356 return 0;
9359 /* Return the value of expression X given the fact that condition COND
9360 is known to be true when applied to REG as its first operand and VAL
9361 as its second. X is known to not be shared and so can be modified in
9362 place.
9364 We only handle the simplest cases, and specifically those cases that
9365 arise with IF_THEN_ELSE expressions. */
9367 static rtx
9368 known_cond (rtx x, enum rtx_code cond, rtx reg, rtx val)
9370 enum rtx_code code = GET_CODE (x);
9371 const char *fmt;
9372 int i, j;
9374 if (side_effects_p (x))
9375 return x;
9377 /* If either operand of the condition is a floating point value,
9378 then we have to avoid collapsing an EQ comparison. */
9379 if (cond == EQ
9380 && rtx_equal_p (x, reg)
9381 && ! FLOAT_MODE_P (GET_MODE (x))
9382 && ! FLOAT_MODE_P (GET_MODE (val)))
9383 return val;
9385 if (cond == UNEQ && rtx_equal_p (x, reg))
9386 return val;
9388 /* If X is (abs REG) and we know something about REG's relationship
9389 with zero, we may be able to simplify this. */
9391 if (code == ABS && rtx_equal_p (XEXP (x, 0), reg) && val == const0_rtx)
9392 switch (cond)
9394 case GE: case GT: case EQ:
9395 return XEXP (x, 0);
9396 case LT: case LE:
9397 return simplify_gen_unary (NEG, GET_MODE (XEXP (x, 0)),
9398 XEXP (x, 0),
9399 GET_MODE (XEXP (x, 0)));
9400 default:
9401 break;
9404 /* The only other cases we handle are MIN, MAX, and comparisons if the
9405 operands are the same as REG and VAL. */
9407 else if (COMPARISON_P (x) || COMMUTATIVE_ARITH_P (x))
9409 if (rtx_equal_p (XEXP (x, 0), val))
9411 std::swap (val, reg);
9412 cond = swap_condition (cond);
9415 if (rtx_equal_p (XEXP (x, 0), reg) && rtx_equal_p (XEXP (x, 1), val))
9417 if (COMPARISON_P (x))
9419 if (comparison_dominates_p (cond, code))
9420 return const_true_rtx;
9422 code = reversed_comparison_code (x, NULL);
9423 if (code != UNKNOWN
9424 && comparison_dominates_p (cond, code))
9425 return const0_rtx;
9426 else
9427 return x;
9429 else if (code == SMAX || code == SMIN
9430 || code == UMIN || code == UMAX)
9432 int unsignedp = (code == UMIN || code == UMAX);
9434 /* Do not reverse the condition when it is NE or EQ.
9435 This is because we cannot conclude anything about
9436 the value of 'SMAX (x, y)' when x is not equal to y,
9437 but we can when x equals y. */
9438 if ((code == SMAX || code == UMAX)
9439 && ! (cond == EQ || cond == NE))
9440 cond = reverse_condition (cond);
9442 switch (cond)
9444 case GE: case GT:
9445 return unsignedp ? x : XEXP (x, 1);
9446 case LE: case LT:
9447 return unsignedp ? x : XEXP (x, 0);
9448 case GEU: case GTU:
9449 return unsignedp ? XEXP (x, 1) : x;
9450 case LEU: case LTU:
9451 return unsignedp ? XEXP (x, 0) : x;
9452 default:
9453 break;
9458 else if (code == SUBREG)
9460 machine_mode inner_mode = GET_MODE (SUBREG_REG (x));
9461 rtx new_rtx, r = known_cond (SUBREG_REG (x), cond, reg, val);
9463 if (SUBREG_REG (x) != r)
9465 /* We must simplify subreg here, before we lose track of the
9466 original inner_mode. */
9467 new_rtx = simplify_subreg (GET_MODE (x), r,
9468 inner_mode, SUBREG_BYTE (x));
9469 if (new_rtx)
9470 return new_rtx;
9471 else
9472 SUBST (SUBREG_REG (x), r);
9475 return x;
9477 /* We don't have to handle SIGN_EXTEND here, because even in the
9478 case of replacing something with a modeless CONST_INT, a
9479 CONST_INT is already (supposed to be) a valid sign extension for
9480 its narrower mode, which implies it's already properly
9481 sign-extended for the wider mode. Now, for ZERO_EXTEND, the
9482 story is different. */
9483 else if (code == ZERO_EXTEND)
9485 machine_mode inner_mode = GET_MODE (XEXP (x, 0));
9486 rtx new_rtx, r = known_cond (XEXP (x, 0), cond, reg, val);
9488 if (XEXP (x, 0) != r)
9490 /* We must simplify the zero_extend here, before we lose
9491 track of the original inner_mode. */
9492 new_rtx = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
9493 r, inner_mode);
9494 if (new_rtx)
9495 return new_rtx;
9496 else
9497 SUBST (XEXP (x, 0), r);
9500 return x;
9503 fmt = GET_RTX_FORMAT (code);
9504 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
9506 if (fmt[i] == 'e')
9507 SUBST (XEXP (x, i), known_cond (XEXP (x, i), cond, reg, val));
9508 else if (fmt[i] == 'E')
9509 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
9510 SUBST (XVECEXP (x, i, j), known_cond (XVECEXP (x, i, j),
9511 cond, reg, val));
9514 return x;
9517 /* See if X and Y are equal for the purposes of seeing if we can rewrite an
9518 assignment as a field assignment. */
9520 static int
9521 rtx_equal_for_field_assignment_p (rtx x, rtx y, bool widen_x)
9523 if (widen_x && GET_MODE (x) != GET_MODE (y))
9525 if (paradoxical_subreg_p (GET_MODE (x), GET_MODE (y)))
9526 return 0;
9527 if (BYTES_BIG_ENDIAN != WORDS_BIG_ENDIAN)
9528 return 0;
9529 /* For big endian, adjust the memory offset. */
9530 if (BYTES_BIG_ENDIAN)
9531 x = adjust_address_nv (x, GET_MODE (y),
9532 -subreg_lowpart_offset (GET_MODE (x),
9533 GET_MODE (y)));
9534 else
9535 x = adjust_address_nv (x, GET_MODE (y), 0);
9538 if (x == y || rtx_equal_p (x, y))
9539 return 1;
9541 if (x == 0 || y == 0 || GET_MODE (x) != GET_MODE (y))
9542 return 0;
9544 /* Check for a paradoxical SUBREG of a MEM compared with the MEM.
9545 Note that all SUBREGs of MEM are paradoxical; otherwise they
9546 would have been rewritten. */
9547 if (MEM_P (x) && GET_CODE (y) == SUBREG
9548 && MEM_P (SUBREG_REG (y))
9549 && rtx_equal_p (SUBREG_REG (y),
9550 gen_lowpart (GET_MODE (SUBREG_REG (y)), x)))
9551 return 1;
9553 if (MEM_P (y) && GET_CODE (x) == SUBREG
9554 && MEM_P (SUBREG_REG (x))
9555 && rtx_equal_p (SUBREG_REG (x),
9556 gen_lowpart (GET_MODE (SUBREG_REG (x)), y)))
9557 return 1;
9559 /* We used to see if get_last_value of X and Y were the same but that's
9560 not correct. In one direction, we'll cause the assignment to have
9561 the wrong destination and in the other, we'll import a register into this
9562 insn that might already have been dead. So fail if none of the
9563 above cases are true. */
9564 return 0;
9567 /* See if X, a SET operation, can be rewritten as a bit-field assignment.
9568 Return that assignment if so.
9570 We only handle the most common cases. */
9572 static rtx
9573 make_field_assignment (rtx x)
9575 rtx dest = SET_DEST (x);
9576 rtx src = SET_SRC (x);
9577 rtx assign;
9578 rtx rhs, lhs;
9579 HOST_WIDE_INT c1;
9580 HOST_WIDE_INT pos;
9581 unsigned HOST_WIDE_INT len;
9582 rtx other;
9584 /* All the rules in this function are specific to scalar integers. */
9585 scalar_int_mode mode;
9586 if (!is_a <scalar_int_mode> (GET_MODE (dest), &mode))
9587 return x;
9589 /* If SRC was (and (not (ashift (const_int 1) POS)) DEST), this is
9590 a clear of a one-bit field. We will have changed it to
9591 (and (rotate (const_int -2) POS) DEST), so check for that. Also check
9592 for a SUBREG. */
9594 if (GET_CODE (src) == AND && GET_CODE (XEXP (src, 0)) == ROTATE
9595 && CONST_INT_P (XEXP (XEXP (src, 0), 0))
9596 && INTVAL (XEXP (XEXP (src, 0), 0)) == -2
9597 && rtx_equal_for_field_assignment_p (dest, XEXP (src, 1)))
9599 assign = make_extraction (VOIDmode, dest, 0, XEXP (XEXP (src, 0), 1),
9600 1, 1, 1, 0);
9601 if (assign != 0)
9602 return gen_rtx_SET (assign, const0_rtx);
9603 return x;
9606 if (GET_CODE (src) == AND && GET_CODE (XEXP (src, 0)) == SUBREG
9607 && subreg_lowpart_p (XEXP (src, 0))
9608 && partial_subreg_p (XEXP (src, 0))
9609 && GET_CODE (SUBREG_REG (XEXP (src, 0))) == ROTATE
9610 && CONST_INT_P (XEXP (SUBREG_REG (XEXP (src, 0)), 0))
9611 && INTVAL (XEXP (SUBREG_REG (XEXP (src, 0)), 0)) == -2
9612 && rtx_equal_for_field_assignment_p (dest, XEXP (src, 1)))
9614 assign = make_extraction (VOIDmode, dest, 0,
9615 XEXP (SUBREG_REG (XEXP (src, 0)), 1),
9616 1, 1, 1, 0);
9617 if (assign != 0)
9618 return gen_rtx_SET (assign, const0_rtx);
9619 return x;
9622 /* If SRC is (ior (ashift (const_int 1) POS) DEST), this is a set of a
9623 one-bit field. */
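/* For example, DEST |= 1 << 5 arrives here as
   (set DEST (ior (ashift (const_int 1) (const_int 5)) DEST)) and, if
   make_extraction succeeds, is rewritten as a single-bit store along the
   lines of (set (zero_extract DEST (const_int 1) (const_int 5)) (const_int 1)).  */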
9624 if (GET_CODE (src) == IOR && GET_CODE (XEXP (src, 0)) == ASHIFT
9625 && XEXP (XEXP (src, 0), 0) == const1_rtx
9626 && rtx_equal_for_field_assignment_p (dest, XEXP (src, 1)))
9628 assign = make_extraction (VOIDmode, dest, 0, XEXP (XEXP (src, 0), 1),
9629 1, 1, 1, 0);
9630 if (assign != 0)
9631 return gen_rtx_SET (assign, const1_rtx);
9632 return x;
9635 /* If DEST is already a field assignment, i.e. ZERO_EXTRACT, and the
9636 SRC is an AND with all bits of that field set, then we can discard
9637 the AND. */
9638 if (GET_CODE (dest) == ZERO_EXTRACT
9639 && CONST_INT_P (XEXP (dest, 1))
9640 && GET_CODE (src) == AND
9641 && CONST_INT_P (XEXP (src, 1)))
9643 HOST_WIDE_INT width = INTVAL (XEXP (dest, 1));
9644 unsigned HOST_WIDE_INT and_mask = INTVAL (XEXP (src, 1));
9645 unsigned HOST_WIDE_INT ze_mask;
9647 if (width >= HOST_BITS_PER_WIDE_INT)
9648 ze_mask = -1;
9649 else
9650 ze_mask = ((unsigned HOST_WIDE_INT)1 << width) - 1;
9652 /* Complete overlap. We can remove the source AND. */
9653 if ((and_mask & ze_mask) == ze_mask)
9654 return gen_rtx_SET (dest, XEXP (src, 0));
9656 /* Partial overlap. We can reduce the source AND. */
9657 if ((and_mask & ze_mask) != and_mask)
9659 src = gen_rtx_AND (mode, XEXP (src, 0),
9660 gen_int_mode (and_mask & ze_mask, mode));
9661 return gen_rtx_SET (dest, src);
9665 /* The other case we handle is assignments into a constant-position
9666 field. They look like (ior/xor (and DEST C1) OTHER). If C1 represents
9667 a mask that has all one bits except for a group of zero bits and
9668 OTHER is known to have zeros where C1 has ones, this is such an
9669 assignment. Compute the position and length from C1. Shift OTHER
9670 to the appropriate position, force it to the required mode, and
9671 make the extraction. Check for the AND in both operands. */
9673 /* One or more SUBREGs might obscure the constant-position field
9674 assignment. The first one we are likely to encounter is an outer
9675 narrowing SUBREG, which we can just strip for the purposes of
9676 identifying the constant-field assignment. */
9677 scalar_int_mode src_mode = mode;
9678 if (GET_CODE (src) == SUBREG
9679 && subreg_lowpart_p (src)
9680 && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (src)), &src_mode))
9681 src = SUBREG_REG (src);
9683 if (GET_CODE (src) != IOR && GET_CODE (src) != XOR)
9684 return x;
9686 rhs = expand_compound_operation (XEXP (src, 0));
9687 lhs = expand_compound_operation (XEXP (src, 1));
9689 if (GET_CODE (rhs) == AND
9690 && CONST_INT_P (XEXP (rhs, 1))
9691 && rtx_equal_for_field_assignment_p (XEXP (rhs, 0), dest))
9692 c1 = INTVAL (XEXP (rhs, 1)), other = lhs;
9693 /* The second SUBREG that might get in the way is a paradoxical
9694 SUBREG around the first operand of the AND. We want to
9695 pretend the operand is as wide as the destination here. We
9696 do this by adjusting the MEM to wider mode for the sole
9697 purpose of the call to rtx_equal_for_field_assignment_p. Also
9698 note this trick only works for MEMs. */
9699 else if (GET_CODE (rhs) == AND
9700 && paradoxical_subreg_p (XEXP (rhs, 0))
9701 && MEM_P (SUBREG_REG (XEXP (rhs, 0)))
9702 && CONST_INT_P (XEXP (rhs, 1))
9703 && rtx_equal_for_field_assignment_p (SUBREG_REG (XEXP (rhs, 0)),
9704 dest, true))
9705 c1 = INTVAL (XEXP (rhs, 1)), other = lhs;
9706 else if (GET_CODE (lhs) == AND
9707 && CONST_INT_P (XEXP (lhs, 1))
9708 && rtx_equal_for_field_assignment_p (XEXP (lhs, 0), dest))
9709 c1 = INTVAL (XEXP (lhs, 1)), other = rhs;
9710 /* The second SUBREG that might get in the way is a paradoxical
9711 SUBREG around the first operand of the AND. We want to
9712 pretend the operand is as wide as the destination here. We
9713 do this by adjusting the MEM to wider mode for the sole
9714 purpose of the call to rtx_equal_for_field_assignment_p. Also
9715 note this trick only works for MEMs. */
9716 else if (GET_CODE (lhs) == AND
9717 && paradoxical_subreg_p (XEXP (lhs, 0))
9718 && MEM_P (SUBREG_REG (XEXP (lhs, 0)))
9719 && CONST_INT_P (XEXP (lhs, 1))
9720 && rtx_equal_for_field_assignment_p (SUBREG_REG (XEXP (lhs, 0)),
9721 dest, true))
9722 c1 = INTVAL (XEXP (lhs, 1)), other = rhs;
9723 else
9724 return x;
9726 pos = get_pos_from_mask ((~c1) & GET_MODE_MASK (mode), &len);
9727 if (pos < 0
9728 || pos + len > GET_MODE_PRECISION (mode)
9729 || GET_MODE_PRECISION (mode) > HOST_BITS_PER_WIDE_INT
9730 || (c1 & nonzero_bits (other, mode)) != 0)
9731 return x;
9733 assign = make_extraction (VOIDmode, dest, pos, NULL_RTX, len, 1, 1, 0);
9734 if (assign == 0)
9735 return x;
9737 /* The mode to use for the source is the mode of the assignment, or of
9738 what is inside a possible STRICT_LOW_PART. */
9739 machine_mode new_mode = (GET_CODE (assign) == STRICT_LOW_PART
9740 ? GET_MODE (XEXP (assign, 0)) : GET_MODE (assign));
9742 /* Shift OTHER right POS places and make it the source, restricting it
9743 to the proper length and mode. */
9745 src = canon_reg_for_combine (simplify_shift_const (NULL_RTX, LSHIFTRT,
9746 src_mode, other, pos),
9747 dest);
9748 src = force_to_mode (src, new_mode,
9749 len >= HOST_BITS_PER_WIDE_INT
9750 ? HOST_WIDE_INT_M1U
9751 : (HOST_WIDE_INT_1U << len) - 1,
9752 0);
9754 /* If SRC is masked by an AND that does not make a difference in
9755 the value being stored, strip it. */
9756 if (GET_CODE (assign) == ZERO_EXTRACT
9757 && CONST_INT_P (XEXP (assign, 1))
9758 && INTVAL (XEXP (assign, 1)) < HOST_BITS_PER_WIDE_INT
9759 && GET_CODE (src) == AND
9760 && CONST_INT_P (XEXP (src, 1))
9761 && UINTVAL (XEXP (src, 1))
9762 == (HOST_WIDE_INT_1U << INTVAL (XEXP (assign, 1))) - 1)
9763 src = XEXP (src, 0);
9765 return gen_rtx_SET (assign, src);
9768 /* See if X is of the form (+ (* a c) (* b c)) and convert to (* (+ a b) c)
9769 if so. */
9771 static rtx
9772 apply_distributive_law (rtx x)
9774 enum rtx_code code = GET_CODE (x);
9775 enum rtx_code inner_code;
9776 rtx lhs, rhs, other;
9777 rtx tem;
9779 /* Distributivity is not true for floating point as it can change the
9780 value. So we don't do it unless -funsafe-math-optimizations. */
9781 if (FLOAT_MODE_P (GET_MODE (x))
9782 && ! flag_unsafe_math_optimizations)
9783 return x;
9785 /* The outer operation can only be one of the following: */
9786 if (code != IOR && code != AND && code != XOR
9787 && code != PLUS && code != MINUS)
9788 return x;
9790 lhs = XEXP (x, 0);
9791 rhs = XEXP (x, 1);
9793 /* If either operand is a primitive we can't do anything, so get out
9794 fast. */
9795 if (OBJECT_P (lhs) || OBJECT_P (rhs))
9796 return x;
9798 lhs = expand_compound_operation (lhs);
9799 rhs = expand_compound_operation (rhs);
9800 inner_code = GET_CODE (lhs);
9801 if (inner_code != GET_CODE (rhs))
9802 return x;
9804 /* See if the inner and outer operations distribute. */
9805 switch (inner_code)
9807 case LSHIFTRT:
9808 case ASHIFTRT:
9809 case AND:
9810 case IOR:
9811 /* These all distribute except over PLUS. */
9812 if (code == PLUS || code == MINUS)
9813 return x;
9814 break;
9816 case MULT:
9817 if (code != PLUS && code != MINUS)
9818 return x;
9819 break;
9821 case ASHIFT:
9822 /* This is also a multiply, so it distributes over everything. */
9823 break;
9825 /* This used to handle SUBREG, but this turned out to be counter-
9826 productive, since (subreg (op ...)) usually is not handled by
9827 insn patterns, and this "optimization" therefore transformed
9828 recognizable patterns into unrecognizable ones. Therefore the
9829 SUBREG case was removed from here.
9831 It is possible that distributing SUBREG over arithmetic operations
9832 leads to an intermediate result that can then be optimized further,
9833 e.g. by moving the outer SUBREG to the other side of a SET as done
9834 in simplify_set. This seems to have been the original intent of
9835 handling SUBREGs here.
9837 However, with current GCC this does not appear to actually happen,
9838 at least on major platforms. If some case is found where removing
9839 the SUBREG case here prevents follow-on optimizations, distributing
9840 SUBREGs ought to be re-added at that place, e.g. in simplify_set. */
9842 default:
9843 return x;
9846 /* Set LHS and RHS to the inner operands (A and B in the example
9847 above) and set OTHER to the common operand (C in the example).
9848 There is only one way to do this unless the inner operation is
9849 commutative. */
9850 if (COMMUTATIVE_ARITH_P (lhs)
9851 && rtx_equal_p (XEXP (lhs, 0), XEXP (rhs, 0)))
9852 other = XEXP (lhs, 0), lhs = XEXP (lhs, 1), rhs = XEXP (rhs, 1);
9853 else if (COMMUTATIVE_ARITH_P (lhs)
9854 && rtx_equal_p (XEXP (lhs, 0), XEXP (rhs, 1)))
9855 other = XEXP (lhs, 0), lhs = XEXP (lhs, 1), rhs = XEXP (rhs, 0);
9856 else if (COMMUTATIVE_ARITH_P (lhs)
9857 && rtx_equal_p (XEXP (lhs, 1), XEXP (rhs, 0)))
9858 other = XEXP (lhs, 1), lhs = XEXP (lhs, 0), rhs = XEXP (rhs, 1);
9859 else if (rtx_equal_p (XEXP (lhs, 1), XEXP (rhs, 1)))
9860 other = XEXP (lhs, 1), lhs = XEXP (lhs, 0), rhs = XEXP (rhs, 0);
9861 else
9862 return x;
9864 /* Form the new inner operation, seeing if it simplifies first. */
9865 tem = simplify_gen_binary (code, GET_MODE (x), lhs, rhs);
9867 /* There is one exception to the general way of distributing:
9868 (a | c) ^ (b | c) -> (a ^ b) & ~c */
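/* Checking the identity bit by bit: where c is 1 both sides are 0, and
   where c is 0 both sides reduce to a ^ b.  */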
9869 if (code == XOR && inner_code == IOR)
9871 inner_code = AND;
9872 other = simplify_gen_unary (NOT, GET_MODE (x), other, GET_MODE (x));
9875 /* We may be able to continue distributing the result, so call
9876 ourselves recursively on the inner operation before forming the
9877 outer operation, which we return. */
9878 return simplify_gen_binary (inner_code, GET_MODE (x),
9879 apply_distributive_law (tem), other);
9882 /* See if X is of the form (* (+ A B) C), and if so convert to
9883 (+ (* A C) (* B C)) and try to simplify.
9885 Most of the time, this results in no change. However, if some of
9886 the operands are the same or inverses of each other, simplifications
9887 will result.
9889 For example, (and (ior A B) (not B)) can occur as the result of
9890 expanding a bit field assignment. When we apply the distributive
9891 law to this, we get (ior (and A (not B)) (and B (not B))),
9892 which then simplifies to (and A (not B)).
9894 Note that no checks happen on the validity of applying the inverse
9895 distributive law; checking here would be pointless since it can be
9896 done at the few places where this routine is called.
9898 N is the index of the term that is decomposed (the arithmetic operation,
9899 i.e. (+ A B) in the first example above). !N is the index of the term that
9900 is distributed, i.e. of C in the first example above. */
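/* For example, for X = (and (ior A B) C) and N == 0 this forms
   (ior (and A C) (and B C)), recursively applies the distributive law to
   that, and returns the result only if it no longer has the same outer
   code and is cheaper than X.  */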
9901 static rtx
9902 distribute_and_simplify_rtx (rtx x, int n)
9904 machine_mode mode;
9905 enum rtx_code outer_code, inner_code;
9906 rtx decomposed, distributed, inner_op0, inner_op1, new_op0, new_op1, tmp;
9908 /* Distributivity is not true for floating point as it can change the
9909 value. So we don't do it unless -funsafe-math-optimizations. */
9910 if (FLOAT_MODE_P (GET_MODE (x))
9911 && ! flag_unsafe_math_optimizations)
9912 return NULL_RTX;
9914 decomposed = XEXP (x, n);
9915 if (!ARITHMETIC_P (decomposed))
9916 return NULL_RTX;
9918 mode = GET_MODE (x);
9919 outer_code = GET_CODE (x);
9920 distributed = XEXP (x, !n);
9922 inner_code = GET_CODE (decomposed);
9923 inner_op0 = XEXP (decomposed, 0);
9924 inner_op1 = XEXP (decomposed, 1);
9926 /* Special case (and (xor B C) (not A)), which is equivalent to
9927 (xor (ior A B) (ior A C)) */
9928 if (outer_code == AND && inner_code == XOR && GET_CODE (distributed) == NOT)
9930 distributed = XEXP (distributed, 0);
9931 outer_code = IOR;
9934 if (n == 0)
9936 /* Distribute the second term. */
9937 new_op0 = simplify_gen_binary (outer_code, mode, inner_op0, distributed);
9938 new_op1 = simplify_gen_binary (outer_code, mode, inner_op1, distributed);
9940 else
9942 /* Distribute the first term. */
9943 new_op0 = simplify_gen_binary (outer_code, mode, distributed, inner_op0);
9944 new_op1 = simplify_gen_binary (outer_code, mode, distributed, inner_op1);
9947 tmp = apply_distributive_law (simplify_gen_binary (inner_code, mode,
9948 new_op0, new_op1));
9949 if (GET_CODE (tmp) != outer_code
9950 && (set_src_cost (tmp, mode, optimize_this_for_speed_p)
9951 < set_src_cost (x, mode, optimize_this_for_speed_p)))
9952 return tmp;
9954 return NULL_RTX;
9957 /* Simplify a logical `and' of VAROP with the constant CONSTOP, to be done
9958 in MODE. Return an equivalent form, if different from (and VAROP
9959 (const_int CONSTOP)). Otherwise, return NULL_RTX. */
9961 static rtx
9962 simplify_and_const_int_1 (scalar_int_mode mode, rtx varop,
9963 unsigned HOST_WIDE_INT constop)
9965 unsigned HOST_WIDE_INT nonzero;
9966 unsigned HOST_WIDE_INT orig_constop;
9967 rtx orig_varop;
9968 int i;
9970 orig_varop = varop;
9971 orig_constop = constop;
9972 if (GET_CODE (varop) == CLOBBER)
9973 return NULL_RTX;
9975 /* Simplify VAROP knowing that we will be only looking at some of the
9976 bits in it.
9978 Note by passing in CONSTOP, we guarantee that the bits not set in
9979 CONSTOP are not significant and will never be examined. We must
9980 ensure that is the case by explicitly masking out those bits
9981 before returning. */
9982 varop = force_to_mode (varop, mode, constop, 0);
9984 /* If VAROP is a CLOBBER, we will fail so return it. */
9985 if (GET_CODE (varop) == CLOBBER)
9986 return varop;
9988 /* If VAROP is a CONST_INT, then we need to apply the mask in CONSTOP
9989 to VAROP and return the new constant. */
9990 if (CONST_INT_P (varop))
9991 return gen_int_mode (INTVAL (varop) & constop, mode);
9993 /* See what bits may be nonzero in VAROP. Unlike the general case of
9994 a call to nonzero_bits, here we don't care about bits outside
9995 MODE. */
9997 nonzero = nonzero_bits (varop, mode) & GET_MODE_MASK (mode);
9999 /* Turn off all bits in the constant that are known to already be zero.
10000 Thus, if the AND isn't needed at all, we will have CONSTOP == NONZERO_BITS
10001 which is tested below. */
10003 constop &= nonzero;
10005 /* If we don't have any bits left, return zero. */
10006 if (constop == 0)
10007 return const0_rtx;
10009 /* If VAROP is a NEG of something known to be zero or 1 and CONSTOP is
10010 a power of two, we can replace this with an ASHIFT. */
10011 if (GET_CODE (varop) == NEG && nonzero_bits (XEXP (varop, 0), mode) == 1
10012 && (i = exact_log2 (constop)) >= 0)
10013 return simplify_shift_const (NULL_RTX, ASHIFT, mode, XEXP (varop, 0), i);
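/* For example, if X is known to be 0 or 1, (and (neg X) (const_int 8))
   is 8 when X is 1 and 0 when X is 0, which is exactly
   (ashift X (const_int 3)).  */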
10015 /* If VAROP is an IOR or XOR, apply the AND to both branches of the IOR
10016 or XOR, then try to apply the distributive law. This may eliminate
10017 operations if either branch can be simplified because of the AND.
10018 It may also make some cases more complex, but those cases probably
10019 won't match a pattern either with or without this. */
10021 if (GET_CODE (varop) == IOR || GET_CODE (varop) == XOR)
10023 scalar_int_mode varop_mode = as_a <scalar_int_mode> (GET_MODE (varop));
10024 return
10025 gen_lowpart
10026 (mode,
10027 apply_distributive_law
10028 (simplify_gen_binary (GET_CODE (varop), varop_mode,
10029 simplify_and_const_int (NULL_RTX, varop_mode,
10030 XEXP (varop, 0),
10031 constop),
10032 simplify_and_const_int (NULL_RTX, varop_mode,
10033 XEXP (varop, 1),
10034 constop))));
10037 /* If VAROP is PLUS, and the constant is a mask of low bits, distribute
10038 the AND and see if one of the operands simplifies to zero. If so, we
10039 may eliminate it. */
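/* For example, (and (plus X (const_int 16)) (const_int 7)): 16 & 7 is 0 and
   adding a multiple of 8 cannot change the low three bits, so the PLUS drops
   out and the result is (and X (const_int 7)).  */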
10041 if (GET_CODE (varop) == PLUS
10042 && pow2p_hwi (constop + 1))
10044 rtx o0, o1;
10046 o0 = simplify_and_const_int (NULL_RTX, mode, XEXP (varop, 0), constop);
10047 o1 = simplify_and_const_int (NULL_RTX, mode, XEXP (varop, 1), constop);
10048 if (o0 == const0_rtx)
10049 return o1;
10050 if (o1 == const0_rtx)
10051 return o0;
10054 /* Make a SUBREG if necessary. If we can't make it, fail. */
10055 varop = gen_lowpart (mode, varop);
10056 if (varop == NULL_RTX || GET_CODE (varop) == CLOBBER)
10057 return NULL_RTX;
10059 /* If we are only masking insignificant bits, return VAROP. */
10060 if (constop == nonzero)
10061 return varop;
10063 if (varop == orig_varop && constop == orig_constop)
10064 return NULL_RTX;
10066 /* Otherwise, return an AND. */
10067 return simplify_gen_binary (AND, mode, varop, gen_int_mode (constop, mode));
10071 /* We have X, a logical `and' of VAROP with the constant CONSTOP, to be done
10072 in MODE.
10074 Return an equivalent form, if different from X. Otherwise, return X. If
10075 X is zero, we are to always construct the equivalent form. */
10077 static rtx
10078 simplify_and_const_int (rtx x, scalar_int_mode mode, rtx varop,
10079 unsigned HOST_WIDE_INT constop)
10081 rtx tem = simplify_and_const_int_1 (mode, varop, constop);
10082 if (tem)
10083 return tem;
10085 if (!x)
10086 x = simplify_gen_binary (AND, GET_MODE (varop), varop,
10087 gen_int_mode (constop, mode));
10088 if (GET_MODE (x) != mode)
10089 x = gen_lowpart (mode, x);
10090 return x;
10093 /* Given a REG X of mode XMODE, compute which bits in X can be nonzero.
10094 We don't care about bits outside of those defined in MODE.
10096 For most X this is simply GET_MODE_MASK (GET_MODE (X)), but if X is
10097 a shift, AND, or zero_extract, we can do better. */
10099 static rtx
10100 reg_nonzero_bits_for_combine (const_rtx x, scalar_int_mode xmode,
10101 scalar_int_mode mode,
10102 unsigned HOST_WIDE_INT *nonzero)
10104 rtx tem;
10105 reg_stat_type *rsp;
10107 /* If X is a register whose nonzero bits value is current, use it.
10108 Otherwise, if X is a register whose value we can find, use that
10109 value. Otherwise, use the previously-computed global nonzero bits
10110 for this register. */
10112 rsp = &reg_stat[REGNO (x)];
10113 if (rsp->last_set_value != 0
10114 && (rsp->last_set_mode == mode
10115 || (GET_MODE_CLASS (rsp->last_set_mode) == MODE_INT
10116 && GET_MODE_CLASS (mode) == MODE_INT))
10117 && ((rsp->last_set_label >= label_tick_ebb_start
10118 && rsp->last_set_label < label_tick)
10119 || (rsp->last_set_label == label_tick
10120 && DF_INSN_LUID (rsp->last_set) < subst_low_luid)
10121 || (REGNO (x) >= FIRST_PSEUDO_REGISTER
10122 && REGNO (x) < reg_n_sets_max
10123 && REG_N_SETS (REGNO (x)) == 1
10124 && !REGNO_REG_SET_P
10125 (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb),
10126 REGNO (x)))))
10128 /* Note that, even if the precision of last_set_mode is lower than that
10129 of mode, record_value_for_reg invoked nonzero_bits on the register
10130 with nonzero_bits_mode (because last_set_mode is necessarily integral
10131 and HWI_COMPUTABLE_MODE_P in this case) so bits in nonzero_bits_mode
10132 are all valid, hence in mode too since nonzero_bits_mode is defined
10133 to the largest HWI_COMPUTABLE_MODE_P mode. */
10134 *nonzero &= rsp->last_set_nonzero_bits;
10135 return NULL;
10138 tem = get_last_value (x);
10139 if (tem)
10141 if (SHORT_IMMEDIATES_SIGN_EXTEND)
10142 tem = sign_extend_short_imm (tem, xmode, GET_MODE_PRECISION (mode));
10144 return tem;
10147 if (nonzero_sign_valid && rsp->nonzero_bits)
10149 unsigned HOST_WIDE_INT mask = rsp->nonzero_bits;
10151 if (GET_MODE_PRECISION (xmode) < GET_MODE_PRECISION (mode))
10152 /* We don't know anything about the upper bits. */
10153 mask |= GET_MODE_MASK (mode) ^ GET_MODE_MASK (xmode);
10155 *nonzero &= mask;
10158 return NULL;
10161 /* Given a reg X of mode XMODE, return the number of bits at the high-order
10162 end of X that are known to be equal to the sign bit. X will be used
10163 in mode MODE; the returned value will always be between 1 and the
10164 number of bits in MODE. */
10166 static rtx
10167 reg_num_sign_bit_copies_for_combine (const_rtx x, scalar_int_mode xmode,
10168 scalar_int_mode mode,
10169 unsigned int *result)
10171 rtx tem;
10172 reg_stat_type *rsp;
10174 rsp = &reg_stat[REGNO (x)];
10175 if (rsp->last_set_value != 0
10176 && rsp->last_set_mode == mode
10177 && ((rsp->last_set_label >= label_tick_ebb_start
10178 && rsp->last_set_label < label_tick)
10179 || (rsp->last_set_label == label_tick
10180 && DF_INSN_LUID (rsp->last_set) < subst_low_luid)
10181 || (REGNO (x) >= FIRST_PSEUDO_REGISTER
10182 && REGNO (x) < reg_n_sets_max
10183 && REG_N_SETS (REGNO (x)) == 1
10184 && !REGNO_REG_SET_P
10185 (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb),
10186 REGNO (x)))))
10188 *result = rsp->last_set_sign_bit_copies;
10189 return NULL;
10192 tem = get_last_value (x);
10193 if (tem != 0)
10194 return tem;
10196 if (nonzero_sign_valid && rsp->sign_bit_copies != 0
10197 && GET_MODE_PRECISION (xmode) == GET_MODE_PRECISION (mode))
10198 *result = rsp->sign_bit_copies;
10200 return NULL;
10203 /* Return the number of "extended" bits there are in X, when interpreted
10204 as a quantity in MODE whose signedness is indicated by UNSIGNEDP. For
10205 unsigned quantities, this is the number of high-order zero bits.
10206 For signed quantities, this is the number of copies of the sign bit
10207 minus 1. In both cases, this function returns the number of "spare"
10208 bits. For example, if two quantities for which this function returns
10209 at least 1 are added, the addition is known not to overflow.
10211 This function will always return 0 unless called during combine, which
10212 implies that it must be called from a define_split. */
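/* For example, a 32-bit unsigned quantity whose nonzero_bits are 0xff has
   31 - 7 = 24 "spare" high-order zero bits; for a signed quantity with
   9 sign-bit copies the count is 9 - 1 = 8.  */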
10214 unsigned int
10215 extended_count (const_rtx x, machine_mode mode, int unsignedp)
10217 if (nonzero_sign_valid == 0)
10218 return 0;
10220 scalar_int_mode int_mode;
10221 return (unsignedp
10222 ? (is_a <scalar_int_mode> (mode, &int_mode)
10223 && HWI_COMPUTABLE_MODE_P (int_mode)
10224 ? (unsigned int) (GET_MODE_PRECISION (int_mode) - 1
10225 - floor_log2 (nonzero_bits (x, int_mode)))
10226 : 0)
10227 : num_sign_bit_copies (x, mode) - 1);
10230 /* This function is called from `simplify_shift_const' to merge two
10231 outer operations. Specifically, we have already found that we need
10232 to perform operation *POP0 with constant *PCONST0 at the outermost
10233 position. We would now like to also perform OP1 with constant CONST1
10234 (with *POP0 being done last).
10236 Return 1 if we can do the operation and update *POP0 and *PCONST0 with
10237 the resulting operation. *PCOMP_P is set to 1 if we would need to
10238 complement the innermost operand, otherwise it is unchanged.
10240 MODE is the mode in which the operation will be done. No bits outside
10241 the width of this mode matter. It is assumed that the width of this mode
10242 is smaller than or equal to HOST_BITS_PER_WIDE_INT.
10244 If *POP0 or OP1 is UNKNOWN, it means no operation is required. Only NEG, PLUS,
10245 IOR, XOR, and AND are supported. We may set *POP0 to SET if the proper
10246 result is simply *PCONST0.
10248 If the resulting operation cannot be expressed as one operation, we
10249 return 0 and do not change *POP0, *PCONST0, and *PCOMP_P. */
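/* For instance, an outer (and ... 0xff00) merged with a further (and ... 0x0ff0)
   becomes the single operation (and ... 0x0f00); merging an outer IOR with an
   AND of the same constant C yields SET, since (X & C) | C is simply C.  */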
10251 static int
10252 merge_outer_ops (enum rtx_code *pop0, HOST_WIDE_INT *pconst0, enum rtx_code op1,
10253 HOST_WIDE_INT const1, machine_mode mode, int *pcomp_p)
10254 enum rtx_code op0 = *pop0;
10255 HOST_WIDE_INT const0 = *pconst0;
10257 const0 &= GET_MODE_MASK (mode);
10258 const1 &= GET_MODE_MASK (mode);
10260 /* If OP0 is an AND, clear unimportant bits in CONST1. */
10261 if (op0 == AND)
10262 const1 &= const0;
10264 /* If OP0 or OP1 is UNKNOWN, this is easy. Similarly if they are the same or
10265 if OP0 is SET. */
10267 if (op1 == UNKNOWN || op0 == SET)
10268 return 1;
10270 else if (op0 == UNKNOWN)
10271 op0 = op1, const0 = const1;
10273 else if (op0 == op1)
10275 switch (op0)
10277 case AND:
10278 const0 &= const1;
10279 break;
10280 case IOR:
10281 const0 |= const1;
10282 break;
10283 case XOR:
10284 const0 ^= const1;
10285 break;
10286 case PLUS:
10287 const0 += const1;
10288 break;
10289 case NEG:
10290 op0 = UNKNOWN;
10291 break;
10292 default:
10293 break;
10297 /* Otherwise, if either is a PLUS or NEG, we can't do anything. */
10298 else if (op0 == PLUS || op1 == PLUS || op0 == NEG || op1 == NEG)
10299 return 0;
10301 /* If the two constants aren't the same, we can't do anything. The
10302 remaining six cases can all be done. */
10303 else if (const0 != const1)
10304 return 0;
10306 else
10307 switch (op0)
10309 case IOR:
10310 if (op1 == AND)
10311 /* (a & b) | b == b */
10312 op0 = SET;
10313 else /* op1 == XOR */
10314 /* (a ^ b) | b == a | b */
10315 ;
10316 break;
10318 case XOR:
10319 if (op1 == AND)
10320 /* (a & b) ^ b == (~a) & b */
10321 op0 = AND, *pcomp_p = 1;
10322 else /* op1 == IOR */
10323 /* (a | b) ^ b == a & ~b */
10324 op0 = AND, const0 = ~const0;
10325 break;
10327 case AND:
10328 if (op1 == IOR)
10329 /* (a | b) & b == b */
10330 op0 = SET;
10331 else /* op1 == XOR */
10332 /* (a ^ b) & b == (~a) & b */
10333 *pcomp_p = 1;
10334 break;
10335 default:
10336 break;
10339 /* Check for NO-OP cases. */
10340 const0 &= GET_MODE_MASK (mode);
10341 if (const0 == 0
10342 && (op0 == IOR || op0 == XOR || op0 == PLUS))
10343 op0 = UNKNOWN;
10344 else if (const0 == 0 && op0 == AND)
10345 op0 = SET;
10346 else if ((unsigned HOST_WIDE_INT) const0 == GET_MODE_MASK (mode)
10347 && op0 == AND)
10348 op0 = UNKNOWN;
10350 *pop0 = op0;
10352 /* ??? Slightly redundant with the above mask, but not entirely.
10353 Moving this above means we'd have to sign-extend the mode mask
10354 for the final test. */
10355 if (op0 != UNKNOWN && op0 != NEG)
10356 *pconst0 = trunc_int_for_mode (const0, mode);
10358 return 1;
10361 /* A helper to simplify_shift_const_1 to determine the mode we can perform
10362 the shift in. The original shift operation CODE is performed on OP in
10363 ORIG_MODE. Return the wider mode MODE if we can perform the operation
10364 in that mode. Return ORIG_MODE otherwise. We can also assume that the
10365 result of the shift is subject to operation OUTER_CODE with operand
10366 OUTER_CONST. */
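/* For example, (lshiftrt:QI OP 2) can be widened to SImode when OP is known
   to be zero-extended from QImode, since the bits shifted in from the left
   are zero either way; (ashiftrt:QI OP 2) can be widened when OP has at
   least 25 sign-bit copies in SImode.  */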
10368 static scalar_int_mode
10369 try_widen_shift_mode (enum rtx_code code, rtx op, int count,
10370 scalar_int_mode orig_mode, scalar_int_mode mode,
10371 enum rtx_code outer_code, HOST_WIDE_INT outer_const)
10373 gcc_assert (GET_MODE_PRECISION (mode) > GET_MODE_PRECISION (orig_mode));
10375 /* In general we can't perform the operation in a wider mode for right shifts and rotates. */
10376 switch (code)
10378 case ASHIFTRT:
10379 /* We can still widen if the bits brought in from the left are identical
10380 to the sign bit of ORIG_MODE. */
10381 if (num_sign_bit_copies (op, mode)
10382 > (unsigned) (GET_MODE_PRECISION (mode)
10383 - GET_MODE_PRECISION (orig_mode)))
10384 return mode;
10385 return orig_mode;
10387 case LSHIFTRT:
10388 /* Similarly here but with zero bits. */
10389 if (HWI_COMPUTABLE_MODE_P (mode)
10390 && (nonzero_bits (op, mode) & ~GET_MODE_MASK (orig_mode)) == 0)
10391 return mode;
10393 /* We can also widen if the bits brought in will be masked off. This
10394 operation is performed in ORIG_MODE. */
10395 if (outer_code == AND)
10397 int care_bits = low_bitmask_len (orig_mode, outer_const);
10399 if (care_bits >= 0
10400 && GET_MODE_PRECISION (orig_mode) - care_bits >= count)
10401 return mode;
10403 /* fall through */
10405 case ROTATE:
10406 return orig_mode;
10408 case ROTATERT:
10409 gcc_unreachable ();
10411 default:
10412 return mode;
10416 /* Simplify a shift of VAROP by ORIG_COUNT bits. CODE says what kind
10417 of shift. The result of the shift is RESULT_MODE. Return NULL_RTX
10418 if we cannot simplify it. Otherwise, return a simplified value.
10420 The shift is normally computed in the widest mode we find in VAROP, as
10421 long as it isn't a different number of words than RESULT_MODE. Exceptions
10422 are ASHIFTRT and ROTATE, which are always done in their original mode. */
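/* For example, given (lshiftrt:SI X (const_int 8)) as VAROP with CODE == ASHIFT
   and ORIG_COUNT == 8, the two shifts cancel and the bits they clear become an
   outer mask, so the result is (and:SI X (const_int -256)).  */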
10424 static rtx
10425 simplify_shift_const_1 (enum rtx_code code, machine_mode result_mode,
10426 rtx varop, int orig_count)
10428 enum rtx_code orig_code = code;
10429 rtx orig_varop = varop;
10430 int count;
10431 machine_mode mode = result_mode;
10432 machine_mode shift_mode;
10433 scalar_int_mode tmode, inner_mode, int_mode, int_varop_mode, int_result_mode;
10434 unsigned int mode_words
10435 = (GET_MODE_SIZE (mode) + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD;
10436 /* We form (outer_op (code varop count) (outer_const)). */
10437 enum rtx_code outer_op = UNKNOWN;
10438 HOST_WIDE_INT outer_const = 0;
10439 int complement_p = 0;
10440 rtx new_rtx, x;
10442 /* Make sure to truncate the "natural" shift on the way in. We don't
10443 want to do this inside the loop as it makes it more difficult to
10444 combine shifts. */
10445 if (SHIFT_COUNT_TRUNCATED)
10446 orig_count &= GET_MODE_UNIT_BITSIZE (mode) - 1;
10448 /* If we were given an invalid count, don't do anything except exactly
10449 what was requested. */
10451 if (orig_count < 0 || orig_count >= (int) GET_MODE_UNIT_PRECISION (mode))
10452 return NULL_RTX;
10454 count = orig_count;
10456 /* Unless one of the branches of the `if' in this loop does a `continue',
10457 we will `break' the loop after the `if'. */
10459 while (count != 0)
10461 /* If we have an operand of (clobber (const_int 0)), fail. */
10462 if (GET_CODE (varop) == CLOBBER)
10463 return NULL_RTX;
10465 /* Convert ROTATERT to ROTATE. */
10466 if (code == ROTATERT)
10468 unsigned int bitsize = GET_MODE_UNIT_PRECISION (result_mode);
10469 code = ROTATE;
10470 count = bitsize - count;
10473 shift_mode = result_mode;
10474 if (shift_mode != mode)
10476 /* We only change the modes of scalar shifts. */
10477 int_mode = as_a <scalar_int_mode> (mode);
10478 int_result_mode = as_a <scalar_int_mode> (result_mode);
10479 shift_mode = try_widen_shift_mode (code, varop, count,
10480 int_result_mode, int_mode,
10481 outer_op, outer_const);
10484 scalar_int_mode shift_unit_mode
10485 = as_a <scalar_int_mode> (GET_MODE_INNER (shift_mode));
10487 /* Handle cases where the count is greater than the size of the mode
10488 minus 1. For ASHIFTRT, use the size minus one as the count (this can
10489 occur when simplifying (lshiftrt (ashiftrt ..))). For rotates,
10490 take the count modulo the size. For other shifts, the result is
10491 zero.
10493 Since these shifts are being produced by the compiler by combining
10494 multiple operations, each of which is defined, we know what the
10495 result is supposed to be. */
10497 if (count > (GET_MODE_PRECISION (shift_unit_mode) - 1))
10499 if (code == ASHIFTRT)
10500 count = GET_MODE_PRECISION (shift_unit_mode) - 1;
10501 else if (code == ROTATE || code == ROTATERT)
10502 count %= GET_MODE_PRECISION (shift_unit_mode);
10503 else
10505 /* We can't simply return zero because there may be an
10506 outer op. */
10507 varop = const0_rtx;
10508 count = 0;
10509 break;
10513 /* If we discovered we had to complement VAROP, leave. Making a NOT
10514 here would cause an infinite loop. */
10515 if (complement_p)
10516 break;
10518 if (shift_mode == shift_unit_mode)
10520 /* An arithmetic right shift of a quantity known to be -1 or 0
10521 is a no-op. */
10522 if (code == ASHIFTRT
10523 && (num_sign_bit_copies (varop, shift_unit_mode)
10524 == GET_MODE_PRECISION (shift_unit_mode)))
10526 count = 0;
10527 break;
10530 /* If we are doing an arithmetic right shift and discarding all but
10531 the sign bit copies, this is equivalent to doing a shift by the
10532 bitsize minus one. Convert it into that shift because it will
10533 often allow other simplifications. */
10535 if (code == ASHIFTRT
10536 && (count + num_sign_bit_copies (varop, shift_unit_mode)
10537 >= GET_MODE_PRECISION (shift_unit_mode)))
10538 count = GET_MODE_PRECISION (shift_unit_mode) - 1;
10540 /* We simplify the tests below and elsewhere by converting
10541 ASHIFTRT to LSHIFTRT if we know the sign bit is clear.
10542 `make_compound_operation' will convert it to an ASHIFTRT for
10543 those machines (such as VAX) that don't have an LSHIFTRT. */
10544 if (code == ASHIFTRT
10545 && HWI_COMPUTABLE_MODE_P (shift_unit_mode)
10546 && val_signbit_known_clear_p (shift_unit_mode,
10547 nonzero_bits (varop,
10548 shift_unit_mode)))
10549 code = LSHIFTRT;
10551 if (((code == LSHIFTRT
10552 && HWI_COMPUTABLE_MODE_P (shift_unit_mode)
10553 && !(nonzero_bits (varop, shift_unit_mode) >> count))
10554 || (code == ASHIFT
10555 && HWI_COMPUTABLE_MODE_P (shift_unit_mode)
10556 && !((nonzero_bits (varop, shift_unit_mode) << count)
10557 & GET_MODE_MASK (shift_unit_mode))))
10558 && !side_effects_p (varop))
10559 varop = const0_rtx;
10562 switch (GET_CODE (varop))
10564 case SIGN_EXTEND:
10565 case ZERO_EXTEND:
10566 case SIGN_EXTRACT:
10567 case ZERO_EXTRACT:
10568 new_rtx = expand_compound_operation (varop);
10569 if (new_rtx != varop)
10571 varop = new_rtx;
10572 continue;
10574 break;
10576 case MEM:
10577 /* The following rules apply only to scalars. */
10578 if (shift_mode != shift_unit_mode)
10579 break;
10580 int_mode = as_a <scalar_int_mode> (mode);
10582 /* If we have (xshiftrt (mem ...) C) and C is MODE_WIDTH
10583 minus the width of a smaller mode, we can do this with a
10584 SIGN_EXTEND or ZERO_EXTEND from the narrower memory location. */
10585 if ((code == ASHIFTRT || code == LSHIFTRT)
10586 && ! mode_dependent_address_p (XEXP (varop, 0),
10587 MEM_ADDR_SPACE (varop))
10588 && ! MEM_VOLATILE_P (varop)
10589 && (int_mode_for_size (GET_MODE_BITSIZE (int_mode) - count, 1)
10590 .exists (&tmode)))
10592 new_rtx = adjust_address_nv (varop, tmode,
10593 BYTES_BIG_ENDIAN ? 0
10594 : count / BITS_PER_UNIT);
10596 varop = gen_rtx_fmt_e (code == ASHIFTRT ? SIGN_EXTEND
10597 : ZERO_EXTEND, int_mode, new_rtx);
10598 count = 0;
10599 continue;
10601 break;
10603 case SUBREG:
10604 /* The following rules apply only to scalars. */
10605 if (shift_mode != shift_unit_mode)
10606 break;
10607 int_mode = as_a <scalar_int_mode> (mode);
10608 int_varop_mode = as_a <scalar_int_mode> (GET_MODE (varop));
10610 /* If VAROP is a SUBREG, strip it as long as the inner operand has
10611 the same number of words as what we've seen so far. Then store
10612 the widest mode in MODE. */
10613 if (subreg_lowpart_p (varop)
10614 && is_int_mode (GET_MODE (SUBREG_REG (varop)), &inner_mode)
10615 && GET_MODE_SIZE (inner_mode) > GET_MODE_SIZE (int_varop_mode)
10616 && (unsigned int) ((GET_MODE_SIZE (inner_mode)
10617 + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)
10618 == mode_words
10619 && GET_MODE_CLASS (int_varop_mode) == MODE_INT)
10621 varop = SUBREG_REG (varop);
10622 if (GET_MODE_SIZE (inner_mode) > GET_MODE_SIZE (int_mode))
10623 mode = inner_mode;
10624 continue;
10626 break;
10628 case MULT:
10629 /* Some machines use MULT instead of ASHIFT because MULT
10630 is cheaper. But it is still better on those machines to
10631 merge two shifts into one. */
10632 if (CONST_INT_P (XEXP (varop, 1))
10633 && exact_log2 (UINTVAL (XEXP (varop, 1))) >= 0)
10635 varop
10636 = simplify_gen_binary (ASHIFT, GET_MODE (varop),
10637 XEXP (varop, 0),
10638 GEN_INT (exact_log2 (
10639 UINTVAL (XEXP (varop, 1)))));
10640 continue;
10642 break;
10644 case UDIV:
10645 /* Similar, for when divides are cheaper. */
10646 if (CONST_INT_P (XEXP (varop, 1))
10647 && exact_log2 (UINTVAL (XEXP (varop, 1))) >= 0)
10649 varop
10650 = simplify_gen_binary (LSHIFTRT, GET_MODE (varop),
10651 XEXP (varop, 0),
10652 GEN_INT (exact_log2 (
10653 UINTVAL (XEXP (varop, 1)))));
10654 continue;
10656 break;
10658 case ASHIFTRT:
10659 /* If we are extracting just the sign bit of an arithmetic
10660 right shift, that shift is not needed. However, the sign
10661 bit of a wider mode may be different from what would be
10662 interpreted as the sign bit in a narrower mode, so, if
10663 the result is narrower, don't discard the shift. */
10664 if (code == LSHIFTRT
10665 && count == (GET_MODE_UNIT_BITSIZE (result_mode) - 1)
10666 && (GET_MODE_UNIT_BITSIZE (result_mode)
10667 >= GET_MODE_UNIT_BITSIZE (GET_MODE (varop))))
10669 varop = XEXP (varop, 0);
10670 continue;
10673 /* fall through */
10675 case LSHIFTRT:
10676 case ASHIFT:
10677 case ROTATE:
10678 /* The following rules apply only to scalars. */
10679 if (shift_mode != shift_unit_mode)
10680 break;
10681 int_mode = as_a <scalar_int_mode> (mode);
10682 int_varop_mode = as_a <scalar_int_mode> (GET_MODE (varop));
10683 int_result_mode = as_a <scalar_int_mode> (result_mode);
10685 /* Here we have two nested shifts. The result is usually the
10686 AND of a new shift with a mask. We compute the result below. */
10687 if (CONST_INT_P (XEXP (varop, 1))
10688 && INTVAL (XEXP (varop, 1)) >= 0
10689 && INTVAL (XEXP (varop, 1)) < GET_MODE_PRECISION (int_varop_mode)
10690 && HWI_COMPUTABLE_MODE_P (int_result_mode)
10691 && HWI_COMPUTABLE_MODE_P (int_mode))
10693 enum rtx_code first_code = GET_CODE (varop);
10694 unsigned int first_count = INTVAL (XEXP (varop, 1));
10695 unsigned HOST_WIDE_INT mask;
10696 rtx mask_rtx;
10698 /* We have one common special case. We can't do any merging if
10699 the inner code is an ASHIFTRT of a smaller mode. However, if
10700 we have (ashift:M1 (subreg:M1 (ashiftrt:M2 FOO C1) 0) C2)
10701 with C2 == GET_MODE_BITSIZE (M1) - GET_MODE_BITSIZE (M2),
10702 we can convert it to
10703 (ashiftrt:M1 (ashift:M1 (and:M1 (subreg:M1 FOO 0) C3) C2) C1).
10704 This simplifies certain SIGN_EXTEND operations. */
10705 if (code == ASHIFT && first_code == ASHIFTRT
10706 && count == (GET_MODE_PRECISION (int_result_mode)
10707 - GET_MODE_PRECISION (int_varop_mode)))
10709 /* C3 has the low-order C1 bits zero. */
10711 mask = GET_MODE_MASK (int_mode)
10712 & ~((HOST_WIDE_INT_1U << first_count) - 1);
10714 varop = simplify_and_const_int (NULL_RTX, int_result_mode,
10715 XEXP (varop, 0), mask);
10716 varop = simplify_shift_const (NULL_RTX, ASHIFT,
10717 int_result_mode, varop, count);
10718 count = first_count;
10719 code = ASHIFTRT;
10720 continue;
10723 /* If this was (ashiftrt (ashift foo C1) C2) and FOO has more
10724 than C1 high-order bits equal to the sign bit, we can convert
10725 this to either an ASHIFT or an ASHIFTRT depending on the
10726 two counts.
10728 We cannot do this if VAROP's mode is not SHIFT_UNIT_MODE. */
10730 if (code == ASHIFTRT && first_code == ASHIFT
10731 && int_varop_mode == shift_unit_mode
10732 && (num_sign_bit_copies (XEXP (varop, 0), shift_unit_mode)
10733 > first_count))
10735 varop = XEXP (varop, 0);
10736 count -= first_count;
10737 if (count < 0)
10739 count = -count;
10740 code = ASHIFT;
10743 continue;
10746 /* There are some cases we can't do. If CODE is ASHIFTRT,
10747 we can only do this if FIRST_CODE is also ASHIFTRT.
10749 We can't do the case when CODE is ROTATE and FIRST_CODE is
10750 ASHIFTRT.
10752 If the mode of this shift is not the mode of the outer shift,
10753 we can't do this if either shift is a right shift or ROTATE.
10755 Finally, we can't do any of these if the mode is too wide
10756 unless the codes are the same.
10758 Handle the case where the shift codes are the same
10759 first. */
10761 if (code == first_code)
10763 if (int_varop_mode != int_result_mode
10764 && (code == ASHIFTRT || code == LSHIFTRT
10765 || code == ROTATE))
10766 break;
10768 count += first_count;
10769 varop = XEXP (varop, 0);
10770 continue;
10773 if (code == ASHIFTRT
10774 || (code == ROTATE && first_code == ASHIFTRT)
10775 || GET_MODE_PRECISION (int_mode) > HOST_BITS_PER_WIDE_INT
10776 || (int_varop_mode != int_result_mode
10777 && (first_code == ASHIFTRT || first_code == LSHIFTRT
10778 || first_code == ROTATE
10779 || code == ROTATE)))
10780 break;
10782 /* To compute the mask to apply after the shift, shift the
10783 nonzero bits of the inner shift the same way the
10784 outer shift will. */
10786 mask_rtx = gen_int_mode (nonzero_bits (varop, int_varop_mode),
10787 int_result_mode);
10789 mask_rtx
10790 = simplify_const_binary_operation (code, int_result_mode,
10791 mask_rtx, GEN_INT (count));
10793 /* Give up if we can't compute an outer operation to use. */
10794 if (mask_rtx == 0
10795 || !CONST_INT_P (mask_rtx)
10796 || ! merge_outer_ops (&outer_op, &outer_const, AND,
10797 INTVAL (mask_rtx),
10798 int_result_mode, &complement_p))
10799 break;
10801 /* If the shifts are in the same direction, we add the
10802 counts. Otherwise, we subtract them. */
10803 if ((code == ASHIFTRT || code == LSHIFTRT)
10804 == (first_code == ASHIFTRT || first_code == LSHIFTRT))
10805 count += first_count;
10806 else
10807 count -= first_count;
10809 /* If COUNT is positive, the new shift is usually CODE,
10810 except in the two cases below, where it is FIRST_CODE.
10811 If the count is negative, FIRST_CODE should
10812 always be used. */
10813 if (count > 0
10814 && ((first_code == ROTATE && code == ASHIFT)
10815 || (first_code == ASHIFTRT && code == LSHIFTRT)))
10816 code = first_code;
10817 else if (count < 0)
10818 code = first_code, count = -count;
10820 varop = XEXP (varop, 0);
10821 continue;
10824 /* If we have (A << B << C) for any shift, we can convert this to
10825 (A << C << B). This wins if A is a constant. Only try this if
10826 B is not a constant. */
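/* E.g. (ashift (ashift (const_int 4) B) 2) becomes (ashift (const_int 16) B);
   the shift of the constant folds at compile time.  */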
10828 else if (GET_CODE (varop) == code
10829 && CONST_INT_P (XEXP (varop, 0))
10830 && !CONST_INT_P (XEXP (varop, 1)))
10832 /* For ((unsigned) (cstULL >> count)) >> cst2 we have to make
10833 sure the result will be masked. See PR70222. */
10834 if (code == LSHIFTRT
10835 && int_mode != int_result_mode
10836 && !merge_outer_ops (&outer_op, &outer_const, AND,
10837 GET_MODE_MASK (int_result_mode)
10838 >> orig_count, int_result_mode,
10839 &complement_p))
10840 break;
10841 /* For ((int) (cstLL >> count)) >> cst2 just give up. Queuing
10842 up outer sign extension (often left and right shift) is
10843 hardly more efficient than the original. See PR70429. */
10844 if (code == ASHIFTRT && int_mode != int_result_mode)
10845 break;
10847 rtx new_rtx = simplify_const_binary_operation (code, int_mode,
10848 XEXP (varop, 0),
10849 GEN_INT (count));
10850 varop = gen_rtx_fmt_ee (code, int_mode, new_rtx, XEXP (varop, 1));
10851 count = 0;
10852 continue;
10854 break;
10856 case NOT:
10857 /* The following rules apply only to scalars. */
10858 if (shift_mode != shift_unit_mode)
10859 break;
10861 /* Make this fit the case below. */
10862 varop = gen_rtx_XOR (mode, XEXP (varop, 0), constm1_rtx);
10863 continue;
10865 case IOR:
10866 case AND:
10867 case XOR:
10868 /* The following rules apply only to scalars. */
10869 if (shift_mode != shift_unit_mode)
10870 break;
10871 int_varop_mode = as_a <scalar_int_mode> (GET_MODE (varop));
10872 int_result_mode = as_a <scalar_int_mode> (result_mode);
10874 /* If we have (xshiftrt (ior (plus X (const_int -1)) X) C)
10875 with C the size of VAROP - 1 and the shift is logical if
10876 STORE_FLAG_VALUE is 1 and arithmetic if STORE_FLAG_VALUE is -1,
10877 we have an (le X 0) operation. If we have an arithmetic shift
10878 and STORE_FLAG_VALUE is 1 or we have a logical shift with
10879 STORE_FLAG_VALUE of -1, we have a (neg (le X 0)) operation. */
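/* The sign bit of (ior (plus X -1) X) is set exactly when X <= 0: for X > 0
   both X and X - 1 are nonnegative, for X == 0 the -1 supplies the sign bit,
   and for X < 0 X itself does; shifting that bit down by C yields the flag
   value.  */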
10881 if (GET_CODE (varop) == IOR && GET_CODE (XEXP (varop, 0)) == PLUS
10882 && XEXP (XEXP (varop, 0), 1) == constm1_rtx
10883 && (STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
10884 && (code == LSHIFTRT || code == ASHIFTRT)
10885 && count == (GET_MODE_PRECISION (int_varop_mode) - 1)
10886 && rtx_equal_p (XEXP (XEXP (varop, 0), 0), XEXP (varop, 1)))
10888 count = 0;
10889 varop = gen_rtx_LE (int_varop_mode, XEXP (varop, 1),
10890 const0_rtx);
10892 if (STORE_FLAG_VALUE == 1 ? code == ASHIFTRT : code == LSHIFTRT)
10893 varop = gen_rtx_NEG (int_varop_mode, varop);
10895 continue;
10898 /* If we have (shift (logical)), move the logical to the outside
10899 to allow it to possibly combine with another logical and the
10900 shift to combine with another shift. This also canonicalizes to
10901 what a ZERO_EXTRACT looks like. Also, some machines have
10902 (and (shift)) insns. */
10904 if (CONST_INT_P (XEXP (varop, 1))
10905 /* We can't do this if we have (ashiftrt (xor)) and the
10906 constant has its sign bit set in shift_unit_mode with
10907 shift_unit_mode wider than result_mode. */
10908 && !(code == ASHIFTRT && GET_CODE (varop) == XOR
10909 && int_result_mode != shift_unit_mode
10910 && 0 > trunc_int_for_mode (INTVAL (XEXP (varop, 1)),
10911 shift_unit_mode))
10912 && (new_rtx = simplify_const_binary_operation
10913 (code, int_result_mode,
10914 gen_int_mode (INTVAL (XEXP (varop, 1)), int_result_mode),
10915 GEN_INT (count))) != 0
10916 && CONST_INT_P (new_rtx)
10917 && merge_outer_ops (&outer_op, &outer_const, GET_CODE (varop),
10918 INTVAL (new_rtx), int_result_mode,
10919 &complement_p))
10921 varop = XEXP (varop, 0);
10922 continue;
10925 /* If we can't do that, try to simplify the shift in each arm of the
10926 logical expression, make a new logical expression, and apply
10927 the inverse distributive law. This also can't be done for
10928 (ashiftrt (xor)) where we've widened the shift and the constant
10929 changes the sign bit. */
10930 if (CONST_INT_P (XEXP (varop, 1))
10931 && !(code == ASHIFTRT && GET_CODE (varop) == XOR
10932 && int_result_mode != shift_unit_mode
10933 && 0 > trunc_int_for_mode (INTVAL (XEXP (varop, 1)),
10934 shift_unit_mode)))
10936 rtx lhs = simplify_shift_const (NULL_RTX, code, shift_unit_mode,
10937 XEXP (varop, 0), count);
10938 rtx rhs = simplify_shift_const (NULL_RTX, code, shift_unit_mode,
10939 XEXP (varop, 1), count);
10941 varop = simplify_gen_binary (GET_CODE (varop), shift_unit_mode,
10942 lhs, rhs);
10943 varop = apply_distributive_law (varop);
10945 count = 0;
10946 continue;
10948 break;
10950 case EQ:
10951 /* The following rules apply only to scalars. */
10952 if (shift_mode != shift_unit_mode)
10953 break;
10954 int_result_mode = as_a <scalar_int_mode> (result_mode);
10956 /* Convert (lshiftrt (eq FOO 0) C) to (xor FOO 1) if STORE_FLAG_VALUE
10957 says that the sign bit can be tested, FOO has mode MODE, C is
10958 GET_MODE_PRECISION (MODE) - 1, and FOO has only its low-order bit
10959 that may be nonzero. */
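/* That is, with STORE_FLAG_VALUE == -1 the value (eq FOO 0) is all ones or
   all zeros, so shifting it right logically by C leaves 1 or 0; since FOO is
   known to be 0 or 1, that result is exactly FOO ^ 1.  */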
10960 if (code == LSHIFTRT
10961 && XEXP (varop, 1) == const0_rtx
10962 && GET_MODE (XEXP (varop, 0)) == int_result_mode
10963 && count == (GET_MODE_PRECISION (int_result_mode) - 1)
10964 && HWI_COMPUTABLE_MODE_P (int_result_mode)
10965 && STORE_FLAG_VALUE == -1
10966 && nonzero_bits (XEXP (varop, 0), int_result_mode) == 1
10967 && merge_outer_ops (&outer_op, &outer_const, XOR, 1,
10968 int_result_mode, &complement_p))
10970 varop = XEXP (varop, 0);
10971 count = 0;
10972 continue;
10974 break;
10976 case NEG:
10977 /* The following rules apply only to scalars. */
10978 if (shift_mode != shift_unit_mode)
10979 break;
10980 int_result_mode = as_a <scalar_int_mode> (result_mode);
10982 /* (lshiftrt (neg A) C) where A is either 0 or 1 and C is one less
10983 than the number of bits in the mode is equivalent to A. */
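/* With A known to be 0 or 1, (neg A) is 0 or -1, and its logical right shift
   by the precision minus 1 is again 0 or 1 respectively, i.e. A itself.  */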
10984 if (code == LSHIFTRT
10985 && count == (GET_MODE_PRECISION (int_result_mode) - 1)
10986 && nonzero_bits (XEXP (varop, 0), int_result_mode) == 1)
10988 varop = XEXP (varop, 0);
10989 count = 0;
10990 continue;
10993 /* NEG commutes with ASHIFT since it is multiplication. Move the
10994 NEG outside to allow shifts to combine. */
10995 if (code == ASHIFT
10996 && merge_outer_ops (&outer_op, &outer_const, NEG, 0,
10997 int_result_mode, &complement_p))
10999 varop = XEXP (varop, 0);
11000 continue;
11002 break;
11004 case PLUS:
11005 /* The following rules apply only to scalars. */
11006 if (shift_mode != shift_unit_mode)
11007 break;
11008 int_result_mode = as_a <scalar_int_mode> (result_mode);
11010 /* (lshiftrt (plus A -1) C) where A is either 0 or 1 and C
11011 is one less than the number of bits in the mode is
11012 equivalent to (xor A 1). */
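/* With A known to be 0 or 1, (plus A -1) is -1 or 0, and its logical right
   shift by the precision minus 1 is 1 or 0 respectively, which is A ^ 1.  */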
11013 if (code == LSHIFTRT
11014 && count == (GET_MODE_PRECISION (int_result_mode) - 1)
11015 && XEXP (varop, 1) == constm1_rtx
11016 && nonzero_bits (XEXP (varop, 0), int_result_mode) == 1
11017 && merge_outer_ops (&outer_op, &outer_const, XOR, 1,
11018 int_result_mode, &complement_p))
11020 count = 0;
11021 varop = XEXP (varop, 0);
11022 continue;
11025 /* If we have (xshiftrt (plus FOO BAR) C), and the only bits
11026 that might be nonzero in BAR are those being shifted out and those
11027 bits are known zero in FOO, we can replace the PLUS with FOO.
11028 Similarly in the other operand order. This code occurs when
11029 we are computing the size of a variable-size array. */
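/* For example, in (lshiftrt (plus FOO BAR) 3) where BAR is known to fit in
   the low three bits and those bits of FOO are known to be zero, the operands
   share no nonzero bits, so the addition cannot carry and the BAR bits are
   all shifted out; the result is just (lshiftrt FOO 3).  */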
11031 if ((code == ASHIFTRT || code == LSHIFTRT)
11032 && count < HOST_BITS_PER_WIDE_INT
11033 && nonzero_bits (XEXP (varop, 1), int_result_mode) >> count == 0
11034 && (nonzero_bits (XEXP (varop, 1), int_result_mode)
11035 & nonzero_bits (XEXP (varop, 0), int_result_mode)) == 0)
11037 varop = XEXP (varop, 0);
11038 continue;
11040 else if ((code == ASHIFTRT || code == LSHIFTRT)
11041 && count < HOST_BITS_PER_WIDE_INT
11042 && HWI_COMPUTABLE_MODE_P (int_result_mode)
11043 && 0 == (nonzero_bits (XEXP (varop, 0), int_result_mode)
11044 >> count)
11045 && 0 == (nonzero_bits (XEXP (varop, 0), int_result_mode)
11046 & nonzero_bits (XEXP (varop, 1), int_result_mode)))
11048 varop = XEXP (varop, 1);
11049 continue;
11052 /* (ashift (plus foo C) N) is (plus (ashift foo N) C'). */
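/* E.g. (ashift (plus X (const_int 3)) 2) becomes
   (plus (ashift X 2) (const_int 12)); the constant is shifted by the same
   count and moved to the outer operation.  */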
11053 if (code == ASHIFT
11054 && CONST_INT_P (XEXP (varop, 1))
11055 && (new_rtx = simplify_const_binary_operation
11056 (ASHIFT, int_result_mode,
11057 gen_int_mode (INTVAL (XEXP (varop, 1)), int_result_mode),
11058 GEN_INT (count))) != 0
11059 && CONST_INT_P (new_rtx)
11060 && merge_outer_ops (&outer_op, &outer_const, PLUS,
11061 INTVAL (new_rtx), int_result_mode,
11062 &complement_p))
11064 varop = XEXP (varop, 0);
11065 continue;
11068 /* Check for 'PLUS signbit', which is the canonical form of 'XOR
11069 signbit', and attempt to change the PLUS to an XOR and move it to
11070 the outer operation as is done above in the AND/IOR/XOR case
11071 for shift (logical). See the logical handling above
11072 for the reasoning behind doing so. */
11073 if (code == LSHIFTRT
11074 && CONST_INT_P (XEXP (varop, 1))
11075 && mode_signbit_p (int_result_mode, XEXP (varop, 1))
11076 && (new_rtx = simplify_const_binary_operation
11077 (code, int_result_mode,
11078 gen_int_mode (INTVAL (XEXP (varop, 1)), int_result_mode),
11079 GEN_INT (count))) != 0
11080 && CONST_INT_P (new_rtx)
11081 && merge_outer_ops (&outer_op, &outer_const, XOR,
11082 INTVAL (new_rtx), int_result_mode,
11083 &complement_p))
11085 varop = XEXP (varop, 0);
11086 continue;
11089 break;
11091 case MINUS:
11092 /* The following rules apply only to scalars. */
11093 if (shift_mode != shift_unit_mode)
11094 break;
11095 int_varop_mode = as_a <scalar_int_mode> (GET_MODE (varop));
11097 /* If we have (xshiftrt (minus (ashiftrt X C) X) C)
11098 with C the size of VAROP - 1 and the shift is logical if
11099 STORE_FLAG_VALUE is 1 and arithmetic if STORE_FLAG_VALUE is -1,
11100 we have a (gt X 0) operation. If the shift is arithmetic with
11101 STORE_FLAG_VALUE of 1 or logical with STORE_FLAG_VALUE == -1,
11102 we have a (neg (gt X 0)) operation. */
11104 if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
11105 && GET_CODE (XEXP (varop, 0)) == ASHIFTRT
11106 && count == (GET_MODE_PRECISION (int_varop_mode) - 1)
11107 && (code == LSHIFTRT || code == ASHIFTRT)
11108 && CONST_INT_P (XEXP (XEXP (varop, 0), 1))
11109 && INTVAL (XEXP (XEXP (varop, 0), 1)) == count
11110 && rtx_equal_p (XEXP (XEXP (varop, 0), 0), XEXP (varop, 1)))
11112 count = 0;
11113 varop = gen_rtx_GT (int_varop_mode, XEXP (varop, 1),
11114 const0_rtx);
11116 if (STORE_FLAG_VALUE == 1 ? code == ASHIFTRT : code == LSHIFTRT)
11117 varop = gen_rtx_NEG (int_varop_mode, varop);
11119 continue;
11121 break;
11123 case TRUNCATE:
11124 /* Change (lshiftrt (truncate (lshiftrt))) to (truncate (lshiftrt))
11125 if the truncate does not affect the value. */
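/* For example, (lshiftrt:SI (truncate:SI (lshiftrt:DI X 32)) 3) becomes
   (truncate:SI (lshiftrt:DI X 35)): once the inner count is at least 32, the
   shifted DImode value already fits in 32 bits, so the truncation loses
   nothing and the two counts can simply be added.  */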
11126 if (code == LSHIFTRT
11127 && GET_CODE (XEXP (varop, 0)) == LSHIFTRT
11128 && CONST_INT_P (XEXP (XEXP (varop, 0), 1))
11129 && (INTVAL (XEXP (XEXP (varop, 0), 1))
11130 >= (GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (varop, 0)))
11131 - GET_MODE_UNIT_PRECISION (GET_MODE (varop)))))
11133 rtx varop_inner = XEXP (varop, 0);
11135 varop_inner
11136 = gen_rtx_LSHIFTRT (GET_MODE (varop_inner),
11137 XEXP (varop_inner, 0),
11138 GEN_INT
11139 (count + INTVAL (XEXP (varop_inner, 1))));
11140 varop = gen_rtx_TRUNCATE (GET_MODE (varop), varop_inner);
11141 count = 0;
11142 continue;
11144 break;
11146 default:
11147 break;
11150 break;
11153 shift_mode = result_mode;
11154 if (shift_mode != mode)
11156 /* We only change the modes of scalar shifts. */
11157 int_mode = as_a <scalar_int_mode> (mode);
11158 int_result_mode = as_a <scalar_int_mode> (result_mode);
11159 shift_mode = try_widen_shift_mode (code, varop, count, int_result_mode,
11160 int_mode, outer_op, outer_const);
11163 /* We have now finished analyzing the shift. The result should be
11164 a shift of type CODE with SHIFT_MODE shifting VAROP COUNT places. If
11165 OUTER_OP is non-UNKNOWN, it is an operation that needs to be applied
11166 to the result of the shift. OUTER_CONST is the relevant constant,
11167 but we must turn off all bits turned off in the shift. */
11169 if (outer_op == UNKNOWN
11170 && orig_code == code && orig_count == count
11171 && varop == orig_varop
11172 && shift_mode == GET_MODE (varop))
11173 return NULL_RTX;
11175 /* Make a SUBREG if necessary. If we can't make it, fail. */
11176 varop = gen_lowpart (shift_mode, varop);
11177 if (varop == NULL_RTX || GET_CODE (varop) == CLOBBER)
11178 return NULL_RTX;
11180 /* If we have an outer operation and we just made a shift, it is
11181 possible that we could have simplified the shift were it not
11182 for the outer operation. So try to do the simplification
11183 recursively. */
11185 if (outer_op != UNKNOWN)
11186 x = simplify_shift_const_1 (code, shift_mode, varop, count);
11187 else
11188 x = NULL_RTX;
11190 if (x == NULL_RTX)
11191 x = simplify_gen_binary (code, shift_mode, varop, GEN_INT (count));
11193 /* If we were doing an LSHIFTRT in a wider mode than it was originally,
11194 turn off all the bits that the shift would have turned off. */
11195 if (orig_code == LSHIFTRT && result_mode != shift_mode)
11196 /* We only change the modes of scalar shifts. */
11197 x = simplify_and_const_int (NULL_RTX, as_a <scalar_int_mode> (shift_mode),
11198 x, GET_MODE_MASK (result_mode) >> orig_count);
11200 /* Do the remainder of the processing in RESULT_MODE. */
11201 x = gen_lowpart_or_truncate (result_mode, x);
11203 /* If COMPLEMENT_P is set, we have to complement X before doing the outer
11204 operation. */
11205 if (complement_p)
11206 x = simplify_gen_unary (NOT, result_mode, x, result_mode);
11208 if (outer_op != UNKNOWN)
11210 int_result_mode = as_a <scalar_int_mode> (result_mode);
11212 if (GET_RTX_CLASS (outer_op) != RTX_UNARY
11213 && GET_MODE_PRECISION (int_result_mode) < HOST_BITS_PER_WIDE_INT)
11214 outer_const = trunc_int_for_mode (outer_const, int_result_mode);
11216 if (outer_op == AND)
11217 x = simplify_and_const_int (NULL_RTX, int_result_mode, x, outer_const);
11218 else if (outer_op == SET)
11220 /* This means that we have determined that the result is
11221 equivalent to a constant. This should be rare. */
11222 if (!side_effects_p (x))
11223 x = GEN_INT (outer_const);
11225 else if (GET_RTX_CLASS (outer_op) == RTX_UNARY)
11226 x = simplify_gen_unary (outer_op, int_result_mode, x, int_result_mode);
11227 else
11228 x = simplify_gen_binary (outer_op, int_result_mode, x,
11229 GEN_INT (outer_const));
11232 return x;
11235 /* Simplify a shift of VAROP by COUNT bits. CODE says what kind of shift.
11236 The result of the shift is RESULT_MODE. If we cannot simplify it,
11237 return X or, if it is NULL, synthesize the expression with
11238 simplify_gen_binary. Otherwise, return a simplified value.
11240 The shift is normally computed in the widest mode we find in VAROP, as
11241 long as it isn't a different number of words than RESULT_MODE. Exceptions
11242 are ASHIFTRT and ROTATE, which are always done in their original mode. */
11244 static rtx
11245 simplify_shift_const (rtx x, enum rtx_code code, machine_mode result_mode,
11246 rtx varop, int count)
11248 rtx tem = simplify_shift_const_1 (code, result_mode, varop, count);
11249 if (tem)
11250 return tem;
11252 if (!x)
11253 x = simplify_gen_binary (code, GET_MODE (varop), varop, GEN_INT (count));
11254 if (GET_MODE (x) != result_mode)
11255 x = gen_lowpart (result_mode, x);
11256 return x;
11260 /* A subroutine of recog_for_combine. See there for arguments and
11261 return value. */
11263 static int
11264 recog_for_combine_1 (rtx *pnewpat, rtx_insn *insn, rtx *pnotes)
11266 rtx pat = *pnewpat;
11267 rtx pat_without_clobbers;
11268 int insn_code_number;
11269 int num_clobbers_to_add = 0;
11270 int i;
11271 rtx notes = NULL_RTX;
11272 rtx old_notes, old_pat;
11273 int old_icode;
11275 /* If PAT is a PARALLEL, check to see if it contains the CLOBBER
11276 we use to indicate that something didn't match. If we find such a
11277 thing, force rejection. */
11278 if (GET_CODE (pat) == PARALLEL)
11279 for (i = XVECLEN (pat, 0) - 1; i >= 0; i--)
11280 if (GET_CODE (XVECEXP (pat, 0, i)) == CLOBBER
11281 && XEXP (XVECEXP (pat, 0, i), 0) == const0_rtx)
11282 return -1;
11284 old_pat = PATTERN (insn);
11285 old_notes = REG_NOTES (insn);
11286 PATTERN (insn) = pat;
11287 REG_NOTES (insn) = NULL_RTX;
11289 insn_code_number = recog (pat, insn, &num_clobbers_to_add);
11290 if (dump_file && (dump_flags & TDF_DETAILS))
11292 if (insn_code_number < 0)
11293 fputs ("Failed to match this instruction:\n", dump_file);
11294 else
11295 fputs ("Successfully matched this instruction:\n", dump_file);
11296 print_rtl_single (dump_file, pat);
11299 /* If it isn't recognized, we may previously have had an insn
11300 that clobbered some register as a side effect, but the combined
11301 insn doesn't need to do that. So try once more without the clobbers
11302 unless this represents an ASM insn. */
11304 if (insn_code_number < 0 && ! check_asm_operands (pat)
11305 && GET_CODE (pat) == PARALLEL)
11307 int pos;
11309 for (pos = 0, i = 0; i < XVECLEN (pat, 0); i++)
11310 if (GET_CODE (XVECEXP (pat, 0, i)) != CLOBBER)
11312 if (i != pos)
11313 SUBST (XVECEXP (pat, 0, pos), XVECEXP (pat, 0, i));
11314 pos++;
11317 SUBST_INT (XVECLEN (pat, 0), pos);
11319 if (pos == 1)
11320 pat = XVECEXP (pat, 0, 0);
11322 PATTERN (insn) = pat;
11323 insn_code_number = recog (pat, insn, &num_clobbers_to_add);
11324 if (dump_file && (dump_flags & TDF_DETAILS))
11326 if (insn_code_number < 0)
11327 fputs ("Failed to match this instruction:\n", dump_file);
11328 else
11329 fputs ("Successfully matched this instruction:\n", dump_file);
11330 print_rtl_single (dump_file, pat);
11334 pat_without_clobbers = pat;
11336 PATTERN (insn) = old_pat;
11337 REG_NOTES (insn) = old_notes;
11339 /* Recognize all noop sets; these will be killed by a follow-up pass. */
11340 if (insn_code_number < 0 && GET_CODE (pat) == SET && set_noop_p (pat))
11341 insn_code_number = NOOP_MOVE_INSN_CODE, num_clobbers_to_add = 0;
11343 /* If we had any clobbers to add, make a new pattern that contains
11344 them. Then check to make sure that all of them are dead. */
11345 if (num_clobbers_to_add)
11347 rtx newpat = gen_rtx_PARALLEL (VOIDmode,
11348 rtvec_alloc (GET_CODE (pat) == PARALLEL
11349 ? (XVECLEN (pat, 0)
11350 + num_clobbers_to_add)
11351 : num_clobbers_to_add + 1));
11353 if (GET_CODE (pat) == PARALLEL)
11354 for (i = 0; i < XVECLEN (pat, 0); i++)
11355 XVECEXP (newpat, 0, i) = XVECEXP (pat, 0, i);
11356 else
11357 XVECEXP (newpat, 0, 0) = pat;
11359 add_clobbers (newpat, insn_code_number);
11361 for (i = XVECLEN (newpat, 0) - num_clobbers_to_add;
11362 i < XVECLEN (newpat, 0); i++)
11364 if (REG_P (XEXP (XVECEXP (newpat, 0, i), 0))
11365 && ! reg_dead_at_p (XEXP (XVECEXP (newpat, 0, i), 0), insn))
11366 return -1;
11367 if (GET_CODE (XEXP (XVECEXP (newpat, 0, i), 0)) != SCRATCH)
11369 gcc_assert (REG_P (XEXP (XVECEXP (newpat, 0, i), 0)));
11370 notes = alloc_reg_note (REG_UNUSED,
11371 XEXP (XVECEXP (newpat, 0, i), 0), notes);
11374 pat = newpat;
11377 if (insn_code_number >= 0
11378 && insn_code_number != NOOP_MOVE_INSN_CODE)
11380 old_pat = PATTERN (insn);
11381 old_notes = REG_NOTES (insn);
11382 old_icode = INSN_CODE (insn);
11383 PATTERN (insn) = pat;
11384 REG_NOTES (insn) = notes;
11385 INSN_CODE (insn) = insn_code_number;
11387 /* Allow targets to reject combined insn. */
11388 if (!targetm.legitimate_combined_insn (insn))
11390 if (dump_file && (dump_flags & TDF_DETAILS))
11391 fputs ("Instruction not appropriate for target.",
11392 dump_file);
11394 /* Callers expect recog_for_combine to strip
11395 clobbers from the pattern on failure. */
11396 pat = pat_without_clobbers;
11397 notes = NULL_RTX;
11399 insn_code_number = -1;
11402 PATTERN (insn) = old_pat;
11403 REG_NOTES (insn) = old_notes;
11404 INSN_CODE (insn) = old_icode;
11407 *pnewpat = pat;
11408 *pnotes = notes;
11410 return insn_code_number;
11413 /* Change every ZERO_EXTRACT and ZERO_EXTEND of a SUBREG that can be
11414 expressed as an AND and maybe an LSHIFTRT, to that formulation.
11415 Return whether anything was so changed. */
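/* For example, with BITS_BIG_ENDIAN clear, (zero_extract:SI X 8 4) becomes
   (and:SI (lshiftrt:SI X (const_int 4)) (const_int 255)), and
   (zero_extend:SI (subreg:QI (reg:SI R) 0)) becomes
   (and:SI (reg:SI R) (const_int 255)).  */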
11417 static bool
11418 change_zero_ext (rtx pat)
11420 bool changed = false;
11421 rtx *src = &SET_SRC (pat);
11423 subrtx_ptr_iterator::array_type array;
11424 FOR_EACH_SUBRTX_PTR (iter, array, src, NONCONST)
11426 rtx x = **iter;
11427 scalar_int_mode mode, inner_mode;
11428 if (!is_a <scalar_int_mode> (GET_MODE (x), &mode))
11429 continue;
11430 int size;
11432 if (GET_CODE (x) == ZERO_EXTRACT
11433 && CONST_INT_P (XEXP (x, 1))
11434 && CONST_INT_P (XEXP (x, 2))
11435 && is_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)), &inner_mode)
11436 && GET_MODE_PRECISION (inner_mode) <= GET_MODE_PRECISION (mode))
11438 size = INTVAL (XEXP (x, 1));
11440 int start = INTVAL (XEXP (x, 2));
11441 if (BITS_BIG_ENDIAN)
11442 start = GET_MODE_PRECISION (inner_mode) - size - start;
11444 if (start)
11445 x = gen_rtx_LSHIFTRT (inner_mode, XEXP (x, 0), GEN_INT (start));
11446 else
11447 x = XEXP (x, 0);
11448 if (mode != inner_mode)
11449 x = gen_lowpart_SUBREG (mode, x);
11451 else if (GET_CODE (x) == ZERO_EXTEND
11452 && GET_CODE (XEXP (x, 0)) == SUBREG
11453 && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (XEXP (x, 0))))
11454 && !paradoxical_subreg_p (XEXP (x, 0))
11455 && subreg_lowpart_p (XEXP (x, 0)))
11457 inner_mode = as_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)));
11458 size = GET_MODE_PRECISION (inner_mode);
11459 x = SUBREG_REG (XEXP (x, 0));
11460 if (GET_MODE (x) != mode)
11461 x = gen_lowpart_SUBREG (mode, x);
11463 else if (GET_CODE (x) == ZERO_EXTEND
11464 && REG_P (XEXP (x, 0))
11465 && HARD_REGISTER_P (XEXP (x, 0))
11466 && can_change_dest_mode (XEXP (x, 0), 0, mode))
11468 inner_mode = as_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)));
11469 size = GET_MODE_PRECISION (inner_mode);
11470 x = gen_rtx_REG (mode, REGNO (XEXP (x, 0)));
11472 else
11473 continue;
11475 if (!(GET_CODE (x) == LSHIFTRT
11476 && CONST_INT_P (XEXP (x, 1))
11477 && size + INTVAL (XEXP (x, 1)) == GET_MODE_PRECISION (mode)))
11479 wide_int mask = wi::mask (size, false, GET_MODE_PRECISION (mode));
11480 x = gen_rtx_AND (mode, x, immed_wide_int_const (mask, mode));
11483 SUBST (**iter, x);
11484 changed = true;
11487 if (changed)
11488 FOR_EACH_SUBRTX_PTR (iter, array, src, NONCONST)
11489 maybe_swap_commutative_operands (**iter);
11491 rtx *dst = &SET_DEST (pat);
11492 scalar_int_mode mode;
11493 if (GET_CODE (*dst) == ZERO_EXTRACT
11494 && REG_P (XEXP (*dst, 0))
11495 && is_a <scalar_int_mode> (GET_MODE (XEXP (*dst, 0)), &mode)
11496 && CONST_INT_P (XEXP (*dst, 1))
11497 && CONST_INT_P (XEXP (*dst, 2)))
11499 rtx reg = XEXP (*dst, 0);
11500 int width = INTVAL (XEXP (*dst, 1));
11501 int offset = INTVAL (XEXP (*dst, 2));
11502 int reg_width = GET_MODE_PRECISION (mode);
11503 if (BITS_BIG_ENDIAN)
11504 offset = reg_width - width - offset;
11506 rtx x, y, z, w;
11507 wide_int mask = wi::shifted_mask (offset, width, true, reg_width);
11508 wide_int mask2 = wi::shifted_mask (offset, width, false, reg_width);
11509 x = gen_rtx_AND (mode, reg, immed_wide_int_const (mask, mode));
11510 if (offset)
11511 y = gen_rtx_ASHIFT (mode, SET_SRC (pat), GEN_INT (offset));
11512 else
11513 y = SET_SRC (pat);
11514 z = gen_rtx_AND (mode, y, immed_wide_int_const (mask2, mode));
11515 w = gen_rtx_IOR (mode, x, z);
11516 SUBST (SET_DEST (pat), reg);
11517 SUBST (SET_SRC (pat), w);
11519 changed = true;
11522 return changed;
11525 /* Like recog, but we receive the address of a pointer to a new pattern.
11526 We try to match the rtx that the pointer points to.
11527 If that fails, we may try to modify or replace the pattern,
11528 storing the replacement into the same pointer object.
11530 Modifications include deletion or addition of CLOBBERs. If the
11531 instruction will still not match, we change ZERO_EXTEND and ZERO_EXTRACT
11532 to the equivalent AND and perhaps LSHIFTRT patterns, and try with that
11533 (and undo if that fails).
11535 PNOTES is a pointer to a location where any REG_UNUSED notes added for
11536 the CLOBBERs are placed.
11538 The value is the final insn code from the pattern ultimately matched,
11539 or -1. */
11541 static int
11542 recog_for_combine (rtx *pnewpat, rtx_insn *insn, rtx *pnotes)
11544 rtx pat = *pnewpat;
11545 int insn_code_number = recog_for_combine_1 (pnewpat, insn, pnotes);
11546 if (insn_code_number >= 0 || check_asm_operands (pat))
11547 return insn_code_number;
11549 void *marker = get_undo_marker ();
11550 bool changed = false;
11552 if (GET_CODE (pat) == SET)
11553 changed = change_zero_ext (pat);
11554 else if (GET_CODE (pat) == PARALLEL)
11556 int i;
11557 for (i = 0; i < XVECLEN (pat, 0); i++)
11559 rtx set = XVECEXP (pat, 0, i);
11560 if (GET_CODE (set) == SET)
11561 changed |= change_zero_ext (set);
11565 if (changed)
11567 insn_code_number = recog_for_combine_1 (pnewpat, insn, pnotes);
11569 if (insn_code_number < 0)
11570 undo_to_marker (marker);
11573 return insn_code_number;
11576 /* Like gen_lowpart_general but for use by combine. In combine it
11577 is not possible to create any new pseudoregs. However, it is
11578 safe to create invalid memory addresses, because combine will
11579 try to recognize them and all they will do is make the combine
11580 attempt fail.
11582 If for some reason this cannot do its job, an rtx
11583 (clobber (const_int 0)) is returned.
11584 An insn containing that will not be recognized. */
11586 static rtx
11587 gen_lowpart_for_combine (machine_mode omode, rtx x)
11589 machine_mode imode = GET_MODE (x);
11590 unsigned int osize = GET_MODE_SIZE (omode);
11591 unsigned int isize = GET_MODE_SIZE (imode);
11592 rtx result;
11594 if (omode == imode)
11595 return x;
11597 /* We can only support MODE being wider than a word if X is a
11598 constant integer or has a mode the same size. */
11599 if (GET_MODE_SIZE (omode) > UNITS_PER_WORD
11600 && ! (CONST_SCALAR_INT_P (x) || isize == osize))
11601 goto fail;
11603 /* X might be a paradoxical (subreg (mem)). In that case, gen_lowpart
11604 won't know what to do. So we will strip off the SUBREG here and
11605 process normally. */
11606 if (GET_CODE (x) == SUBREG && MEM_P (SUBREG_REG (x)))
11608 x = SUBREG_REG (x);
11610 /* For use in case we fall down into the address adjustments
11611 further below, we need to adjust the known mode and size of
11612 x; imode and isize, since we just adjusted x. */
11613 imode = GET_MODE (x);
11615 if (imode == omode)
11616 return x;
11618 isize = GET_MODE_SIZE (imode);
11621 result = gen_lowpart_common (omode, x);
11623 if (result)
11624 return result;
11626 if (MEM_P (x))
11628 /* Refuse to work on a volatile memory ref or one with a mode-dependent
11629 address. */
11630 if (MEM_VOLATILE_P (x)
11631 || mode_dependent_address_p (XEXP (x, 0), MEM_ADDR_SPACE (x)))
11632 goto fail;
11634 /* If we want to refer to something bigger than the original memref,
11635 generate a paradoxical subreg instead. That will force a reload
11636 of the original memref X. */
11637 if (paradoxical_subreg_p (omode, imode))
11638 return gen_rtx_SUBREG (omode, x, 0);
11640 HOST_WIDE_INT offset = byte_lowpart_offset (omode, imode);
11641 return adjust_address_nv (x, omode, offset);
11644 /* If X is a comparison operator, rewrite it in a new mode. This
11645 probably won't match, but may allow further simplifications. */
11646 else if (COMPARISON_P (x))
11647 return gen_rtx_fmt_ee (GET_CODE (x), omode, XEXP (x, 0), XEXP (x, 1));
11649 /* If we couldn't simplify X any other way, just enclose it in a
11650 SUBREG. Normally, this SUBREG won't match, but some patterns may
11651 include an explicit SUBREG or we may simplify it further in combine. */
11652 else
11654 rtx res;
11656 if (imode == VOIDmode)
11658 imode = int_mode_for_mode (omode).require ();
11659 x = gen_lowpart_common (imode, x);
11660 if (x == NULL)
11661 goto fail;
11663 res = lowpart_subreg (omode, x, imode);
11664 if (res)
11665 return res;
11668 fail:
11669 return gen_rtx_CLOBBER (omode, const0_rtx);
11672 /* Try to simplify a comparison between OP0 and a constant OP1,
11673 where CODE is the comparison code that will be tested, into a
11674 (CODE OP0 const0_rtx) form.
11676 The result is a possibly different comparison code to use.
11677 *POP1 may be updated. */
11679 static enum rtx_code
11680 simplify_compare_const (enum rtx_code code, machine_mode mode,
11681 rtx op0, rtx *pop1)
11683 scalar_int_mode int_mode;
11684 HOST_WIDE_INT const_op = INTVAL (*pop1);
11686 /* Get the constant we are comparing against and turn off all bits
11687 not on in our mode. */
11688 if (mode != VOIDmode)
11689 const_op = trunc_int_for_mode (const_op, mode);
11691 /* If we are comparing against a constant power of two and the value
11692 being compared can only have that single bit nonzero (e.g., it was
11693 `and'ed with that bit), we can replace this with a comparison
11694 with zero. */
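/* For example, if OP0 is (and X 4) only bit 2 can be nonzero, so
   (eq (and X 4) 4) becomes (ne (and X 4) 0).  */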
11695 if (const_op
11696 && (code == EQ || code == NE || code == GE || code == GEU
11697 || code == LT || code == LTU)
11698 && is_a <scalar_int_mode> (mode, &int_mode)
11699 && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
11700 && pow2p_hwi (const_op & GET_MODE_MASK (int_mode))
11701 && (nonzero_bits (op0, int_mode)
11702 == (unsigned HOST_WIDE_INT) (const_op & GET_MODE_MASK (int_mode))))
11704 code = (code == EQ || code == GE || code == GEU ? NE : EQ);
11705 const_op = 0;
11708 /* Similarly, if we are comparing a value known to be either -1 or
11709 0 with -1, change it to the opposite comparison against zero. */
11710 if (const_op == -1
11711 && (code == EQ || code == NE || code == GT || code == LE
11712 || code == GEU || code == LTU)
11713 && is_a <scalar_int_mode> (mode, &int_mode)
11714 && num_sign_bit_copies (op0, int_mode) == GET_MODE_PRECISION (int_mode))
11716 code = (code == EQ || code == LE || code == GEU ? NE : EQ);
11717 const_op = 0;
11720 /* Do some canonicalizations based on the comparison code. We prefer
11721 comparisons against zero and then prefer equality comparisons.
11722 If we can reduce the size of a constant, we will do that too. */
11723 switch (code)
11725 case LT:
11726 /* < C is equivalent to <= (C - 1) */
11727 if (const_op > 0)
11729 const_op -= 1;
11730 code = LE;
11731 /* ... fall through to LE case below. */
11732 gcc_fallthrough ();
11734 else
11735 break;
11737 case LE:
11738 /* <= C is equivalent to < (C + 1); we do this for C < 0 */
11739 if (const_op < 0)
11741 const_op += 1;
11742 code = LT;
11745 /* If we are doing a <= 0 comparison on a value known to have
11746 a zero sign bit, we can replace this with == 0. */
11747 else if (const_op == 0
11748 && is_a <scalar_int_mode> (mode, &int_mode)
11749 && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
11750 && (nonzero_bits (op0, int_mode)
11751 & (HOST_WIDE_INT_1U << (GET_MODE_PRECISION (int_mode) - 1)))
11752 == 0)
11753 code = EQ;
11754 break;
11756 case GE:
11757 /* >= C is equivalent to > (C - 1). */
11758 if (const_op > 0)
11760 const_op -= 1;
11761 code = GT;
11762 /* ... fall through to GT below. */
11763 gcc_fallthrough ();
11765 else
11766 break;
11768 case GT:
11769 /* > C is equivalent to >= (C + 1); we do this for C < 0. */
11770 if (const_op < 0)
11772 const_op += 1;
11773 code = GE;
11776 /* If we are doing a > 0 comparison on a value known to have
11777 a zero sign bit, we can replace this with != 0. */
11778 else if (const_op == 0
11779 && is_a <scalar_int_mode> (mode, &int_mode)
11780 && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
11781 && (nonzero_bits (op0, int_mode)
11782 & (HOST_WIDE_INT_1U << (GET_MODE_PRECISION (int_mode) - 1)))
11783 == 0)
11784 code = NE;
11785 break;
11787 case LTU:
11788 /* < C is equivalent to <= (C - 1). */
11789 if (const_op > 0)
11791 const_op -= 1;
11792 code = LEU;
11793 /* ... fall through ... */
11794 gcc_fallthrough ();
11796 /* (unsigned) < 0x80000000 is equivalent to >= 0. */
11797 else if (is_a <scalar_int_mode> (mode, &int_mode)
11798 && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
11799 && ((unsigned HOST_WIDE_INT) const_op
11800 == HOST_WIDE_INT_1U << (GET_MODE_PRECISION (int_mode) - 1)))
11802 const_op = 0;
11803 code = GE;
11804 break;
11806 else
11807 break;
11809 case LEU:
11810 /* unsigned <= 0 is equivalent to == 0 */
11811 if (const_op == 0)
11812 code = EQ;
11813 /* (unsigned) <= 0x7fffffff is equivalent to >= 0. */
11814 else if (is_a <scalar_int_mode> (mode, &int_mode)
11815 && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
11816 && ((unsigned HOST_WIDE_INT) const_op
11817 == ((HOST_WIDE_INT_1U
11818 << (GET_MODE_PRECISION (int_mode) - 1)) - 1)))
11820 const_op = 0;
11821 code = GE;
11823 break;
11825 case GEU:
11826 /* >= C is equivalent to > (C - 1). */
11827 if (const_op > 1)
11829 const_op -= 1;
11830 code = GTU;
11831 /* ... fall through ... */
11832 gcc_fallthrough ();
11835 /* (unsigned) >= 0x80000000 is equivalent to < 0. */
11836 else if (is_a <scalar_int_mode> (mode, &int_mode)
11837 && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
11838 && ((unsigned HOST_WIDE_INT) const_op
11839 == HOST_WIDE_INT_1U << (GET_MODE_PRECISION (int_mode) - 1)))
11841 const_op = 0;
11842 code = LT;
11843 break;
11845 else
11846 break;
11848 case GTU:
11849 /* unsigned > 0 is equivalent to != 0 */
11850 if (const_op == 0)
11851 code = NE;
11852 /* (unsigned) > 0x7fffffff is equivalent to < 0. */
11853 else if (is_a <scalar_int_mode> (mode, &int_mode)
11854 && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
11855 && ((unsigned HOST_WIDE_INT) const_op
11856 == (HOST_WIDE_INT_1U
11857 << (GET_MODE_PRECISION (int_mode) - 1)) - 1))
11859 const_op = 0;
11860 code = LT;
11862 break;
11864 default:
11865 break;
11868 *pop1 = GEN_INT (const_op);
11869 return code;
11872 /* Simplify a comparison between *POP0 and *POP1 where CODE is the
11873 comparison code that will be tested.
11875 The result is a possibly different comparison code to use. *POP0 and
11876 *POP1 may be updated.
11878 It is possible that we might detect that a comparison is either always
11879 true or always false. However, we do not perform general constant
11880 folding in combine, so this knowledge isn't useful. Such tautologies
11881 should have been detected earlier. Hence we ignore all such cases. */
11883 static enum rtx_code
11884 simplify_comparison (enum rtx_code code, rtx *pop0, rtx *pop1)
11886 rtx op0 = *pop0;
11887 rtx op1 = *pop1;
11888 rtx tem, tem1;
11889 int i;
11890 scalar_int_mode mode, inner_mode, tmode;
11891 opt_scalar_int_mode tmode_iter;
11893 /* Try a few ways of applying the same transformation to both operands. */
11894 while (1)
11896 /* The test below this one won't handle SIGN_EXTENDs on these machines,
11897 so check specially. */
11898 if (!WORD_REGISTER_OPERATIONS
11899 && code != GTU && code != GEU && code != LTU && code != LEU
11900 && GET_CODE (op0) == ASHIFTRT && GET_CODE (op1) == ASHIFTRT
11901 && GET_CODE (XEXP (op0, 0)) == ASHIFT
11902 && GET_CODE (XEXP (op1, 0)) == ASHIFT
11903 && GET_CODE (XEXP (XEXP (op0, 0), 0)) == SUBREG
11904 && GET_CODE (XEXP (XEXP (op1, 0), 0)) == SUBREG
11905 && is_a <scalar_int_mode> (GET_MODE (op0), &mode)
11906 && (is_a <scalar_int_mode>
11907 (GET_MODE (SUBREG_REG (XEXP (XEXP (op0, 0), 0))), &inner_mode))
11908 && inner_mode == GET_MODE (SUBREG_REG (XEXP (XEXP (op1, 0), 0)))
11909 && CONST_INT_P (XEXP (op0, 1))
11910 && XEXP (op0, 1) == XEXP (op1, 1)
11911 && XEXP (op0, 1) == XEXP (XEXP (op0, 0), 1)
11912 && XEXP (op0, 1) == XEXP (XEXP (op1, 0), 1)
11913 && (INTVAL (XEXP (op0, 1))
11914 == (GET_MODE_PRECISION (mode)
11915 - GET_MODE_PRECISION (inner_mode))))
11917 op0 = SUBREG_REG (XEXP (XEXP (op0, 0), 0));
11918 op1 = SUBREG_REG (XEXP (XEXP (op1, 0), 0));
11921 /* If both operands are the same constant shift, see if we can ignore the
11922 shift. We can if the shift is a rotate or if the bits shifted out of
11923 this shift are known to be zero for both inputs and if the type of
11924 comparison is compatible with the shift. */
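/* For example, (eq (lshiftrt A 2) (lshiftrt B 2)) can be treated as (eq A B)
   when the low two bits of both A and B are known to be zero, since those are
   the only bits the shifts discard.  */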
11925 if (GET_CODE (op0) == GET_CODE (op1)
11926 && HWI_COMPUTABLE_MODE_P (GET_MODE (op0))
11927 && ((GET_CODE (op0) == ROTATE && (code == NE || code == EQ))
11928 || ((GET_CODE (op0) == LSHIFTRT || GET_CODE (op0) == ASHIFT)
11929 && (code != GT && code != LT && code != GE && code != LE))
11930 || (GET_CODE (op0) == ASHIFTRT
11931 && (code != GTU && code != LTU
11932 && code != GEU && code != LEU)))
11933 && CONST_INT_P (XEXP (op0, 1))
11934 && INTVAL (XEXP (op0, 1)) >= 0
11935 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT
11936 && XEXP (op0, 1) == XEXP (op1, 1))
11938 machine_mode mode = GET_MODE (op0);
11939 unsigned HOST_WIDE_INT mask = GET_MODE_MASK (mode);
11940 int shift_count = INTVAL (XEXP (op0, 1));
11942 if (GET_CODE (op0) == LSHIFTRT || GET_CODE (op0) == ASHIFTRT)
11943 mask &= (mask >> shift_count) << shift_count;
11944 else if (GET_CODE (op0) == ASHIFT)
11945 mask = (mask & (mask << shift_count)) >> shift_count;
11947 if ((nonzero_bits (XEXP (op0, 0), mode) & ~mask) == 0
11948 && (nonzero_bits (XEXP (op1, 0), mode) & ~mask) == 0)
11949 op0 = XEXP (op0, 0), op1 = XEXP (op1, 0);
11950 else
11951 break;
11954 /* If both operands are AND's of a paradoxical SUBREG by constant, the
11955 SUBREGs are of the same mode, and, in both cases, the AND would
11956 be redundant if the comparison was done in the narrower mode,
11957 do the comparison in the narrower mode (e.g., we are AND'ing with 1
11958 and the operand's possibly nonzero bits are 0xffffff01; in that case
11959 if we only care about QImode, we don't need the AND). This case
11960 occurs if the output mode of an scc insn is not SImode and
11961 STORE_FLAG_VALUE == 1 (e.g., the 386).
11963 Similarly, check for a case where the AND's are ZERO_EXTEND
11964 operations from some narrower mode even though a SUBREG is not
11965 present. */
11967 else if (GET_CODE (op0) == AND && GET_CODE (op1) == AND
11968 && CONST_INT_P (XEXP (op0, 1))
11969 && CONST_INT_P (XEXP (op1, 1)))
11971 rtx inner_op0 = XEXP (op0, 0);
11972 rtx inner_op1 = XEXP (op1, 0);
11973 HOST_WIDE_INT c0 = INTVAL (XEXP (op0, 1));
11974 HOST_WIDE_INT c1 = INTVAL (XEXP (op1, 1));
11975 int changed = 0;
11977 if (paradoxical_subreg_p (inner_op0)
11978 && GET_CODE (inner_op1) == SUBREG
11979 && (GET_MODE (SUBREG_REG (inner_op0))
11980 == GET_MODE (SUBREG_REG (inner_op1)))
11981 && (GET_MODE_PRECISION (GET_MODE (SUBREG_REG (inner_op0)))
11982 <= HOST_BITS_PER_WIDE_INT)
11983 && (0 == ((~c0) & nonzero_bits (SUBREG_REG (inner_op0),
11984 GET_MODE (SUBREG_REG (inner_op0)))))
11985 && (0 == ((~c1) & nonzero_bits (SUBREG_REG (inner_op1),
11986 GET_MODE (SUBREG_REG (inner_op1))))))
11988 op0 = SUBREG_REG (inner_op0);
11989 op1 = SUBREG_REG (inner_op1);
11991 /* The resulting comparison is always unsigned since we masked
11992 off the original sign bit. */
11993 code = unsigned_condition (code);
11995 changed = 1;
11998 else if (c0 == c1)
11999 FOR_EACH_MODE_UNTIL (tmode,
12000 as_a <scalar_int_mode> (GET_MODE (op0)))
12001 if ((unsigned HOST_WIDE_INT) c0 == GET_MODE_MASK (tmode))
12003 op0 = gen_lowpart_or_truncate (tmode, inner_op0);
12004 op1 = gen_lowpart_or_truncate (tmode, inner_op1);
12005 code = unsigned_condition (code);
12006 changed = 1;
12007 break;
12010 if (! changed)
12011 break;
12014 /* If both operands are NOT, we can strip off the outer operation
12015 and adjust the comparison code for swapped operands; similarly for
12016 NEG, except that this must be an equality comparison. */
12017 else if ((GET_CODE (op0) == NOT && GET_CODE (op1) == NOT)
12018 || (GET_CODE (op0) == NEG && GET_CODE (op1) == NEG
12019 && (code == EQ || code == NE)))
12020 op0 = XEXP (op0, 0), op1 = XEXP (op1, 0), code = swap_condition (code);
12022 else
12023 break;
12026 /* If the first operand is a constant, swap the operands and adjust the
12027 comparison code appropriately, but don't do this if the second operand
12028 is already a constant integer. */
12029 if (swap_commutative_operands_p (op0, op1))
12031 std::swap (op0, op1);
12032 code = swap_condition (code);
12035 /* We now enter a loop during which we will try to simplify the comparison.
12036 For the most part, we are only concerned with comparisons with zero,
12037 but some things may really be comparisons with zero but not start
12038 out looking that way. */
12040 while (CONST_INT_P (op1))
12042 machine_mode raw_mode = GET_MODE (op0);
12043 scalar_int_mode int_mode;
12044 int equality_comparison_p;
12045 int sign_bit_comparison_p;
12046 int unsigned_comparison_p;
12047 HOST_WIDE_INT const_op;
12049 /* We only want to handle integral modes. This catches VOIDmode,
12050 CCmode, and the floating-point modes. An exception is that we
12051 can handle VOIDmode if OP0 is a COMPARE or a comparison
12052 operation. */
12054 if (GET_MODE_CLASS (raw_mode) != MODE_INT
12055 && ! (raw_mode == VOIDmode
12056 && (GET_CODE (op0) == COMPARE || COMPARISON_P (op0))))
12057 break;
12059 /* Try to simplify the compare to constant, possibly changing the
12060 comparison op, and/or changing op1 to zero. */
12061 code = simplify_compare_const (code, raw_mode, op0, &op1);
12062 const_op = INTVAL (op1);
12064 /* Compute some predicates to simplify code below. */
12066 equality_comparison_p = (code == EQ || code == NE);
12067 sign_bit_comparison_p = ((code == LT || code == GE) && const_op == 0);
12068 unsigned_comparison_p = (code == LTU || code == LEU || code == GTU
12069 || code == GEU);
12071 /* If this is a sign bit comparison and we can do arithmetic in
12072 MODE, say that we will only be needing the sign bit of OP0. */
12073 if (sign_bit_comparison_p
12074 && is_a <scalar_int_mode> (raw_mode, &int_mode)
12075 && HWI_COMPUTABLE_MODE_P (int_mode))
12076 op0 = force_to_mode (op0, int_mode,
12077 HOST_WIDE_INT_1U
12078 << (GET_MODE_PRECISION (int_mode) - 1), 0);
12081 if (COMPARISON_P (op0))
12083 /* We can't do anything if OP0 is a condition code value, rather
12084 than an actual data value. */
12085 if (const_op != 0
12086 || CC0_P (XEXP (op0, 0))
12087 || GET_MODE_CLASS (GET_MODE (XEXP (op0, 0))) == MODE_CC)
12088 break;
12090 /* Get the two operands being compared. */
12091 if (GET_CODE (XEXP (op0, 0)) == COMPARE)
12092 tem = XEXP (XEXP (op0, 0), 0), tem1 = XEXP (XEXP (op0, 0), 1);
12093 else
12094 tem = XEXP (op0, 0), tem1 = XEXP (op0, 1);
12096 /* Check for the cases where we simply want the result of the
12097 earlier test or the opposite of that result. */
12098 if (code == NE || code == EQ
12099 || (val_signbit_known_set_p (raw_mode, STORE_FLAG_VALUE)
12100 && (code == LT || code == GE)))
12102 enum rtx_code new_code;
12103 if (code == LT || code == NE)
12104 new_code = GET_CODE (op0);
12105 else
12106 new_code = reversed_comparison_code (op0, NULL);
12108 if (new_code != UNKNOWN)
12110 code = new_code;
12111 op0 = tem;
12112 op1 = tem1;
12113 continue;
12116 break;
12119 if (raw_mode == VOIDmode)
12120 break;
12121 scalar_int_mode mode = as_a <scalar_int_mode> (raw_mode);
12123 /* Now try cases based on the opcode of OP0. If none of the cases
12124 does a "continue", we exit this loop immediately after the
12125 switch. */
12127 unsigned int mode_width = GET_MODE_PRECISION (mode);
12128 unsigned HOST_WIDE_INT mask = GET_MODE_MASK (mode);
12129 switch (GET_CODE (op0))
12131 case ZERO_EXTRACT:
12132 /* If we are extracting a single bit from a variable position in
12133 a constant that has only a single bit set and are comparing it
12134 with zero, we can convert this into an equality comparison
12135 between the position and the location of the single bit. */
12136 /* Except we can't if SHIFT_COUNT_TRUNCATED is set, since we might
12137 have already reduced the shift count modulo the word size. */
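/* Concretely (an illustration, assuming little-endian bit numbering):
   comparing (zero_extract (const_int 4) (const_int 1) POS) against zero
   for equality becomes (ne POS (const_int 2)), since bit 2 is the only
   bit set in 4.  */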
12138 if (!SHIFT_COUNT_TRUNCATED
12139 && CONST_INT_P (XEXP (op0, 0))
12140 && XEXP (op0, 1) == const1_rtx
12141 && equality_comparison_p && const_op == 0
12142 && (i = exact_log2 (UINTVAL (XEXP (op0, 0)))) >= 0)
12144 if (BITS_BIG_ENDIAN)
12145 i = BITS_PER_WORD - 1 - i;
12147 op0 = XEXP (op0, 2);
12148 op1 = GEN_INT (i);
12149 const_op = i;
12151 /* Result is nonzero iff shift count is equal to I. */
12152 code = reverse_condition (code);
12153 continue;
12156 /* fall through */
12158 case SIGN_EXTRACT:
12159 tem = expand_compound_operation (op0);
12160 if (tem != op0)
12162 op0 = tem;
12163 continue;
12165 break;
12167 case NOT:
12168 /* If testing for equality, we can take the NOT of the constant. */
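/* For instance (illustrative): (eq (not X) (const_int 5)) can become
   (eq X (const_int -6)), since ~5 == -6 in two's complement.  */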
12169 if (equality_comparison_p
12170 && (tem = simplify_unary_operation (NOT, mode, op1, mode)) != 0)
12172 op0 = XEXP (op0, 0);
12173 op1 = tem;
12174 continue;
12177 /* If just looking at the sign bit, reverse the sense of the
12178 comparison. */
12179 if (sign_bit_comparison_p)
12181 op0 = XEXP (op0, 0);
12182 code = (code == GE ? LT : GE);
12183 continue;
12185 break;
12187 case NEG:
12188 /* If testing for equality, we can take the NEG of the constant. */
12189 if (equality_comparison_p
12190 && (tem = simplify_unary_operation (NEG, mode, op1, mode)) != 0)
12192 op0 = XEXP (op0, 0);
12193 op1 = tem;
12194 continue;
12197 /* The remaining cases only apply to comparisons with zero. */
12198 if (const_op != 0)
12199 break;
12201 /* When X is ABS or is known positive,
12202 (neg X) is < 0 if and only if X != 0. */
12204 if (sign_bit_comparison_p
12205 && (GET_CODE (XEXP (op0, 0)) == ABS
12206 || (mode_width <= HOST_BITS_PER_WIDE_INT
12207 && (nonzero_bits (XEXP (op0, 0), mode)
12208 & (HOST_WIDE_INT_1U << (mode_width - 1)))
12209 == 0)))
12211 op0 = XEXP (op0, 0);
12212 code = (code == LT ? NE : EQ);
12213 continue;
12216 /* If we have NEG of something whose two high-order bits are the
12217 same, we know that "(-a) < 0" is equivalent to "a > 0". */
12218 if (num_sign_bit_copies (op0, mode) >= 2)
12220 op0 = XEXP (op0, 0);
12221 code = swap_condition (code);
12222 continue;
12224 break;
12226 case ROTATE:
12227 /* If we are testing equality and our count is a constant, we
12228 can perform the inverse operation on our RHS. */
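/* For example (illustrative, QImode): (eq (rotate:QI X (const_int 1))
   (const_int 2)) can become (eq X (const_int 1)), rotating the constant
   right by the same count.  */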
12229 if (equality_comparison_p && CONST_INT_P (XEXP (op0, 1))
12230 && (tem = simplify_binary_operation (ROTATERT, mode,
12231 op1, XEXP (op0, 1))) != 0)
12233 op0 = XEXP (op0, 0);
12234 op1 = tem;
12235 continue;
12238 /* If we are doing a < 0 or >= 0 comparison, it means we are testing
12239 a particular bit. Convert it to an AND of a constant of that
12240 bit. This will be converted into a ZERO_EXTRACT. */
12241 if (const_op == 0 && sign_bit_comparison_p
12242 && CONST_INT_P (XEXP (op0, 1))
12243 && mode_width <= HOST_BITS_PER_WIDE_INT)
12245 op0 = simplify_and_const_int (NULL_RTX, mode, XEXP (op0, 0),
12246 (HOST_WIDE_INT_1U
12247 << (mode_width - 1
12248 - INTVAL (XEXP (op0, 1)))));
12249 code = (code == LT ? NE : EQ);
12250 continue;
12253 /* Fall through. */
12255 case ABS:
12256 /* ABS is ignorable inside an equality comparison with zero. */
12257 if (const_op == 0 && equality_comparison_p)
12259 op0 = XEXP (op0, 0);
12260 continue;
12262 break;
12264 case SIGN_EXTEND:
12265 /* Can simplify (compare (zero/sign_extend FOO) CONST) to
12266 (compare FOO CONST) if CONST fits in FOO's mode and we
12267 are either testing inequality or have an unsigned
12268 comparison with ZERO_EXTEND or a signed comparison with
12269 SIGN_EXTEND. But don't do it if we don't have a compare
12270 insn of the given mode, since we'd have to revert it
12271 later on, and then we wouldn't know whether to sign- or
12272 zero-extend. */
12273 if (is_int_mode (GET_MODE (XEXP (op0, 0)), &mode)
12274 && ! unsigned_comparison_p
12275 && HWI_COMPUTABLE_MODE_P (mode)
12276 && trunc_int_for_mode (const_op, mode) == const_op
12277 && have_insn_for (COMPARE, mode))
12279 op0 = XEXP (op0, 0);
12280 continue;
12282 break;
12284 case SUBREG:
12285 /* Check for the case where we are comparing A - C1 with C2, that is
12287 (subreg:MODE (plus (A) (-C1))) op (C2)
12289 with C1 a constant, and try to lift the SUBREG, i.e. to do the
12290 comparison in the wider mode. One of the following two conditions
12291 must be true in order for this to be valid:
12293 1. The mode extension results in the same bit pattern being added
12294 on both sides and the comparison is equality or unsigned. As
12295 C2 has been truncated to fit in MODE, the pattern can only be
12296 all 0s or all 1s.
12298 2. The mode extension results in the sign bit being copied on
12299 each side.
12301 The difficulty here is that we have predicates for A but not for
12302 (A - C1) so we need to check that C1 is within proper bounds so
12303 as to perturb A as little as possible. */
12305 if (mode_width <= HOST_BITS_PER_WIDE_INT
12306 && subreg_lowpart_p (op0)
12307 && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (op0)),
12308 &inner_mode)
12309 && GET_MODE_PRECISION (inner_mode) > mode_width
12310 && GET_CODE (SUBREG_REG (op0)) == PLUS
12311 && CONST_INT_P (XEXP (SUBREG_REG (op0), 1)))
12313 rtx a = XEXP (SUBREG_REG (op0), 0);
12314 HOST_WIDE_INT c1 = -INTVAL (XEXP (SUBREG_REG (op0), 1));
12316 if ((c1 > 0
12317 && (unsigned HOST_WIDE_INT) c1
12318 < HOST_WIDE_INT_1U << (mode_width - 1)
12319 && (equality_comparison_p || unsigned_comparison_p)
12320 /* (A - C1) zero-extends if it is positive and sign-extends
12321 if it is negative, C2 both zero- and sign-extends. */
12322 && ((0 == (nonzero_bits (a, inner_mode)
12323 & ~GET_MODE_MASK (mode))
12324 && const_op >= 0)
12325 /* (A - C1) sign-extends if it is positive and 1-extends
12326 if it is negative, C2 both sign- and 1-extends. */
12327 || (num_sign_bit_copies (a, inner_mode)
12328 > (unsigned int) (GET_MODE_PRECISION (inner_mode)
12329 - mode_width)
12330 && const_op < 0)))
12331 || ((unsigned HOST_WIDE_INT) c1
12332 < HOST_WIDE_INT_1U << (mode_width - 2)
12333 /* (A - C1) always sign-extends, like C2. */
12334 && num_sign_bit_copies (a, inner_mode)
12335 > (unsigned int) (GET_MODE_PRECISION (inner_mode)
12336 - (mode_width - 1))))
12338 op0 = SUBREG_REG (op0);
12339 continue;
12343 /* If the inner mode is narrower and we are extracting the low part,
12344 we can treat the SUBREG as if it were a ZERO_EXTEND. */
12345 if (paradoxical_subreg_p (op0))
;
12347 else if (subreg_lowpart_p (op0)
12348 && GET_MODE_CLASS (mode) == MODE_INT
12349 && is_int_mode (GET_MODE (SUBREG_REG (op0)), &inner_mode)
12350 && (code == NE || code == EQ)
12351 && GET_MODE_PRECISION (inner_mode) <= HOST_BITS_PER_WIDE_INT
12352 && !paradoxical_subreg_p (op0)
12353 && (nonzero_bits (SUBREG_REG (op0), inner_mode)
12354 & ~GET_MODE_MASK (mode)) == 0)
12356 /* Remove outer subregs that don't do anything. */
12357 tem = gen_lowpart (inner_mode, op1);
12359 if ((nonzero_bits (tem, inner_mode)
12360 & ~GET_MODE_MASK (mode)) == 0)
12362 op0 = SUBREG_REG (op0);
12363 op1 = tem;
12364 continue;
12366 break;
12368 else
12369 break;
12371 /* FALLTHROUGH */
12373 case ZERO_EXTEND:
12374 if (is_int_mode (GET_MODE (XEXP (op0, 0)), &mode)
12375 && (unsigned_comparison_p || equality_comparison_p)
12376 && HWI_COMPUTABLE_MODE_P (mode)
12377 && (unsigned HOST_WIDE_INT) const_op <= GET_MODE_MASK (mode)
12378 && const_op >= 0
12379 && have_insn_for (COMPARE, mode))
12381 op0 = XEXP (op0, 0);
12382 continue;
12384 break;
12386 case PLUS:
12387 /* (eq (plus X A) B) -> (eq X (minus B A)). We can only do
12388 this for equality comparisons due to pathological cases involving
12389 overflows. */
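/* For instance (illustrative): (eq (plus X (const_int 3)) (const_int 10))
   becomes (eq X (const_int 7)).  */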
12390 if (equality_comparison_p
12391 && 0 != (tem = simplify_binary_operation (MINUS, mode,
12392 op1, XEXP (op0, 1))))
12394 op0 = XEXP (op0, 0);
12395 op1 = tem;
12396 continue;
12399 /* (plus (abs X) (const_int -1)) is < 0 if and only if X == 0. */
12400 if (const_op == 0 && XEXP (op0, 1) == constm1_rtx
12401 && GET_CODE (XEXP (op0, 0)) == ABS && sign_bit_comparison_p)
12403 op0 = XEXP (XEXP (op0, 0), 0);
12404 code = (code == LT ? EQ : NE);
12405 continue;
12407 break;
12409 case MINUS:
12410 /* We used to optimize signed comparisons against zero, but that
12411 was incorrect. Unsigned comparisons against zero (GTU, LEU)
12412 arrive here as equality comparisons, or (GEU, LTU) are
12413 optimized away. No need to special-case them. */
12415 /* (eq (minus A B) C) -> (eq A (plus B C)) or
12416 (eq B (minus A C)), whichever simplifies. We can only do
12417 this for equality comparisons due to pathological cases involving
12418 overflows. */
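/* For instance (illustrative): (eq (minus X (const_int 4)) (const_int 6))
   becomes (eq X (const_int 10)), since B + C simplifies to a constant.  */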
12419 if (equality_comparison_p
12420 && 0 != (tem = simplify_binary_operation (PLUS, mode,
12421 XEXP (op0, 1), op1)))
12423 op0 = XEXP (op0, 0);
12424 op1 = tem;
12425 continue;
12428 if (equality_comparison_p
12429 && 0 != (tem = simplify_binary_operation (MINUS, mode,
12430 XEXP (op0, 0), op1)))
12432 op0 = XEXP (op0, 1);
12433 op1 = tem;
12434 continue;
12437 /* The sign bit of (minus (ashiftrt X C) X), where C is the number
12438 of bits in X minus 1, is one iff X > 0. */
12439 if (sign_bit_comparison_p && GET_CODE (XEXP (op0, 0)) == ASHIFTRT
12440 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
12441 && UINTVAL (XEXP (XEXP (op0, 0), 1)) == mode_width - 1
12442 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), XEXP (op0, 1)))
12444 op0 = XEXP (op0, 1);
12445 code = (code == GE ? LE : GT);
12446 continue;
12448 break;
12450 case XOR:
12451 /* (eq (xor A B) C) -> (eq A (xor B C)). This is a simplification
12452 if C is zero or B is a constant. */
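/* For instance (illustrative): (eq (xor X (const_int 5)) (const_int 3))
   becomes (eq X (const_int 6)), since 5 ^ 3 == 6.  */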
12453 if (equality_comparison_p
12454 && 0 != (tem = simplify_binary_operation (XOR, mode,
12455 XEXP (op0, 1), op1)))
12457 op0 = XEXP (op0, 0);
12458 op1 = tem;
12459 continue;
12461 break;
12464 case IOR:
12465 /* The sign bit of (ior (plus X (const_int -1)) X) is nonzero
12466 iff X <= 0. */
12467 if (sign_bit_comparison_p && GET_CODE (XEXP (op0, 0)) == PLUS
12468 && XEXP (XEXP (op0, 0), 1) == constm1_rtx
12469 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), XEXP (op0, 1)))
12471 op0 = XEXP (op0, 1);
12472 code = (code == GE ? GT : LE);
12473 continue;
12475 break;
12477 case AND:
12478 /* Convert (and (xshift 1 X) Y) to (and (lshiftrt Y X) 1). This
12479 will be converted to a ZERO_EXTRACT later. */
12480 if (const_op == 0 && equality_comparison_p
12481 && GET_CODE (XEXP (op0, 0)) == ASHIFT
12482 && XEXP (XEXP (op0, 0), 0) == const1_rtx)
12484 op0 = gen_rtx_LSHIFTRT (mode, XEXP (op0, 1),
12485 XEXP (XEXP (op0, 0), 1));
12486 op0 = simplify_and_const_int (NULL_RTX, mode, op0, 1);
12487 continue;
12490 /* If we are comparing (and (lshiftrt X C1) C2) for equality with
12491 zero and X is a comparison and C1 and C2 describe only bits set
12492 in STORE_FLAG_VALUE, we can compare with X. */
12493 if (const_op == 0 && equality_comparison_p
12494 && mode_width <= HOST_BITS_PER_WIDE_INT
12495 && CONST_INT_P (XEXP (op0, 1))
12496 && GET_CODE (XEXP (op0, 0)) == LSHIFTRT
12497 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
12498 && INTVAL (XEXP (XEXP (op0, 0), 1)) >= 0
12499 && INTVAL (XEXP (XEXP (op0, 0), 1)) < HOST_BITS_PER_WIDE_INT)
12501 mask = ((INTVAL (XEXP (op0, 1)) & GET_MODE_MASK (mode))
12502 << INTVAL (XEXP (XEXP (op0, 0), 1)));
12503 if ((~STORE_FLAG_VALUE & mask) == 0
12504 && (COMPARISON_P (XEXP (XEXP (op0, 0), 0))
12505 || ((tem = get_last_value (XEXP (XEXP (op0, 0), 0))) != 0
12506 && COMPARISON_P (tem))))
12508 op0 = XEXP (XEXP (op0, 0), 0);
12509 continue;
12513 /* If we are doing an equality comparison of an AND of a bit equal
12514 to the sign bit, replace this with a LT or GE comparison of
12515 the underlying value. */
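/* For example (illustrative, assuming a 32-bit mode):
   (eq (and X (const_int 0x80000000)) (const_int 0)) becomes
   (ge X (const_int 0)), and the NE form becomes (lt X (const_int 0)).  */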
12516 if (equality_comparison_p
12517 && const_op == 0
12518 && CONST_INT_P (XEXP (op0, 1))
12519 && mode_width <= HOST_BITS_PER_WIDE_INT
12520 && ((INTVAL (XEXP (op0, 1)) & GET_MODE_MASK (mode))
12521 == HOST_WIDE_INT_1U << (mode_width - 1)))
12523 op0 = XEXP (op0, 0);
12524 code = (code == EQ ? GE : LT);
12525 continue;
12528 /* If this AND operation is really a ZERO_EXTEND from a narrower
12529 mode, the constant fits within that mode, and this is either an
12530 equality or unsigned comparison, try to do this comparison in
12531 the narrower mode.
12533 Note that in:
12535 (ne:DI (and:DI (reg:DI 4) (const_int 0xffffffff)) (const_int 0))
12536 -> (ne:DI (reg:SI 4) (const_int 0))
12538 unless TARGET_TRULY_NOOP_TRUNCATION allows it or the register is
12539 known to hold a value of the required mode, the
12540 transformation is invalid. */
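/* For instance (illustrative): (eq (and:SI X (const_int 0xff))
   (const_int 7)) can be tried as a QImode comparison of the low part
   of X against 7, when an 8-bit integer mode exists for the mask.  */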
12541 if ((equality_comparison_p || unsigned_comparison_p)
12542 && CONST_INT_P (XEXP (op0, 1))
12543 && (i = exact_log2 ((UINTVAL (XEXP (op0, 1))
12544 & GET_MODE_MASK (mode))
12545 + 1)) >= 0
12546 && const_op >> i == 0
12547 && int_mode_for_size (i, 1).exists (&tmode))
12549 op0 = gen_lowpart_or_truncate (tmode, XEXP (op0, 0));
12550 continue;
12553 /* If this is (and:M1 (subreg:M1 X:M2 0) (const_int C1)) where C1
12554 fits in both M1 and M2 and the SUBREG is either paradoxical
12555 or represents the low part, permute the SUBREG and the AND
12556 and try again. */
12557 if (GET_CODE (XEXP (op0, 0)) == SUBREG
12558 && CONST_INT_P (XEXP (op0, 1)))
12560 unsigned HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
12561 /* Require an integral mode, to avoid creating something like
12562 (AND:SF ...). */
12563 if ((is_a <scalar_int_mode>
12564 (GET_MODE (SUBREG_REG (XEXP (op0, 0))), &tmode))
12565 /* It is unsafe to commute the AND into the SUBREG if the
12566 SUBREG is paradoxical and WORD_REGISTER_OPERATIONS is
12567 not defined. As originally written the upper bits
12568 have a defined value due to the AND operation.
12569 However, if we commute the AND inside the SUBREG then
12570 they no longer have defined values and the meaning of
12571 the code has been changed.
12572 Also C1 should not change value in the smaller mode,
12573 see PR67028 (a positive C1 can become negative in the
12574 smaller mode, so that the AND does no longer mask the
12575 upper bits). */
12576 && ((WORD_REGISTER_OPERATIONS
12577 && mode_width > GET_MODE_PRECISION (tmode)
12578 && mode_width <= BITS_PER_WORD
12579 && trunc_int_for_mode (c1, tmode) == (HOST_WIDE_INT) c1)
12580 || (mode_width <= GET_MODE_PRECISION (tmode)
12581 && subreg_lowpart_p (XEXP (op0, 0))))
12582 && mode_width <= HOST_BITS_PER_WIDE_INT
12583 && HWI_COMPUTABLE_MODE_P (tmode)
12584 && (c1 & ~mask) == 0
12585 && (c1 & ~GET_MODE_MASK (tmode)) == 0
12586 && c1 != mask
12587 && c1 != GET_MODE_MASK (tmode))
12589 op0 = simplify_gen_binary (AND, tmode,
12590 SUBREG_REG (XEXP (op0, 0)),
12591 gen_int_mode (c1, tmode));
12592 op0 = gen_lowpart (mode, op0);
12593 continue;
12597 /* Convert (ne (and (not X) 1) 0) to (eq (and X 1) 0). */
12598 if (const_op == 0 && equality_comparison_p
12599 && XEXP (op0, 1) == const1_rtx
12600 && GET_CODE (XEXP (op0, 0)) == NOT)
12602 op0 = simplify_and_const_int (NULL_RTX, mode,
12603 XEXP (XEXP (op0, 0), 0), 1);
12604 code = (code == NE ? EQ : NE);
12605 continue;
12608 /* Convert (ne (and (lshiftrt (not X)) 1) 0) to
12609 (eq (and (lshiftrt X) 1) 0).
12610 Also handle the case where (not X) is expressed using xor. */
12611 if (const_op == 0 && equality_comparison_p
12612 && XEXP (op0, 1) == const1_rtx
12613 && GET_CODE (XEXP (op0, 0)) == LSHIFTRT)
12615 rtx shift_op = XEXP (XEXP (op0, 0), 0);
12616 rtx shift_count = XEXP (XEXP (op0, 0), 1);
12618 if (GET_CODE (shift_op) == NOT
12619 || (GET_CODE (shift_op) == XOR
12620 && CONST_INT_P (XEXP (shift_op, 1))
12621 && CONST_INT_P (shift_count)
12622 && HWI_COMPUTABLE_MODE_P (mode)
12623 && (UINTVAL (XEXP (shift_op, 1))
12624 == HOST_WIDE_INT_1U
12625 << INTVAL (shift_count))))
12628 op0 = gen_rtx_LSHIFTRT (mode, XEXP (shift_op, 0), shift_count);
12629 op0 = simplify_and_const_int (NULL_RTX, mode, op0, 1);
12630 code = (code == NE ? EQ : NE);
12631 continue;
12634 break;
12636 case ASHIFT:
12637 /* If we have (compare (ashift FOO N) (const_int C)) and
12638 the high order N bits of FOO (N+1 if an inequality comparison)
12639 are known to be zero, we can do this by comparing FOO with C
12640 shifted right N bits so long as the low-order N bits of C are
12641 zero. */
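/* For example (illustrative): (eq (ashift X (const_int 2)) (const_int 20))
   can become (eq X (const_int 5)) when the two high-order bits of X are
   known to be zero and the low two bits of the constant are zero, as
   here (20 == 5 << 2).  */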
12642 if (CONST_INT_P (XEXP (op0, 1))
12643 && INTVAL (XEXP (op0, 1)) >= 0
12644 && ((INTVAL (XEXP (op0, 1)) + ! equality_comparison_p)
12645 < HOST_BITS_PER_WIDE_INT)
12646 && (((unsigned HOST_WIDE_INT) const_op
12647 & ((HOST_WIDE_INT_1U << INTVAL (XEXP (op0, 1)))
12648 - 1)) == 0)
12649 && mode_width <= HOST_BITS_PER_WIDE_INT
12650 && (nonzero_bits (XEXP (op0, 0), mode)
12651 & ~(mask >> (INTVAL (XEXP (op0, 1))
12652 + ! equality_comparison_p))) == 0)
12654 /* We must perform a logical shift, not an arithmetic one,
12655 as we want the top N bits of C to be zero. */
12656 unsigned HOST_WIDE_INT temp = const_op & GET_MODE_MASK (mode);
12658 temp >>= INTVAL (XEXP (op0, 1));
12659 op1 = gen_int_mode (temp, mode);
12660 op0 = XEXP (op0, 0);
12661 continue;
12664 /* If we are doing a sign bit comparison, it means we are testing
12665 a particular bit. Convert it to the appropriate AND. */
12666 if (sign_bit_comparison_p && CONST_INT_P (XEXP (op0, 1))
12667 && mode_width <= HOST_BITS_PER_WIDE_INT)
12669 op0 = simplify_and_const_int (NULL_RTX, mode, XEXP (op0, 0),
12670 (HOST_WIDE_INT_1U
12671 << (mode_width - 1
12672 - INTVAL (XEXP (op0, 1)))));
12673 code = (code == LT ? NE : EQ);
12674 continue;
12677 /* If this is an equality comparison with zero and we are shifting
12678 the low bit to the sign bit, we can convert this to an AND of the
12679 low-order bit. */
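/* For instance (illustrative, assuming a 32-bit mode):
   (eq (ashift X (const_int 31)) (const_int 0)) becomes
   (eq (and X (const_int 1)) (const_int 0)).  */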
12680 if (const_op == 0 && equality_comparison_p
12681 && CONST_INT_P (XEXP (op0, 1))
12682 && UINTVAL (XEXP (op0, 1)) == mode_width - 1)
12684 op0 = simplify_and_const_int (NULL_RTX, mode, XEXP (op0, 0), 1);
12685 continue;
12687 break;
12689 case ASHIFTRT:
12690 /* If this is an equality comparison with zero, we can do this
12691 as a logical shift, which might be much simpler. */
12692 if (equality_comparison_p && const_op == 0
12693 && CONST_INT_P (XEXP (op0, 1)))
12695 op0 = simplify_shift_const (NULL_RTX, LSHIFTRT, mode,
12696 XEXP (op0, 0),
12697 INTVAL (XEXP (op0, 1)));
12698 continue;
12701 /* If OP0 is a sign extension and CODE is not an unsigned comparison,
12702 do the comparison in a narrower mode. */
12703 if (! unsigned_comparison_p
12704 && CONST_INT_P (XEXP (op0, 1))
12705 && GET_CODE (XEXP (op0, 0)) == ASHIFT
12706 && XEXP (op0, 1) == XEXP (XEXP (op0, 0), 1)
12707 && (int_mode_for_size (mode_width - INTVAL (XEXP (op0, 1)), 1)
12708 .exists (&tmode))
12709 && (((unsigned HOST_WIDE_INT) const_op
12710 + (GET_MODE_MASK (tmode) >> 1) + 1)
12711 <= GET_MODE_MASK (tmode)))
12713 op0 = gen_lowpart (tmode, XEXP (XEXP (op0, 0), 0));
12714 continue;
12717 /* Likewise if OP0 is a PLUS of a sign extension with a
12718 constant, which is usually represented with the PLUS
12719 between the shifts. */
12720 if (! unsigned_comparison_p
12721 && CONST_INT_P (XEXP (op0, 1))
12722 && GET_CODE (XEXP (op0, 0)) == PLUS
12723 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
12724 && GET_CODE (XEXP (XEXP (op0, 0), 0)) == ASHIFT
12725 && XEXP (op0, 1) == XEXP (XEXP (XEXP (op0, 0), 0), 1)
12726 && (int_mode_for_size (mode_width - INTVAL (XEXP (op0, 1)), 1)
12727 .exists (&tmode))
12728 && (((unsigned HOST_WIDE_INT) const_op
12729 + (GET_MODE_MASK (tmode) >> 1) + 1)
12730 <= GET_MODE_MASK (tmode)))
12732 rtx inner = XEXP (XEXP (XEXP (op0, 0), 0), 0);
12733 rtx add_const = XEXP (XEXP (op0, 0), 1);
12734 rtx new_const = simplify_gen_binary (ASHIFTRT, mode,
12735 add_const, XEXP (op0, 1));
12737 op0 = simplify_gen_binary (PLUS, tmode,
12738 gen_lowpart (tmode, inner),
12739 new_const);
12740 continue;
12743 /* FALLTHROUGH */
12744 case LSHIFTRT:
12745 /* If we have (compare (xshiftrt FOO N) (const_int C)) and
12746 the low order N bits of FOO are known to be zero, we can do this
12747 by comparing FOO with C shifted left N bits so long as no
12748 overflow occurs. Even if the low order N bits of FOO aren't known
12749 to be zero, if the comparison is >= or < we can use the same
12750 optimization and for > or <= by setting all the low
12751 order N bits in the comparison constant. */
12752 if (CONST_INT_P (XEXP (op0, 1))
12753 && INTVAL (XEXP (op0, 1)) > 0
12754 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT
12755 && mode_width <= HOST_BITS_PER_WIDE_INT
12756 && (((unsigned HOST_WIDE_INT) const_op
12757 + (GET_CODE (op0) != LSHIFTRT
12758 ? ((GET_MODE_MASK (mode) >> INTVAL (XEXP (op0, 1)) >> 1)
12759 + 1)
12760 : 0))
12761 <= GET_MODE_MASK (mode) >> INTVAL (XEXP (op0, 1))))
12763 unsigned HOST_WIDE_INT low_bits
12764 = (nonzero_bits (XEXP (op0, 0), mode)
12765 & ((HOST_WIDE_INT_1U
12766 << INTVAL (XEXP (op0, 1))) - 1));
12767 if (low_bits == 0 || !equality_comparison_p)
12769 /* If the shift was logical, then we must make the condition
12770 unsigned. */
12771 if (GET_CODE (op0) == LSHIFTRT)
12772 code = unsigned_condition (code);
12774 const_op = (unsigned HOST_WIDE_INT) const_op
12775 << INTVAL (XEXP (op0, 1));
12776 if (low_bits != 0
12777 && (code == GT || code == GTU
12778 || code == LE || code == LEU))
12779 const_op
12780 |= ((HOST_WIDE_INT_1 << INTVAL (XEXP (op0, 1))) - 1);
12781 op1 = GEN_INT (const_op);
12782 op0 = XEXP (op0, 0);
12783 continue;
12787 /* If we are using this shift to extract just the sign bit, we
12788 can replace this with an LT or GE comparison. */
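/* For instance (illustrative, assuming a 32-bit mode):
   (ne (lshiftrt X (const_int 31)) (const_int 0)) becomes
   (lt X (const_int 0)), and the EQ form becomes (ge X (const_int 0)).  */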
12789 if (const_op == 0
12790 && (equality_comparison_p || sign_bit_comparison_p)
12791 && CONST_INT_P (XEXP (op0, 1))
12792 && UINTVAL (XEXP (op0, 1)) == mode_width - 1)
12794 op0 = XEXP (op0, 0);
12795 code = (code == NE || code == GT ? LT : GE);
12796 continue;
12798 break;
12800 default:
12801 break;
12804 break;
12807 /* Now make any compound operations involved in this comparison. Then,
12808 check for an outermost SUBREG on OP0 that is not doing anything or is
12809 paradoxical. The latter transformation must only be performed when
12810 it is known that the "extra" bits will be the same in op0 and op1 or
12811 that they don't matter. There are three cases to consider:
12813 1. SUBREG_REG (op0) is a register. In this case the bits are don't
12814 care bits and we can assume they have any convenient value. So
12815 making the transformation is safe.
12817 2. SUBREG_REG (op0) is a memory and LOAD_EXTEND_OP is UNKNOWN.
12818 In this case the upper bits of op0 are undefined. We should not make
12819 the simplification in that case as we do not know the contents of
12820 those bits.
12822 3. SUBREG_REG (op0) is a memory and LOAD_EXTEND_OP is not UNKNOWN.
12823 In that case we know those bits are zeros or ones. We must also be
12824 sure that they are the same as the upper bits of op1.
12826 We can never remove a SUBREG for a non-equality comparison because
12827 the sign bit is in a different place in the underlying object. */
12829 rtx_code op0_mco_code = SET;
12830 if (op1 == const0_rtx)
12831 op0_mco_code = code == NE || code == EQ ? EQ : COMPARE;
12833 op0 = make_compound_operation (op0, op0_mco_code);
12834 op1 = make_compound_operation (op1, SET);
12836 if (GET_CODE (op0) == SUBREG && subreg_lowpart_p (op0)
12837 && is_int_mode (GET_MODE (op0), &mode)
12838 && is_int_mode (GET_MODE (SUBREG_REG (op0)), &inner_mode)
12839 && (code == NE || code == EQ))
12841 if (paradoxical_subreg_p (op0))
12843 /* For paradoxical subregs, allow case 1 as above. Case 3 isn't
12844 implemented. */
12845 if (REG_P (SUBREG_REG (op0)))
12847 op0 = SUBREG_REG (op0);
12848 op1 = gen_lowpart (inner_mode, op1);
12851 else if (GET_MODE_PRECISION (inner_mode) <= HOST_BITS_PER_WIDE_INT
12852 && (nonzero_bits (SUBREG_REG (op0), inner_mode)
12853 & ~GET_MODE_MASK (mode)) == 0)
12855 tem = gen_lowpart (inner_mode, op1);
12857 if ((nonzero_bits (tem, inner_mode) & ~GET_MODE_MASK (mode)) == 0)
12858 op0 = SUBREG_REG (op0), op1 = tem;
12862 /* We now do the opposite procedure: Some machines don't have compare
12863 insns in all modes. If OP0's mode is an integer mode smaller than a
12864 word and we can't do a compare in that mode, see if there is a larger
12865 mode for which we can do the compare. There are a number of cases in
12866 which we can use the wider mode. */
12868 if (is_int_mode (GET_MODE (op0), &mode)
12869 && GET_MODE_SIZE (mode) < UNITS_PER_WORD
12870 && ! have_insn_for (COMPARE, mode))
12871 FOR_EACH_WIDER_MODE (tmode_iter, mode)
12873 tmode = tmode_iter.require ();
12874 if (!HWI_COMPUTABLE_MODE_P (tmode))
12875 break;
12876 if (have_insn_for (COMPARE, tmode))
12878 int zero_extended;
12880 /* If this is a test for negative, we can make an explicit
12881 test of the sign bit. Test this first so we can use
12882 a paradoxical subreg to extend OP0. */
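/* For example (an illustrative sketch): a QImode test (lt X (const_int 0))
   on a machine without a QImode compare can be done in SImode as
   (ne (and (subreg:SI X 0) (const_int 0x80)) (const_int 0)).  */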
12884 if (op1 == const0_rtx && (code == LT || code == GE)
12885 && HWI_COMPUTABLE_MODE_P (mode))
12887 unsigned HOST_WIDE_INT sign
12888 = HOST_WIDE_INT_1U << (GET_MODE_BITSIZE (mode) - 1);
12889 op0 = simplify_gen_binary (AND, tmode,
12890 gen_lowpart (tmode, op0),
12891 gen_int_mode (sign, tmode));
12892 code = (code == LT) ? NE : EQ;
12893 break;
12896 /* If the only nonzero bits in OP0 and OP1 are those in the
12897 narrower mode and this is an equality or unsigned comparison,
12898 we can use the wider mode. Similarly for sign-extended
12899 values, in which case it is true for all comparisons. */
12900 zero_extended = ((code == EQ || code == NE
12901 || code == GEU || code == GTU
12902 || code == LEU || code == LTU)
12903 && (nonzero_bits (op0, tmode)
12904 & ~GET_MODE_MASK (mode)) == 0
12905 && ((CONST_INT_P (op1)
12906 || (nonzero_bits (op1, tmode)
12907 & ~GET_MODE_MASK (mode)) == 0)));
12909 if (zero_extended
12910 || ((num_sign_bit_copies (op0, tmode)
12911 > (unsigned int) (GET_MODE_PRECISION (tmode)
12912 - GET_MODE_PRECISION (mode)))
12913 && (num_sign_bit_copies (op1, tmode)
12914 > (unsigned int) (GET_MODE_PRECISION (tmode)
12915 - GET_MODE_PRECISION (mode)))))
12917 /* If OP0 is an AND and we don't have an AND in MODE either,
12918 make a new AND in the proper mode. */
12919 if (GET_CODE (op0) == AND
12920 && !have_insn_for (AND, mode))
12921 op0 = simplify_gen_binary (AND, tmode,
12922 gen_lowpart (tmode,
12923 XEXP (op0, 0)),
12924 gen_lowpart (tmode,
12925 XEXP (op0, 1)));
12926 else
12928 if (zero_extended)
12930 op0 = simplify_gen_unary (ZERO_EXTEND, tmode,
12931 op0, mode);
12932 op1 = simplify_gen_unary (ZERO_EXTEND, tmode,
12933 op1, mode);
12935 else
12937 op0 = simplify_gen_unary (SIGN_EXTEND, tmode,
12938 op0, mode);
12939 op1 = simplify_gen_unary (SIGN_EXTEND, tmode,
12940 op1, mode);
12942 break;
12948 /* We may have changed the comparison operands. Re-canonicalize. */
12949 if (swap_commutative_operands_p (op0, op1))
12951 std::swap (op0, op1);
12952 code = swap_condition (code);
12955 /* If this machine only supports a subset of valid comparisons, see if we
12956 can convert an unsupported one into a supported one. */
12957 target_canonicalize_comparison (&code, &op0, &op1, 0);
12959 *pop0 = op0;
12960 *pop1 = op1;
12962 return code;
12965 /* Utility function for record_value_for_reg. Count number of
12966 rtxs in X. */
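/* For example (illustrative): for (plus (reg) (const_int 1)) this
   returns 3: one for the PLUS and one for each operand.  */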
12967 static int
12968 count_rtxs (rtx x)
12970 enum rtx_code code = GET_CODE (x);
12971 const char *fmt;
12972 int i, j, ret = 1;
12974 if (GET_RTX_CLASS (code) == RTX_BIN_ARITH
12975 || GET_RTX_CLASS (code) == RTX_COMM_ARITH)
12977 rtx x0 = XEXP (x, 0);
12978 rtx x1 = XEXP (x, 1);
12980 if (x0 == x1)
12981 return 1 + 2 * count_rtxs (x0);
12983 if ((GET_RTX_CLASS (GET_CODE (x1)) == RTX_BIN_ARITH
12984 || GET_RTX_CLASS (GET_CODE (x1)) == RTX_COMM_ARITH)
12985 && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
12986 return 2 + 2 * count_rtxs (x0)
12987 + count_rtxs (x0 == XEXP (x1, 0)
12988 ? XEXP (x1, 1) : XEXP (x1, 0));
12990 if ((GET_RTX_CLASS (GET_CODE (x0)) == RTX_BIN_ARITH
12991 || GET_RTX_CLASS (GET_CODE (x0)) == RTX_COMM_ARITH)
12992 && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
12993 return 2 + 2 * count_rtxs (x1)
12994 + count_rtxs (x1 == XEXP (x0, 0)
12995 ? XEXP (x0, 1) : XEXP (x0, 0));
12998 fmt = GET_RTX_FORMAT (code);
12999 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
13000 if (fmt[i] == 'e')
13001 ret += count_rtxs (XEXP (x, i));
13002 else if (fmt[i] == 'E')
13003 for (j = 0; j < XVECLEN (x, i); j++)
13004 ret += count_rtxs (XVECEXP (x, i, j));
13006 return ret;
13009 /* Utility function for the following routine. Called when X is part of a value
13010 being stored into last_set_value. Sets last_set_table_tick
13011 for each register mentioned. Similar to mention_regs in cse.c */
13013 static void
13014 update_table_tick (rtx x)
13016 enum rtx_code code = GET_CODE (x);
13017 const char *fmt = GET_RTX_FORMAT (code);
13018 int i, j;
13020 if (code == REG)
13022 unsigned int regno = REGNO (x);
13023 unsigned int endregno = END_REGNO (x);
13024 unsigned int r;
13026 for (r = regno; r < endregno; r++)
13028 reg_stat_type *rsp = &reg_stat[r];
13029 rsp->last_set_table_tick = label_tick;
13032 return;
13035 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
13036 if (fmt[i] == 'e')
13038 /* Check for identical subexpressions. If x contains
13039 identical subexpressions, we only have to traverse one of
13040 them. */
13041 if (i == 0 && ARITHMETIC_P (x))
13043 /* Note that at this point x1 has already been
13044 processed. */
13045 rtx x0 = XEXP (x, 0);
13046 rtx x1 = XEXP (x, 1);
13048 /* If x0 and x1 are identical then there is no need to
13049 process x0. */
13050 if (x0 == x1)
13051 break;
13053 /* If x0 is identical to a subexpression of x1 then while
13054 processing x1, x0 has already been processed. Thus we
13055 are done with x. */
13056 if (ARITHMETIC_P (x1)
13057 && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
13058 break;
13060 /* If x1 is identical to a subexpression of x0 then we
13061 still have to process the rest of x0. */
13062 if (ARITHMETIC_P (x0)
13063 && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
13065 update_table_tick (XEXP (x0, x1 == XEXP (x0, 0) ? 1 : 0));
13066 break;
13070 update_table_tick (XEXP (x, i));
13072 else if (fmt[i] == 'E')
13073 for (j = 0; j < XVECLEN (x, i); j++)
13074 update_table_tick (XVECEXP (x, i, j));
13077 /* Record that REG is set to VALUE in insn INSN. If VALUE is zero, we
13078 are saying that the register is clobbered and we no longer know its
13079 value. If INSN is zero, don't update reg_stat[].last_set; this is
13080 only permitted with VALUE also zero and is used to invalidate the
13081 register. */
13083 static void
13084 record_value_for_reg (rtx reg, rtx_insn *insn, rtx value)
13086 unsigned int regno = REGNO (reg);
13087 unsigned int endregno = END_REGNO (reg);
13088 unsigned int i;
13089 reg_stat_type *rsp;
13091 /* If VALUE contains REG and we have a previous value for REG, substitute
13092 the previous value. */
13093 if (value && insn && reg_overlap_mentioned_p (reg, value))
13095 rtx tem;
13097 /* Set things up so get_last_value is allowed to see anything set up to
13098 our insn. */
13099 subst_low_luid = DF_INSN_LUID (insn);
13100 tem = get_last_value (reg);
13102 /* If TEM is simply a binary operation with two CLOBBERs as operands,
13103 it isn't going to be useful and will take a lot of time to process,
13104 so just use the CLOBBER. */
13106 if (tem)
13108 if (ARITHMETIC_P (tem)
13109 && GET_CODE (XEXP (tem, 0)) == CLOBBER
13110 && GET_CODE (XEXP (tem, 1)) == CLOBBER)
13111 tem = XEXP (tem, 0);
13112 else if (count_occurrences (value, reg, 1) >= 2)
13114 /* If there are two or more occurrences of REG in VALUE,
13115 prevent the value from growing too much. */
13116 if (count_rtxs (tem) > MAX_LAST_VALUE_RTL)
13117 tem = gen_rtx_CLOBBER (GET_MODE (tem), const0_rtx);
13120 value = replace_rtx (copy_rtx (value), reg, tem);
13124 /* For each register modified, show we don't know its value, that
13125 we don't know about its bitwise content, that its value has been
13126 updated, and that we don't know the location of the death of the
13127 register. */
13128 for (i = regno; i < endregno; i++)
13130 rsp = &reg_stat[i];
13132 if (insn)
13133 rsp->last_set = insn;
13135 rsp->last_set_value = 0;
13136 rsp->last_set_mode = VOIDmode;
13137 rsp->last_set_nonzero_bits = 0;
13138 rsp->last_set_sign_bit_copies = 0;
13139 rsp->last_death = 0;
13140 rsp->truncated_to_mode = VOIDmode;
13143 /* Mark registers that are being referenced in this value. */
13144 if (value)
13145 update_table_tick (value);
13147 /* Now update the status of each register being set.
13148 If someone is using this register in this block, set this register
13149 to invalid since we will get confused between the two lives in this
13150 basic block. This makes using this register always invalid. In cse, we
13151 scan the table to invalidate all entries using this register, but this
13152 is too much work for us. */
13154 for (i = regno; i < endregno; i++)
13156 rsp = &reg_stat[i];
13157 rsp->last_set_label = label_tick;
13158 if (!insn
13159 || (value && rsp->last_set_table_tick >= label_tick_ebb_start))
13160 rsp->last_set_invalid = 1;
13161 else
13162 rsp->last_set_invalid = 0;
13165 /* The value being assigned might refer to X (like in "x++;"). In that
13166 case, we must replace it with (clobber (const_int 0)) to prevent
13167 infinite loops. */
13168 rsp = &reg_stat[regno];
13169 if (value && !get_last_value_validate (&value, insn, label_tick, 0))
13171 value = copy_rtx (value);
13172 if (!get_last_value_validate (&value, insn, label_tick, 1))
13173 value = 0;
13176 /* For the main register being modified, update the value, the mode, the
13177 nonzero bits, and the number of sign bit copies. */
13179 rsp->last_set_value = value;
13181 if (value)
13183 machine_mode mode = GET_MODE (reg);
13184 subst_low_luid = DF_INSN_LUID (insn);
13185 rsp->last_set_mode = mode;
13186 if (GET_MODE_CLASS (mode) == MODE_INT
13187 && HWI_COMPUTABLE_MODE_P (mode))
13188 mode = nonzero_bits_mode;
13189 rsp->last_set_nonzero_bits = nonzero_bits (value, mode);
13190 rsp->last_set_sign_bit_copies
13191 = num_sign_bit_copies (value, GET_MODE (reg));
13195 /* Called via note_stores from record_dead_and_set_regs to handle one
13196 SET or CLOBBER in an insn. DATA is the instruction in which the
13197 set is occurring. */
13199 static void
13200 record_dead_and_set_regs_1 (rtx dest, const_rtx setter, void *data)
13202 rtx_insn *record_dead_insn = (rtx_insn *) data;
13204 if (GET_CODE (dest) == SUBREG)
13205 dest = SUBREG_REG (dest);
13207 if (!record_dead_insn)
13209 if (REG_P (dest))
13210 record_value_for_reg (dest, NULL, NULL_RTX);
13211 return;
13214 if (REG_P (dest))
13216 /* If we are setting the whole register, we know its value. Otherwise
13217 show that we don't know the value. We can handle SUBREG in
13218 some cases. */
13219 if (GET_CODE (setter) == SET && dest == SET_DEST (setter))
13220 record_value_for_reg (dest, record_dead_insn, SET_SRC (setter));
13221 else if (GET_CODE (setter) == SET
13222 && GET_CODE (SET_DEST (setter)) == SUBREG
13223 && SUBREG_REG (SET_DEST (setter)) == dest
13224 && GET_MODE_PRECISION (GET_MODE (dest)) <= BITS_PER_WORD
13225 && subreg_lowpart_p (SET_DEST (setter)))
13226 record_value_for_reg (dest, record_dead_insn,
13227 gen_lowpart (GET_MODE (dest),
13228 SET_SRC (setter)));
13229 else
13230 record_value_for_reg (dest, record_dead_insn, NULL_RTX);
13232 else if (MEM_P (dest)
13233 /* Ignore pushes, they clobber nothing. */
13234 && ! push_operand (dest, GET_MODE (dest)))
13235 mem_last_set = DF_INSN_LUID (record_dead_insn);
13238 /* Update the records of when each REG was most recently set or killed
13239 for the things done by INSN. This is the last thing done in processing
13240 INSN in the combiner loop.
13242 We update reg_stat[], in particular fields last_set, last_set_value,
13243 last_set_mode, last_set_nonzero_bits, last_set_sign_bit_copies,
13244 last_death, and also the similar information mem_last_set (which insn
13245 most recently modified memory) and last_call_luid (which insn was the
13246 most recent subroutine call). */
13248 static void
13249 record_dead_and_set_regs (rtx_insn *insn)
13251 rtx link;
13252 unsigned int i;
13254 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
13256 if (REG_NOTE_KIND (link) == REG_DEAD
13257 && REG_P (XEXP (link, 0)))
13259 unsigned int regno = REGNO (XEXP (link, 0));
13260 unsigned int endregno = END_REGNO (XEXP (link, 0));
13262 for (i = regno; i < endregno; i++)
13264 reg_stat_type *rsp;
13266 rsp = &reg_stat[i];
13267 rsp->last_death = insn;
13270 else if (REG_NOTE_KIND (link) == REG_INC)
13271 record_value_for_reg (XEXP (link, 0), insn, NULL_RTX);
13274 if (CALL_P (insn))
13276 hard_reg_set_iterator hrsi;
13277 EXECUTE_IF_SET_IN_HARD_REG_SET (regs_invalidated_by_call, 0, i, hrsi)
13279 reg_stat_type *rsp;
13281 rsp = &reg_stat[i];
13282 rsp->last_set_invalid = 1;
13283 rsp->last_set = insn;
13284 rsp->last_set_value = 0;
13285 rsp->last_set_mode = VOIDmode;
13286 rsp->last_set_nonzero_bits = 0;
13287 rsp->last_set_sign_bit_copies = 0;
13288 rsp->last_death = 0;
13289 rsp->truncated_to_mode = VOIDmode;
13292 last_call_luid = mem_last_set = DF_INSN_LUID (insn);
13294 /* We can't combine into a call pattern. Remember, though, that
13295 the return value register is set at this LUID. We could
13296 still replace a register with the return value from the
13297 wrong subroutine call! */
13298 note_stores (PATTERN (insn), record_dead_and_set_regs_1, NULL_RTX);
13300 else
13301 note_stores (PATTERN (insn), record_dead_and_set_regs_1, insn);
13304 /* If a SUBREG has the promoted bit set, it is in fact a property of the
13305 register present in the SUBREG, so for each such SUBREG go back and
13306 adjust nonzero and sign bit information of the registers that are
13307 known to have some zero/sign bits set.
13309 This is needed because when combine blows the SUBREGs away, the
13310 information on zero/sign bits is lost and further combines can be
13311 missed because of that. */
13313 static void
13314 record_promoted_value (rtx_insn *insn, rtx subreg)
13316 struct insn_link *links;
13317 rtx set;
13318 unsigned int regno = REGNO (SUBREG_REG (subreg));
13319 machine_mode mode = GET_MODE (subreg);
13321 if (GET_MODE_PRECISION (mode) > HOST_BITS_PER_WIDE_INT)
13322 return;
13324 for (links = LOG_LINKS (insn); links;)
13326 reg_stat_type *rsp;
13328 insn = links->insn;
13329 set = single_set (insn);
13331 if (! set || !REG_P (SET_DEST (set))
13332 || REGNO (SET_DEST (set)) != regno
13333 || GET_MODE (SET_DEST (set)) != GET_MODE (SUBREG_REG (subreg)))
13335 links = links->next;
13336 continue;
13339 rsp = &reg_stat[regno];
13340 if (rsp->last_set == insn)
13342 if (SUBREG_PROMOTED_UNSIGNED_P (subreg))
13343 rsp->last_set_nonzero_bits &= GET_MODE_MASK (mode);
13346 if (REG_P (SET_SRC (set)))
13348 regno = REGNO (SET_SRC (set));
13349 links = LOG_LINKS (insn);
13351 else
13352 break;
13356 /* Check if X, a register, is known to contain a value already
13357 truncated to MODE. In this case we can use a subreg to refer to
13358 the truncated value even though in the generic case we would need
13359 an explicit truncation. */
13361 static bool
13362 reg_truncated_to_mode (machine_mode mode, const_rtx x)
13364 reg_stat_type *rsp = &reg_stat[REGNO (x)];
13365 machine_mode truncated = rsp->truncated_to_mode;
13367 if (truncated == 0
13368 || rsp->truncation_label < label_tick_ebb_start)
13369 return false;
13370 if (!partial_subreg_p (mode, truncated))
13371 return true;
13372 if (TRULY_NOOP_TRUNCATION_MODES_P (mode, truncated))
13373 return true;
13374 return false;
13377 /* If X is a hard reg or a subreg record the mode that the register is
13378 accessed in. For non-TARGET_TRULY_NOOP_TRUNCATION targets we might be
13379 able to turn a truncate into a subreg using this information. Return true
13380 if traversing X is complete. */
13382 static bool
13383 record_truncated_value (rtx x)
13385 machine_mode truncated_mode;
13386 reg_stat_type *rsp;
13388 if (GET_CODE (x) == SUBREG && REG_P (SUBREG_REG (x)))
13390 machine_mode original_mode = GET_MODE (SUBREG_REG (x));
13391 truncated_mode = GET_MODE (x);
13393 if (!partial_subreg_p (truncated_mode, original_mode))
13394 return true;
13396 truncated_mode = GET_MODE (x);
13397 if (TRULY_NOOP_TRUNCATION_MODES_P (truncated_mode, original_mode))
13398 return true;
13400 x = SUBREG_REG (x);
13402 /* ??? For hard-regs we now record everything. We might be able to
13403 optimize this using last_set_mode. */
13404 else if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER)
13405 truncated_mode = GET_MODE (x);
13406 else
13407 return false;
13409 rsp = &reg_stat[REGNO (x)];
13410 if (rsp->truncated_to_mode == 0
13411 || rsp->truncation_label < label_tick_ebb_start
13412 || partial_subreg_p (truncated_mode, rsp->truncated_to_mode))
13414 rsp->truncated_to_mode = truncated_mode;
13415 rsp->truncation_label = label_tick;
13418 return true;
13421 /* Callback for note_uses. Find hardregs and subregs of pseudos and
13422 the modes they are used in. This can help turn TRUNCATEs into
13423 SUBREGs. */
13425 static void
13426 record_truncated_values (rtx *loc, void *data ATTRIBUTE_UNUSED)
13428 subrtx_var_iterator::array_type array;
13429 FOR_EACH_SUBRTX_VAR (iter, array, *loc, NONCONST)
13430 if (record_truncated_value (*iter))
13431 iter.skip_subrtxes ();
13434 /* Scan X for promoted SUBREGs. For each one found,
13435 note what it implies to the registers used in it. */
13437 static void
13438 check_promoted_subreg (rtx_insn *insn, rtx x)
13440 if (GET_CODE (x) == SUBREG
13441 && SUBREG_PROMOTED_VAR_P (x)
13442 && REG_P (SUBREG_REG (x)))
13443 record_promoted_value (insn, x);
13444 else
13446 const char *format = GET_RTX_FORMAT (GET_CODE (x));
13447 int i, j;
13449 for (i = 0; i < GET_RTX_LENGTH (GET_CODE (x)); i++)
13450 switch (format[i])
13452 case 'e':
13453 check_promoted_subreg (insn, XEXP (x, i));
13454 break;
13455 case 'V':
13456 case 'E':
13457 if (XVEC (x, i) != 0)
13458 for (j = 0; j < XVECLEN (x, i); j++)
13459 check_promoted_subreg (insn, XVECEXP (x, i, j));
13460 break;
13465 /* Verify that all the registers and memory references mentioned in *LOC are
13466 still valid. *LOC was part of a value set in INSN when label_tick was
13467 equal to TICK. Return 0 if some are not. If REPLACE is nonzero, replace
13468 the invalid references with (clobber (const_int 0)) and return 1. This
13469 replacement is useful because we often can get useful information about
13470 the form of a value (e.g., if it was produced by a shift that always
13471 produces -1 or 0) even though we don't know exactly what registers it
13472 was produced from. */
13474 static int
13475 get_last_value_validate (rtx *loc, rtx_insn *insn, int tick, int replace)
13477 rtx x = *loc;
13478 const char *fmt = GET_RTX_FORMAT (GET_CODE (x));
13479 int len = GET_RTX_LENGTH (GET_CODE (x));
13480 int i, j;
13482 if (REG_P (x))
13484 unsigned int regno = REGNO (x);
13485 unsigned int endregno = END_REGNO (x);
13486 unsigned int j;
13488 for (j = regno; j < endregno; j++)
13490 reg_stat_type *rsp = &reg_stat[j];
13491 if (rsp->last_set_invalid
13492 /* If this is a pseudo-register that was only set once and not
13493 live at the beginning of the function, it is always valid. */
13494 || (! (regno >= FIRST_PSEUDO_REGISTER
13495 && regno < reg_n_sets_max
13496 && REG_N_SETS (regno) == 1
13497 && (!REGNO_REG_SET_P
13498 (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb),
13499 regno)))
13500 && rsp->last_set_label > tick))
13502 if (replace)
13503 *loc = gen_rtx_CLOBBER (GET_MODE (x), const0_rtx);
13504 return replace;
13508 return 1;
13510 /* If this is a memory reference, make sure that there were no stores after
13511 it that might have clobbered the value. We don't have alias info, so we
13512 assume any store invalidates it. Moreover, we only have local UIDs, so
13513 we also assume that there were stores in the intervening basic blocks. */
13514 else if (MEM_P (x) && !MEM_READONLY_P (x)
13515 && (tick != label_tick || DF_INSN_LUID (insn) <= mem_last_set))
13517 if (replace)
13518 *loc = gen_rtx_CLOBBER (GET_MODE (x), const0_rtx);
13519 return replace;
13522 for (i = 0; i < len; i++)
13524 if (fmt[i] == 'e')
13526 /* Check for identical subexpressions. If x contains
13527 identical subexpressions, we only have to traverse one of
13528 them. */
13529 if (i == 1 && ARITHMETIC_P (x))
13531 /* Note that at this point x0 has already been checked
13532 and found valid. */
13533 rtx x0 = XEXP (x, 0);
13534 rtx x1 = XEXP (x, 1);
13536 /* If x0 and x1 are identical then x is also valid. */
13537 if (x0 == x1)
13538 return 1;
13540 /* If x1 is identical to a subexpression of x0 then
13541 while checking x0, x1 has already been checked. Thus
13542 it is valid and so is x. */
13543 if (ARITHMETIC_P (x0)
13544 && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
13545 return 1;
13547 /* If x0 is identical to a subexpression of x1 then x is
13548 valid iff the rest of x1 is valid. */
13549 if (ARITHMETIC_P (x1)
13550 && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
13551 return
13552 get_last_value_validate (&XEXP (x1,
13553 x0 == XEXP (x1, 0) ? 1 : 0),
13554 insn, tick, replace);
13557 if (get_last_value_validate (&XEXP (x, i), insn, tick,
13558 replace) == 0)
13559 return 0;
13561 else if (fmt[i] == 'E')
13562 for (j = 0; j < XVECLEN (x, i); j++)
13563 if (get_last_value_validate (&XVECEXP (x, i, j),
13564 insn, tick, replace) == 0)
13565 return 0;
13568 /* If we haven't found a reason for it to be invalid, it is valid. */
13569 return 1;
13572 /* Get the last value assigned to X, if known. Some registers
13573 in the value may be replaced with (clobber (const_int 0)) if their value
13574 is no longer known reliably. */
13576 static rtx
13577 get_last_value (const_rtx x)
13579 unsigned int regno;
13580 rtx value;
13581 reg_stat_type *rsp;
13583 /* If this is a non-paradoxical SUBREG, get the value of its operand and
13584 then convert it to the desired mode. If this is a paradoxical SUBREG,
13585 we cannot predict what values the "extra" bits might have. */
13586 if (GET_CODE (x) == SUBREG
13587 && subreg_lowpart_p (x)
13588 && !paradoxical_subreg_p (x)
13589 && (value = get_last_value (SUBREG_REG (x))) != 0)
13590 return gen_lowpart (GET_MODE (x), value);
13592 if (!REG_P (x))
13593 return 0;
13595 regno = REGNO (x);
13596 rsp = &reg_stat[regno];
13597 value = rsp->last_set_value;
13599 /* If we don't have a value, or if it isn't for this basic block and
13600 it's either a hard register, set more than once, or it's live
13601 at the beginning of the function, return 0.
13603 Because if it's not live at the beginning of the function then the reg
13604 is always set before being used (is never used without being set).
13605 And, if it's set only once, and it's always set before use, then all
13606 uses must have the same last value, even if it's not from this basic
13607 block. */
13609 if (value == 0
13610 || (rsp->last_set_label < label_tick_ebb_start
13611 && (regno < FIRST_PSEUDO_REGISTER
13612 || regno >= reg_n_sets_max
13613 || REG_N_SETS (regno) != 1
13614 || REGNO_REG_SET_P
13615 (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb), regno))))
13616 return 0;
13618 /* If the value was set in a later insn than the ones we are processing,
13619 we can't use it even if the register was only set once. */
13620 if (rsp->last_set_label == label_tick
13621 && DF_INSN_LUID (rsp->last_set) >= subst_low_luid)
13622 return 0;
13624 /* If fewer bits were set than what we are asked for now, we cannot use
13625 the value. */
13626 if (GET_MODE_PRECISION (rsp->last_set_mode)
13627 < GET_MODE_PRECISION (GET_MODE (x)))
13628 return 0;
13630 /* If the value has all its registers valid, return it. */
13631 if (get_last_value_validate (&value, rsp->last_set, rsp->last_set_label, 0))
13632 return value;
13634 /* Otherwise, make a copy and replace any invalid register with
13635 (clobber (const_int 0)). If that fails for some reason, return 0. */
13637 value = copy_rtx (value);
13638 if (get_last_value_validate (&value, rsp->last_set, rsp->last_set_label, 1))
13639 return value;
13641 return 0;
13644 /* Return nonzero if expression X refers to a REG or to memory
13645 that is set in an instruction more recent than FROM_LUID. */
13647 static int
13648 use_crosses_set_p (const_rtx x, int from_luid)
13650 const char *fmt;
13651 int i;
13652 enum rtx_code code = GET_CODE (x);
13654 if (code == REG)
13656 unsigned int regno = REGNO (x);
13657 unsigned endreg = END_REGNO (x);
13659 #ifdef PUSH_ROUNDING
13660 /* Don't allow uses of the stack pointer to be moved,
13661 because we don't know whether the move crosses a push insn. */
13662 if (regno == STACK_POINTER_REGNUM && PUSH_ARGS)
13663 return 1;
13664 #endif
13665 for (; regno < endreg; regno++)
13667 reg_stat_type *rsp = &reg_stat[regno];
13668 if (rsp->last_set
13669 && rsp->last_set_label == label_tick
13670 && DF_INSN_LUID (rsp->last_set) > from_luid)
13671 return 1;
13673 return 0;
13676 if (code == MEM && mem_last_set > from_luid)
13677 return 1;
13679 fmt = GET_RTX_FORMAT (code);
13681 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
13683 if (fmt[i] == 'E')
13685 int j;
13686 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
13687 if (use_crosses_set_p (XVECEXP (x, i, j), from_luid))
13688 return 1;
13690 else if (fmt[i] == 'e'
13691 && use_crosses_set_p (XEXP (x, i), from_luid))
13692 return 1;
13694 return 0;
13697 /* Define three variables used for communication between the following
13698 routines. */
13700 static unsigned int reg_dead_regno, reg_dead_endregno;
13701 static int reg_dead_flag;
13703 /* Function called via note_stores from reg_dead_at_p.
13705 If DEST is within [reg_dead_regno, reg_dead_endregno), set
13706 reg_dead_flag to 1 if X is a CLOBBER and to -1 if it is a SET. */
13708 static void
13709 reg_dead_at_p_1 (rtx dest, const_rtx x, void *data ATTRIBUTE_UNUSED)
13711 unsigned int regno, endregno;
13713 if (!REG_P (dest))
13714 return;
13716 regno = REGNO (dest);
13717 endregno = END_REGNO (dest);
13718 if (reg_dead_endregno > regno && reg_dead_regno < endregno)
13719 reg_dead_flag = (GET_CODE (x) == CLOBBER) ? 1 : -1;
13722 /* Return nonzero if REG is known to be dead at INSN.
13724 We scan backwards from INSN. If we hit a REG_DEAD note or a CLOBBER
13725 referencing REG, it is dead. If we hit a SET referencing REG, it is
13726 live. Otherwise, see if it is live or dead at the start of the basic
13727 block we are in. Hard regs marked as being live in NEWPAT_USED_REGS
13728 must be assumed to be always live. */
13730 static int
13731 reg_dead_at_p (rtx reg, rtx_insn *insn)
13733 basic_block block;
13734 unsigned int i;
13736 /* Set variables for reg_dead_at_p_1. */
13737 reg_dead_regno = REGNO (reg);
13738 reg_dead_endregno = END_REGNO (reg);
13740 reg_dead_flag = 0;
13742 /* Check that reg isn't mentioned in NEWPAT_USED_REGS. For fixed registers
13743 we allow the machine description to decide whether use-and-clobber
13744 patterns are OK. */
13745 if (reg_dead_regno < FIRST_PSEUDO_REGISTER)
13747 for (i = reg_dead_regno; i < reg_dead_endregno; i++)
13748 if (!fixed_regs[i] && TEST_HARD_REG_BIT (newpat_used_regs, i))
13749 return 0;
13752 /* Scan backwards until we find a REG_DEAD note, SET, CLOBBER, or
13753 beginning of basic block. */
13754 block = BLOCK_FOR_INSN (insn);
13755 for (;;)
13757 if (INSN_P (insn))
13759 if (find_regno_note (insn, REG_UNUSED, reg_dead_regno))
13760 return 1;
13762 note_stores (PATTERN (insn), reg_dead_at_p_1, NULL);
13763 if (reg_dead_flag)
13764 return reg_dead_flag == 1 ? 1 : 0;
13766 if (find_regno_note (insn, REG_DEAD, reg_dead_regno))
13767 return 1;
13770 if (insn == BB_HEAD (block))
13771 break;
13773 insn = PREV_INSN (insn);
13776 /* Look at live-in sets for the basic block that we were in. */
13777 for (i = reg_dead_regno; i < reg_dead_endregno; i++)
13778 if (REGNO_REG_SET_P (df_get_live_in (block), i))
13779 return 0;
13781 return 1;
13784 /* Note hard registers in X that are used. */
13786 static void
13787 mark_used_regs_combine (rtx x)
13789 RTX_CODE code = GET_CODE (x);
13790 unsigned int regno;
13791 int i;
13793 switch (code)
13795 case LABEL_REF:
13796 case SYMBOL_REF:
13797 case CONST:
13798 CASE_CONST_ANY:
13799 case PC:
13800 case ADDR_VEC:
13801 case ADDR_DIFF_VEC:
13802 case ASM_INPUT:
13803 /* CC0 must die in the insn after it is set, so we don't need to take
13804 special note of it here. */
13805 case CC0:
13806 return;
13808 case CLOBBER:
13809 /* If we are clobbering a MEM, mark any hard registers inside the
13810 address as used. */
13811 if (MEM_P (XEXP (x, 0)))
13812 mark_used_regs_combine (XEXP (XEXP (x, 0), 0));
13813 return;
13815 case REG:
13816 regno = REGNO (x);
13817 /* A hard reg in a wide mode may really be multiple registers.
13818 If so, mark all of them just like the first. */
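/* For example, on a target whose registers are 32 bits wide, a DImode
value in hard reg N also occupies hard reg N+1; add_to_hard_reg_set
marks every register covered by GET_MODE (x). */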
13819 if (regno < FIRST_PSEUDO_REGISTER)
13821 /* None of this applies to the stack, frame or arg pointers. */
13822 if (regno == STACK_POINTER_REGNUM
13823 || (!HARD_FRAME_POINTER_IS_FRAME_POINTER
13824 && regno == HARD_FRAME_POINTER_REGNUM)
13825 || (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
13826 && regno == ARG_POINTER_REGNUM && fixed_regs[regno])
13827 || regno == FRAME_POINTER_REGNUM)
13828 return;
13830 add_to_hard_reg_set (&newpat_used_regs, GET_MODE (x), regno);
13832 return;
13834 case SET:
13836 /* If setting a MEM, or a SUBREG of a MEM, then note any hard regs in
13837 the address. */
13838 rtx testreg = SET_DEST (x);
13840 while (GET_CODE (testreg) == SUBREG
13841 || GET_CODE (testreg) == ZERO_EXTRACT
13842 || GET_CODE (testreg) == STRICT_LOW_PART)
13843 testreg = XEXP (testreg, 0);
13845 if (MEM_P (testreg))
13846 mark_used_regs_combine (XEXP (testreg, 0));
13848 mark_used_regs_combine (SET_SRC (x));
13850 return;
13852 default:
13853 break;
13856 /* Recursively scan the operands of this expression. */
13859 const char *fmt = GET_RTX_FORMAT (code);
13861 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
13863 if (fmt[i] == 'e')
13864 mark_used_regs_combine (XEXP (x, i));
13865 else if (fmt[i] == 'E')
13867 int j;
13869 for (j = 0; j < XVECLEN (x, i); j++)
13870 mark_used_regs_combine (XVECEXP (x, i, j));
13876 /* Remove register number REGNO from the dead registers list of INSN.
13878 Return the note used to record the death, if there was one. */
13880 rtx
13881 remove_death (unsigned int regno, rtx_insn *insn)
13883 rtx note = find_regno_note (insn, REG_DEAD, regno);
13885 if (note)
13886 remove_note (insn, note);
13888 return note;
13891 /* For each register (hardware or pseudo) used within expression X, if its
13892 death is in an instruction whose luid is between FROM_LUID (inclusive) and
13893 the luid of TO_INSN (exclusive), put a REG_DEAD note for that register in the
13894 list headed by PNOTES.
13896 That said, don't move registers killed by maybe_kill_insn.
13898 This is done when X is being merged by combination into TO_INSN. These
13899 notes will then be distributed as needed. */
13901 static void
13902 move_deaths (rtx x, rtx maybe_kill_insn, int from_luid, rtx_insn *to_insn,
13903 rtx *pnotes)
13905 const char *fmt;
13906 int len, i;
13907 enum rtx_code code = GET_CODE (x);
13909 if (code == REG)
13911 unsigned int regno = REGNO (x);
13912 rtx_insn *where_dead = reg_stat[regno].last_death;
13914 /* Don't move the register if it gets killed in between from and to. */
13915 if (maybe_kill_insn && reg_set_p (x, maybe_kill_insn)
13916 && ! reg_referenced_p (x, maybe_kill_insn))
13917 return;
13919 if (where_dead
13920 && BLOCK_FOR_INSN (where_dead) == BLOCK_FOR_INSN (to_insn)
13921 && DF_INSN_LUID (where_dead) >= from_luid
13922 && DF_INSN_LUID (where_dead) < DF_INSN_LUID (to_insn))
13924 rtx note = remove_death (regno, where_dead);
13926 /* It is possible for the call above to return 0. This can occur
13927 when last_death points to I2 or I1 that we combined with.
13928 In that case make a new note.
13930 We must also check for the case where X is a hard register
13931 and NOTE is a death note for a range of hard registers
13932 including X. In that case, we must put REG_DEAD notes for
13933 the remaining registers in place of NOTE. */
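/* As a hypothetical example, if NOTE covers a two-register hard reg pair
{N, N+1} but X is only reg N+1, the loop below re-adds a REG_DEAD note
for reg N at WHERE_DEAD, and a fresh note for X itself is made further
down. */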
13935 if (note != 0 && regno < FIRST_PSEUDO_REGISTER
13936 && partial_subreg_p (GET_MODE (x), GET_MODE (XEXP (note, 0))))
13938 unsigned int deadregno = REGNO (XEXP (note, 0));
13939 unsigned int deadend = END_REGNO (XEXP (note, 0));
13940 unsigned int ourend = END_REGNO (x);
13941 unsigned int i;
13943 for (i = deadregno; i < deadend; i++)
13944 if (i < regno || i >= ourend)
13945 add_reg_note (where_dead, REG_DEAD, regno_reg_rtx[i]);
13948 /* If we didn't find any note, or if we found a REG_DEAD note that
13949 covers only part of the given reg, and we have a multi-reg hard
13950 register, then to be safe we must check for REG_DEAD notes
13951 for each register other than the first. They could have
13952 their own REG_DEAD notes lying around. */
13953 else if ((note == 0
13954 || (note != 0
13955 && partial_subreg_p (GET_MODE (XEXP (note, 0)),
13956 GET_MODE (x))))
13957 && regno < FIRST_PSEUDO_REGISTER
13958 && REG_NREGS (x) > 1)
13960 unsigned int ourend = END_REGNO (x);
13961 unsigned int i, offset;
13962 rtx oldnotes = 0;
13964 if (note)
13965 offset = hard_regno_nregs (regno, GET_MODE (XEXP (note, 0)));
13966 else
13967 offset = 1;
13969 for (i = regno + offset; i < ourend; i++)
13970 move_deaths (regno_reg_rtx[i],
13971 maybe_kill_insn, from_luid, to_insn, &oldnotes);
13974 if (note != 0 && GET_MODE (XEXP (note, 0)) == GET_MODE (x))
13976 XEXP (note, 1) = *pnotes;
13977 *pnotes = note;
13979 else
13980 *pnotes = alloc_reg_note (REG_DEAD, x, *pnotes);
13983 return;
13986 else if (GET_CODE (x) == SET)
13988 rtx dest = SET_DEST (x);
13990 move_deaths (SET_SRC (x), maybe_kill_insn, from_luid, to_insn, pnotes);
13992 /* In the case of a ZERO_EXTRACT, a STRICT_LOW_PART, or a SUBREG
13993 that accesses one word of a multi-word item, some
13994 piece of every register in the expression is used by
13995 this insn, so remove any old death. */
13996 /* ??? So why do we test for equality of the sizes? */
13998 if (GET_CODE (dest) == ZERO_EXTRACT
13999 || GET_CODE (dest) == STRICT_LOW_PART
14000 || (GET_CODE (dest) == SUBREG
14001 && (((GET_MODE_SIZE (GET_MODE (dest))
14002 + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
14003 == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest)))
14004 + UNITS_PER_WORD - 1) / UNITS_PER_WORD))))
14006 move_deaths (dest, maybe_kill_insn, from_luid, to_insn, pnotes);
14007 return;
14010 /* If this is some other SUBREG, we know it replaces the entire
14011 value, so use that as the destination. */
14012 if (GET_CODE (dest) == SUBREG)
14013 dest = SUBREG_REG (dest);
14015 /* If this is a MEM, adjust deaths of anything used in the address.
14016 For a REG (the only other possibility), the entire value is
14017 being replaced so the old value is not used in this insn. */
14019 if (MEM_P (dest))
14020 move_deaths (XEXP (dest, 0), maybe_kill_insn, from_luid,
14021 to_insn, pnotes);
14022 return;
14025 else if (GET_CODE (x) == CLOBBER)
14026 return;
14028 len = GET_RTX_LENGTH (code);
14029 fmt = GET_RTX_FORMAT (code);
14031 for (i = 0; i < len; i++)
14033 if (fmt[i] == 'E')
14035 int j;
14036 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
14037 move_deaths (XVECEXP (x, i, j), maybe_kill_insn, from_luid,
14038 to_insn, pnotes);
14040 else if (fmt[i] == 'e')
14041 move_deaths (XEXP (x, i), maybe_kill_insn, from_luid, to_insn, pnotes);
14045 /* Return 1 if X is the target of a bit-field assignment in BODY, the
14046 pattern of an insn. X must be a REG. */
14048 static int
14049 reg_bitfield_target_p (rtx x, rtx body)
14051 int i;
14053 if (GET_CODE (body) == SET)
14055 rtx dest = SET_DEST (body);
14056 rtx target;
14057 unsigned int regno, tregno, endregno, endtregno;
14059 if (GET_CODE (dest) == ZERO_EXTRACT)
14060 target = XEXP (dest, 0);
14061 else if (GET_CODE (dest) == STRICT_LOW_PART)
14062 target = SUBREG_REG (XEXP (dest, 0));
14063 else
14064 return 0;
14066 if (GET_CODE (target) == SUBREG)
14067 target = SUBREG_REG (target);
14069 if (!REG_P (target))
14070 return 0;
14072 tregno = REGNO (target), regno = REGNO (x);
14073 if (tregno >= FIRST_PSEUDO_REGISTER || regno >= FIRST_PSEUDO_REGISTER)
14074 return target == x;
14076 endtregno = end_hard_regno (GET_MODE (target), tregno);
14077 endregno = end_hard_regno (GET_MODE (x), regno);
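/* Nonzero iff the half-open hard register ranges [regno, endregno) and
[tregno, endtregno) overlap. */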
14079 return endregno > tregno && regno < endtregno;
14082 else if (GET_CODE (body) == PARALLEL)
14083 for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
14084 if (reg_bitfield_target_p (x, XVECEXP (body, 0, i)))
14085 return 1;
14087 return 0;
14090 /* Given a chain of REG_NOTES originally from FROM_INSN, try to place them
14091 as appropriate. I3 and I2 are the insns resulting from combining the
14092 insns that included FROM_INSN (I2 may be zero).
14094 ELIM_I2, ELIM_I1 and ELIM_I0 are either zero or registers that we know will
14095 not need REG_DEAD notes because they are being substituted for. This
14096 saves searching in the most common cases.
14098 Each note in the list is either ignored or placed on some insns, depending
14099 on the type of note. */
14101 static void
14102 distribute_notes (rtx notes, rtx_insn *from_insn, rtx_insn *i3, rtx_insn *i2,
14103 rtx elim_i2, rtx elim_i1, rtx elim_i0)
14105 rtx note, next_note;
14106 rtx tem_note;
14107 rtx_insn *tem_insn;
14109 for (note = notes; note; note = next_note)
14111 rtx_insn *place = 0, *place2 = 0;
14113 next_note = XEXP (note, 1);
14114 switch (REG_NOTE_KIND (note))
14116 case REG_BR_PROB:
14117 case REG_BR_PRED:
14118 /* Doesn't matter much where we put this, as long as it's somewhere.
14119 It is preferable to keep these notes on branches, which is most
14120 likely to be i3. */
14121 place = i3;
14122 break;
14124 case REG_NON_LOCAL_GOTO:
14125 if (JUMP_P (i3))
14126 place = i3;
14127 else
14129 gcc_assert (i2 && JUMP_P (i2));
14130 place = i2;
14132 break;
14134 case REG_EH_REGION:
14135 /* These notes must remain with the call or trapping instruction. */
14136 if (CALL_P (i3))
14137 place = i3;
14138 else if (i2 && CALL_P (i2))
14139 place = i2;
14140 else
14142 gcc_assert (cfun->can_throw_non_call_exceptions);
14143 if (may_trap_p (i3))
14144 place = i3;
14145 else if (i2 && may_trap_p (i2))
14146 place = i2;
14147 /* ??? Otherwise assume we've combined things such that we
14148 can now prove that the instructions can't trap. Drop the
14149 note in this case. */
14151 break;
14153 case REG_ARGS_SIZE:
14154 /* ??? How to distribute this between i3 and i1 is unclear. Assume i3 contains
14155 the entire adjustment, and assert that it contains at least some adjustment. */
14156 if (!noop_move_p (i3))
14158 int old_size, args_size = INTVAL (XEXP (note, 0));
14159 /* fixup_args_size_notes looks at REG_NORETURN note,
14160 so ensure the note is placed there first. */
14161 if (CALL_P (i3))
14163 rtx *np;
14164 for (np = &next_note; *np; np = &XEXP (*np, 1))
14165 if (REG_NOTE_KIND (*np) == REG_NORETURN)
14167 rtx n = *np;
14168 *np = XEXP (n, 1);
14169 XEXP (n, 1) = REG_NOTES (i3);
14170 REG_NOTES (i3) = n;
14171 break;
14174 old_size = fixup_args_size_notes (PREV_INSN (i3), i3, args_size);
14175 /* For !ACCUMULATE_OUTGOING_ARGS, emit_call_1 adds a REG_ARGS_SIZE
14176 note to all noreturn calls; allow that here. */
14177 gcc_assert (old_size != args_size
14178 || (CALL_P (i3)
14179 && !ACCUMULATE_OUTGOING_ARGS
14180 && find_reg_note (i3, REG_NORETURN, NULL_RTX)));
14182 break;
14184 case REG_NORETURN:
14185 case REG_SETJMP:
14186 case REG_TM:
14187 case REG_CALL_DECL:
14188 /* These notes must remain with the call. It should not be
14189 possible for both I2 and I3 to be a call. */
14190 if (CALL_P (i3))
14191 place = i3;
14192 else
14194 gcc_assert (i2 && CALL_P (i2));
14195 place = i2;
14197 break;
14199 case REG_UNUSED:
14200 /* Any clobbers for i3 may still exist, and so we must process
14201 REG_UNUSED notes from that insn.
14203 Any clobbers from i2 or i1 can only exist if they were added by
14204 recog_for_combine. In that case, recog_for_combine created the
14205 necessary REG_UNUSED notes. Trying to keep any original
14206 REG_UNUSED notes from these insns can cause incorrect output
14207 if it is for the same register as the original i3 dest.
14208 In that case, we will notice that the register is set in i3,
14209 and then add a REG_UNUSED note for the destination of i3, which
14210 is wrong. However, it is possible to have REG_UNUSED notes from
14211 i2 or i1 for registers that were both used and clobbered, so
14212 we keep notes from i2 or i1 if they will turn into REG_DEAD
14213 notes. */
14215 /* If this register is set or clobbered in I3, put the note there
14216 unless there is one already. */
14217 if (reg_set_p (XEXP (note, 0), PATTERN (i3)))
14219 if (from_insn != i3)
14220 break;
14222 if (! (REG_P (XEXP (note, 0))
14223 ? find_regno_note (i3, REG_UNUSED, REGNO (XEXP (note, 0)))
14224 : find_reg_note (i3, REG_UNUSED, XEXP (note, 0))))
14225 place = i3;
14227 /* Otherwise, if this register is used by I3, then this register
14228 now dies here, so we must put a REG_DEAD note here unless there
14229 is one already. */
14230 else if (reg_referenced_p (XEXP (note, 0), PATTERN (i3))
14231 && ! (REG_P (XEXP (note, 0))
14232 ? find_regno_note (i3, REG_DEAD,
14233 REGNO (XEXP (note, 0)))
14234 : find_reg_note (i3, REG_DEAD, XEXP (note, 0))))
14236 PUT_REG_NOTE_KIND (note, REG_DEAD);
14237 place = i3;
14239 break;
14241 case REG_EQUAL:
14242 case REG_EQUIV:
14243 case REG_NOALIAS:
14244 /* These notes say something about results of an insn. We can
14245 only support them if they used to be on I3 in which case they
14246 remain on I3. Otherwise they are ignored.
14248 If the note refers to an expression that is not a constant, we
14249 must also ignore the note since we cannot tell whether the
14250 equivalence is still true. It might be possible to do
14251 slightly better than this (we only have a problem if I2DEST
14252 or I1DEST is present in the expression), but it doesn't
14253 seem worth the trouble. */
14255 if (from_insn == i3
14256 && (XEXP (note, 0) == 0 || CONSTANT_P (XEXP (note, 0))))
14257 place = i3;
14258 break;
14260 case REG_INC:
14261 /* These notes say something about how a register is used. They must
14262 be present on any use of the register in I2 or I3. */
14263 if (reg_mentioned_p (XEXP (note, 0), PATTERN (i3)))
14264 place = i3;
14266 if (i2 && reg_mentioned_p (XEXP (note, 0), PATTERN (i2)))
14268 if (place)
14269 place2 = i2;
14270 else
14271 place = i2;
14273 break;
14275 case REG_LABEL_TARGET:
14276 case REG_LABEL_OPERAND:
14277 /* This can show up in several ways -- either directly in the
14278 pattern, or hidden off in the constant pool with (or without?)
14279 a REG_EQUAL note. */
14280 /* ??? Ignore the without-reg_equal-note problem for now. */
14281 if (reg_mentioned_p (XEXP (note, 0), PATTERN (i3))
14282 || ((tem_note = find_reg_note (i3, REG_EQUAL, NULL_RTX))
14283 && GET_CODE (XEXP (tem_note, 0)) == LABEL_REF
14284 && label_ref_label (XEXP (tem_note, 0)) == XEXP (note, 0)))
14285 place = i3;
14287 if (i2
14288 && (reg_mentioned_p (XEXP (note, 0), PATTERN (i2))
14289 || ((tem_note = find_reg_note (i2, REG_EQUAL, NULL_RTX))
14290 && GET_CODE (XEXP (tem_note, 0)) == LABEL_REF
14291 && label_ref_label (XEXP (tem_note, 0)) == XEXP (note, 0))))
14293 if (place)
14294 place2 = i2;
14295 else
14296 place = i2;
14299 /* For REG_LABEL_TARGET on a JUMP_P, we prefer to put the note
14300 as a JUMP_LABEL or decrement LABEL_NUSES if it's already
14301 there. */
14302 if (place && JUMP_P (place)
14303 && REG_NOTE_KIND (note) == REG_LABEL_TARGET
14304 && (JUMP_LABEL (place) == NULL
14305 || JUMP_LABEL (place) == XEXP (note, 0)))
14307 rtx label = JUMP_LABEL (place);
14309 if (!label)
14310 JUMP_LABEL (place) = XEXP (note, 0);
14311 else if (LABEL_P (label))
14312 LABEL_NUSES (label)--;
14315 if (place2 && JUMP_P (place2)
14316 && REG_NOTE_KIND (note) == REG_LABEL_TARGET
14317 && (JUMP_LABEL (place2) == NULL
14318 || JUMP_LABEL (place2) == XEXP (note, 0)))
14320 rtx label = JUMP_LABEL (place2);
14322 if (!label)
14323 JUMP_LABEL (place2) = XEXP (note, 0);
14324 else if (LABEL_P (label))
14325 LABEL_NUSES (label)--;
14326 place2 = 0;
14328 break;
14330 case REG_NONNEG:
14331 /* This note says something about the value of a register prior
14332 to the execution of an insn. It is too much trouble to see
14333 if the note is still correct in all situations. It is better
14334 to simply delete it. */
14335 break;
14337 case REG_DEAD:
14338 /* If we replaced the right hand side of FROM_INSN with a
14339 REG_EQUAL note, the original use of the dying register
14340 will not have been combined into I3 and I2. In such cases,
14341 FROM_INSN is guaranteed to be the first of the combined
14342 instructions, so we simply need to search back before
14343 FROM_INSN for the previous use or set of this register,
14344 then alter the notes there appropriately.
14346 If the register is used as an input in I3, it dies there.
14347 Similarly for I2, if it is nonzero and adjacent to I3.
14349 If the register is not used as an input in either I3 or I2
14350 and it is not one of the registers we were supposed to eliminate,
14351 there are two possibilities. We might have a non-adjacent I2
14352 or we might have somehow eliminated an additional register
14353 from a computation. For example, we might have had A & B where
14354 we discover that B will always be zero. In this case we will
14355 eliminate the reference to A.
14357 In both cases, we must search to see if we can find a previous
14358 use of A and put the death note there. */
14360 if (from_insn
14361 && from_insn == i2mod
14362 && !reg_overlap_mentioned_p (XEXP (note, 0), i2mod_new_rhs))
14363 tem_insn = from_insn;
14364 else
14366 if (from_insn
14367 && CALL_P (from_insn)
14368 && find_reg_fusage (from_insn, USE, XEXP (note, 0)))
14369 place = from_insn;
14370 else if (reg_referenced_p (XEXP (note, 0), PATTERN (i3)))
14371 place = i3;
14372 else if (i2 != 0 && next_nonnote_nondebug_insn (i2) == i3
14373 && reg_referenced_p (XEXP (note, 0), PATTERN (i2)))
14374 place = i2;
14375 else if ((rtx_equal_p (XEXP (note, 0), elim_i2)
14376 && !(i2mod
14377 && reg_overlap_mentioned_p (XEXP (note, 0),
14378 i2mod_old_rhs)))
14379 || rtx_equal_p (XEXP (note, 0), elim_i1)
14380 || rtx_equal_p (XEXP (note, 0), elim_i0))
14381 break;
14382 tem_insn = i3;
14383 /* If the new I2 sets the same register that is marked dead
14384 in the note, we do not know where to put the note.
14385 Give up. */
14386 if (i2 != 0 && reg_set_p (XEXP (note, 0), PATTERN (i2)))
14387 break;
14390 if (place == 0)
14392 basic_block bb = this_basic_block;
14394 for (tem_insn = PREV_INSN (tem_insn); place == 0; tem_insn = PREV_INSN (tem_insn))
14396 if (!NONDEBUG_INSN_P (tem_insn))
14398 if (tem_insn == BB_HEAD (bb))
14399 break;
14400 continue;
14403 /* If the register is being set at TEM_INSN, see if that is all
14404 TEM_INSN is doing. If so, delete TEM_INSN. Otherwise, make this
14405 into a REG_UNUSED note instead. Don't delete sets to
14406 global register vars. */
14407 if ((REGNO (XEXP (note, 0)) >= FIRST_PSEUDO_REGISTER
14408 || !global_regs[REGNO (XEXP (note, 0))])
14409 && reg_set_p (XEXP (note, 0), PATTERN (tem_insn)))
14411 rtx set = single_set (tem_insn);
14412 rtx inner_dest = 0;
14413 rtx_insn *cc0_setter = NULL;
14415 if (set != 0)
14416 for (inner_dest = SET_DEST (set);
14417 (GET_CODE (inner_dest) == STRICT_LOW_PART
14418 || GET_CODE (inner_dest) == SUBREG
14419 || GET_CODE (inner_dest) == ZERO_EXTRACT);
14420 inner_dest = XEXP (inner_dest, 0))
14423 /* Verify that it was the set, and not a clobber that
14424 modified the register.
14426 CC0 targets must be careful to maintain setter/user
14427 pairs. If we cannot delete the setter due to side
14428 effects, mark the user with an UNUSED note instead
14429 of deleting it. */
14431 if (set != 0 && ! side_effects_p (SET_SRC (set))
14432 && rtx_equal_p (XEXP (note, 0), inner_dest)
14433 && (!HAVE_cc0
14434 || (! reg_mentioned_p (cc0_rtx, SET_SRC (set))
14435 || ((cc0_setter = prev_cc0_setter (tem_insn)) != NULL
14436 && sets_cc0_p (PATTERN (cc0_setter)) > 0))))
14438 /* Move the notes and links of TEM_INSN elsewhere.
14439 This might delete other dead insns recursively.
14440 First set the pattern to something that won't use
14441 any register. */
14442 rtx old_notes = REG_NOTES (tem_insn);
14444 PATTERN (tem_insn) = pc_rtx;
14445 REG_NOTES (tem_insn) = NULL;
14447 distribute_notes (old_notes, tem_insn, tem_insn, NULL,
14448 NULL_RTX, NULL_RTX, NULL_RTX);
14449 distribute_links (LOG_LINKS (tem_insn));
14451 unsigned int regno = REGNO (XEXP (note, 0));
14452 reg_stat_type *rsp = &reg_stat[regno];
14453 if (rsp->last_set == tem_insn)
14454 record_value_for_reg (XEXP (note, 0), NULL, NULL_RTX);
14456 SET_INSN_DELETED (tem_insn);
14457 if (tem_insn == i2)
14458 i2 = NULL;
14460 /* Delete the setter too. */
14461 if (cc0_setter)
14463 PATTERN (cc0_setter) = pc_rtx;
14464 old_notes = REG_NOTES (cc0_setter);
14465 REG_NOTES (cc0_setter) = NULL;
14467 distribute_notes (old_notes, cc0_setter,
14468 cc0_setter, NULL,
14469 NULL_RTX, NULL_RTX, NULL_RTX);
14470 distribute_links (LOG_LINKS (cc0_setter));
14472 SET_INSN_DELETED (cc0_setter);
14473 if (cc0_setter == i2)
14474 i2 = NULL;
14477 else
14479 PUT_REG_NOTE_KIND (note, REG_UNUSED);
14481 /* If there isn't already a REG_UNUSED note, put one
14482 here. Do not place a REG_DEAD note, even if
14483 the register is also used here; that would not
14484 match the algorithm used in lifetime analysis
14485 and can cause the consistency check in the
14486 scheduler to fail. */
14487 if (! find_regno_note (tem_insn, REG_UNUSED,
14488 REGNO (XEXP (note, 0))))
14489 place = tem_insn;
14490 break;
14493 else if (reg_referenced_p (XEXP (note, 0), PATTERN (tem_insn))
14494 || (CALL_P (tem_insn)
14495 && find_reg_fusage (tem_insn, USE, XEXP (note, 0))))
14497 place = tem_insn;
14499 /* If we are doing a 3->2 combination, and we have a
14500 register which formerly died in i3 and was not used
14501 by i2, which now no longer dies in i3 and is used in
14502 i2 but does not die in i2, and place is between i2
14503 and i3, then we may need to move a link from place to
14504 i2. */
14505 if (i2 && DF_INSN_LUID (place) > DF_INSN_LUID (i2)
14506 && from_insn
14507 && DF_INSN_LUID (from_insn) > DF_INSN_LUID (i2)
14508 && reg_referenced_p (XEXP (note, 0), PATTERN (i2)))
14510 struct insn_link *links = LOG_LINKS (place);
14511 LOG_LINKS (place) = NULL;
14512 distribute_links (links);
14514 break;
14517 if (tem_insn == BB_HEAD (bb))
14518 break;
14523 /* If the register is set or already dead at PLACE, we needn't do
14524 anything with this note if it is still a REG_DEAD note.
14525 We check here if it is set at all, not if it is totally replaced,
14526 which is what `dead_or_set_p' checks, so also check for it being
14527 set partially. */
14529 if (place && REG_NOTE_KIND (note) == REG_DEAD)
14531 unsigned int regno = REGNO (XEXP (note, 0));
14532 reg_stat_type *rsp = &reg_stat[regno];
14534 if (dead_or_set_p (place, XEXP (note, 0))
14535 || reg_bitfield_target_p (XEXP (note, 0), PATTERN (place)))
14537 /* Unless the register previously died in PLACE, clear
14538 last_death. [I no longer understand why this is
14539 being done.] */
14540 if (rsp->last_death != place)
14541 rsp->last_death = 0;
14542 place = 0;
14544 else
14545 rsp->last_death = place;
14547 /* If this is a death note for a hard reg that is occupying
14548 multiple registers, ensure that we are still using all
14549 parts of the object. If we find a piece of the object
14550 that is unused, we must arrange for an appropriate REG_DEAD
14551 note to be added for it. However, we can't just emit a USE
14552 and tag the note to it, since the register might actually
14553 be dead; so we recurse, and the recursive call then finds
14554 the previous insn that used this register. */
14556 if (place && REG_NREGS (XEXP (note, 0)) > 1)
14558 unsigned int endregno = END_REGNO (XEXP (note, 0));
14559 bool all_used = true;
14560 unsigned int i;
14562 for (i = regno; i < endregno; i++)
14563 if ((! refers_to_regno_p (i, PATTERN (place))
14564 && ! find_regno_fusage (place, USE, i))
14565 || dead_or_set_regno_p (place, i))
14567 all_used = false;
14568 break;
14571 if (! all_used)
14573 /* Put only REG_DEAD notes for pieces that are
14574 not already dead or set. */
14576 for (i = regno; i < endregno;
14577 i += hard_regno_nregs (i, reg_raw_mode[i]))
14579 rtx piece = regno_reg_rtx[i];
14580 basic_block bb = this_basic_block;
14582 if (! dead_or_set_p (place, piece)
14583 && ! reg_bitfield_target_p (piece,
14584 PATTERN (place)))
14586 rtx new_note = alloc_reg_note (REG_DEAD, piece,
14587 NULL_RTX);
14589 distribute_notes (new_note, place, place,
14590 NULL, NULL_RTX, NULL_RTX,
14591 NULL_RTX);
14593 else if (! refers_to_regno_p (i, PATTERN (place))
14594 && ! find_regno_fusage (place, USE, i))
14595 for (tem_insn = PREV_INSN (place); ;
14596 tem_insn = PREV_INSN (tem_insn))
14598 if (!NONDEBUG_INSN_P (tem_insn))
14600 if (tem_insn == BB_HEAD (bb))
14601 break;
14602 continue;
14604 if (dead_or_set_p (tem_insn, piece)
14605 || reg_bitfield_target_p (piece,
14606 PATTERN (tem_insn)))
14608 add_reg_note (tem_insn, REG_UNUSED, piece);
14609 break;
14614 place = 0;
14618 break;
14620 default:
14621 /* Any other notes should not be present at this point in the
14622 compilation. */
14623 gcc_unreachable ();
14626 if (place)
14628 XEXP (note, 1) = REG_NOTES (place);
14629 REG_NOTES (place) = note;
14632 if (place2)
14633 add_shallow_copy_of_reg_note (place2, note);
14637 /* Similarly to above, distribute the LOG_LINKS that used to be present on
14638 I3, I2, and I1 to new locations. This is also called to add a link
14639 pointing at I3 when I3's destination is changed. */
14641 static void
14642 distribute_links (struct insn_link *links)
14644 struct insn_link *link, *next_link;
14646 for (link = links; link; link = next_link)
14648 rtx_insn *place = 0;
14649 rtx_insn *insn;
14650 rtx set, reg;
14652 next_link = link->next;
14654 /* If the insn that this link points to is a NOTE, ignore it. */
14655 if (NOTE_P (link->insn))
14656 continue;
14658 set = 0;
14659 rtx pat = PATTERN (link->insn);
14660 if (GET_CODE (pat) == SET)
14661 set = pat;
14662 else if (GET_CODE (pat) == PARALLEL)
14664 int i;
14665 for (i = 0; i < XVECLEN (pat, 0); i++)
14667 set = XVECEXP (pat, 0, i);
14668 if (GET_CODE (set) != SET)
14669 continue;
14671 reg = SET_DEST (set);
14672 while (GET_CODE (reg) == ZERO_EXTRACT
14673 || GET_CODE (reg) == STRICT_LOW_PART
14674 || GET_CODE (reg) == SUBREG)
14675 reg = XEXP (reg, 0);
14677 if (!REG_P (reg))
14678 continue;
14680 if (REGNO (reg) == link->regno)
14681 break;
14683 if (i == XVECLEN (pat, 0))
14684 continue;
14686 else
14687 continue;
14689 reg = SET_DEST (set);
14691 while (GET_CODE (reg) == ZERO_EXTRACT
14692 || GET_CODE (reg) == STRICT_LOW_PART
14693 || GET_CODE (reg) == SUBREG)
14694 reg = XEXP (reg, 0);
14696 /* A LOG_LINK is defined as being placed on the first insn that uses
14697 a register and points to the insn that sets the register. Start
14698 searching at the next insn after the target of the link and stop
14699 when we reach a set of the register or the end of the basic block.
14701 Note that this correctly handles the link that used to point from
14702 I3 to I2. Also note that not much searching is typically done here
14703 since most links don't point very far away. */
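/* The loop below stops at the end of the insn stream or at the first insn
of the next basic block, whichever comes first. */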
14705 for (insn = NEXT_INSN (link->insn);
14706 (insn && (this_basic_block->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
14707 || BB_HEAD (this_basic_block->next_bb) != insn));
14708 insn = NEXT_INSN (insn))
14709 if (DEBUG_INSN_P (insn))
14710 continue;
14711 else if (INSN_P (insn) && reg_overlap_mentioned_p (reg, PATTERN (insn)))
14713 if (reg_referenced_p (reg, PATTERN (insn)))
14714 place = insn;
14715 break;
14717 else if (CALL_P (insn)
14718 && find_reg_fusage (insn, USE, reg))
14720 place = insn;
14721 break;
14723 else if (INSN_P (insn) && reg_set_p (reg, insn))
14724 break;
14726 /* If we found a place to put the link, place it there unless there
14727 is already a link to the same insn as LINK at that point. */
14729 if (place)
14731 struct insn_link *link2;
14733 FOR_EACH_LOG_LINK (link2, place)
14734 if (link2->insn == link->insn && link2->regno == link->regno)
14735 break;
14737 if (link2 == NULL)
14739 link->next = LOG_LINKS (place);
14740 LOG_LINKS (place) = link;
14742 /* Set added_links_insn to the earliest insn we added a
14743 link to. */
14744 if (added_links_insn == 0
14745 || DF_INSN_LUID (added_links_insn) > DF_INSN_LUID (place))
14746 added_links_insn = place;
14752 /* Check for any register or memory mentioned in EQUIV that is not
14753 mentioned in EXPR. This is used to restrict EQUIV to "specializations"
14754 of EXPR where some registers may have been replaced by constants. */
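/* As an illustration (register numbers are hypothetical): with EXPR
(plus (reg 100) (reg 101)), an EQUIV of (plus (reg 100) (const_int 4))
mentions nothing new and yields false, while (plus (reg 102) (const_int 4))
yields true because reg 102 does not appear in EXPR. */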
14756 static bool
14757 unmentioned_reg_p (rtx equiv, rtx expr)
14759 subrtx_iterator::array_type array;
14760 FOR_EACH_SUBRTX (iter, array, equiv, NONCONST)
14762 const_rtx x = *iter;
14763 if ((REG_P (x) || MEM_P (x))
14764 && !reg_mentioned_p (x, expr))
14765 return true;
14767 return false;
14770 DEBUG_FUNCTION void
14771 dump_combine_stats (FILE *file)
14773 fprintf
14774 (file,
14775 ";; Combiner statistics: %d attempts, %d substitutions (%d requiring new space),\n;; %d successes.\n\n",
14776 combine_attempts, combine_merges, combine_extras, combine_successes);
14779 void
14780 dump_combine_total_stats (FILE *file)
14782 fprintf
14783 (file,
14784 "\n;; Combiner totals: %d attempts, %d substitutions (%d requiring new space),\n;; %d successes.\n",
14785 total_attempts, total_merges, total_extras, total_successes);
14788 /* Try combining insns through substitution. */
14789 static unsigned int
14790 rest_of_handle_combine (void)
14792 int rebuild_jump_labels_after_combine;
14794 df_set_flags (DF_LR_RUN_DCE + DF_DEFER_INSN_RESCAN);
14795 df_note_add_problem ();
14796 df_analyze ();
14798 regstat_init_n_sets_and_refs ();
14799 reg_n_sets_max = max_reg_num ();
14801 rebuild_jump_labels_after_combine
14802 = combine_instructions (get_insns (), max_reg_num ());
14804 /* Combining insns may have turned an indirect jump into a
14805 direct jump. Rebuild the JUMP_LABEL fields of jumping
14806 instructions. */
14807 if (rebuild_jump_labels_after_combine)
14809 if (dom_info_available_p (CDI_DOMINATORS))
14810 free_dominance_info (CDI_DOMINATORS);
14811 timevar_push (TV_JUMP);
14812 rebuild_jump_labels (get_insns ());
14813 cleanup_cfg (0);
14814 timevar_pop (TV_JUMP);
14817 regstat_free_n_sets_and_refs ();
14818 return 0;
14821 namespace {
14823 const pass_data pass_data_combine =
14825 RTL_PASS, /* type */
14826 "combine", /* name */
14827 OPTGROUP_NONE, /* optinfo_flags */
14828 TV_COMBINE, /* tv_id */
14829 PROP_cfglayout, /* properties_required */
14830 0, /* properties_provided */
14831 0, /* properties_destroyed */
14832 0, /* todo_flags_start */
14833 TODO_df_finish, /* todo_flags_finish */
14836 class pass_combine : public rtl_opt_pass
14838 public:
14839 pass_combine (gcc::context *ctxt)
14840 : rtl_opt_pass (pass_data_combine, ctxt)
14843 /* opt_pass methods: */
14844 virtual bool gate (function *) { return (optimize > 0); }
14845 virtual unsigned int execute (function *)
14847 return rest_of_handle_combine ();
14850 }; // class pass_combine
14852 } // anon namespace
14854 rtl_opt_pass *
14855 make_pass_combine (gcc::context *ctxt)
14857 return new pass_combine (ctxt);